//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#ifdef EXPENSIVE_CHECKS
#include "llvm/IR/Verifier.h"
#endif
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
                                  cl::desc("Run the SLP vectorization passes"));

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
    ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                       cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
    MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
                           cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned>
    MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden,
                cl::desc("Maximum SLP vectorization factor (0=unlimited)"));

static cl::opt<int>
    MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden,
                   cl::desc("Maximum depth of the lookup for consecutive stores."));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
    ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
                             cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

// The maximum depth that the look-ahead score heuristic will explore.
// The higher this value, the higher the compilation time overhead.
static cl::opt<int> LookAheadMaxDepth(
    "slp-max-look-ahead-depth", cl::init(2), cl::Hidden,
    cl::desc("The maximum look-ahead depth for operand reordering scores"));

// The maximum depth that the look-ahead score heuristic will explore
// when it is probing among candidates for vectorization tree roots.
// The higher this value, the higher the compilation time overhead, but unlike
// the similar limit for operand ordering this one is less frequently used,
// hence the impact of a higher value is less noticeable.
static cl::opt<int> RootLookAheadMaxDepth(
    "slp-max-root-look-ahead-depth", cl::init(2), cl::Hidden,
    cl::desc("The maximum look-ahead depth for searching best rooting option"));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_fp128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns True if the value is a constant (but not globals/constant
/// expressions).
static bool isConstant(Value *V) {
  return isa<Constant>(V) && !isa<ConstantExpr>(V) && !isa<GlobalValue>(V);
}

/// Checks if \p V is one of vector-like instructions, i.e. undef,
/// insertelement/extractelement with constant indices for fixed vector type or
/// extractvalue instruction.
static bool isVectorLikeInstWithConstOps(Value *V) {
  if (!isa<InsertElementInst, ExtractElementInst>(V) &&
      !isa<ExtractValueInst, UndefValue>(V))
    return false;
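  // UndefValue is not an instruction, and extractvalue has no index operand to
  // verify, so both qualify unconditionally.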
  auto *I = dyn_cast<Instruction>(V);
  if (!I || isa<ExtractValueInst>(I))
    return true;
  if (!isa<FixedVectorType>(I->getOperand(0)->getType()))
    return false;
  if (isa<ExtractElementInst>(I))
    return isConstant(I->getOperand(1));
  assert(isa<InsertElementInst>(V) && "Expected only insertelement.");
  return isConstant(I->getOperand(2));
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  if (all_of(VL, isVectorLikeInstWithConstOps))
    return true;

  BasicBlock *BB = I0->getParent();
  for (int I = 1, E = VL.size(); I < E; I++) {
    auto *II = dyn_cast<Instruction>(VL[I]);
    if (!II)
      return false;

    if (BB != II->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants (but not
/// globals/constant expressions).
static bool allConstant(ArrayRef<Value *> VL) {
  // Constant expressions and globals can't be vectorized like normal
  // integer/FP constants.
  return all_of(VL, isConstant);
}

/// \returns True if all of the values in \p VL are identical or some of them
/// are UndefValue.
static bool isSplat(ArrayRef<Value *> VL) {
  Value *FirstNonUndef = nullptr;
  for (Value *V : VL) {
    if (isa<UndefValue>(V))
      continue;
    if (!FirstNonUndef) {
      FirstNonUndef = V;
      continue;
    }
    if (V != FirstNonUndef)
      return false;
  }
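  // A list that is entirely undef is not treated as a splat.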
  return FirstNonUndef != nullptr;
}

/// \returns True if \p I is commutative, handles CmpInst and BinaryOperator.
static bool isCommutative(Instruction *I) {
  if (auto *Cmp = dyn_cast<CmpInst>(I))
    return Cmp->isCommutative();
  if (auto *BO = dyn_cast<BinaryOperator>(I))
    return BO->isCommutative();
  // TODO: This should check for generic Instruction::isCommutative(), but
  // we need to confirm that the caller code correctly handles Intrinsics
  // for example (does not have 2 operands).
  return false;
}

/// Checks if the given value is actually an undefined constant vector.
static bool isUndefVector(const Value *V) {
  if (isa<UndefValue>(V))
    return true;
  auto *C = dyn_cast<Constant>(V);
  if (!C)
    return false;
  if (!C->containsUndefOrPoisonElement())
    return false;
  auto *VecTy = dyn_cast<FixedVectorType>(C->getType());
  if (!VecTy)
    return false;
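  // A constant vector counts as undef only if every element is undef/poison.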
  for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I) {
    if (Constant *Elem = C->getAggregateElement(I))
      if (!isa<UndefValue>(Elem))
        return false;
  }
  return true;
}

/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> poison, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> poison, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul.
/// Mask will return the Shuffle Mask equivalent to the extracted elements.
/// TODO: Can we split off and reuse the shuffle mask detection from
/// TargetTransformInfo::getInstructionThroughput?
static Optional<TargetTransformInfo::ShuffleKind>
isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
  const auto *It =
      find_if(VL, [](Value *V) { return isa<ExtractElementInst>(V); });
  if (It == VL.end())
    return None;
  auto *EI0 = cast<ExtractElementInst>(*It);
  if (isa<ScalableVectorType>(EI0->getVectorOperandType()))
    return None;
  unsigned Size =
      cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, Select, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  Mask.assign(VL.size(), UndefMaskElem);
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    // Undef can be represented as an undef element in a vector.
    if (isa<UndefValue>(VL[I]))
      continue;
    auto *EI = cast<ExtractElementInst>(VL[I]);
    if (isa<ScalableVectorType>(EI->getVectorOperandType()))
      return None;
    auto *Vec = EI->getVectorOperand();
    // We can extractelement from undef or poison vector.
    if (isUndefVector(Vec))
      continue;
    // All vector operands must have the same number of vector elements.
    if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
      return None;
    if (isa<UndefValue>(EI->getIndexOperand()))
      continue;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size))
      continue;
    unsigned IntIdx = Idx->getValue().getZExtValue();
    Mask[I] = IntIdx;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
    if (!Vec1 || Vec1 == Vec) {
      Vec1 = Vec;
    } else if (!Vec2 || Vec2 == Vec) {
      Vec2 = Vec;
      Mask[I] += Size;
    } else {
      return None;
    }
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector, otherwise
  // we have permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
  bool isAltShuffle() const { return AltOp != MainOp; }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace

/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}

/// \returns true if \p Opcode is allowed as part of the main/alternate
/// instruction for SLP vectorization.
///
/// Example of an unsupported opcode is SDIV that can potentially cause UB if
/// the "shuffled out" lane would result in division by zero.
static bool isValidForAlternation(unsigned Opcode) {
  if (Instruction::isIntDivRem(Opcode))
    return false;

  return true;
}

static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex = 0);

/// Checks if the provided operands of 2 cmp instructions are compatible, i.e.
/// compatible instructions or constants, or just some other regular values.
static bool areCompatibleCmpOps(Value *BaseOp0, Value *BaseOp1, Value *Op0,
                                Value *Op1) {
  return (isConstant(BaseOp0) && isConstant(Op0)) ||
         (isConstant(BaseOp1) && isConstant(Op1)) ||
         (!isa<Instruction>(BaseOp0) && !isa<Instruction>(Op0) &&
          !isa<Instruction>(BaseOp1) && !isa<Instruction>(Op1)) ||
         getSameOpcode({BaseOp0, Op0}).getOpcode() ||
         getSameOpcode({BaseOp1, Op1}).getOpcode();
}

/// \returns analysis of the Instructions in \p VL described in
/// InstructionsState, the Opcode that we suppose the whole list
/// could be vectorized even if its structure is diverse.
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  bool IsCmpOp = isa<CmpInst>(VL[BaseIndex]);
  CmpInst::Predicate BasePred =
      IsCmpOp ? cast<CmpInst>(VL[BaseIndex])->getPredicate()
              : CmpInst::BAD_ICMP_PREDICATE;
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
  // TODO - generalize to support all operators (types, calls etc.).
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
    if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
      if (InstOpcode == Opcode || InstOpcode == AltOpcode)
        continue;
      if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
          isValidForAlternation(Opcode)) {
        AltOpcode = InstOpcode;
        AltIndex = Cnt;
        continue;
      }
    } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
      Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
      Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        if (InstOpcode == Opcode || InstOpcode == AltOpcode)
          continue;
        if (Opcode == AltOpcode) {
          assert(isValidForAlternation(Opcode) &&
                 isValidForAlternation(InstOpcode) &&
                 "Cast isn't safe for alternation, logic needs to be updated!");
          AltOpcode = InstOpcode;
          AltIndex = Cnt;
          continue;
        }
      }
    } else if (IsCmpOp && isa<CmpInst>(VL[Cnt])) {
      auto *BaseInst = cast<Instruction>(VL[BaseIndex]);
      auto *Inst = cast<Instruction>(VL[Cnt]);
      Type *Ty0 = BaseInst->getOperand(0)->getType();
      Type *Ty1 = Inst->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        Value *BaseOp0 = BaseInst->getOperand(0);
        Value *BaseOp1 = BaseInst->getOperand(1);
        Value *Op0 = Inst->getOperand(0);
        Value *Op1 = Inst->getOperand(1);
        CmpInst::Predicate CurrentPred =
            cast<CmpInst>(VL[Cnt])->getPredicate();
        CmpInst::Predicate SwappedCurrentPred =
            CmpInst::getSwappedPredicate(CurrentPred);
        // Check for compatible operands. If the corresponding operands are not
        // compatible - need to perform alternate vectorization.
        if (InstOpcode == Opcode) {
          if (BasePred == CurrentPred &&
              areCompatibleCmpOps(BaseOp0, BaseOp1, Op0, Op1))
            continue;
          if (BasePred == SwappedCurrentPred &&
              areCompatibleCmpOps(BaseOp0, BaseOp1, Op1, Op0))
            continue;
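          // For a 2-element list, a matching (possibly swapped) predicate pair
          // is enough even if the operands themselves are not compatible.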
          if (E == 2 &&
              (BasePred == CurrentPred || BasePred == SwappedCurrentPred))
            continue;
          auto *AltInst = cast<CmpInst>(VL[AltIndex]);
          CmpInst::Predicate AltPred = AltInst->getPredicate();
          Value *AltOp0 = AltInst->getOperand(0);
          Value *AltOp1 = AltInst->getOperand(1);
          // Check if operands are compatible with alternate operands.
          if (AltPred == CurrentPred &&
              areCompatibleCmpOps(AltOp0, AltOp1, Op0, Op1))
            continue;
          if (AltPred == SwappedCurrentPred &&
              areCompatibleCmpOps(AltOp0, AltOp1, Op1, Op0))
            continue;
        }
        if (BaseIndex == AltIndex && BasePred != CurrentPred) {
          assert(isValidForAlternation(Opcode) &&
                 isValidForAlternation(InstOpcode) &&
                 "Cast isn't safe for alternation, logic needs to be updated!");
          AltIndex = Cnt;
          continue;
        }
        auto *AltInst = cast<CmpInst>(VL[AltIndex]);
        CmpInst::Predicate AltPred = AltInst->getPredicate();
        if (BasePred == CurrentPred || BasePred == SwappedCurrentPred ||
            AltPred == CurrentPred || AltPred == SwappedCurrentPred)
          continue;
      }
    } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
      continue;
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }

  return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
                           cast<Instruction>(VL[AltIndex]));
}

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static Optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return None;
    return CI->getZExtValue();
  }
  ExtractValueInst *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return None;
  return *EI->idx_begin();
}

/// \returns True if in-tree use also needs extract. This refers to
/// possible scalar operand in vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
      if (isVectorIntrinsicWithScalarOpAtArg(ID, i))
        return (CI->getArgOperand(i) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

/// Shuffles \p Mask in accordance with the given \p SubMask.
static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask) {
  if (SubMask.empty())
    return;
  if (Mask.empty()) {
    Mask.append(SubMask.begin(), SubMask.end());
    return;
  }
  SmallVector<int> NewMask(SubMask.size(), UndefMaskElem);
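  // Compose the two masks: position I of the result selects Mask[SubMask[I]];
  // indices that fall outside the smaller mask (or are undef) remain undef.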
  int TermValue = std::min(Mask.size(), SubMask.size());
  for (int I = 0, E = SubMask.size(); I < E; ++I) {
    if (SubMask[I] >= TermValue || SubMask[I] == UndefMaskElem ||
        Mask[SubMask[I]] >= TermValue)
      continue;
    NewMask[I] = Mask[SubMask[I]];
  }
  Mask.swap(NewMask);
}

/// Order may have elements assigned a special value (size) which is out of
/// bounds. Such indices only appear in places which correspond to undef values
/// (see canReuseExtract for details) and are used in order to avoid undef
/// values having an effect on operand ordering.
/// The first loop below simply finds all unused indices and then the next loop
/// nest assigns these indices to the positions of the undef values.
/// As an example, Order below has two undef positions and they get assigned
/// the values 3 and 7 respectively:
/// before:  6 9 5 4 9 2 1 0
/// after:   6 3 5 4 7 2 1 0
static void fixupOrderingIndices(SmallVectorImpl<unsigned> &Order) {
  const unsigned Sz = Order.size();
  SmallBitVector UnusedIndices(Sz, /*t=*/true);
  SmallBitVector MaskedIndices(Sz);
  for (unsigned I = 0; I < Sz; ++I) {
    if (Order[I] < Sz)
      UnusedIndices.reset(Order[I]);
    else
      MaskedIndices.set(I);
  }
  if (MaskedIndices.none())
    return;
  assert(UnusedIndices.count() == MaskedIndices.count() &&
         "Non-synced masked/available indices.");
  int Idx = UnusedIndices.find_first();
  int MIdx = MaskedIndices.find_first();
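  // Walk the unused and masked index lists in parallel, assigning each
  // out-of-bounds position the next index that is not used elsewhere.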
  while (MIdx >= 0) {
    assert(Idx >= 0 && "Indices must be synced.");
    Order[MIdx] = Idx;
    Idx = UnusedIndices.find_next(Idx);
    MIdx = MaskedIndices.find_next(MIdx);
  }
}

namespace llvm {

static void inversePermutation(ArrayRef<unsigned> Indices,
                               SmallVectorImpl<int> &Mask) {
  Mask.clear();
  const unsigned E = Indices.size();
  Mask.resize(E, UndefMaskElem);
  for (unsigned I = 0; I < E; ++I)
    Mask[Indices[I]] = I;
}

/// \returns inserting index of InsertElement or InsertValue instruction,
/// using Offset as base offset for index.
static Optional<unsigned> getInsertIndex(const Value *InsertInst,
                                         unsigned Offset = 0) {
  int Index = Offset;
  if (const auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
    if (const auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2))) {
      auto *VT = cast<FixedVectorType>(IE->getType());
      if (CI->getValue().uge(VT->getNumElements()))
        return None;
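      // Scale the base offset by the vector width, then add the constant lane
      // index of this insertelement.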
      Index *= VT->getNumElements();
      Index += CI->getZExtValue();
      return Index;
    }
    return None;
  }

  const auto *IV = cast<InsertValueInst>(InsertInst);
  Type *CurrentType = IV->getType();
  for (unsigned I : IV->indices()) {
    if (const auto *ST = dyn_cast<StructType>(CurrentType)) {
      Index *= ST->getNumElements();
      CurrentType = ST->getElementType(I);
    } else if (const auto *AT = dyn_cast<ArrayType>(CurrentType)) {
      Index *= AT->getNumElements();
      CurrentType = AT->getElementType();
    } else {
      return None;
    }
    Index += I;
  }
  return Index;
}

/// Reorders the list of scalars in accordance with the given \p Mask.
static void reorderScalars(SmallVectorImpl<Value *> &Scalars,
                           ArrayRef<int> Mask) {
  assert(!Mask.empty() && "Expected non-empty mask.");
  SmallVector<Value *> Prev(Scalars.size(),
                            UndefValue::get(Scalars.front()->getType()));
  Prev.swap(Scalars);
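  // Scatter the old scalars into their new positions; positions masked as
  // undef keep the undef placeholder.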
  for (unsigned I = 0, E = Prev.size(); I < E; ++I)
    if (Mask[I] != UndefMaskElem)
      Scalars[Mask[I]] = Prev[I];
}

/// Checks if the provided value does not require scheduling. It does not
/// require scheduling if this is not an instruction or it is an instruction
/// that does not read/write memory and all operands are either not instructions
/// or phi nodes or instructions from different blocks.
static bool areAllOperandsNonInsts(Value *V) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return true;
  return !mayHaveNonDefUseDependency(*I) &&
         all_of(I->operands(), [I](Value *V) {
           auto *IO = dyn_cast<Instruction>(V);
           if (!IO)
             return true;
           return isa<PHINode>(IO) || IO->getParent() != I->getParent();
         });
}

/// Checks if the provided value does not require scheduling. It does not
/// require scheduling if this is not an instruction or it is an instruction
/// that does not read/write memory and all users are phi nodes or instructions
/// from different blocks.
static bool isUsedOutsideBlock(Value *V) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return true;
  // Limits the number of uses to save compile time.
  constexpr int UsesLimit = 8;
  return !I->mayReadOrWriteMemory() && !I->hasNUsesOrMore(UsesLimit) &&
         all_of(I->users(), [I](User *U) {
           auto *IU = dyn_cast<Instruction>(U);
           if (!IU)
             return true;
           return IU->getParent() != I->getParent() || isa<PHINode>(IU);
         });
}

/// Checks if the specified value does not require scheduling. It does not
/// require scheduling if all operands and all users do not need to be scheduled
/// in the current basic block.
static bool doesNotNeedToBeScheduled(Value *V) {
  return areAllOperandsNonInsts(V) && isUsedOutsideBlock(V);
}

/// Checks if the specified array of instructions does not require scheduling.
/// This is the case if either all instructions have operands that do not
/// require scheduling, or all their users do not require scheduling (they are
/// phis or live in other basic blocks).
static bool doesNotNeedToSchedule(ArrayRef<Value *> VL) {
  return !VL.empty() &&
         (all_of(VL, isUsedOutsideBlock) || all_of(VL, areAllOperandsNonInsts));
}

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  using OrdersType = SmallVector<unsigned, 4>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : BatchAA(*Aa), F(Func), SE(Se), TTI(Tti), TLI(TLi), LI(Li),
        DT(Dt), AC(AC), DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize =
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize();

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractvalue instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  InstructionCost getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 const SmallDenseSet<Value *> &UserIgnoreLst);

  /// Construct a vectorizable tree that starts at \p Roots.
  void buildTree(ArrayRef<Value *> Roots);

  /// Builds external uses of the vectorized scalars, i.e. the list of
  /// vectorized scalars to be extracted, their lanes and their scalar users.
  /// \p ExternallyUsedValues contains an additional list of external uses to
  /// handle vectorization of reductions.
  void
  buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {});

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
    InstrElementSize.clear();
    UserIgnoreList = nullptr;
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// Checks if the specified gather tree entry \p TE can be represented as a
  /// shuffled vector entry + (possibly) permutation with other gathers. It
  /// implements the checks only for possibly ordered scalars (Loads,
  /// ExtractElement, ExtractValue), which can be part of the graph.
  Optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE);

  /// Sort loads into increasing pointer offsets to allow greater clustering.
  Optional<OrdersType> findPartiallyOrderedLoads(const TreeEntry &TE);

  /// Gets reordering data for the given tree entry. If the entry is vectorized
  /// - just return ReorderIndices, otherwise check if the scalars can be
  /// reordered and return the most optimal order.
  /// \param TopToBottom If true, include the order of vectorized stores and
  /// insertelement nodes, otherwise skip them.
  Optional<OrdersType> getReorderingData(const TreeEntry &TE, bool TopToBottom);

  /// Reorders the current graph to the most profitable order starting from the
  /// root node to the leaf nodes. The best order is chosen only from the nodes
  /// of the same size (vectorization factor). Smaller nodes are considered
  /// parts of a subgraph with a smaller VF and are reordered independently. We
  /// can do this because we still need to extend smaller nodes to the wider VF
  /// and we can merge reordering shuffles with the widening shuffles.
  void reorderTopToBottom();

  /// Reorders the current graph to the most profitable order starting from
  /// the leaves to the root. It allows rotating small subgraphs and reducing
  /// the number of reshuffles if the leaf nodes use the same order. In this
  /// case we can merge the orders and just shuffle the user node instead of
  /// shuffling its operands. Plus, even if the leaf nodes have different
  /// orders, it allows sinking reordering in the graph closer to the root node
  /// and merging it later during analysis.
  void reorderBottomToTop(bool IgnoreReorder = false);

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  unsigned getMinVF(unsigned Sz) const {
    return std::max(2U, getMinVecRegSize() / Sz);
  }

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
    unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
        MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
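    // A value of 0 (from either the option or the target) means "no limit".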
    return MaxVF ? MaxVF : UINT_MAX;
  }

  /// Check if homogeneous aggregate is isomorphic to some VectorType.
  /// Accepts homogeneous multidimensional aggregate of scalars/vectors like
  /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
  /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable(bool ForReduction = false) const;

  /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  ///       may not be necessary.
  bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;

  /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  ///       may not be necessary.
  bool isLoadCombineCandidate() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }

  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
  };

  /// A helper class used for scoring candidates for two consecutive lanes.
  class LookAheadHeuristics {
    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;
    int NumLanes; // Total number of lanes (aka vectorization factor).
    int MaxLevel; // The maximum recursion depth for accumulating score.

  public:
    LookAheadHeuristics(const DataLayout &DL, ScalarEvolution &SE,
                        const BoUpSLP &R, int NumLanes, int MaxLevel)
        : DL(DL), SE(SE), R(R), NumLanes(NumLanes), MaxLevel(MaxLevel) {}

    // The hard-coded scores listed here are not very important, though they
    // should be higher for better matches to improve the resulting cost. When
    // computing the scores of matching one sub-tree with another, we are
    // basically counting the number of values that are matching. So even if
    // all scores are set to 1, we would still get a decent matching result.
    // However, sometimes we have to break ties. For example we may have to
    // choose between matching loads vs matching opcodes. This is what these
    // scores are helping us with: they provide the order of preference. Also,
    // this is important if the scalar is externally used or used in another
    // tree entry node in a different lane.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 4;
    /// The same load multiple times. This should have a better score than
    /// `ScoreSplat` because on x86, for a 2-lane vector, we can represent it
    /// with `movddup (%reg), xmm0`, which has a throughput of 0.5, versus 0.5
    /// for a vector load plus 1.0 for a broadcast.
    static const int ScoreSplatLoads = 3;
    /// Loads from reversed memory addresses, e.g. load(A[i+1]), load(A[i]).
    static const int ScoreReversedLoads = 3;
    /// ExtractElementInst from same vector and consecutive indexes.
    static const int ScoreConsecutiveExtracts = 4;
    /// ExtractElementInst from same vector and reversed indices.
    static const int ScoreReversedExtracts = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g, add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// Score if all users are vectorized.
    static const int ScoreAllUserVectorized = 1;

    /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
    /// \p U1 and \p U2 are the users of \p V1 and \p V2.
    /// Also, checks if \p V1 and \p V2 are compatible with instructions in \p
    /// MainAltOps.
    int getShallowScore(Value *V1, Value *V2, Instruction *U1, Instruction *U2,
                        ArrayRef<Value *> MainAltOps) const {
      if (V1 == V2) {
        if (isa<LoadInst>(V1)) {
          // Returns true if the users of V1 and V2 won't need to be extracted.
          auto AllUsersAreInternal = [U1, U2, this](Value *V1, Value *V2) {
            // Bail out if we have too many uses to save compilation time.
            static constexpr unsigned Limit = 8;
            if (V1->hasNUsesOrMore(Limit) || V2->hasNUsesOrMore(Limit))
              return false;

            auto AllUsersVectorized = [U1, U2, this](Value *V) {
              return llvm::all_of(V->users(), [U1, U2, this](Value *U) {
                return U == U1 || U == U2 || R.getTreeEntry(U) != nullptr;
              });
            };
            return AllUsersVectorized(V1) && AllUsersVectorized(V2);
          };
          // A broadcast of a load can be cheaper on some targets.
          if (R.TTI->isLegalBroadcastLoad(V1->getType(),
                                          ElementCount::getFixed(NumLanes)) &&
              ((int)V1->getNumUses() == NumLanes ||
               AllUsersAreInternal(V1, V2)))
            return LookAheadHeuristics::ScoreSplatLoads;
        }
        return LookAheadHeuristics::ScoreSplat;
      }

      auto *LI1 = dyn_cast<LoadInst>(V1);
      auto *LI2 = dyn_cast<LoadInst>(V2);
      if (LI1 && LI2) {
        if (LI1->getParent() != LI2->getParent())
          return LookAheadHeuristics::ScoreFail;

        Optional<int> Dist = getPointersDiff(
            LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
            LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
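        // Fail if the pointer difference is unknown or the two loads hit the
        // same address.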
        if (!Dist || *Dist == 0)
          return LookAheadHeuristics::ScoreFail;
        // The distance is too large - still may be profitable to use masked
        // loads/gathers.
        if (std::abs(*Dist) > NumLanes / 2)
          return LookAheadHeuristics::ScoreAltOpcodes;
        // This still will detect consecutive loads, but we might have "holes"
        // in some cases. It is ok for non-power-2 vectorization and may produce
        // better results. It should not affect current vectorization.
        return (*Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveLoads
                           : LookAheadHeuristics::ScoreReversedLoads;
      }

      auto *C1 = dyn_cast<Constant>(V1);
      auto *C2 = dyn_cast<Constant>(V2);
      if (C1 && C2)
        return LookAheadHeuristics::ScoreConstants;

      // Extracts from consecutive indexes of the same vector score better, as
      // the extracts could be optimized away.
      Value *EV1;
      ConstantInt *Ex1Idx;
      if (match(V1, m_ExtractElt(m_Value(EV1), m_ConstantInt(Ex1Idx)))) {
        // Undefs are always profitable for extractelements.
        if (isa<UndefValue>(V2))
          return LookAheadHeuristics::ScoreConsecutiveExtracts;
        Value *EV2 = nullptr;
        ConstantInt *Ex2Idx = nullptr;
        if (match(V2,
                  m_ExtractElt(m_Value(EV2), m_CombineOr(m_ConstantInt(Ex2Idx),
                                                         m_Undef())))) {
          // Undefs are always profitable for extractelements.
          if (!Ex2Idx)
            return LookAheadHeuristics::ScoreConsecutiveExtracts;
          if (isUndefVector(EV2) && EV2->getType() == EV1->getType())
            return LookAheadHeuristics::ScoreConsecutiveExtracts;
          if (EV2 == EV1) {
            int Idx1 = Ex1Idx->getZExtValue();
            int Idx2 = Ex2Idx->getZExtValue();
            int Dist = Idx2 - Idx1;
            // The distance is too large - still may be profitable to use
            // shuffles.
            if (std::abs(Dist) == 0)
              return LookAheadHeuristics::ScoreSplat;
            if (std::abs(Dist) > NumLanes / 2)
              return LookAheadHeuristics::ScoreSameOpcode;
            return (Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveExtracts
                              : LookAheadHeuristics::ScoreReversedExtracts;
          }
          return LookAheadHeuristics::ScoreAltOpcodes;
        }
        return LookAheadHeuristics::ScoreFail;
      }

      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (I1 && I2) {
        if (I1->getParent() != I2->getParent())
          return LookAheadHeuristics::ScoreFail;
        SmallVector<Value *, 4> Ops(MainAltOps.begin(), MainAltOps.end());
        Ops.push_back(I1);
        Ops.push_back(I2);
        InstructionsState S = getSameOpcode(Ops);
        // Note: Only consider instructions with <= 2 operands to avoid
        // complexity explosion.
        if (S.getOpcode() &&
            (S.MainOp->getNumOperands() <= 2 || !MainAltOps.empty() ||
             !S.isAltShuffle()) &&
            all_of(Ops, [&S](Value *V) {
              return cast<Instruction>(V)->getNumOperands() ==
                     S.MainOp->getNumOperands();
            }))
          return S.isAltShuffle() ? LookAheadHeuristics::ScoreAltOpcodes
                                  : LookAheadHeuristics::ScoreSameOpcode;
      }

      if (isa<UndefValue>(V2))
        return LookAheadHeuristics::ScoreUndef;

      return LookAheadHeuristics::ScoreFail;
    }

    /// Go through the operands of \p LHS and \p RHS recursively until
    /// MaxLevel, and return the cumulative score. \p U1 and \p U2 are
    /// the users of \p LHS and \p RHS (that is \p LHS and \p RHS are operands
    /// of \p U1 and \p U2), except at the beginning of the recursion where
    /// these are set to nullptr.
    ///
    /// For example:
    /// \verbatim
    ///  A[0]  B[0]  A[1]  B[1]  C[0] D[0]  B[1] A[1]
    ///     \ /         \ /         \ /        \ /
    ///      +           +           +          +
    ///     G1          G2          G3         G4
    /// \endverbatim
    /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
    /// each level recursively, accumulating the score. It starts from matching
    /// the additions at level 0, then moves on to the loads (level 1). The
    /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
    /// {B[0],B[1]} match with LookAheadHeuristics::ScoreConsecutiveLoads, while
    /// {A[0],C[0]} has a score of LookAheadHeuristics::ScoreFail.
    /// Please note that the order of the operands does not matter, as we
    /// evaluate the score of all profitable combinations of operands. In
    /// other words the score of G1 and G4 is the same as G1 and G2. This
    /// heuristic is based on ideas described in:
    ///   Look-ahead SLP: Auto-vectorization in the presence of commutative
    ///   operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
    ///   Luís F. W. Góes
    int getScoreAtLevelRec(Value *LHS, Value *RHS, Instruction *U1,
                           Instruction *U2, int CurrLevel,
                           ArrayRef<Value *> MainAltOps) const {

      // Get the shallow score of V1 and V2.
      int ShallowScoreAtThisLevel =
          getShallowScore(LHS, RHS, U1, U2, MainAltOps);

      // If reached MaxLevel,
      //  or if V1 and V2 are not instructions,
      //  or if they are SPLAT,
      //  or if they are not consecutive,
      //  or if profitable to vectorize loads or extractelements, early return
      //  the current cost.
      auto *I1 = dyn_cast<Instruction>(LHS);
      auto *I2 = dyn_cast<Instruction>(RHS);
      if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
          ShallowScoreAtThisLevel == LookAheadHeuristics::ScoreFail ||
          (((isa<LoadInst>(I1) && isa<LoadInst>(I2)) ||
            (I1->getNumOperands() > 2 && I2->getNumOperands() > 2) ||
            (isa<ExtractElementInst>(I1) && isa<ExtractElementInst>(I2))) &&
           ShallowScoreAtThisLevel))
        return ShallowScoreAtThisLevel;
      assert(I1 && I2 && "Should have early exited.");

      // Contains the I2 operand indexes that got matched with I1 operands.
      SmallSet<unsigned, 4> Op2Used;

      // Recursion towards the operands of I1 and I2. We are trying all possible
      // operand pairs, and keeping track of the best score.
      for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
           OpIdx1 != NumOperands1; ++OpIdx1) {
        // Try to pair the operand at OpIdx1 of I1 with the best operand of I2.
        int MaxTmpScore = 0;
        unsigned MaxOpIdx2 = 0;
        bool FoundBest = false;
        // If I2 is commutative try all combinations.
        unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
        unsigned ToIdx = isCommutative(I2)
                             ? I2->getNumOperands()
                             : std::min(I2->getNumOperands(), OpIdx1 + 1);
        assert(FromIdx <= ToIdx && "Bad index");
        for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
          // Skip operands already paired with OpIdx1.
          if (Op2Used.count(OpIdx2))
            continue;
          // Recursively calculate the cost at each level.
          int TmpScore =
              getScoreAtLevelRec(I1->getOperand(OpIdx1), I2->getOperand(OpIdx2),
                                 I1, I2, CurrLevel + 1, None);
          // Look for the best score.
          if (TmpScore > LookAheadHeuristics::ScoreFail &&
              TmpScore > MaxTmpScore) {
            MaxTmpScore = TmpScore;
            MaxOpIdx2 = OpIdx2;
            FoundBest = true;
          }
        }
        if (FoundBest) {
          // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
          Op2Used.insert(MaxOpIdx2);
          ShallowScoreAtThisLevel += MaxTmpScore;
        }
      }
      return ShallowScoreAtThisLevel;
    }
  };
1321 /// A helper data structure to hold the operands of a vector of instructions.
1322 /// This supports a fixed vector length for all operand vectors.
1323 class VLOperands {
1324 /// For each operand we need (i) the value, and (ii) the opcode that it
1325 /// would be attached to if the expression was in a left-linearized form.
1326 /// This is required to avoid illegal operand reordering.
1327 /// For example:
1328 /// \verbatim
1329 /// 0 Op1
1330 /// |/
1331 /// Op1 Op2 Linearized + Op2
1332 /// \ / ----------> |/
1333 /// - -
1334 ///
1335 /// Op1 - Op2 (0 + Op1) - Op2
1336 /// \endverbatim
1337 ///
1338 /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
1339 ///
1340 /// Another way to think of this is to track all the operations across the
1341 /// path from the operand all the way to the root of the tree and to
1342 /// calculate the operation that corresponds to this path. For example, the
1343 /// path from Op2 to the root crosses the RHS of the '-', therefore the
1344 /// corresponding operation is a '-' (which matches the one in the
1345 /// linearized tree, as shown above).
1346 ///
1347 /// For lack of a better term, we refer to this operation as Accumulated
1348 /// Path Operation (APO).
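///
/// As an illustrative, hypothetical sketch (names are made up): for the
/// two-lane bundle {add %a, %b ; sub %c, %d}, the left operands %a and %c
/// are attached to a non-inverse operation in the left-linearized form, so
/// their APO is 'false', while the right operand %b gets APO 'false' (add)
/// and %d gets APO 'true' (sub). Only operands with equal APOs may be
/// exchanged during reordering.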
1349 struct OperandData {
1350 OperandData() = default;
1351 OperandData(Value *V, bool APO, bool IsUsed)
1352 : V(V), APO(APO), IsUsed(IsUsed) {}
1353 /// The operand value.
1354 Value *V = nullptr;
1355 /// TreeEntries only allow a single opcode, or an alternate sequence of
1356 /// them (e.g., +, -). Therefore, we can safely use a boolean value for the
1357 /// APO. It is set to 'true' if 'V' is attached to an inverse operation
1358 /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise
1359 /// (e.g., Add/Mul)
1360 bool APO = false;
1361 /// Helper data for the reordering function.
1362 bool IsUsed = false;
1363 };
1364
1365 /// During operand reordering, we are trying to select the operand at lane
1366 /// that matches best with the operand at the neighboring lane. Our
1367 /// selection is based on the type of value we are looking for. For example,
1368 /// if the neighboring lane has a load, we need to look for a load that is
1369 /// accessing a consecutive address. These strategies are summarized in the
1370 /// 'ReorderingMode' enumerator.
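///
/// As an informal illustration of how a mode is picked (mirroring the
/// selection logic in reorder() below): a LoadInst in the first lane selects
/// Load; another Instruction selects Splat if it repeats across the lanes,
/// otherwise Opcode; a Constant selects Constant; an Argument falls back to
/// Splat as the best hope.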
1371 enum class ReorderingMode {
1372 Load, ///< Matching loads to consecutive memory addresses
1373 Opcode, ///< Matching instructions based on opcode (same or alternate)
1374 Constant, ///< Matching constants
1375 Splat, ///< Matching the same instruction multiple times (broadcast)
1376 Failed, ///< We failed to create a vectorizable group
1377 };
1378
1379 using OperandDataVec = SmallVector<OperandData, 2>;
1380
1381 /// A vector of operand vectors.
1382 SmallVector<OperandDataVec, 4> OpsVec;
1383
1384 const DataLayout &DL;
1385 ScalarEvolution &SE;
1386 const BoUpSLP &R;
1387
1388 /// \returns the operand data at \p OpIdx and \p Lane.
1389 OperandData &getData(unsigned OpIdx, unsigned Lane) {
1390 return OpsVec[OpIdx][Lane];
1391 }
1392
1393 /// \returns the operand data at \p OpIdx and \p Lane. Const version.
1394 const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
1395 return OpsVec[OpIdx][Lane];
1396 }
1397
1398 /// Clears the used flag for all entries.
1399 void clearUsed() {
1400 for (unsigned OpIdx = 0, NumOperands = getNumOperands();
1401 OpIdx != NumOperands; ++OpIdx)
1402 for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
1403 ++Lane)
1404 OpsVec[OpIdx][Lane].IsUsed = false;
1405 }
1406
1407 /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
1408 void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
1409 std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
1410 }
1411
1412 /// \param Lane lane of the operands under analysis.
1413 /// \param OpIdx operand index in lane \p Lane for which we're looking for
1414 /// the best candidate.
1415 /// \param Idx operand index of the current candidate value.
1416 /// \returns The additional score due to possible broadcasting of the
1417 /// elements in the lane. It is more profitable to have power-of-2 unique
1418 /// elements in the lane, as it will be vectorized with higher probability
1419 /// after removing duplicates. Currently the SLP vectorizer supports only
1420 /// vectorization of the power-of-2 number of unique scalars.
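///
/// A hypothetical numeric illustration of the formula below: assume 4 lanes
/// where the other three lanes already contribute 3 unique instruction
/// values. If the current operand at (OpIdx, Lane) would add a 4th unique
/// value (a power of 2, so a gap of 0 to PowerOf2Ceil) while the candidate
/// at (Idx, Lane) repeats an existing value (3 uniques, gap of 1), the
/// result is 0 - 1 = -1, i.e. the repeating candidate is penalized.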
1421 int getSplatScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
1422 Value *IdxLaneV = getData(Idx, Lane).V;
1423 if (!isa<Instruction>(IdxLaneV) || IdxLaneV == getData(OpIdx, Lane).V)
1424 return 0;
1425 SmallPtrSet<Value *, 4> Uniques;
1426 for (unsigned Ln = 0, E = getNumLanes(); Ln < E; ++Ln) {
1427 if (Ln == Lane)
1428 continue;
1429 Value *OpIdxLnV = getData(OpIdx, Ln).V;
1430 if (!isa<Instruction>(OpIdxLnV))
1431 return 0;
1432 Uniques.insert(OpIdxLnV);
1433 }
1434 int UniquesCount = Uniques.size();
1435 int UniquesCntWithIdxLaneV =
1436 Uniques.contains(IdxLaneV) ? UniquesCount : UniquesCount + 1;
1437 Value *OpIdxLaneV = getData(OpIdx, Lane).V;
1438 int UniquesCntWithOpIdxLaneV =
1439 Uniques.contains(OpIdxLaneV) ? UniquesCount : UniquesCount + 1;
1440 if (UniquesCntWithIdxLaneV == UniquesCntWithOpIdxLaneV)
1441 return 0;
1442 return (PowerOf2Ceil(UniquesCntWithOpIdxLaneV) -
1443 UniquesCntWithOpIdxLaneV) -
1444 (PowerOf2Ceil(UniquesCntWithIdxLaneV) - UniquesCntWithIdxLaneV);
1445 }
1446
1447 /// \param Lane lane of the operands under analysis.
1448 /// \param OpIdx operand index in lane \p Lane for which we're looking for
1449 /// the best candidate.
1450 /// \param Idx operand index of the current candidate value.
1451 /// \returns The additional score for the scalar whose users are all
1452 /// vectorized.
1453 int getExternalUseScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
1454 Value *IdxLaneV = getData(Idx, Lane).V;
1455 Value *OpIdxLaneV = getData(OpIdx, Lane).V;
1456 // Do not care about number of uses for vector-like instructions
1457 // (extractelement/extractvalue with constant indices), they are extracts
1458 // themselves and already externally used. Vectorization of such
1459 // instructions does not add extra extractelement instruction, just may
1460 // remove it.
1461 if (isVectorLikeInstWithConstOps(IdxLaneV) &&
1462 isVectorLikeInstWithConstOps(OpIdxLaneV))
1463 return LookAheadHeuristics::ScoreAllUserVectorized;
1464 auto *IdxLaneI = dyn_cast<Instruction>(IdxLaneV);
1465 if (!IdxLaneI || !isa<Instruction>(OpIdxLaneV))
1466 return 0;
1467 return R.areAllUsersVectorized(IdxLaneI, None)
1468 ? LookAheadHeuristics::ScoreAllUserVectorized
1469 : 0;
1470 }
1471
1472 /// Score scaling factor for fully compatible instructions but with
1473 /// different number of external uses. Allows better selection of the
1474 /// instructions with less external uses.
1475 static const int ScoreScaleFactor = 10;
1476
1477 /// \Returns the look-ahead score, which tells us how much the sub-trees
1478 /// rooted at \p LHS and \p RHS match; the more they match, the higher the
1479 /// score. This helps break ties in an informed way when we cannot decide on
1480 /// the order of the operands by just considering the immediate
1481 /// predecessors.
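///
/// A hypothetical numeric sketch of the scaling performed below: with a raw
/// look-ahead score of 2 and a splat score of 1, the result becomes
/// (2 + 1) * ScoreScaleFactor = 30, plus the external-use bonus when all
/// users of the candidate are already vectorized. If the splat score is
/// negative enough to drive the combined result to zero or below, the score
/// is pinned to 1 instead of falling into the failed state.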
1482 int getLookAheadScore(Value *LHS, Value *RHS, ArrayRef<Value *> MainAltOps,
1483 int Lane, unsigned OpIdx, unsigned Idx,
1484 bool &IsUsed) {
1485 LookAheadHeuristics LookAhead(DL, SE, R, getNumLanes(),
1486 LookAheadMaxDepth);
1487 // Keep track of the instruction stack as we recurse into the operands
1488 // during the look-ahead score exploration.
1489 int Score =
1490 LookAhead.getScoreAtLevelRec(LHS, RHS, /*U1=*/nullptr, /*U2=*/nullptr,
1491 /*CurrLevel=*/1, MainAltOps);
1492 if (Score) {
1493 int SplatScore = getSplatScore(Lane, OpIdx, Idx);
1494 if (Score <= -SplatScore) {
1495 // Set the minimum score for splat-like sequence to avoid setting
1496 // failed state.
1497 Score = 1;
1498 } else {
1499 Score += SplatScore;
1500 // Scale score to see the difference between different operands
1501 // and similar operands but all vectorized/not all vectorized
1502 // uses. It does not affect actual selection of the best
1503 // compatible operand in general, just allows to select the
1504 // operand with all vectorized uses.
1505 Score *= ScoreScaleFactor;
1506 Score += getExternalUseScore(Lane, OpIdx, Idx);
1507 IsUsed = true;
1508 }
1509 }
1510 return Score;
1511 }
1512
1513 /// Best defined scores per lanes between the passes. Used to choose the
1514 /// best operand (with the highest score) between the passes.
1515 /// The key - {Operand Index, Lane}.
1516 /// The value - the best score between the passes for the lane and the
1517 /// operand.
1518 SmallDenseMap<std::pair<unsigned, unsigned>, unsigned, 8>
1519 BestScoresPerLanes;
1520
1521 // Search all operands in Ops[*][Lane] for the one that matches best
1522 // Ops[OpIdx][LastLane] and return its operand index.
1523 // If no good match can be found, return None.
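// For example (purely illustrative): if Ops[OpIdx][LastLane] is a load of %p,
// a candidate in the current lane that loads from the consecutive address
// %p + 1 will typically get the highest look-ahead score and be selected,
// provided its APO matches.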
1524 Optional<unsigned> getBestOperand(unsigned OpIdx, int Lane, int LastLane,
1525 ArrayRef<ReorderingMode> ReorderingModes,
1526 ArrayRef<Value *> MainAltOps) {
1527 unsigned NumOperands = getNumOperands();
1528
1529 // The operand of the previous lane at OpIdx.
1530 Value *OpLastLane = getData(OpIdx, LastLane).V;
1531
1532 // Our strategy mode for OpIdx.
1533 ReorderingMode RMode = ReorderingModes[OpIdx];
1534 if (RMode == ReorderingMode::Failed)
1535 return None;
1536
1537 // The linearized opcode of the operand at OpIdx, Lane.
1538 bool OpIdxAPO = getData(OpIdx, Lane).APO;
1539
1540 // The best operand index and its score.
1541 // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
1542 // are using the score to differentiate between the two.
1543 struct BestOpData {
1544 Optional<unsigned> Idx = None;
1545 unsigned Score = 0;
1546 } BestOp;
1547 BestOp.Score =
1548 BestScoresPerLanes.try_emplace(std::make_pair(OpIdx, Lane), 0)
1549 .first->second;
1550
1551 // Track if the operand must be marked as used. If the operand is
1552 // explicitly set to Score 1 (because of a non-power-of-2 number of unique
1553 // scalars), we may want to re-estimate the operands on later iterations.
1554 bool IsUsed =
1555 RMode == ReorderingMode::Splat || RMode == ReorderingMode::Constant;
1556 // Iterate through all unused operands and look for the best.
1557 for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
1558 // Get the operand at Idx and Lane.
1559 OperandData &OpData = getData(Idx, Lane);
1560 Value *Op = OpData.V;
1561 bool OpAPO = OpData.APO;
1562
1563 // Skip already selected operands.
1564 if (OpData.IsUsed)
1565 continue;
1566
1567 // Skip if we are trying to move the operand to a position with a
1568 // different opcode in the linearized tree form. This would break the
1569 // semantics.
1570 if (OpAPO != OpIdxAPO)
1571 continue;
1572
1573 // Look for an operand that matches the current mode.
1574 switch (RMode) {
1575 case ReorderingMode::Load:
1576 case ReorderingMode::Constant:
1577 case ReorderingMode::Opcode: {
1578 bool LeftToRight = Lane > LastLane;
1579 Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
1580 Value *OpRight = (LeftToRight) ? Op : OpLastLane;
1581 int Score = getLookAheadScore(OpLeft, OpRight, MainAltOps, Lane,
1582 OpIdx, Idx, IsUsed);
1583 if (Score > static_cast<int>(BestOp.Score)) {
1584 BestOp.Idx = Idx;
1585 BestOp.Score = Score;
1586 BestScoresPerLanes[std::make_pair(OpIdx, Lane)] = Score;
1587 }
1588 break;
1589 }
1590 case ReorderingMode::Splat:
1591 if (Op == OpLastLane)
1592 BestOp.Idx = Idx;
1593 break;
1594 case ReorderingMode::Failed:
1595 llvm_unreachable("Not expected Failed reordering mode.");
1596 }
1597 }
1598
1599 if (BestOp.Idx) {
1600 getData(*BestOp.Idx, Lane).IsUsed = IsUsed;
1601 return BestOp.Idx;
1602 }
1603 // If we could not find a good match return None.
1604 return None;
1605 }
1606
1607 /// Helper for reorderOperandVecs.
1608 /// \returns the lane that we should start reordering from. This is the one
1609 /// which has the least number of operands that can freely move about, or is
1610 /// less profitable because it already has the most optimal set of operands.
1611 unsigned getBestLaneToStartReordering() const {
1612 unsigned Min = UINT_MAX;
1613 unsigned SameOpNumber = 0;
1614 // std::pair<unsigned, unsigned> is used to implement a simple voting
1615 // algorithm and choose the lane with the least number of operands that
1616 // can freely move about, or that is less profitable because it already has the
1617 // most optimal set of operands. The first unsigned is a counter for
1618 // voting, the second unsigned is the counter of lanes with instructions
1619 // with same/alternate opcodes and same parent basic block.
1620 MapVector<unsigned, std::pair<unsigned, unsigned>> HashMap;
1621 // Try to be closer to the original results, if we have multiple lanes
1622 // with same cost. If 2 lanes have the same cost, use the one with the
1623 // lowest index.
1624 for (int I = getNumLanes(); I > 0; --I) {
1625 unsigned Lane = I - 1;
1626 OperandsOrderData NumFreeOpsHash =
1627 getMaxNumOperandsThatCanBeReordered(Lane);
1628 // Compare the number of operands that can move and choose the one with
1629 // the least number.
1630 if (NumFreeOpsHash.NumOfAPOs < Min) {
1631 Min = NumFreeOpsHash.NumOfAPOs;
1632 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent;
1633 HashMap.clear();
1634 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
1635 } else if (NumFreeOpsHash.NumOfAPOs == Min &&
1636 NumFreeOpsHash.NumOpsWithSameOpcodeParent < SameOpNumber) {
1637 // Select the most optimal lane in terms of number of operands that
1638 // should be moved around.
1639 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent;
1640 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
1641 } else if (NumFreeOpsHash.NumOfAPOs == Min &&
1642 NumFreeOpsHash.NumOpsWithSameOpcodeParent == SameOpNumber) {
1643 auto It = HashMap.find(NumFreeOpsHash.Hash);
1644 if (It == HashMap.end())
1645 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
1646 else
1647 ++It->second.first;
1648 }
1649 }
1650 // Select the lane with the minimum counter.
1651 unsigned BestLane = 0;
1652 unsigned CntMin = UINT_MAX;
1653 for (const auto &Data : reverse(HashMap)) {
1654 if (Data.second.first < CntMin) {
1655 CntMin = Data.second.first;
1656 BestLane = Data.second.second;
1657 }
1658 }
1659 return BestLane;
1660 }
1661
1662 /// Data structure that helps to reorder operands.
1663 struct OperandsOrderData {
1664 /// The best number of operands with the same APOs, which can be
1665 /// reordered.
1666 unsigned NumOfAPOs = UINT_MAX;
1667 /// Number of operands with the same/alternate instruction opcode and
1668 /// parent.
1669 unsigned NumOpsWithSameOpcodeParent = 0;
1670 /// Hash for the actual operands ordering.
1671 /// It encodes each operand's position id and opcode value. It is used in
1672 /// the voting mechanism to find the lane with the least number of operands
1673 /// that can freely move about, or that is less profitable because it already
1674 /// has the most optimal set of operands. It could be replaced with a
1675 /// SmallVector<unsigned> instead, but a hash code is faster and requires
1676 /// less memory.
1677 unsigned Hash = 0;
1678 };
1679 /// \returns the maximum number of operands that are allowed to be reordered
1680 /// for \p Lane and the number of compatible instructions (with the same
1681 /// parent/opcode). This is used as a heuristic for selecting the first lane
1682 /// to start operand reordering.
1683 OperandsOrderData getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
1684 unsigned CntTrue = 0;
1685 unsigned NumOperands = getNumOperands();
1686 // Operands with the same APO can be reordered. We therefore need to count
1687 // how many of them we have for each APO, like this: Cnt[APO] = x.
1688 // Since we only have two APOs, namely true and false, we can avoid using
1689 // a map. Instead we can simply count the number of operands that
1690 // correspond to one of them (in this case the 'true' APO), and calculate
1691 // the other by subtracting it from the total number of operands.
1692 // Operands with the same instruction opcode and parent are more
1693 // profitable since we don't need to move them in many cases, with a high
1694 // probability such a lane can already be vectorized effectively.
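// As a hypothetical illustration: for a lane holding a binary op with 2
// operands, a 'sub' lane has CntTrue == 1, so NumOfAPOs = max(1, 1) = 1 and
// its operands cannot be swapped, whereas an 'add' lane has CntTrue == 0, so
// NumOfAPOs = max(0, 2) = 2 and both operands can be freely reordered.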
1695 bool AllUndefs = true;
1696 unsigned NumOpsWithSameOpcodeParent = 0;
1697 Instruction *OpcodeI = nullptr;
1698 BasicBlock *Parent = nullptr;
1699 unsigned Hash = 0;
1700 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1701 const OperandData &OpData = getData(OpIdx, Lane);
1702 if (OpData.APO)
1703 ++CntTrue;
1704 // Use Boyer-Moore majority voting for finding the majority opcode and
1705 // the number of times it occurs.
1706 if (auto *I = dyn_cast<Instruction>(OpData.V)) {
1707 if (!OpcodeI || !getSameOpcode({OpcodeI, I}).getOpcode() ||
1708 I->getParent() != Parent) {
1709 if (NumOpsWithSameOpcodeParent == 0) {
1710 NumOpsWithSameOpcodeParent = 1;
1711 OpcodeI = I;
1712 Parent = I->getParent();
1713 } else {
1714 --NumOpsWithSameOpcodeParent;
1715 }
1716 } else {
1717 ++NumOpsWithSameOpcodeParent;
1718 }
1719 }
1720 Hash = hash_combine(
1721 Hash, hash_value((OpIdx + 1) * (OpData.V->getValueID() + 1)));
1722 AllUndefs = AllUndefs && isa<UndefValue>(OpData.V);
1723 }
1724 if (AllUndefs)
1725 return {};
1726 OperandsOrderData Data;
1727 Data.NumOfAPOs = std::max(CntTrue, NumOperands - CntTrue);
1728 Data.NumOpsWithSameOpcodeParent = NumOpsWithSameOpcodeParent;
1729 Data.Hash = Hash;
1730 return Data;
1731 }
1732
1733 /// Go through the instructions in VL and append their operands.
1734 void appendOperandsOfVL(ArrayRef<Value *> VL) {
1735 assert(!VL.empty() && "Bad VL");
1736 assert((empty() || VL.size() == getNumLanes()) &&
1737 "Expected same number of lanes");
1738 assert(isa<Instruction>(VL[0]) && "Expected instruction");
1739 unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
1740 OpsVec.resize(NumOperands);
1741 unsigned NumLanes = VL.size();
1742 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1743 OpsVec[OpIdx].resize(NumLanes);
1744 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1745 assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
1746 // Our tree has just 3 nodes: the root and two operands.
1747 // It is therefore trivial to get the APO. We only need to check the
1748 // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
1749 // RHS operand. The LHS operand of both add and sub is never attached
1750 // to an inverse operation in the linearized form, therefore its APO
1751 // is false. The RHS is true only if VL[Lane] is an inverse operation.
1752
1753 // Since operand reordering is performed on groups of commutative
1754 // operations or alternating sequences (e.g., +, -), we can safely
1755 // tell the inverse operations by checking commutativity.
1756 bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
1757 bool APO = (OpIdx == 0) ? false : IsInverseOperation;
1758 OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
1759 APO, false};
1760 }
1761 }
1762 }
1763
1764 /// \returns the number of operands.
1765 unsigned getNumOperands() const { return OpsVec.size(); }
1766
1767 /// \returns the number of lanes.
1768 unsigned getNumLanes() const { return OpsVec[0].size(); }
1769
1770 /// \returns the operand value at \p OpIdx and \p Lane.
1771 Value *getValue(unsigned OpIdx, unsigned Lane) const {
1772 return getData(OpIdx, Lane).V;
1773 }
1774
1775 /// \returns true if the data structure is empty.
1776 bool empty() const { return OpsVec.empty(); }
1777
1778 /// Clears the data.
1779 void clear() { OpsVec.clear(); }
1780
1781 /// \Returns true if there are enough operands identical to \p Op to fill
1782 /// the whole vector.
1783 /// Note: This modifies the 'IsUsed' flag, so a clearUsed() must follow.
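/// For instance (hypothetical): with 4 lanes, shouldBroadcast(%x, OpIdx,
/// Lane) returns true only if every other lane still has an unused operand
/// equal to %x with a matching APO.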
1784 bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
1785 bool OpAPO = getData(OpIdx, Lane).APO;
1786 for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
1787 if (Ln == Lane)
1788 continue;
1789 // This is set to true if we found a candidate for broadcast at Lane.
1790 bool FoundCandidate = false;
1791 for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
1792 OperandData &Data = getData(OpI, Ln);
1793 if (Data.APO != OpAPO || Data.IsUsed)
1794 continue;
1795 if (Data.V == Op) {
1796 FoundCandidate = true;
1797 Data.IsUsed = true;
1798 break;
1799 }
1800 }
1801 if (!FoundCandidate)
1802 return false;
1803 }
1804 return true;
1805 }
1806
1807 public:
1808 /// Initialize with all the operands of the instruction vector \p RootVL.
1809 VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
1810 ScalarEvolution &SE, const BoUpSLP &R)
1811 : DL(DL), SE(SE), R(R) {
1812 // Append all the operands of RootVL.
1813 appendOperandsOfVL(RootVL);
1814 }
1815
1816 /// \Returns a value vector with the operands across all lanes for the
1817 /// operand at \p OpIdx.
1818 ValueList getVL(unsigned OpIdx) const {
1819 ValueList OpVL(OpsVec[OpIdx].size());
1820 assert(OpsVec[OpIdx].size() == getNumLanes() &&
1821 "Expected same num of lanes across all operands");
1822 for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
1823 OpVL[Lane] = OpsVec[OpIdx][Lane].V;
1824 return OpVL;
1825 }
1826
1827 // Performs operand reordering for 2 or more operands.
1828 // The original operands are in OrigOps[OpIdx][Lane].
1829 // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'.
1830 void reorder() {
1831 unsigned NumOperands = getNumOperands();
1832 unsigned NumLanes = getNumLanes();
1833 // Each operand has its own mode. We are using this mode to help us select
1834 // the instructions for each lane, so that they match best with the ones
1835 // we have selected so far.
1836 SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);
1837
1838 // This is a greedy single-pass algorithm. We are going over each lane
1839 // once and deciding on the best order right away with no back-tracking.
1840 // However, in order to increase its effectiveness, we start with the lane
1841 // that has operands that can move the least. For example, given the
1842 // following lanes:
1843 // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd
1844 // Lane 1 : A[1] = C[1] - B[1] // Visited 1st
1845 // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd
1846 // Lane 3 : A[3] = C[3] - B[3] // Visited 4th
1847 // we will start at Lane 1, since the operands of the subtraction cannot
1848 // be reordered. Then we will visit the rest of the lanes in a circular
1849 // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3.
1850
1851 // Find the first lane that we will start our search from.
1852 unsigned FirstLane = getBestLaneToStartReordering();
1853
1854 // Initialize the modes.
1855 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1856 Value *OpLane0 = getValue(OpIdx, FirstLane);
1857 // Keep track if we have instructions with all the same opcode on one
1858 // side.
1859 if (isa<LoadInst>(OpLane0))
1860 ReorderingModes[OpIdx] = ReorderingMode::Load;
1861 else if (isa<Instruction>(OpLane0)) {
1862 // Check if OpLane0 should be broadcast.
1863 if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
1864 ReorderingModes[OpIdx] = ReorderingMode::Splat;
1865 else
1866 ReorderingModes[OpIdx] = ReorderingMode::Opcode;
1867 }
1868 else if (isa<Constant>(OpLane0))
1869 ReorderingModes[OpIdx] = ReorderingMode::Constant;
1870 else if (isa<Argument>(OpLane0))
1871 // Our best hope is a Splat. It may save some cost in some cases.
1872 ReorderingModes[OpIdx] = ReorderingMode::Splat;
1873 else
1874 // NOTE: This should be unreachable.
1875 ReorderingModes[OpIdx] = ReorderingMode::Failed;
1876 }
1877
1878 // Check that we don't have same operands. No need to reorder if operands
1879 // are just a perfect diamond or shuffled diamond match. Do not skip the
1880 // reordering only for possible broadcasts or a non-power-of-2 number of
1881 // scalars (just for now).
1882 auto &&SkipReordering = [this]() {
1883 SmallPtrSet<Value *, 4> UniqueValues;
1884 ArrayRef<OperandData> Op0 = OpsVec.front();
1885 for (const OperandData &Data : Op0)
1886 UniqueValues.insert(Data.V);
1887 for (ArrayRef<OperandData> Op : drop_begin(OpsVec, 1)) {
1888 if (any_of(Op, [&UniqueValues](const OperandData &Data) {
1889 return !UniqueValues.contains(Data.V);
1890 }))
1891 return false;
1892 }
1893 // TODO: Check if we can remove a check for non-power-of-2 number of
1894 // scalars after full support of non-power-of-2 vectorization.
1895 return UniqueValues.size() != 2 && isPowerOf2_32(UniqueValues.size());
1896 };
1897
1898 // If the initial strategy fails for any of the operand indexes, then we
1899 // perform reordering again in a second pass. This helps avoid assigning
1900 // high priority to the failed strategy, and should improve reordering for
1901 // the non-failed operand indexes.
1902 for (int Pass = 0; Pass != 2; ++Pass) {
1903 // Check if there is no need to reorder the operands because they are a
1904 // perfect or shuffled diamond match.
1905 // We need to do this to avoid counting extra external use cost for
1906 // shuffled matches, which may cause regressions.
1907 if (SkipReordering())
1908 break;
1909 // Skip the second pass if the first pass did not fail.
1910 bool StrategyFailed = false;
1911 // Mark all operand data as free to use.
1912 clearUsed();
1913 // We keep the original operand order for the FirstLane, so reorder the
1914 // rest of the lanes. We are visiting the nodes in a circular fashion,
1915 // using FirstLane as the center point and increasing the radius
1916 // distance.
1917 SmallVector<SmallVector<Value *, 2>> MainAltOps(NumOperands);
1918 for (unsigned I = 0; I < NumOperands; ++I)
1919 MainAltOps[I].push_back(getData(I, FirstLane).V);
1920
1921 for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
1922 // Visit the lane on the right and then the lane on the left.
1923 for (int Direction : {+1, -1}) {
1924 int Lane = FirstLane + Direction * Distance;
1925 if (Lane < 0 || Lane >= (int)NumLanes)
1926 continue;
1927 int LastLane = Lane - Direction;
1928 assert(LastLane >= 0 && LastLane < (int)NumLanes &&
1929 "Out of bounds");
1930 // Look for a good match for each operand.
1931 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1932 // Search for the operand that matches SortedOps[OpIdx][Lane-1].
1933 Optional<unsigned> BestIdx = getBestOperand(
1934 OpIdx, Lane, LastLane, ReorderingModes, MainAltOps[OpIdx]);
1935 // By not selecting a value, we allow the operands that follow to
1936 // select a better matching value. We will get a non-null value in
1937 // the next run of getBestOperand().
1938 if (BestIdx) {
1939 // Swap the current operand with the one returned by
1940 // getBestOperand().
1941 swap(OpIdx, *BestIdx, Lane);
1942 } else {
1943 // We failed to find a best operand, set mode to 'Failed'.
1944 ReorderingModes[OpIdx] = ReorderingMode::Failed;
1945 // Enable the second pass.
1946 StrategyFailed = true;
1947 }
1948 // Try to get the alternate opcode and follow it during analysis.
1949 if (MainAltOps[OpIdx].size() != 2) {
1950 OperandData &AltOp = getData(OpIdx, Lane);
1951 InstructionsState OpS =
1952 getSameOpcode({MainAltOps[OpIdx].front(), AltOp.V});
1953 if (OpS.getOpcode() && OpS.isAltShuffle())
1954 MainAltOps[OpIdx].push_back(AltOp.V);
1955 }
1956 }
1957 }
1958 }
1959 // Skip second pass if the strategy did not fail.
1960 if (!StrategyFailed)
1961 break;
1962 }
1963 }
1964
1965 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1966 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
1967 switch (RMode) {
1968 case ReorderingMode::Load:
1969 return "Load";
1970 case ReorderingMode::Opcode:
1971 return "Opcode";
1972 case ReorderingMode::Constant:
1973 return "Constant";
1974 case ReorderingMode::Splat:
1975 return "Splat";
1976 case ReorderingMode::Failed:
1977 return "Failed";
1978 }
1979 llvm_unreachable("Unimplemented Reordering Type");
1980 }
1981
1982 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
1983 raw_ostream &OS) {
1984 return OS << getModeStr(RMode);
1985 }
1986
1987 /// Debug print.
1988 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
1989 printMode(RMode, dbgs());
1990 }
1991
1992 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
1993 return printMode(RMode, OS);
1994 }
1995
1996 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
1997 const unsigned Indent = 2;
1998 unsigned Cnt = 0;
1999 for (const OperandDataVec &OpDataVec : OpsVec) {
2000 OS << "Operand " << Cnt++ << "\n";
2001 for (const OperandData &OpData : OpDataVec) {
2002 OS.indent(Indent) << "{";
2003 if (Value *V = OpData.V)
2004 OS << *V;
2005 else
2006 OS << "null";
2007 OS << ", APO:" << OpData.APO << "}\n";
2008 }
2009 OS << "\n";
2010 }
2011 return OS;
2012 }
2013
2014 /// Debug print.
2015 LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
2016 #endif
2017 };
2018
2019 /// Evaluate each pair in \p Candidates and return the index into \p Candidates
2020 /// of the pair with the highest score, deemed to have the best chance to form
2021 /// the root of a profitable tree to vectorize. Return None if no candidate
2022 /// scored above LookAheadHeuristics::ScoreFail.
2023 /// \param Limit Lower limit of the score, considered to be good enough.
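/// For example (hypothetical): given candidate pairs {(%a0, %b0), (%a1, %b1)}
/// where only the second pair loads from consecutive addresses, the returned
/// index would be 1.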
2024 Optional<int>
2025 findBestRootPair(ArrayRef<std::pair<Value *, Value *>> Candidates,
2026 int Limit = LookAheadHeuristics::ScoreFail) {
2027 LookAheadHeuristics LookAhead(*DL, *SE, *this, /*NumLanes=*/2,
2028 RootLookAheadMaxDepth);
2029 int BestScore = Limit;
2030 Optional<int> Index = None;
2031 for (int I : seq<int>(0, Candidates.size())) {
2032 int Score = LookAhead.getScoreAtLevelRec(Candidates[I].first,
2033 Candidates[I].second,
2034 /*U1=*/nullptr, /*U2=*/nullptr,
2035 /*Level=*/1, None);
2036 if (Score > BestScore) {
2037 BestScore = Score;
2038 Index = I;
2039 }
2040 }
2041 return Index;
2042 }
2043
2044 /// Checks if the instruction is marked for deletion.
2045 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); }
2046
2047 /// Removes an instruction from its block and eventually deletes it.
2048 /// It's like Instruction::eraseFromParent() except that the actual deletion
2049 /// is delayed until BoUpSLP is destructed.
2050 void eraseInstruction(Instruction *I) {
2051 DeletedInstructions.insert(I);
2052 }
2053
2054 /// Checks if the instruction was already analyzed for being possible
2055 /// reduction root.
2056 bool isAnalyzedReductionRoot(Instruction *I) const {
2057 return AnalyzedReductionsRoots.count(I);
2058 }
2059 /// Register given instruction as already analyzed for being possible
2060 /// reduction root.
2061 void analyzedReductionRoot(Instruction *I) {
2062 AnalyzedReductionsRoots.insert(I);
2063 }
2064 /// Checks if the provided list of reduced values was checked already for
2065 /// vectorization.
2066 bool areAnalyzedReductionVals(ArrayRef<Value *> VL) {
2067 return AnalyzedReductionVals.contains(hash_value(VL));
2068 }
2069 /// Adds the list of reduced values to list of already checked values for the
2070 /// vectorization.
2071 void analyzedReductionVals(ArrayRef<Value *> VL) {
2072 AnalyzedReductionVals.insert(hash_value(VL));
2073 }
2074 /// Clear the list of the analyzed reduction root instructions.
2075 void clearReductionData() {
2076 AnalyzedReductionsRoots.clear();
2077 AnalyzedReductionVals.clear();
2078 }
2079 /// Checks if the given value is gathered in one of the nodes.
2080 bool isAnyGathered(const SmallDenseSet<Value *> &Vals) const {
2081 return any_of(MustGather, [&](Value *V) { return Vals.contains(V); });
2082 }
2083
2084 ~BoUpSLP();
2085
2086 private:
2087 /// Check if the operands on the edges \p Edges of the \p UserTE allow
2088 /// reordering (i.e. the operands can be reordered because they have only one
2089 /// user and are reorderable).
2090 /// \param ReorderableGathers List of all gather nodes that require reordering
2091 /// (e.g., gather of extractelements or partially vectorizable loads).
2092 /// \param GatherOps List of gather operand nodes for \p UserTE that require
2093 /// reordering, subset of \p NonVectorized.
2094 bool
2095 canReorderOperands(TreeEntry *UserTE,
2096 SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
2097 ArrayRef<TreeEntry *> ReorderableGathers,
2098 SmallVectorImpl<TreeEntry *> &GatherOps);
2099
2100 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2101 /// if any. If it is not vectorized (gather node), returns nullptr.
2102 TreeEntry *getVectorizedOperand(TreeEntry *UserTE, unsigned OpIdx) {
2103 ArrayRef<Value *> VL = UserTE->getOperand(OpIdx);
2104 TreeEntry *TE = nullptr;
2105 const auto *It = find_if(VL, [this, &TE](Value *V) {
2106 TE = getTreeEntry(V);
2107 return TE;
2108 });
2109 if (It != VL.end() && TE->isSame(VL))
2110 return TE;
2111 return nullptr;
2112 }
2113
2114 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2115 /// if any. If it is not vectorized (gather node), returns nullptr.
2116 const TreeEntry *getVectorizedOperand(const TreeEntry *UserTE,
2117 unsigned OpIdx) const {
2118 return const_cast<BoUpSLP *>(this)->getVectorizedOperand(
2119 const_cast<TreeEntry *>(UserTE), OpIdx);
2120 }
2121
2122 /// Checks if all users of \p I are the part of the vectorization tree.
2123 bool areAllUsersVectorized(Instruction *I,
2124 ArrayRef<Value *> VectorizedVals) const;
2125
2126 /// \returns the cost of the vectorizable entry.
2127 InstructionCost getEntryCost(const TreeEntry *E,
2128 ArrayRef<Value *> VectorizedVals);
2129
2130 /// This is the recursive part of buildTree.
2131 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
2132 const EdgeInfo &EI);
2133
2134 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
2135 /// be vectorized to use the original vector (or aggregate "bitcast" to a
2136 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
2137 /// returns false, setting \p CurrentOrder to either an empty vector or a
2138 /// non-identity permutation that allows to reuse extract instructions.
2139 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
2140 SmallVectorImpl<unsigned> &CurrentOrder) const;
2141
2142 /// Vectorize a single entry in the tree.
2143 Value *vectorizeTree(TreeEntry *E);
2144
2145 /// Vectorize a single entry in the tree, starting in \p VL.
2146 Value *vectorizeTree(ArrayRef<Value *> VL);
2147
2148 /// Create a new vector from a list of scalar values. Produces a sequence
2149 /// which exploits values reused across lanes, and arranges the inserts
2150 /// for ease of later optimization.
2151 Value *createBuildVector(ArrayRef<Value *> VL);
2152
2153 /// \returns the scalarization cost for this type. Scalarization in this
2154 /// context means the creation of vectors from a group of scalars. If \p
2155 /// NeedToShuffle is true, we need to add the cost of reshuffling some of the
2156 /// vector elements.
2157 InstructionCost getGatherCost(FixedVectorType *Ty,
2158 const APInt &ShuffledIndices,
2159 bool NeedToShuffle) const;
2160
2161 /// Checks if the gathered \p VL can be represented as shuffle(s) of previous
2162 /// tree entries.
2163 /// \returns ShuffleKind, if gathered values can be represented as shuffles of
2164 /// previous tree entries. \p Mask is filled with the shuffle mask.
2165 Optional<TargetTransformInfo::ShuffleKind>
2166 isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
2167 SmallVectorImpl<const TreeEntry *> &Entries);
2168
2169 /// \returns the scalarization cost for this list of values. Assuming that
2170 /// this subtree gets vectorized, we may need to extract the values from the
2171 /// roots. This method calculates the cost of extracting the values.
2172 InstructionCost getGatherCost(ArrayRef<Value *> VL) const;
2173
2174 /// Set the Builder insert point to one after the last instruction in
2175 /// the bundle
2176 void setInsertPointAfterBundle(const TreeEntry *E);
2177
2178 /// \returns a vector from a collection of scalars in \p VL.
2179 Value *gather(ArrayRef<Value *> VL);
2180
2181 /// \returns whether the VectorizableTree is fully vectorizable and will
2182 /// be beneficial even if the tree height is tiny.
2183 bool isFullyVectorizableTinyTree(bool ForReduction) const;
2184
2185 /// Reorder commutative or alt operands to get better probability of
2186 /// generating vectorized code.
2187 static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
2188 SmallVectorImpl<Value *> &Left,
2189 SmallVectorImpl<Value *> &Right,
2190 const DataLayout &DL,
2191 ScalarEvolution &SE,
2192 const BoUpSLP &R);
2193
2194 /// Helper for `findExternalStoreUsersReorderIndices()`. It iterates over the
2195 /// users of \p TE and collects the stores. It returns the map from the store
2196 /// pointers to the collected stores.
2197 DenseMap<Value *, SmallVector<StoreInst *, 4>>
2198 collectUserStores(const BoUpSLP::TreeEntry *TE) const;
2199
2200 /// Helper for `findExternalStoreUsersReorderIndices()`. It checks if the
2201 /// stores in \p StoresVec can form a vector instruction. If so, it returns
2202 /// true and populates \p ReorderIndices with the shuffle indices of the stores
2203 /// when compared to the sorted vector.
2204 bool CanFormVector(const SmallVector<StoreInst *, 4> &StoresVec,
2205 OrdersType &ReorderIndices) const;
2206
2207 /// Iterates through the users of \p TE, looking for scalar stores that can be
2208 /// potentially vectorized in a future SLP-tree. If found, it keeps track of
2209 /// their order and builds an order index vector for each store bundle. It
2210 /// returns all these order vectors found.
2211 /// We run this after the tree has formed, otherwise we may come across user
2212 /// instructions that are not yet in the tree.
2213 SmallVector<OrdersType, 1>
2214 findExternalStoreUsersReorderIndices(TreeEntry *TE) const;
2215
2216 struct TreeEntry {
2217 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
2218 TreeEntry(VecTreeTy &Container) : Container(Container) {}
2219
2220 /// \returns true if the scalars in VL are equal to this entry.
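/// For instance (hypothetical): with Scalars = {%a, %b} and
/// ReuseShuffleIndices = {0, 1, 0, 1}, the list VL = {%a, %b, %a, %b} is
/// considered the same as this entry.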
2221 bool isSame(ArrayRef<Value *> VL) const {
2222 auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) {
2223 if (Mask.size() != VL.size() && VL.size() == Scalars.size())
2224 return std::equal(VL.begin(), VL.end(), Scalars.begin());
2225 return VL.size() == Mask.size() &&
2226 std::equal(VL.begin(), VL.end(), Mask.begin(),
2227 [Scalars](Value *V, int Idx) {
2228 return (isa<UndefValue>(V) &&
2229 Idx == UndefMaskElem) ||
2230 (Idx != UndefMaskElem && V == Scalars[Idx]);
2231 });
2232 };
2233 if (!ReorderIndices.empty()) {
2234 // TODO: implement matching if the nodes are just reordered, still can
2235 // treat the vector as the same if the list of scalars matches VL
2236 // directly, without reordering.
2237 SmallVector<int> Mask;
2238 inversePermutation(ReorderIndices, Mask);
2239 if (VL.size() == Scalars.size())
2240 return IsSame(Scalars, Mask);
2241 if (VL.size() == ReuseShuffleIndices.size()) {
2242 ::addMask(Mask, ReuseShuffleIndices);
2243 return IsSame(Scalars, Mask);
2244 }
2245 return false;
2246 }
2247 return IsSame(Scalars, ReuseShuffleIndices);
2248 }
2249
2250 /// \returns true if current entry has same operands as \p TE.
2251 bool hasEqualOperands(const TreeEntry &TE) const {
2252 if (TE.getNumOperands() != getNumOperands())
2253 return false;
2254 SmallBitVector Used(getNumOperands());
2255 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
2256 unsigned PrevCount = Used.count();
2257 for (unsigned K = 0; K < E; ++K) {
2258 if (Used.test(K))
2259 continue;
2260 if (getOperand(K) == TE.getOperand(I)) {
2261 Used.set(K);
2262 break;
2263 }
2264 }
2265 // Check if we actually found the matching operand.
2266 if (PrevCount == Used.count())
2267 return false;
2268 }
2269 return true;
2270 }
2271
2272 /// \return Final vectorization factor for the node. Defined by the total
2273 /// number of vectorized scalars, including those used several times in the
2274 /// entry and counted in the \a ReuseShuffleIndices, if any.
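/// E.g. (hypothetical): an entry with 4 Scalars reused through 8
/// ReuseShuffleIndices has a vector factor of 8; without reuse indices the
/// factor is simply the number of scalars, 4.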
2275 unsigned getVectorFactor() const {
2276 if (!ReuseShuffleIndices.empty())
2277 return ReuseShuffleIndices.size();
2278 return Scalars.size();
2279 };
2280
2281 /// A vector of scalars.
2282 ValueList Scalars;
2283
2284 /// The Scalars are vectorized into this value. It is initialized to Null.
2285 Value *VectorizedValue = nullptr;
2286
2287 /// Do we need to gather this sequence or vectorize it
2288 /// (either with vector instruction or with scatter/gather
2289 /// intrinsics for store/load)?
2290 enum EntryState { Vectorize, ScatterVectorize, NeedToGather };
2291 EntryState State;
2292
2293 /// Does this sequence require some shuffling?
2294 SmallVector<int, 4> ReuseShuffleIndices;
2295
2296 /// Does this entry require reordering?
2297 SmallVector<unsigned, 4> ReorderIndices;
2298
2299 /// Points back to the VectorizableTree.
2300 ///
2301 /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has
2302 /// to be a pointer and needs to be able to initialize the child iterator.
2303 /// Thus we need a reference back to the container to translate the indices
2304 /// to entries.
2305 VecTreeTy &Container;
2306
2307 /// The TreeEntry index containing the user of this entry. We can actually
2308 /// have multiple users so the data structure is not truly a tree.
2309 SmallVector<EdgeInfo, 1> UserTreeIndices;
2310
2311 /// The index of this treeEntry in VectorizableTree.
2312 int Idx = -1;
2313
2314 private:
2315 /// The operands of each instruction in each lane Operands[op_index][lane].
2316 /// Note: This helps avoid the replication of the code that performs the
2317 /// reordering of operands during buildTree_rec() and vectorizeTree().
2318 SmallVector<ValueList, 2> Operands;
2319
2320 /// The main/alternate instruction.
2321 Instruction *MainOp = nullptr;
2322 Instruction *AltOp = nullptr;
2323
2324 public:
2325 /// Set this bundle's \p OpIdx'th operand to \p OpVL.
2326 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) {
2327 if (Operands.size() < OpIdx + 1)
2328 Operands.resize(OpIdx + 1);
2329 assert(Operands[OpIdx].empty() && "Already resized?");
2330 assert(OpVL.size() <= Scalars.size() &&
2331 "Number of operands is greater than the number of scalars.");
2332 Operands[OpIdx].resize(OpVL.size());
2333 copy(OpVL, Operands[OpIdx].begin());
2334 }
2335
2336 /// Set the operands of this bundle in their original order.
2337 void setOperandsInOrder() {
2338 assert(Operands.empty() && "Already initialized?");
2339 auto *I0 = cast<Instruction>(Scalars[0]);
2340 Operands.resize(I0->getNumOperands());
2341 unsigned NumLanes = Scalars.size();
2342 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands();
2343 OpIdx != NumOperands; ++OpIdx) {
2344 Operands[OpIdx].resize(NumLanes);
2345 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
2346 auto *I = cast<Instruction>(Scalars[Lane]);
2347 assert(I->getNumOperands() == NumOperands &&
2348 "Expected same number of operands");
2349 Operands[OpIdx][Lane] = I->getOperand(OpIdx);
2350 }
2351 }
2352 }
2353
2354 /// Reorders operands of the node to the given mask \p Mask.
2355 void reorderOperands(ArrayRef<int> Mask) {
2356 for (ValueList &Operand : Operands)
2357 reorderScalars(Operand, Mask);
2358 }
2359
2360 /// \returns the \p OpIdx operand of this TreeEntry.
2361 ValueList &getOperand(unsigned OpIdx) {
2362 assert(OpIdx < Operands.size() && "Off bounds");
2363 return Operands[OpIdx];
2364 }
2365
2366 /// \returns the \p OpIdx operand of this TreeEntry.
2367 ArrayRef<Value *> getOperand(unsigned OpIdx) const {
2368 assert(OpIdx < Operands.size() && "Off bounds");
2369 return Operands[OpIdx];
2370 }
2371
2372 /// \returns the number of operands.
2373 unsigned getNumOperands() const { return Operands.size(); }
2374
2375 /// \return the single \p OpIdx operand.
2376 Value *getSingleOperand(unsigned OpIdx) const {
2377 assert(OpIdx < Operands.size() && "Off bounds");
2378 assert(!Operands[OpIdx].empty() && "No operand available");
2379 return Operands[OpIdx][0];
2380 }
2381
2382 /// Some of the instructions in the list have alternate opcodes.
2383 bool isAltShuffle() const { return MainOp != AltOp; }
2384
2385 bool isOpcodeOrAlt(Instruction *I) const {
2386 unsigned CheckedOpcode = I->getOpcode();
2387 return (getOpcode() == CheckedOpcode ||
2388 getAltOpcode() == CheckedOpcode);
2389 }
2390
2391 /// Chooses the correct key for scheduling data. If \p Op has the same (or
2392 /// alternate) opcode as the main operation, the key is \p Op. Otherwise the
2393 /// key is the main operation.
2394 Value *isOneOf(Value *Op) const {
2395 auto *I = dyn_cast<Instruction>(Op);
2396 if (I && isOpcodeOrAlt(I))
2397 return Op;
2398 return MainOp;
2399 }
2400
2401 void setOperations(const InstructionsState &S) {
2402 MainOp = S.MainOp;
2403 AltOp = S.AltOp;
2404 }
2405
2406 Instruction *getMainOp() const {
2407 return MainOp;
2408 }
2409
2410 Instruction *getAltOp() const {
2411 return AltOp;
2412 }
2413
2414 /// The main/alternate opcodes for the list of instructions.
2415 unsigned getOpcode() const {
2416 return MainOp ? MainOp->getOpcode() : 0;
2417 }
2418
2419 unsigned getAltOpcode() const {
2420 return AltOp ? AltOp->getOpcode() : 0;
2421 }
2422
2423 /// When ReuseShuffleIndices is empty it just returns the position of \p V
2424 /// within the vector of Scalars. Otherwise, it remaps to the reuse index.
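/// For instance (hypothetical): with Scalars = {%a, %b}, empty
/// ReorderIndices and ReuseShuffleIndices = {0, 1, 0, 1},
/// findLaneForValue(%b) returns 1, the first position of lane 1 in the
/// reuse mask.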
2425 int findLaneForValue(Value *V) const {
2426 unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V));
2427 assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
2428 if (!ReorderIndices.empty())
2429 FoundLane = ReorderIndices[FoundLane];
2430 assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
2431 if (!ReuseShuffleIndices.empty()) {
2432 FoundLane = std::distance(ReuseShuffleIndices.begin(),
2433 find(ReuseShuffleIndices, FoundLane));
2434 }
2435 return FoundLane;
2436 }
2437
2438 #ifndef NDEBUG
2439 /// Debug printer.
2440 LLVM_DUMP_METHOD void dump() const {
2441 dbgs() << Idx << ".\n";
2442 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) {
2443 dbgs() << "Operand " << OpI << ":\n";
2444 for (const Value *V : Operands[OpI])
2445 dbgs().indent(2) << *V << "\n";
2446 }
2447 dbgs() << "Scalars: \n";
2448 for (Value *V : Scalars)
2449 dbgs().indent(2) << *V << "\n";
2450 dbgs() << "State: ";
2451 switch (State) {
2452 case Vectorize:
2453 dbgs() << "Vectorize\n";
2454 break;
2455 case ScatterVectorize:
2456 dbgs() << "ScatterVectorize\n";
2457 break;
2458 case NeedToGather:
2459 dbgs() << "NeedToGather\n";
2460 break;
2461 }
2462 dbgs() << "MainOp: ";
2463 if (MainOp)
2464 dbgs() << *MainOp << "\n";
2465 else
2466 dbgs() << "NULL\n";
2467 dbgs() << "AltOp: ";
2468 if (AltOp)
2469 dbgs() << *AltOp << "\n";
2470 else
2471 dbgs() << "NULL\n";
2472 dbgs() << "VectorizedValue: ";
2473 if (VectorizedValue)
2474 dbgs() << *VectorizedValue << "\n";
2475 else
2476 dbgs() << "NULL\n";
2477 dbgs() << "ReuseShuffleIndices: ";
2478 if (ReuseShuffleIndices.empty())
2479 dbgs() << "Empty";
2480 else
2481 for (int ReuseIdx : ReuseShuffleIndices)
2482 dbgs() << ReuseIdx << ", ";
2483 dbgs() << "\n";
2484 dbgs() << "ReorderIndices: ";
2485 for (unsigned ReorderIdx : ReorderIndices)
2486 dbgs() << ReorderIdx << ", ";
2487 dbgs() << "\n";
2488 dbgs() << "UserTreeIndices: ";
2489 for (const auto &EInfo : UserTreeIndices)
2490 dbgs() << EInfo << ", ";
2491 dbgs() << "\n";
2492 }
2493 #endif
2494 };
2495
2496 #ifndef NDEBUG
2497 void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost,
2498 InstructionCost VecCost,
2499 InstructionCost ScalarCost) const {
2500 dbgs() << "SLP: Calculated costs for Tree:\n"; E->dump();
2501 dbgs() << "SLP: Costs:\n";
2502 dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n";
2503 dbgs() << "SLP: VectorCost = " << VecCost << "\n";
2504 dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n";
2505 dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = " <<
2506 ReuseShuffleCost + VecCost - ScalarCost << "\n";
2507 }
2508 #endif
2509
2510 /// Create a new VectorizableTree entry.
2511 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle,
2512 const InstructionsState &S,
2513 const EdgeInfo &UserTreeIdx,
2514 ArrayRef<int> ReuseShuffleIndices = None,
2515 ArrayRef<unsigned> ReorderIndices = None) {
2516 TreeEntry::EntryState EntryState =
2517 Bundle ? TreeEntry::Vectorize : TreeEntry::NeedToGather;
2518 return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx,
2519 ReuseShuffleIndices, ReorderIndices);
2520 }
2521
2522 TreeEntry *newTreeEntry(ArrayRef<Value *> VL,
2523 TreeEntry::EntryState EntryState,
2524 Optional<ScheduleData *> Bundle,
2525 const InstructionsState &S,
2526 const EdgeInfo &UserTreeIdx,
2527 ArrayRef<int> ReuseShuffleIndices = None,
2528 ArrayRef<unsigned> ReorderIndices = None) {
2529 assert(((!Bundle && EntryState == TreeEntry::NeedToGather) ||
2530 (Bundle && EntryState != TreeEntry::NeedToGather)) &&
2531 "Need to vectorize gather entry?");
2532 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree));
2533 TreeEntry *Last = VectorizableTree.back().get();
2534 Last->Idx = VectorizableTree.size() - 1;
2535 Last->State = EntryState;
2536 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(),
2537 ReuseShuffleIndices.end());
2538 if (ReorderIndices.empty()) {
2539 Last->Scalars.assign(VL.begin(), VL.end());
2540 Last->setOperations(S);
2541 } else {
2542 // Reorder scalars and build final mask.
2543 Last->Scalars.assign(VL.size(), nullptr);
2544 transform(ReorderIndices, Last->Scalars.begin(),
2545 [VL](unsigned Idx) -> Value * {
2546 if (Idx >= VL.size())
2547 return UndefValue::get(VL.front()->getType());
2548 return VL[Idx];
2549 });
2550 InstructionsState S = getSameOpcode(Last->Scalars);
2551 Last->setOperations(S);
2552 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end());
2553 }
2554 if (Last->State != TreeEntry::NeedToGather) {
2555 for (Value *V : VL) {
2556 assert(!getTreeEntry(V) && "Scalar already in tree!");
2557 ScalarToTreeEntry[V] = Last;
2558 }
2559 // Update the scheduler bundle to point to this TreeEntry.
2560 ScheduleData *BundleMember = *Bundle;
2561 assert((BundleMember || isa<PHINode>(S.MainOp) ||
2562 isVectorLikeInstWithConstOps(S.MainOp) ||
2563 doesNotNeedToSchedule(VL)) &&
2564 "Bundle and VL out of sync");
2565 if (BundleMember) {
2566 for (Value *V : VL) {
2567 if (doesNotNeedToBeScheduled(V))
2568 continue;
2569 assert(BundleMember && "Unexpected end of bundle.");
2570 BundleMember->TE = Last;
2571 BundleMember = BundleMember->NextInBundle;
2572 }
2573 }
2574 assert(!BundleMember && "Bundle and VL out of sync");
2575 } else {
2576 MustGather.insert(VL.begin(), VL.end());
2577 }
2578
2579 if (UserTreeIdx.UserTE)
2580 Last->UserTreeIndices.push_back(UserTreeIdx);
2581
2582 return Last;
2583 }
2584
2585 /// -- Vectorization State --
2586 /// Holds all of the tree entries.
2587 TreeEntry::VecTreeTy VectorizableTree;
2588
2589 #ifndef NDEBUG
2590 /// Debug printer.
2591 LLVM_DUMP_METHOD void dumpVectorizableTree() const {
2592 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) {
2593 VectorizableTree[Id]->dump();
2594 dbgs() << "\n";
2595 }
2596 }
2597 #endif
2598
2599 TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); }
2600
2601 const TreeEntry *getTreeEntry(Value *V) const {
2602 return ScalarToTreeEntry.lookup(V);
2603 }
2604
2605 /// Maps a specific scalar to its tree entry.
2606 SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry;
2607
2608 /// Maps a value to the proposed vectorizable size.
2609 SmallDenseMap<Value *, unsigned> InstrElementSize;
2610
2611 /// A list of scalars that we found that we need to keep as scalars.
2612 ValueSet MustGather;
2613
2614 /// This POD struct describes one external user in the vectorized tree.
2615 struct ExternalUser {
2616 ExternalUser(Value *S, llvm::User *U, int L)
2617 : Scalar(S), User(U), Lane(L) {}
2618
2619 // Which scalar in our function.
2620 Value *Scalar;
2621
2622 // Which user that uses the scalar.
2623 llvm::User *User;
2624
2625 // Which lane does the scalar belong to.
2626 int Lane;
2627 };
2628 using UserList = SmallVector<ExternalUser, 16>;
2629
2630 /// Checks if two instructions may access the same memory.
2631 ///
2632 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
2633 /// is invariant in the calling loop.
2634 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
2635 Instruction *Inst2) {
2636 // First check if the result is already in the cache.
2637 AliasCacheKey key = std::make_pair(Inst1, Inst2);
2638 Optional<bool> &result = AliasCache[key];
2639 if (result) {
2640 return result.value();
2641 }
2642 bool aliased = true;
2643 if (Loc1.Ptr && isSimple(Inst1))
2644 aliased = isModOrRefSet(BatchAA.getModRefInfo(Inst2, Loc1));
2645 // Store the result in the cache.
2646 result = aliased;
2647 return aliased;
2648 }
2649
2650 using AliasCacheKey = std::pair<Instruction *, Instruction *>;
2651
2652 /// Cache for alias results.
2653 /// TODO: consider moving this to the AliasAnalysis itself.
2654 DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
2655
2656 // Cache for pointerMayBeCaptured calls inside AA. This is preserved
2657 // globally through SLP because we don't perform any action which
2658 // invalidates capture results.
2659 BatchAAResults BatchAA;
2660
2661 /// Temporary store for deleted instructions. Instructions will be deleted
2662 /// eventually when the BoUpSLP is destructed. The deferral is required to
2663 /// ensure that there are no incorrect collisions in the AliasCache, which
2664 /// can happen if a new instruction is allocated at the same address as a
2665 /// previously deleted instruction.
2666 DenseSet<Instruction *> DeletedInstructions;
2667
2668 /// Set of the instructions already analyzed for reductions.
2669 SmallPtrSet<Instruction *, 16> AnalyzedReductionsRoots;
2670
2671 /// Set of hashes for the list of reduction values already being analyzed.
2672 DenseSet<size_t> AnalyzedReductionVals;
2673
2674 /// A list of values that need to be extracted out of the tree.
2675 /// This list holds pairs of (Internal Scalar : External User). External User
2676 /// can be nullptr, which means that this Internal Scalar will be used later,
2677 /// after vectorization.
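/// Illustrative example (hypothetical values, not taken from this file): if
/// %s = add i32 %a, %b ends up in lane 2 of a vectorized tree node and
/// %u = mul i32 %s, 3 is a scalar user outside the tree, the pair
/// (Scalar = %s, User = %u, Lane = 2) would be recorded here so lane 2 can be
/// extracted after vectorization.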
2678 UserList ExternalUses;
2679
2680 /// Values used only by @llvm.assume calls.
2681 SmallPtrSet<const Value *, 32> EphValues;
2682
2683 /// Holds all of the instructions that we gathered.
2684 SetVector<Instruction *> GatherShuffleSeq;
2685
2686 /// A list of blocks that we are going to CSE.
2687 SetVector<BasicBlock *> CSEBlocks;
2688
2689 /// Contains all scheduling relevant data for an instruction.
2690 /// A ScheduleData either represents a single instruction or a member of an
2691 /// instruction bundle (= a group of instructions which is combined into a
2692 /// vector instruction).
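/// Illustrative example (hypothetical bundle): for a bundle of four stores
/// S0..S3, four ScheduleData nodes are created; S0 is the scheduling entity
/// (FirstInBundle of all four points to S0) and NextInBundle forms the chain
/// S0 -> S1 -> S2 -> S3 -> null.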
2693 struct ScheduleData {
2694 // The initial value for the dependency counters. It means that the
2695 // dependencies are not calculated yet.
2696 enum { InvalidDeps = -1 };
2697
2698 ScheduleData() = default;
2699
2700    void init(int BlockSchedulingRegionID, Value *OpVal) {
2701 FirstInBundle = this;
2702 NextInBundle = nullptr;
2703 NextLoadStore = nullptr;
2704 IsScheduled = false;
2705 SchedulingRegionID = BlockSchedulingRegionID;
2706 clearDependencies();
2707 OpValue = OpVal;
2708 TE = nullptr;
2709 }
2710
2711 /// Verify basic self consistency properties
2712    void verify() {
2713 if (hasValidDependencies()) {
2714 assert(UnscheduledDeps <= Dependencies && "invariant");
2715 } else {
2716 assert(UnscheduledDeps == Dependencies && "invariant");
2717 }
2718
2719 if (IsScheduled) {
2720 assert(isSchedulingEntity() &&
2721 "unexpected scheduled state");
2722 for (const ScheduleData *BundleMember = this; BundleMember;
2723 BundleMember = BundleMember->NextInBundle) {
2724 assert(BundleMember->hasValidDependencies() &&
2725 BundleMember->UnscheduledDeps == 0 &&
2726 "unexpected scheduled state");
2727 assert((BundleMember == this || !BundleMember->IsScheduled) &&
2728 "only bundle is marked scheduled");
2729 }
2730 }
2731
2732 assert(Inst->getParent() == FirstInBundle->Inst->getParent() &&
2733 "all bundle members must be in same basic block");
2734 }
2735
2736 /// Returns true if the dependency information has been calculated.
2737    /// Note that dependency validity can vary between instructions within
2738 /// a single bundle.
2739    bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
2740
2741 /// Returns true for single instructions and for bundle representatives
2742 /// (= the head of a bundle).
2743    bool isSchedulingEntity() const { return FirstInBundle == this; }
2744
2745 /// Returns true if it represents an instruction bundle and not only a
2746 /// single instruction.
2747    bool isPartOfBundle() const {
2748 return NextInBundle != nullptr || FirstInBundle != this || TE;
2749 }
2750
2751 /// Returns true if it is ready for scheduling, i.e. it has no more
2752 /// unscheduled depending instructions/bundles.
2753    bool isReady() const {
2754 assert(isSchedulingEntity() &&
2755 "can't consider non-scheduling entity for ready list");
2756 return unscheduledDepsInBundle() == 0 && !IsScheduled;
2757 }
2758
2759 /// Modifies the number of unscheduled dependencies for this instruction,
2760 /// and returns the number of remaining dependencies for the containing
2761 /// bundle.
2762    int incrementUnscheduledDeps(int Incr) {
2763 assert(hasValidDependencies() &&
2764 "increment of unscheduled deps would be meaningless");
2765 UnscheduledDeps += Incr;
2766 return FirstInBundle->unscheduledDepsInBundle();
2767 }
2768
2769 /// Sets the number of unscheduled dependencies to the number of
2770 /// dependencies.
2771    void resetUnscheduledDeps() {
2772 UnscheduledDeps = Dependencies;
2773 }
2774
2775 /// Clears all dependency information.
2776    void clearDependencies() {
2777 Dependencies = InvalidDeps;
2778 resetUnscheduledDeps();
2779 MemoryDependencies.clear();
2780 ControlDependencies.clear();
2781 }
2782
2783    int unscheduledDepsInBundle() const {
2784 assert(isSchedulingEntity() && "only meaningful on the bundle");
2785 int Sum = 0;
2786 for (const ScheduleData *BundleMember = this; BundleMember;
2787 BundleMember = BundleMember->NextInBundle) {
2788 if (BundleMember->UnscheduledDeps == InvalidDeps)
2789 return InvalidDeps;
2790 Sum += BundleMember->UnscheduledDeps;
2791 }
2792 return Sum;
2793 }
2794
2795    void dump(raw_ostream &os) const {
2796 if (!isSchedulingEntity()) {
2797 os << "/ " << *Inst;
2798 } else if (NextInBundle) {
2799 os << '[' << *Inst;
2800 ScheduleData *SD = NextInBundle;
2801 while (SD) {
2802 os << ';' << *SD->Inst;
2803 SD = SD->NextInBundle;
2804 }
2805 os << ']';
2806 } else {
2807 os << *Inst;
2808 }
2809 }
2810
2811 Instruction *Inst = nullptr;
2812
2813 /// Opcode of the current instruction in the schedule data.
2814 Value *OpValue = nullptr;
2815
2816 /// The TreeEntry that this instruction corresponds to.
2817 TreeEntry *TE = nullptr;
2818
2819 /// Points to the head in an instruction bundle (and always to this for
2820 /// single instructions).
2821 ScheduleData *FirstInBundle = nullptr;
2822
2823 /// Single linked list of all instructions in a bundle. Null if it is a
2824 /// single instruction.
2825 ScheduleData *NextInBundle = nullptr;
2826
2827 /// Single linked list of all memory instructions (e.g. load, store, call)
2828 /// in the block - until the end of the scheduling region.
2829 ScheduleData *NextLoadStore = nullptr;
2830
2831 /// The dependent memory instructions.
2832 /// This list is derived on demand in calculateDependencies().
2833 SmallVector<ScheduleData *, 4> MemoryDependencies;
2834
2835 /// List of instructions which this instruction could be control dependent
2836 /// on. Allowing such nodes to be scheduled below this one could introduce
2837 /// a runtime fault which didn't exist in the original program.
2838 /// ex: this is a load or udiv following a readonly call which inf loops
2839 SmallVector<ScheduleData *, 4> ControlDependencies;
2840
2841 /// This ScheduleData is in the current scheduling region if this matches
2842 /// the current SchedulingRegionID of BlockScheduling.
2843 int SchedulingRegionID = 0;
2844
2845 /// Used for getting a "good" final ordering of instructions.
2846 int SchedulingPriority = 0;
2847
2848    /// The number of dependencies. It consists of the number of users of the
2849    /// instruction plus the number of dependent memory instructions (if any).
2850 /// This value is calculated on demand.
2851 /// If InvalidDeps, the number of dependencies is not calculated yet.
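    /// Illustrative example (hypothetical counts): an instruction with two
    /// in-region users and one dependent memory instruction would end up with
    /// Dependencies == 3 once calculateDependencies() has run.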
2852 int Dependencies = InvalidDeps;
2853
2854 /// The number of dependencies minus the number of dependencies of scheduled
2855 /// instructions. As soon as this is zero, the instruction/bundle gets ready
2856 /// for scheduling.
2857 /// Note that this is negative as long as Dependencies is not calculated.
2858 int UnscheduledDeps = InvalidDeps;
2859
2860 /// True if this instruction is scheduled (or considered as scheduled in the
2861 /// dry-run).
2862 bool IsScheduled = false;
2863 };
2864
2865 #ifndef NDEBUG
2866  friend inline raw_ostream &operator<<(raw_ostream &os,
2867 const BoUpSLP::ScheduleData &SD) {
2868 SD.dump(os);
2869 return os;
2870 }
2871 #endif
2872
2873 friend struct GraphTraits<BoUpSLP *>;
2874 friend struct DOTGraphTraits<BoUpSLP *>;
2875
2876 /// Contains all scheduling data for a basic block.
2877 /// It does not schedule instructions that are not memory read/write
2878 /// instructions and whose operands are all constants, arguments, phis, or
2879 /// instructions from other blocks, or whose users are phis or in other
2880 /// blocks. The resulting vector instructions can be placed at the
2881 /// beginning of the basic block without scheduling (if operands do not need
2882 /// to be scheduled) or at the end of the block (if users are outside of the
2883 /// block). This saves some compile time and memory used by the
2884 /// compiler.
2885 /// ScheduleData is assigned to each instruction between the boundaries of
2886 /// the tree entry, even to those that are not part of the graph. This is
2887 /// required to correctly follow the dependencies between the instructions
2888 /// and to schedule them correctly. ScheduleData is not allocated for
2889 /// instructions that do not require scheduling, like phis, nodes containing
2890 /// only extractelements/insertelements, or nodes whose instructions have
2891 /// uses/operands outside of the block.
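/// Illustrative example (hypothetical region): when vectorizing four
/// consecutive stores, the scheduling region spans from the first to the last
/// of those stores; every instruction in between gets a ScheduleData node so
/// that def-use, memory and control dependencies can be tracked, while phis
/// and out-of-block operands need no scheduling.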
2892 struct BlockScheduling {
2893    BlockScheduling(BasicBlock *BB)
2894        : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}
2895
2896    void clear() {
2897 ReadyInsts.clear();
2898 ScheduleStart = nullptr;
2899 ScheduleEnd = nullptr;
2900 FirstLoadStoreInRegion = nullptr;
2901 LastLoadStoreInRegion = nullptr;
2902 RegionHasStackSave = false;
2903
2904 // Reduce the maximum schedule region size by the size of the
2905 // previous scheduling run.
2906 ScheduleRegionSizeLimit -= ScheduleRegionSize;
2907 if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
2908 ScheduleRegionSizeLimit = MinScheduleRegionSize;
2909 ScheduleRegionSize = 0;
2910
2911 // Make a new scheduling region, i.e. all existing ScheduleData is not
2912 // in the new region yet.
2913 ++SchedulingRegionID;
2914 }
2915
2916    ScheduleData *getScheduleData(Instruction *I) {
2917 if (BB != I->getParent())
2918 // Avoid lookup if can't possibly be in map.
2919 return nullptr;
2920 ScheduleData *SD = ScheduleDataMap.lookup(I);
2921 if (SD && isInSchedulingRegion(SD))
2922 return SD;
2923 return nullptr;
2924 }
2925
2926    ScheduleData *getScheduleData(Value *V) {
2927 if (auto *I = dyn_cast<Instruction>(V))
2928 return getScheduleData(I);
2929 return nullptr;
2930 }
2931
2932    ScheduleData *getScheduleData(Value *V, Value *Key) {
2933 if (V == Key)
2934 return getScheduleData(V);
2935 auto I = ExtraScheduleDataMap.find(V);
2936 if (I != ExtraScheduleDataMap.end()) {
2937 ScheduleData *SD = I->second.lookup(Key);
2938 if (SD && isInSchedulingRegion(SD))
2939 return SD;
2940 }
2941 return nullptr;
2942 }
2943
2944    bool isInSchedulingRegion(ScheduleData *SD) const {
2945 return SD->SchedulingRegionID == SchedulingRegionID;
2946 }
2947
2948 /// Marks an instruction as scheduled and puts all dependent ready
2949 /// instructions into the ready-list.
2950 template <typename ReadyListType>
2951    void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
2952 SD->IsScheduled = true;
2953 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");
2954
2955 for (ScheduleData *BundleMember = SD; BundleMember;
2956 BundleMember = BundleMember->NextInBundle) {
2957 if (BundleMember->Inst != BundleMember->OpValue)
2958 continue;
2959
2960 // Handle the def-use chain dependencies.
2961
2962 // Decrement the unscheduled counter and insert to ready list if ready.
2963 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) {
2964 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
2965 if (OpDef && OpDef->hasValidDependencies() &&
2966 OpDef->incrementUnscheduledDeps(-1) == 0) {
2967 // There are no more unscheduled dependencies after
2968 // decrementing, so we can put the dependent instruction
2969 // into the ready list.
2970 ScheduleData *DepBundle = OpDef->FirstInBundle;
2971 assert(!DepBundle->IsScheduled &&
2972 "already scheduled bundle gets ready");
2973 ReadyList.insert(DepBundle);
2974 LLVM_DEBUG(dbgs()
2975 << "SLP: gets ready (def): " << *DepBundle << "\n");
2976 }
2977 });
2978 };
2979
2980 // If BundleMember is a vector bundle, its operands may have been
2981 // reordered during buildTree(). We therefore need to get its operands
2982 // through the TreeEntry.
2983 if (TreeEntry *TE = BundleMember->TE) {
2984 // Need to search for the lane since the tree entry can be reordered.
2985 int Lane = std::distance(TE->Scalars.begin(),
2986 find(TE->Scalars, BundleMember->Inst));
2987 assert(Lane >= 0 && "Lane not set");
2988
2989 // Since vectorization tree is being built recursively this assertion
2990 // ensures that the tree entry has all operands set before reaching
2991 // this code. Couple of exceptions known at the moment are extracts
2992 // where their second (immediate) operand is not added. Since
2993 // immediates do not affect scheduler behavior this is considered
2994 // okay.
2995 auto *In = BundleMember->Inst;
2996 assert(In &&
2997 (isa<ExtractValueInst>(In) || isa<ExtractElementInst>(In) ||
2998 In->getNumOperands() == TE->getNumOperands()) &&
2999 "Missed TreeEntry operands?");
3000 (void)In; // fake use to avoid build failure when assertions disabled
3001
3002 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands();
3003 OpIdx != NumOperands; ++OpIdx)
3004 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane]))
3005 DecrUnsched(I);
3006 } else {
3007 // If BundleMember is a stand-alone instruction, no operand reordering
3008 // has taken place, so we directly access its operands.
3009 for (Use &U : BundleMember->Inst->operands())
3010 if (auto *I = dyn_cast<Instruction>(U.get()))
3011 DecrUnsched(I);
3012 }
3013 // Handle the memory dependencies.
3014 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
3015 if (MemoryDepSD->hasValidDependencies() &&
3016 MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
3017 // There are no more unscheduled dependencies after decrementing,
3018 // so we can put the dependent instruction into the ready list.
3019 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
3020 assert(!DepBundle->IsScheduled &&
3021 "already scheduled bundle gets ready");
3022 ReadyList.insert(DepBundle);
3023 LLVM_DEBUG(dbgs()
3024 << "SLP: gets ready (mem): " << *DepBundle << "\n");
3025 }
3026 }
3027 // Handle the control dependencies.
3028 for (ScheduleData *DepSD : BundleMember->ControlDependencies) {
3029 if (DepSD->incrementUnscheduledDeps(-1) == 0) {
3030 // There are no more unscheduled dependencies after decrementing,
3031 // so we can put the dependent instruction into the ready list.
3032 ScheduleData *DepBundle = DepSD->FirstInBundle;
3033 assert(!DepBundle->IsScheduled &&
3034 "already scheduled bundle gets ready");
3035 ReadyList.insert(DepBundle);
3036 LLVM_DEBUG(dbgs()
3037 << "SLP: gets ready (ctl): " << *DepBundle << "\n");
3038 }
3039 }
3040
3041 }
3042 }
3043
3044 /// Verify basic self consistency properties of the data structure.
3045    void verify() {
3046 if (!ScheduleStart)
3047 return;
3048
3049 assert(ScheduleStart->getParent() == ScheduleEnd->getParent() &&
3050 ScheduleStart->comesBefore(ScheduleEnd) &&
3051 "Not a valid scheduling region?");
3052
3053 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
3054 auto *SD = getScheduleData(I);
3055 if (!SD)
3056 continue;
3057 assert(isInSchedulingRegion(SD) &&
3058 "primary schedule data not in window?");
3059 assert(isInSchedulingRegion(SD->FirstInBundle) &&
3060 "entire bundle in window!");
3061 (void)SD;
3062 doForAllOpcodes(I, [](ScheduleData *SD) { SD->verify(); });
3063 }
3064
3065 for (auto *SD : ReadyInsts) {
3066 assert(SD->isSchedulingEntity() && SD->isReady() &&
3067 "item in ready list not ready?");
3068 (void)SD;
3069 }
3070 }
3071
3072    void doForAllOpcodes(Value *V,
3073 function_ref<void(ScheduleData *SD)> Action) {
3074 if (ScheduleData *SD = getScheduleData(V))
3075 Action(SD);
3076 auto I = ExtraScheduleDataMap.find(V);
3077 if (I != ExtraScheduleDataMap.end())
3078 for (auto &P : I->second)
3079 if (isInSchedulingRegion(P.second))
3080 Action(P.second);
3081 }
3082
3083 /// Put all instructions into the ReadyList which are ready for scheduling.
3084 template <typename ReadyListType>
3085    void initialFillReadyList(ReadyListType &ReadyList) {
3086 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
3087 doForAllOpcodes(I, [&](ScheduleData *SD) {
3088 if (SD->isSchedulingEntity() && SD->hasValidDependencies() &&
3089 SD->isReady()) {
3090 ReadyList.insert(SD);
3091 LLVM_DEBUG(dbgs()
3092 << "SLP: initially in ready list: " << *SD << "\n");
3093 }
3094 });
3095 }
3096 }
3097
3098 /// Build a bundle from the ScheduleData nodes corresponding to the
3099 /// scalar instruction for each lane.
3100 ScheduleData *buildBundle(ArrayRef<Value *> VL);
3101
3102 /// Checks if a bundle of instructions can be scheduled, i.e. has no
3103 /// cyclic dependencies. This is only a dry-run, no instructions are
3104 /// actually moved at this stage.
3105 /// \returns the scheduling bundle. The returned Optional value is non-None
3106 /// if \p VL is allowed to be scheduled.
3107 Optional<ScheduleData *>
3108 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
3109 const InstructionsState &S);
3110
3111 /// Un-bundles a group of instructions.
3112 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);
3113
3114 /// Allocates schedule data chunk.
3115 ScheduleData *allocateScheduleDataChunks();
3116
3117 /// Extends the scheduling region so that V is inside the region.
3118 /// \returns true if the region size is within the limit.
3119 bool extendSchedulingRegion(Value *V, const InstructionsState &S);
3120
3121 /// Initialize the ScheduleData structures for new instructions in the
3122 /// scheduling region.
3123 void initScheduleData(Instruction *FromI, Instruction *ToI,
3124 ScheduleData *PrevLoadStore,
3125 ScheduleData *NextLoadStore);
3126
3127 /// Updates the dependency information of a bundle and of all instructions/
3128 /// bundles which depend on the original bundle.
3129 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
3130 BoUpSLP *SLP);
3131
3132    /// Sets all instructions in the scheduling region to un-scheduled.
3133 void resetSchedule();
3134
3135 BasicBlock *BB;
3136
3137 /// Simple memory allocation for ScheduleData.
3138 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
3139
3140 /// The size of a ScheduleData array in ScheduleDataChunks.
3141 int ChunkSize;
3142
3143 /// The allocator position in the current chunk, which is the last entry
3144 /// of ScheduleDataChunks.
3145 int ChunkPos;
3146
3147 /// Attaches ScheduleData to Instruction.
3148 /// Note that the mapping survives during all vectorization iterations, i.e.
3149 /// ScheduleData structures are recycled.
3150 DenseMap<Instruction *, ScheduleData *> ScheduleDataMap;
3151
3152 /// Attaches ScheduleData to Instruction with the leading key.
3153 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>>
3154 ExtraScheduleDataMap;
3155
3156 /// The ready-list for scheduling (only used for the dry-run).
3157 SetVector<ScheduleData *> ReadyInsts;
3158
3159 /// The first instruction of the scheduling region.
3160 Instruction *ScheduleStart = nullptr;
3161
3162 /// The first instruction _after_ the scheduling region.
3163 Instruction *ScheduleEnd = nullptr;
3164
3165 /// The first memory accessing instruction in the scheduling region
3166 /// (can be null).
3167 ScheduleData *FirstLoadStoreInRegion = nullptr;
3168
3169 /// The last memory accessing instruction in the scheduling region
3170 /// (can be null).
3171 ScheduleData *LastLoadStoreInRegion = nullptr;
3172
3173 /// Is there an llvm.stacksave or llvm.stackrestore in the scheduling
3174 /// region? Used to optimize the dependence calculation for the
3175 /// common case where there isn't.
3176 bool RegionHasStackSave = false;
3177
3178 /// The current size of the scheduling region.
3179 int ScheduleRegionSize = 0;
3180
3181 /// The maximum size allowed for the scheduling region.
3182 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget;
3183
3184 /// The ID of the scheduling region. For a new vectorization iteration this
3185 /// is incremented which "removes" all ScheduleData from the region.
3186 /// Make sure that the initial SchedulingRegionID is greater than the
3187 /// initial SchedulingRegionID in ScheduleData (which is 0).
3188 int SchedulingRegionID = 1;
3189 };
3190
3191 /// Attaches the BlockScheduling structures to basic blocks.
3192 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
3193
3194 /// Performs the "real" scheduling. Done before vectorization is actually
3195 /// performed in a basic block.
3196 void scheduleBlock(BlockScheduling *BS);
3197
3198 /// List of users to ignore during scheduling and that don't need extracting.
3199 const SmallDenseSet<Value *> *UserIgnoreList = nullptr;
3200
3201 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of
3202 /// sorted SmallVectors of unsigned.
3203 struct OrdersTypeDenseMapInfo {
3204    static OrdersType getEmptyKey() {
3205 OrdersType V;
3206 V.push_back(~1U);
3207 return V;
3208 }
3209
3210    static OrdersType getTombstoneKey() {
3211 OrdersType V;
3212 V.push_back(~2U);
3213 return V;
3214 }
3215
3216    static unsigned getHashValue(const OrdersType &V) {
3217 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
3218 }
3219
3220    static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) {
3221 return LHS == RHS;
3222 }
3223 };
3224
3225 // Analysis and block reference.
3226 Function *F;
3227 ScalarEvolution *SE;
3228 TargetTransformInfo *TTI;
3229 TargetLibraryInfo *TLI;
3230 LoopInfo *LI;
3231 DominatorTree *DT;
3232 AssumptionCache *AC;
3233 DemandedBits *DB;
3234 const DataLayout *DL;
3235 OptimizationRemarkEmitter *ORE;
3236
3237 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
3238 unsigned MinVecRegSize; // Set by cl::opt (default: 128).
3239
3240 /// Instruction builder to construct the vectorized tree.
3241 IRBuilder<> Builder;
3242
3243 /// A map of scalar integer values to the smallest bit width with which they
3244 /// can legally be represented. The values map to (width, signed) pairs,
3245 /// where "width" indicates the minimum bit width and "signed" is True if the
3246 /// value must be signed-extended, rather than zero-extended, back to its
3247 /// original width.
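  /// Illustrative example (hypothetical entry): a scalar computed as i32 whose
  /// demanded bits fit into 8 bits and which is only ever zero-extended back
  /// would map to (8, false); if it must be sign-extended instead, to (8, true).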
3248 MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
3249 };
3250
3251 } // end namespace slpvectorizer
3252
3253 template <> struct GraphTraits<BoUpSLP *> {
3254 using TreeEntry = BoUpSLP::TreeEntry;
3255
3256 /// NodeRef has to be a pointer per the GraphWriter.
3257 using NodeRef = TreeEntry *;
3258
3259 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy;
3260
3261 /// Add the VectorizableTree to the index iterator to be able to return
3262 /// TreeEntry pointers.
3263 struct ChildIteratorType
3264 : public iterator_adaptor_base<
3265 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> {
3266 ContainerTy &VectorizableTree;
3267
3268    ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W,
3269 ContainerTy &VT)
3270 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}
3271
3272    NodeRef operator*() { return I->UserTE; }
3273  };
3274
3275  static NodeRef getEntryNode(BoUpSLP &R) {
3276 return R.VectorizableTree[0].get();
3277 }
3278
3279  static ChildIteratorType child_begin(NodeRef N) {
3280    return {N->UserTreeIndices.begin(), N->Container};
3281  }
3282
3283  static ChildIteratorType child_end(NodeRef N) {
3284 return {N->UserTreeIndices.end(), N->Container};
3285 }
3286
3287 /// For the node iterator we just need to turn the TreeEntry iterator into a
3288 /// TreeEntry* iterator so that it dereferences to NodeRef.
3289 class nodes_iterator {
3290 using ItTy = ContainerTy::iterator;
3291 ItTy It;
3292
3293 public:
3294    nodes_iterator(const ItTy &It2) : It(It2) {}
3295    NodeRef operator*() { return It->get(); }
3296    nodes_iterator operator++() {
3297      ++It;
3298      return *this;
3299    }
3300    bool operator!=(const nodes_iterator &N2) const { return N2.It != It; }
3301 };
3302
3303  static nodes_iterator nodes_begin(BoUpSLP *R) {
3304    return nodes_iterator(R->VectorizableTree.begin());
3305  }
3306
3307  static nodes_iterator nodes_end(BoUpSLP *R) {
3308    return nodes_iterator(R->VectorizableTree.end());
3309  }
3310
3311  static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
3312 };
3313
3314 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
3315 using TreeEntry = BoUpSLP::TreeEntry;
3316
3317  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
3318
3319  std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
3320 std::string Str;
3321 raw_string_ostream OS(Str);
3322 if (isSplat(Entry->Scalars))
3323 OS << "<splat> ";
3324 for (auto V : Entry->Scalars) {
3325 OS << *V;
3326 if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) {
3327 return EU.Scalar == V;
3328 }))
3329 OS << " <extract>";
3330 OS << "\n";
3331 }
3332 return Str;
3333 }
3334
3335  static std::string getNodeAttributes(const TreeEntry *Entry,
3336 const BoUpSLP *) {
3337 if (Entry->State == TreeEntry::NeedToGather)
3338 return "color=red";
3339 return "";
3340 }
3341 };
3342
3343 } // end namespace llvm
3344
3345 BoUpSLP::~BoUpSLP() {
3346 SmallVector<WeakTrackingVH> DeadInsts;
3347 for (auto *I : DeletedInstructions) {
3348 for (Use &U : I->operands()) {
3349 auto *Op = dyn_cast<Instruction>(U.get());
3350 if (Op && !DeletedInstructions.count(Op) && Op->hasOneUser() &&
3351 wouldInstructionBeTriviallyDead(Op, TLI))
3352 DeadInsts.emplace_back(Op);
3353 }
3354 I->dropAllReferences();
3355 }
3356 for (auto *I : DeletedInstructions) {
3357 assert(I->use_empty() &&
3358 "trying to erase instruction with users.");
3359 I->eraseFromParent();
3360 }
3361
3362 // Cleanup any dead scalar code feeding the vectorized instructions
3363 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI);
3364
3365 #ifdef EXPENSIVE_CHECKS
3366 // If we could guarantee that this call is not extremely slow, we could
3367 // remove the ifdef limitation (see PR47712).
3368 assert(!verifyFunction(*F, &dbgs()));
3369 #endif
3370 }
3371
3372 /// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses
3373 /// contains the original mask for the scalars reused in the node. The
3374 /// procedure transforms this mask in accordance with the given \p Mask.
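/// Illustrative example (hypothetical values): with Reuses = {0, 1, 1, 0} and
/// Mask = {1, 0, 3, 2}, each old element Reuses[I] moves to position Mask[I],
/// giving Reuses = {1, 0, 0, 1}.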
3375 static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) {
3376 assert(!Mask.empty() && Reuses.size() == Mask.size() &&
3377 "Expected non-empty mask.");
3378 SmallVector<int> Prev(Reuses.begin(), Reuses.end());
3379 Prev.swap(Reuses);
3380 for (unsigned I = 0, E = Prev.size(); I < E; ++I)
3381 if (Mask[I] != UndefMaskElem)
3382 Reuses[Mask[I]] = Prev[I];
3383 }
3384
3385 /// Reorders the given \p Order according to the given \p Mask. \p Order is
3386 /// the original order of the scalars. The procedure transforms the provided
3387 /// order in accordance with the given \p Mask. If the resulting \p Order is just an
3388 /// identity order, \p Order is cleared.
3389 static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) {
3390 assert(!Mask.empty() && "Expected non-empty mask.");
3391 SmallVector<int> MaskOrder;
3392 if (Order.empty()) {
3393 MaskOrder.resize(Mask.size());
3394 std::iota(MaskOrder.begin(), MaskOrder.end(), 0);
3395 } else {
3396 inversePermutation(Order, MaskOrder);
3397 }
3398 reorderReuses(MaskOrder, Mask);
3399 if (ShuffleVectorInst::isIdentityMask(MaskOrder)) {
3400 Order.clear();
3401 return;
3402 }
3403 Order.assign(Mask.size(), Mask.size());
3404 for (unsigned I = 0, E = Mask.size(); I < E; ++I)
3405 if (MaskOrder[I] != UndefMaskElem)
3406 Order[MaskOrder[I]] = I;
3407 fixupOrderingIndices(Order);
3408 }
3409
3410 Optional<BoUpSLP::OrdersType>
3411 BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) {
3412 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only.");
3413 unsigned NumScalars = TE.Scalars.size();
3414 OrdersType CurrentOrder(NumScalars, NumScalars);
3415 SmallVector<int> Positions;
3416 SmallBitVector UsedPositions(NumScalars);
3417 const TreeEntry *STE = nullptr;
3418   // Try to find all gathered scalars that get vectorized in another
3419   // vectorized node. Here we can have only a single vectorized tree node to
3420   // correctly identify the order of the gathered scalars.
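  // Illustrative example (hypothetical scalars): gathering {%a, %c, %b} while
  // a single vectorized node holds {%a, %b, %c} yields CurrentOrder = {0, 2, 1}:
  // lane 1 of the vector node supplies element 2 of the gather and lane 2
  // supplies element 1.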
3421 for (unsigned I = 0; I < NumScalars; ++I) {
3422 Value *V = TE.Scalars[I];
3423 if (!isa<LoadInst, ExtractElementInst, ExtractValueInst>(V))
3424 continue;
3425 if (const auto *LocalSTE = getTreeEntry(V)) {
3426 if (!STE)
3427 STE = LocalSTE;
3428 else if (STE != LocalSTE)
3429 // Take the order only from the single vector node.
3430 return None;
3431 unsigned Lane =
3432 std::distance(STE->Scalars.begin(), find(STE->Scalars, V));
3433 if (Lane >= NumScalars)
3434 return None;
3435 if (CurrentOrder[Lane] != NumScalars) {
3436 if (Lane != I)
3437 continue;
3438 UsedPositions.reset(CurrentOrder[Lane]);
3439 }
3440 // The partial identity (where only some elements of the gather node are
3441 // in the identity order) is good.
3442 CurrentOrder[Lane] = I;
3443 UsedPositions.set(I);
3444 }
3445 }
3446 // Need to keep the order if we have a vector entry and at least 2 scalars or
3447 // the vectorized entry has just 2 scalars.
3448 if (STE && (UsedPositions.count() > 1 || STE->Scalars.size() == 2)) {
3449 auto &&IsIdentityOrder = [NumScalars](ArrayRef<unsigned> CurrentOrder) {
3450 for (unsigned I = 0; I < NumScalars; ++I)
3451 if (CurrentOrder[I] != I && CurrentOrder[I] != NumScalars)
3452 return false;
3453 return true;
3454 };
3455 if (IsIdentityOrder(CurrentOrder)) {
3456 CurrentOrder.clear();
3457 return CurrentOrder;
3458 }
3459 auto *It = CurrentOrder.begin();
3460 for (unsigned I = 0; I < NumScalars;) {
3461 if (UsedPositions.test(I)) {
3462 ++I;
3463 continue;
3464 }
3465 if (*It == NumScalars) {
3466 *It = I;
3467 ++I;
3468 }
3469 ++It;
3470 }
3471 return CurrentOrder;
3472 }
3473 return None;
3474 }
3475
3476 namespace {
3477 /// Tracks the state we can represent the loads in the given sequence.
3478 enum class LoadsState { Gather, Vectorize, ScatterVectorize };
3479 } // anonymous namespace
3480
3481 /// Checks if the given array of loads can be represented as a vectorized,
3482 /// scatter or just simple gather.
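/// Rough illustrative summary of the checks below: loads whose sorted pointer
/// operands turn out to be consecutive are classified as Vectorize; loads off
/// a common base whose pointers are not consecutive may become
/// ScatterVectorize when a masked gather is legal for the target; everything
/// else is Gather.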
3483 static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0,
3484 const TargetTransformInfo &TTI,
3485 const DataLayout &DL, ScalarEvolution &SE,
3486 LoopInfo &LI,
3487 SmallVectorImpl<unsigned> &Order,
3488 SmallVectorImpl<Value *> &PointerOps) {
3489 // Check that a vectorized load would load the same memory as a scalar
3490 // load. For example, we don't want to vectorize loads that are smaller
3491 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
3492 // treats loading/storing it as an i8 struct. If we vectorize loads/stores
3493 // from such a struct, we read/write packed bits disagreeing with the
3494 // unvectorized version.
3495 Type *ScalarTy = VL0->getType();
3496
3497 if (DL.getTypeSizeInBits(ScalarTy) != DL.getTypeAllocSizeInBits(ScalarTy))
3498 return LoadsState::Gather;
3499
3500 // Make sure all loads in the bundle are simple - we can't vectorize
3501 // atomic or volatile loads.
3502 PointerOps.clear();
3503 PointerOps.resize(VL.size());
3504 auto *POIter = PointerOps.begin();
3505 for (Value *V : VL) {
3506 auto *L = cast<LoadInst>(V);
3507 if (!L->isSimple())
3508 return LoadsState::Gather;
3509 *POIter = L->getPointerOperand();
3510 ++POIter;
3511 }
3512
3513 Order.clear();
3514 // Check the order of pointer operands or that all pointers are the same.
3515 bool IsSorted = sortPtrAccesses(PointerOps, ScalarTy, DL, SE, Order);
3516 if (IsSorted || all_of(PointerOps, [&PointerOps](Value *P) {
3517 if (getUnderlyingObject(P) != getUnderlyingObject(PointerOps.front()))
3518 return false;
3519 auto *GEP = dyn_cast<GetElementPtrInst>(P);
3520 if (!GEP)
3521 return false;
3522 auto *GEP0 = cast<GetElementPtrInst>(PointerOps.front());
3523 return GEP->getNumOperands() == 2 &&
3524 ((isConstant(GEP->getOperand(1)) &&
3525 isConstant(GEP0->getOperand(1))) ||
3526 getSameOpcode({GEP->getOperand(1), GEP0->getOperand(1)})
3527 .getOpcode());
3528 })) {
3529 if (IsSorted) {
3530 Value *Ptr0;
3531 Value *PtrN;
3532 if (Order.empty()) {
3533 Ptr0 = PointerOps.front();
3534 PtrN = PointerOps.back();
3535 } else {
3536 Ptr0 = PointerOps[Order.front()];
3537 PtrN = PointerOps[Order.back()];
3538 }
3539 Optional<int> Diff =
3540 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE);
3541 // Check that the sorted loads are consecutive.
3542 if (static_cast<unsigned>(*Diff) == VL.size() - 1)
3543 return LoadsState::Vectorize;
3544 }
3545 // TODO: need to improve analysis of the pointers, if not all of them are
3546 // GEPs or have > 2 operands, we end up with a gather node, which just
3547 // increases the cost.
3548 Loop *L = LI.getLoopFor(cast<LoadInst>(VL0)->getParent());
3549 bool ProfitableGatherPointers =
3550 static_cast<unsigned>(count_if(PointerOps, [L](Value *V) {
3551 return L && L->isLoopInvariant(V);
3552 })) <= VL.size() / 2 && VL.size() > 2;
3553 if (ProfitableGatherPointers || all_of(PointerOps, [IsSorted](Value *P) {
3554 auto *GEP = dyn_cast<GetElementPtrInst>(P);
3555 return (IsSorted && !GEP && doesNotNeedToBeScheduled(P)) ||
3556 (GEP && GEP->getNumOperands() == 2);
3557 })) {
3558 Align CommonAlignment = cast<LoadInst>(VL0)->getAlign();
3559 for (Value *V : VL)
3560 CommonAlignment =
3561 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign());
3562 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
3563 if (TTI.isLegalMaskedGather(VecTy, CommonAlignment) &&
3564 !TTI.forceScalarizeMaskedGather(VecTy, CommonAlignment))
3565 return LoadsState::ScatterVectorize;
3566 }
3567 }
3568
3569 return LoadsState::Gather;
3570 }
3571
3572 bool clusterSortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
3573 const DataLayout &DL, ScalarEvolution &SE,
3574 SmallVectorImpl<unsigned> &SortedIndices) {
3575 assert(llvm::all_of(
3576 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
3577 "Expected list of pointer operands.");
3578 // Map from bases to a vector of (Ptr, Offset, OrigIdx), which we insert each
3579 // Ptr into, sort and return the sorted indices with values next to one
3580 // another.
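  // Illustrative example (hypothetical pointers): for VL = {A, A+2, B, A+1}
  // the map becomes Bases[A] = {(A,0,0), (A+2,2,1), (A+1,1,3)} and
  // Bases[B] = {(B,0,2)}; sorting A's bucket by offset gives the original
  // indices {0, 3, 1}, so SortedIndices ends up as {0, 3, 1, 2}.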
3581 MapVector<Value *, SmallVector<std::tuple<Value *, int, unsigned>>> Bases;
3582 Bases[VL[0]].push_back(std::make_tuple(VL[0], 0U, 0U));
3583
3584 unsigned Cnt = 1;
3585 for (Value *Ptr : VL.drop_front()) {
3586 bool Found = any_of(Bases, [&](auto &Base) {
3587 Optional<int> Diff =
3588 getPointersDiff(ElemTy, Base.first, ElemTy, Ptr, DL, SE,
3589 /*StrictCheck=*/true);
3590 if (!Diff)
3591 return false;
3592
3593 Base.second.emplace_back(Ptr, *Diff, Cnt++);
3594 return true;
3595 });
3596
3597 if (!Found) {
3598 // If we haven't found enough to usefully cluster, return early.
3599 if (Bases.size() > VL.size() / 2 - 1)
3600 return false;
3601
3602 // Not found already - add a new Base
3603 Bases[Ptr].emplace_back(Ptr, 0, Cnt++);
3604 }
3605 }
3606
3607   // For each of the bases sort the pointers by Offset and check if any of the
3608   // bases contain pointers that become consecutive.
3609 bool AnyConsecutive = false;
3610 for (auto &Base : Bases) {
3611 auto &Vec = Base.second;
3612 if (Vec.size() > 1) {
3613 llvm::stable_sort(Vec, [](const std::tuple<Value *, int, unsigned> &X,
3614 const std::tuple<Value *, int, unsigned> &Y) {
3615 return std::get<1>(X) < std::get<1>(Y);
3616 });
3617 int InitialOffset = std::get<1>(Vec[0]);
3618 AnyConsecutive |= all_of(enumerate(Vec), [InitialOffset](auto &P) {
3619 return std::get<1>(P.value()) == int(P.index()) + InitialOffset;
3620 });
3621 }
3622 }
3623
3624 // Fill SortedIndices array only if it looks worth-while to sort the ptrs.
3625 SortedIndices.clear();
3626 if (!AnyConsecutive)
3627 return false;
3628
3629 for (auto &Base : Bases) {
3630 for (auto &T : Base.second)
3631 SortedIndices.push_back(std::get<2>(T));
3632 }
3633
3634 assert(SortedIndices.size() == VL.size() &&
3635 "Expected SortedIndices to be the size of VL");
3636 return true;
3637 }
3638
3639 Optional<BoUpSLP::OrdersType>
3640 BoUpSLP::findPartiallyOrderedLoads(const BoUpSLP::TreeEntry &TE) {
3641 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only.");
3642 Type *ScalarTy = TE.Scalars[0]->getType();
3643
3644 SmallVector<Value *> Ptrs;
3645 Ptrs.reserve(TE.Scalars.size());
3646 for (Value *V : TE.Scalars) {
3647 auto *L = dyn_cast<LoadInst>(V);
3648 if (!L || !L->isSimple())
3649 return None;
3650 Ptrs.push_back(L->getPointerOperand());
3651 }
3652
3653 BoUpSLP::OrdersType Order;
3654 if (clusterSortPtrAccesses(Ptrs, ScalarTy, *DL, *SE, Order))
3655 return Order;
3656 return None;
3657 }
3658
3659 Optional<BoUpSLP::OrdersType> BoUpSLP::getReorderingData(const TreeEntry &TE,
3660 bool TopToBottom) {
3661   // No need to reorder if we need to shuffle reuses; the node still needs to
3662   // be shuffled.
3663 if (!TE.ReuseShuffleIndices.empty())
3664 return None;
3665 if (TE.State == TreeEntry::Vectorize &&
3666 (isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE.getMainOp()) ||
3667 (TopToBottom && isa<StoreInst, InsertElementInst>(TE.getMainOp()))) &&
3668 !TE.isAltShuffle())
3669 return TE.ReorderIndices;
3670 if (TE.State == TreeEntry::NeedToGather) {
3671 // TODO: add analysis of other gather nodes with extractelement
3672 // instructions and other values/instructions, not only undefs.
3673 if (((TE.getOpcode() == Instruction::ExtractElement &&
3674 !TE.isAltShuffle()) ||
3675 (all_of(TE.Scalars,
3676 [](Value *V) {
3677 return isa<UndefValue, ExtractElementInst>(V);
3678 }) &&
3679 any_of(TE.Scalars,
3680 [](Value *V) { return isa<ExtractElementInst>(V); }))) &&
3681 all_of(TE.Scalars,
3682 [](Value *V) {
3683 auto *EE = dyn_cast<ExtractElementInst>(V);
3684 return !EE || isa<FixedVectorType>(EE->getVectorOperandType());
3685 }) &&
3686 allSameType(TE.Scalars)) {
3687 // Check that gather of extractelements can be represented as
3688 // just a shuffle of a single vector.
3689 OrdersType CurrentOrder;
3690 bool Reuse = canReuseExtract(TE.Scalars, TE.getMainOp(), CurrentOrder);
3691 if (Reuse || !CurrentOrder.empty()) {
3692 if (!CurrentOrder.empty())
3693 fixupOrderingIndices(CurrentOrder);
3694 return CurrentOrder;
3695 }
3696 }
3697 if (Optional<OrdersType> CurrentOrder = findReusedOrderedScalars(TE))
3698 return CurrentOrder;
3699 if (TE.Scalars.size() >= 4)
3700 if (Optional<OrdersType> Order = findPartiallyOrderedLoads(TE))
3701 return Order;
3702 }
3703 return None;
3704 }
3705
3706 void BoUpSLP::reorderTopToBottom() {
3707 // Maps VF to the graph nodes.
3708 DenseMap<unsigned, SetVector<TreeEntry *>> VFToOrderedEntries;
3709 // ExtractElement gather nodes which can be vectorized and need to handle
3710 // their ordering.
3711 DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
3712
3713 // AltShuffles can also have a preferred ordering that leads to fewer
3714 // instructions, e.g., the addsub instruction in x86.
3715 DenseMap<const TreeEntry *, OrdersType> AltShufflesToOrders;
3716
3717 // Maps a TreeEntry to the reorder indices of external users.
3718 DenseMap<const TreeEntry *, SmallVector<OrdersType, 1>>
3719 ExternalUserReorderMap;
3720 // FIXME: Workaround for syntax error reported by MSVC buildbots.
3721 TargetTransformInfo &TTIRef = *TTI;
3722 // Find all reorderable nodes with the given VF.
3723   // Currently these are vectorized stores, loads, extracts and some gathering
3724   // of extracts.
3725 for_each(VectorizableTree, [this, &TTIRef, &VFToOrderedEntries,
3726 &GathersToOrders, &ExternalUserReorderMap,
3727 &AltShufflesToOrders](
3728 const std::unique_ptr<TreeEntry> &TE) {
3729 // Look for external users that will probably be vectorized.
3730 SmallVector<OrdersType, 1> ExternalUserReorderIndices =
3731 findExternalStoreUsersReorderIndices(TE.get());
3732 if (!ExternalUserReorderIndices.empty()) {
3733 VFToOrderedEntries[TE->Scalars.size()].insert(TE.get());
3734 ExternalUserReorderMap.try_emplace(TE.get(),
3735 std::move(ExternalUserReorderIndices));
3736 }
3737
3738 // Patterns like [fadd,fsub] can be combined into a single instruction in
3739 // x86. Reordering them into [fsub,fadd] blocks this pattern. So we need
3740 // to take into account their order when looking for the most used order.
3741 if (TE->isAltShuffle()) {
3742 VectorType *VecTy =
3743 FixedVectorType::get(TE->Scalars[0]->getType(), TE->Scalars.size());
3744 unsigned Opcode0 = TE->getOpcode();
3745 unsigned Opcode1 = TE->getAltOpcode();
3746 // The opcode mask selects between the two opcodes.
3747 SmallBitVector OpcodeMask(TE->Scalars.size(), false);
3748 for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size()))
3749 if (cast<Instruction>(TE->Scalars[Lane])->getOpcode() == Opcode1)
3750 OpcodeMask.set(Lane);
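      // Illustrative example (hypothetical scalars): for {fadd, fsub, fadd,
      // fsub} with Opcode0 = fadd and Opcode1 = fsub, OpcodeMask becomes
      // {0, 1, 0, 1}, i.e. bits set in the lanes using the alternate opcode.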
3751 // If this pattern is supported by the target then we consider the order.
3752 if (TTIRef.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask)) {
3753 VFToOrderedEntries[TE->Scalars.size()].insert(TE.get());
3754 AltShufflesToOrders.try_emplace(TE.get(), OrdersType());
3755 }
3756 // TODO: Check the reverse order too.
3757 }
3758
3759 if (Optional<OrdersType> CurrentOrder =
3760 getReorderingData(*TE, /*TopToBottom=*/true)) {
3761       // Do not include ordering for nodes used in the alt opcode vectorization;
3762       // it is better to reorder them during the bottom-to-top stage. If we
3763       // follow the order here, it causes reordering of the whole graph, though
3764       // actually it is profitable just to reorder the subgraph that starts from
3765       // the alternate opcode vectorization node. Such nodes already end up with
3766       // a shuffle instruction and it is enough to change this shuffle rather
3767       // than rotate the scalars for the whole graph.
3768 unsigned Cnt = 0;
3769 const TreeEntry *UserTE = TE.get();
3770 while (UserTE && Cnt < RecursionMaxDepth) {
3771 if (UserTE->UserTreeIndices.size() != 1)
3772 break;
3773 if (all_of(UserTE->UserTreeIndices, [](const EdgeInfo &EI) {
3774 return EI.UserTE->State == TreeEntry::Vectorize &&
3775 EI.UserTE->isAltShuffle() && EI.UserTE->Idx != 0;
3776 }))
3777 return;
3778 UserTE = UserTE->UserTreeIndices.back().UserTE;
3779 ++Cnt;
3780 }
3781 VFToOrderedEntries[TE->Scalars.size()].insert(TE.get());
3782 if (TE->State != TreeEntry::Vectorize)
3783 GathersToOrders.try_emplace(TE.get(), *CurrentOrder);
3784 }
3785 });
3786
3787 // Reorder the graph nodes according to their vectorization factor.
3788 for (unsigned VF = VectorizableTree.front()->Scalars.size(); VF > 1;
3789 VF /= 2) {
3790 auto It = VFToOrderedEntries.find(VF);
3791 if (It == VFToOrderedEntries.end())
3792 continue;
3793     // Try to find the most profitable order. We are just looking for the most
3794     // used order and reorder scalar elements in the nodes according to this
3795     // most used order.
3796 ArrayRef<TreeEntry *> OrderedEntries = It->second.getArrayRef();
3797 // All operands are reordered and used only in this node - propagate the
3798 // most used order to the user node.
3799 MapVector<OrdersType, unsigned,
3800 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
3801 OrdersUses;
3802 SmallPtrSet<const TreeEntry *, 4> VisitedOps;
3803 for (const TreeEntry *OpTE : OrderedEntries) {
3804       // No need to reorder these nodes; we still need to extend and use a
3805       // shuffle, just merging the reordering shuffle and the reuse shuffle.
3806 if (!OpTE->ReuseShuffleIndices.empty())
3807 continue;
3808       // Count the number of uses of each order.
3809 const auto &Order = [OpTE, &GathersToOrders,
3810 &AltShufflesToOrders]() -> const OrdersType & {
3811 if (OpTE->State == TreeEntry::NeedToGather) {
3812 auto It = GathersToOrders.find(OpTE);
3813 if (It != GathersToOrders.end())
3814 return It->second;
3815 }
3816 if (OpTE->isAltShuffle()) {
3817 auto It = AltShufflesToOrders.find(OpTE);
3818 if (It != AltShufflesToOrders.end())
3819 return It->second;
3820 }
3821 return OpTE->ReorderIndices;
3822 }();
3823 // First consider the order of the external scalar users.
3824 auto It = ExternalUserReorderMap.find(OpTE);
3825 if (It != ExternalUserReorderMap.end()) {
3826 const auto &ExternalUserReorderIndices = It->second;
3827 for (const OrdersType &ExtOrder : ExternalUserReorderIndices)
3828 ++OrdersUses.insert(std::make_pair(ExtOrder, 0)).first->second;
3829 // No other useful reorder data in this entry.
3830 if (Order.empty())
3831 continue;
3832 }
3833 // Stores actually store the mask, not the order, need to invert.
3834 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() &&
3835 OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
3836 SmallVector<int> Mask;
3837 inversePermutation(Order, Mask);
3838 unsigned E = Order.size();
3839 OrdersType CurrentOrder(E, E);
3840 transform(Mask, CurrentOrder.begin(), [E](int Idx) {
3841 return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx);
3842 });
3843 fixupOrderingIndices(CurrentOrder);
3844 ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second;
3845 } else {
3846 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second;
3847 }
3848 }
3849 // Set order of the user node.
3850 if (OrdersUses.empty())
3851 continue;
3852 // Choose the most used order.
3853 ArrayRef<unsigned> BestOrder = OrdersUses.front().first;
3854 unsigned Cnt = OrdersUses.front().second;
3855 for (const auto &Pair : drop_begin(OrdersUses)) {
3856 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) {
3857 BestOrder = Pair.first;
3858 Cnt = Pair.second;
3859 }
3860 }
3861 // Set order of the user node.
3862 if (BestOrder.empty())
3863 continue;
3864 SmallVector<int> Mask;
3865 inversePermutation(BestOrder, Mask);
3866 SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem);
3867 unsigned E = BestOrder.size();
3868 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) {
3869 return I < E ? static_cast<int>(I) : UndefMaskElem;
3870 });
3871 // Do an actual reordering, if profitable.
3872 for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
3873 // Just do the reordering for the nodes with the given VF.
3874 if (TE->Scalars.size() != VF) {
3875 if (TE->ReuseShuffleIndices.size() == VF) {
3876 // Need to reorder the reuses masks of the operands with smaller VF to
3877 // be able to find the match between the graph nodes and scalar
3878 // operands of the given node during vectorization/cost estimation.
3879 assert(all_of(TE->UserTreeIndices,
3880 [VF, &TE](const EdgeInfo &EI) {
3881 return EI.UserTE->Scalars.size() == VF ||
3882 EI.UserTE->Scalars.size() ==
3883 TE->Scalars.size();
3884 }) &&
3885 "All users must be of VF size.");
3886 // Update ordering of the operands with the smaller VF than the given
3887 // one.
3888 reorderReuses(TE->ReuseShuffleIndices, Mask);
3889 }
3890 continue;
3891 }
3892 if (TE->State == TreeEntry::Vectorize &&
3893 isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst,
3894 InsertElementInst>(TE->getMainOp()) &&
3895 !TE->isAltShuffle()) {
3896 // Build correct orders for extract{element,value}, loads and
3897 // stores.
3898 reorderOrder(TE->ReorderIndices, Mask);
3899 if (isa<InsertElementInst, StoreInst>(TE->getMainOp()))
3900 TE->reorderOperands(Mask);
3901 } else {
3902 // Reorder the node and its operands.
3903 TE->reorderOperands(Mask);
3904 assert(TE->ReorderIndices.empty() &&
3905 "Expected empty reorder sequence.");
3906 reorderScalars(TE->Scalars, Mask);
3907 }
3908 if (!TE->ReuseShuffleIndices.empty()) {
3909 // Apply reversed order to keep the original ordering of the reused
3910 // elements to avoid extra reorder indices shuffling.
3911 OrdersType CurrentOrder;
3912 reorderOrder(CurrentOrder, MaskOrder);
3913 SmallVector<int> NewReuses;
3914 inversePermutation(CurrentOrder, NewReuses);
3915 addMask(NewReuses, TE->ReuseShuffleIndices);
3916 TE->ReuseShuffleIndices.swap(NewReuses);
3917 }
3918 }
3919 }
3920 }
3921
3922 bool BoUpSLP::canReorderOperands(
3923 TreeEntry *UserTE, SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
3924 ArrayRef<TreeEntry *> ReorderableGathers,
3925 SmallVectorImpl<TreeEntry *> &GatherOps) {
3926 for (unsigned I = 0, E = UserTE->getNumOperands(); I < E; ++I) {
3927 if (any_of(Edges, [I](const std::pair<unsigned, TreeEntry *> &OpData) {
3928 return OpData.first == I &&
3929 OpData.second->State == TreeEntry::Vectorize;
3930 }))
3931 continue;
3932 if (TreeEntry *TE = getVectorizedOperand(UserTE, I)) {
3933 // Do not reorder if operand node is used by many user nodes.
3934 if (any_of(TE->UserTreeIndices,
3935 [UserTE](const EdgeInfo &EI) { return EI.UserTE != UserTE; }))
3936 return false;
3937 // Add the node to the list of the ordered nodes with the identity
3938 // order.
3939 Edges.emplace_back(I, TE);
3940 // Add ScatterVectorize nodes to the list of operands, where just
3941 // reordering of the scalars is required. Similar to the gathers, so
3942 // simply add to the list of gathered ops.
3943 // If there are reused scalars, process this node as a regular vectorize
3944 // node, just reorder reuses mask.
3945 if (TE->State != TreeEntry::Vectorize && TE->ReuseShuffleIndices.empty())
3946 GatherOps.push_back(TE);
3947 continue;
3948 }
3949 TreeEntry *Gather = nullptr;
3950 if (count_if(ReorderableGathers,
3951 [&Gather, UserTE, I](TreeEntry *TE) {
3952 assert(TE->State != TreeEntry::Vectorize &&
3953 "Only non-vectorized nodes are expected.");
3954 if (any_of(TE->UserTreeIndices,
3955 [UserTE, I](const EdgeInfo &EI) {
3956 return EI.UserTE == UserTE && EI.EdgeIdx == I;
3957 })) {
3958 assert(TE->isSame(UserTE->getOperand(I)) &&
3959 "Operand entry does not match operands.");
3960 Gather = TE;
3961 return true;
3962 }
3963 return false;
3964 }) > 1 &&
3965 !all_of(UserTE->getOperand(I), isConstant))
3966 return false;
3967 if (Gather)
3968 GatherOps.push_back(Gather);
3969 }
3970 return true;
3971 }
3972
3973 void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
3974 SetVector<TreeEntry *> OrderedEntries;
3975 DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
3976 // Find all reorderable leaf nodes with the given VF.
3977   // Currently these are vectorized loads and extracts without alternate
3978   // operands, plus some gathering of extracts.
3979 SmallVector<TreeEntry *> NonVectorized;
3980 for_each(VectorizableTree, [this, &OrderedEntries, &GathersToOrders,
3981 &NonVectorized](
3982 const std::unique_ptr<TreeEntry> &TE) {
3983 if (TE->State != TreeEntry::Vectorize)
3984 NonVectorized.push_back(TE.get());
3985 if (Optional<OrdersType> CurrentOrder =
3986 getReorderingData(*TE, /*TopToBottom=*/false)) {
3987 OrderedEntries.insert(TE.get());
3988 if (TE->State != TreeEntry::Vectorize)
3989 GathersToOrders.try_emplace(TE.get(), *CurrentOrder);
3990 }
3991 });
3992
3993 // 1. Propagate order to the graph nodes, which use only reordered nodes.
3994   // I.e., if the node has operands that are reordered, try to make at least
3995   // one operand order the natural order, reorder the others, and reorder the
3996   // user node itself.
3997 SmallPtrSet<const TreeEntry *, 4> Visited;
3998 while (!OrderedEntries.empty()) {
3999 // 1. Filter out only reordered nodes.
4000 // 2. If the entry has multiple uses - skip it and jump to the next node.
4001 DenseMap<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users;
4002 SmallVector<TreeEntry *> Filtered;
4003 for (TreeEntry *TE : OrderedEntries) {
4004 if (!(TE->State == TreeEntry::Vectorize ||
4005 (TE->State == TreeEntry::NeedToGather &&
4006 GathersToOrders.count(TE))) ||
4007 TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
4008 !all_of(drop_begin(TE->UserTreeIndices),
4009 [TE](const EdgeInfo &EI) {
4010 return EI.UserTE == TE->UserTreeIndices.front().UserTE;
4011 }) ||
4012 !Visited.insert(TE).second) {
4013 Filtered.push_back(TE);
4014 continue;
4015 }
4016       // Build a map between user nodes and their operands' order to speed up
4017       // the search. The graph currently does not provide this dependency directly.
4018 for (EdgeInfo &EI : TE->UserTreeIndices) {
4019 TreeEntry *UserTE = EI.UserTE;
4020 auto It = Users.find(UserTE);
4021 if (It == Users.end())
4022 It = Users.insert({UserTE, {}}).first;
4023 It->second.emplace_back(EI.EdgeIdx, TE);
4024 }
4025 }
4026 // Erase filtered entries.
4027 for_each(Filtered,
4028 [&OrderedEntries](TreeEntry *TE) { OrderedEntries.remove(TE); });
4029 SmallVector<
4030 std::pair<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>>>
4031 UsersVec(Users.begin(), Users.end());
4032 sort(UsersVec, [](const auto &Data1, const auto &Data2) {
4033 return Data1.first->Idx > Data2.first->Idx;
4034 });
4035 for (auto &Data : UsersVec) {
4036 // Check that operands are used only in the User node.
4037 SmallVector<TreeEntry *> GatherOps;
4038 if (!canReorderOperands(Data.first, Data.second, NonVectorized,
4039 GatherOps)) {
4040 for_each(Data.second,
4041 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
4042 OrderedEntries.remove(Op.second);
4043 });
4044 continue;
4045 }
4046 // All operands are reordered and used only in this node - propagate the
4047 // most used order to the user node.
4048 MapVector<OrdersType, unsigned,
4049 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
4050 OrdersUses;
4051       // Do the analysis for each tree entry only once, otherwise the order of
4052       // the same node may be considered several times, though it might not be
4053       // profitable.
4054 SmallPtrSet<const TreeEntry *, 4> VisitedOps;
4055 SmallPtrSet<const TreeEntry *, 4> VisitedUsers;
4056 for (const auto &Op : Data.second) {
4057 TreeEntry *OpTE = Op.second;
4058 if (!VisitedOps.insert(OpTE).second)
4059 continue;
4060 if (!OpTE->ReuseShuffleIndices.empty())
4061 continue;
4062 const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & {
4063 if (OpTE->State == TreeEntry::NeedToGather)
4064 return GathersToOrders.find(OpTE)->second;
4065 return OpTE->ReorderIndices;
4066 }();
4067 unsigned NumOps = count_if(
4068 Data.second, [OpTE](const std::pair<unsigned, TreeEntry *> &P) {
4069 return P.second == OpTE;
4070 });
4071         // Stores actually store the mask, not the order; we need to invert it.
4072 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() &&
4073 OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
4074 SmallVector<int> Mask;
4075 inversePermutation(Order, Mask);
4076 unsigned E = Order.size();
4077 OrdersType CurrentOrder(E, E);
4078 transform(Mask, CurrentOrder.begin(), [E](int Idx) {
4079 return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx);
4080 });
4081 fixupOrderingIndices(CurrentOrder);
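        // Illustrative example: if the store node recorded the mask {1, 2, 0},
        // inversePermutation() yields the order {2, 0, 1}, which is what gets
        // counted in OrdersUses; fixupOrderingIndices() only fills slots that
        // were left as E/undef.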
4082 OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second +=
4083 NumOps;
4084 } else {
4085 OrdersUses.insert(std::make_pair(Order, 0)).first->second += NumOps;
4086 }
4087 auto Res = OrdersUses.insert(std::make_pair(OrdersType(), 0));
4088 const auto &&AllowsReordering = [IgnoreReorder, &GathersToOrders](
4089 const TreeEntry *TE) {
4090 if (!TE->ReorderIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
4091 (TE->State == TreeEntry::Vectorize && TE->isAltShuffle()) ||
4092 (IgnoreReorder && TE->Idx == 0))
4093 return true;
4094 if (TE->State == TreeEntry::NeedToGather) {
4095 auto It = GathersToOrders.find(TE);
4096 if (It != GathersToOrders.end())
4097 return !It->second.empty();
4098 return true;
4099 }
4100 return false;
4101 };
4102 for (const EdgeInfo &EI : OpTE->UserTreeIndices) {
4103 TreeEntry *UserTE = EI.UserTE;
4104 if (!VisitedUsers.insert(UserTE).second)
4105 continue;
4106           // The user node may be reordered if it itself requires reordering, has
4107           // reused scalars, is an alternate-op vectorize node, or its operand
4108           // nodes require reordering.
4109 if (AllowsReordering(UserTE))
4110 continue;
4111           // Check if the users allow reordering.
4112           // Currently we look at just one level of operands to avoid increasing
4113           // compile time.
4114           // Reordering is profitable only if clearly more operands allow
4115           // reordering than operands that prefer the natural order.
4116 ArrayRef<std::pair<unsigned, TreeEntry *>> Ops = Users[UserTE];
4117 if (static_cast<unsigned>(count_if(
4118 Ops, [UserTE, &AllowsReordering](
4119 const std::pair<unsigned, TreeEntry *> &Op) {
4120 return AllowsReordering(Op.second) &&
4121 all_of(Op.second->UserTreeIndices,
4122 [UserTE](const EdgeInfo &EI) {
4123 return EI.UserTE == UserTE;
4124 });
4125 })) <= Ops.size() / 2)
4126 ++Res.first->second;
4127 }
4128 }
4129       // If no orders were found, skip the current nodes and jump to the next
4130       // ones, if any.
4130 if (OrdersUses.empty()) {
4131 for_each(Data.second,
4132 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
4133 OrderedEntries.remove(Op.second);
4134 });
4135 continue;
4136 }
4137 // Choose the best order.
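      // Note (illustrative): the order with the highest use count wins; on a
      // tie the empty (identity) order is preferred, e.g. with counts
      // {{1,0}: 2, {}: 2} the identity order is chosen because it avoids an
      // extra shuffle.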
4138 ArrayRef<unsigned> BestOrder = OrdersUses.front().first;
4139 unsigned Cnt = OrdersUses.front().second;
4140 for (const auto &Pair : drop_begin(OrdersUses)) {
4141 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) {
4142 BestOrder = Pair.first;
4143 Cnt = Pair.second;
4144 }
4145 }
4146 // Set order of the user node (reordering of operands and user nodes).
4147 if (BestOrder.empty()) {
4148 for_each(Data.second,
4149 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
4150 OrderedEntries.remove(Op.second);
4151 });
4152 continue;
4153 }
4154 // Erase operands from OrderedEntries list and adjust their orders.
4155 VisitedOps.clear();
4156 SmallVector<int> Mask;
4157 inversePermutation(BestOrder, Mask);
4158 SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem);
4159 unsigned E = BestOrder.size();
4160 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) {
4161 return I < E ? static_cast<int>(I) : UndefMaskElem;
4162 });
4163 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) {
4164 TreeEntry *TE = Op.second;
4165 OrderedEntries.remove(TE);
4166 if (!VisitedOps.insert(TE).second)
4167 continue;
4168 if (TE->ReuseShuffleIndices.size() == BestOrder.size()) {
4169 // Just reorder reuses indices.
4170 reorderReuses(TE->ReuseShuffleIndices, Mask);
4171 continue;
4172 }
4173 // Gathers are processed separately.
4174 if (TE->State != TreeEntry::Vectorize)
4175 continue;
4176 assert((BestOrder.size() == TE->ReorderIndices.size() ||
4177 TE->ReorderIndices.empty()) &&
4178 "Non-matching sizes of user/operand entries.");
4179 reorderOrder(TE->ReorderIndices, Mask);
4180 if (IgnoreReorder && TE == VectorizableTree.front().get())
4181 IgnoreReorder = false;
4182 }
4183       // For gathers we just need to reorder their scalars.
4184 for (TreeEntry *Gather : GatherOps) {
4185 assert(Gather->ReorderIndices.empty() &&
4186 "Unexpected reordering of gathers.");
4187 if (!Gather->ReuseShuffleIndices.empty()) {
4188 // Just reorder reuses indices.
4189 reorderReuses(Gather->ReuseShuffleIndices, Mask);
4190 continue;
4191 }
4192 reorderScalars(Gather->Scalars, Mask);
4193 OrderedEntries.remove(Gather);
4194 }
4195 // Reorder operands of the user node and set the ordering for the user
4196 // node itself.
4197 if (Data.first->State != TreeEntry::Vectorize ||
4198 !isa<ExtractElementInst, ExtractValueInst, LoadInst>(
4199 Data.first->getMainOp()) ||
4200 Data.first->isAltShuffle())
4201 Data.first->reorderOperands(Mask);
4202 if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) ||
4203 Data.first->isAltShuffle()) {
4204 reorderScalars(Data.first->Scalars, Mask);
4205 reorderOrder(Data.first->ReorderIndices, MaskOrder);
4206 if (Data.first->ReuseShuffleIndices.empty() &&
4207 !Data.first->ReorderIndices.empty() &&
4208 !Data.first->isAltShuffle()) {
4209 // Insert user node to the list to try to sink reordering deeper in
4210 // the graph.
4211 OrderedEntries.insert(Data.first);
4212 }
4213 } else {
4214 reorderOrder(Data.first->ReorderIndices, Mask);
4215 }
4216 }
4217 }
4218 // If the reordering is unnecessary, just remove the reorder.
4219 if (IgnoreReorder && !VectorizableTree.front()->ReorderIndices.empty() &&
4220 VectorizableTree.front()->ReuseShuffleIndices.empty())
4221 VectorizableTree.front()->ReorderIndices.clear();
4222 }
4223
4224 void BoUpSLP::buildExternalUses(
4225 const ExtraValueToDebugLocsMap &ExternallyUsedValues) {
4226 // Collect the values that we need to extract from the tree.
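  // For example (illustrative): if %a belongs to a vectorized bundle but is
  // also passed to a call that stays scalar, we record (%a, call, lane) here
  // so that an extractelement can be emitted when the tree is vectorized.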
4227 for (auto &TEPtr : VectorizableTree) {
4228 TreeEntry *Entry = TEPtr.get();
4229
4230 // No need to handle users of gathered values.
4231 if (Entry->State == TreeEntry::NeedToGather)
4232 continue;
4233
4234 // For each lane:
4235 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
4236 Value *Scalar = Entry->Scalars[Lane];
4237 int FoundLane = Entry->findLaneForValue(Scalar);
4238
4239 // Check if the scalar is externally used as an extra arg.
4240 auto ExtI = ExternallyUsedValues.find(Scalar);
4241 if (ExtI != ExternallyUsedValues.end()) {
4242 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane "
4243 << Lane << " from " << *Scalar << ".\n");
4244 ExternalUses.emplace_back(Scalar, nullptr, FoundLane);
4245 }
4246 for (User *U : Scalar->users()) {
4247 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
4248
4249 Instruction *UserInst = dyn_cast<Instruction>(U);
4250 if (!UserInst)
4251 continue;
4252
4253 if (isDeleted(UserInst))
4254 continue;
4255
4256 // Skip in-tree scalars that become vectors
4257 if (TreeEntry *UseEntry = getTreeEntry(U)) {
4258 Value *UseScalar = UseEntry->Scalars[0];
4259 // Some in-tree scalars will remain as scalar in vectorized
4260 // instructions. If that is the case, the one in Lane 0 will
4261 // be used.
4262 if (UseScalar != U ||
4263 UseEntry->State == TreeEntry::ScatterVectorize ||
4264 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
4265 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
4266 << ".\n");
4267 assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state");
4268 continue;
4269 }
4270 }
4271
4272 // Ignore users in the user ignore list.
4273 if (UserIgnoreList && UserIgnoreList->contains(UserInst))
4274 continue;
4275
4276 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane "
4277 << Lane << " from " << *Scalar << ".\n");
4278 ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane));
4279 }
4280 }
4281 }
4282 }
4283
4284 DenseMap<Value *, SmallVector<StoreInst *, 4>>
4285 BoUpSLP::collectUserStores(const BoUpSLP::TreeEntry *TE) const {
4286 DenseMap<Value *, SmallVector<StoreInst *, 4>> PtrToStoresMap;
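  // Illustrative example: for TE->Scalars = {%a, %b} with users
  // "store %a, %p[0]" and "store %b, %p[1]", both stores share the same
  // underlying object %p and land in the same bucket, one store per lane.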
4287 for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size())) {
4288 Value *V = TE->Scalars[Lane];
4289 // To save compilation time we don't visit if we have too many users.
4290 static constexpr unsigned UsersLimit = 4;
4291 if (V->hasNUsesOrMore(UsersLimit))
4292 break;
4293
4294 // Collect stores per pointer object.
4295 for (User *U : V->users()) {
4296 auto *SI = dyn_cast<StoreInst>(U);
4297 if (SI == nullptr || !SI->isSimple() ||
4298 !isValidElementType(SI->getValueOperand()->getType()))
4299 continue;
4300       // Skip the store if it is already part of the tree.
4301 if (getTreeEntry(U))
4302 continue;
4303
4304 Value *Ptr = getUnderlyingObject(SI->getPointerOperand());
4305 auto &StoresVec = PtrToStoresMap[Ptr];
4306 // For now just keep one store per pointer object per lane.
4307 // TODO: Extend this to support multiple stores per pointer per lane
4308 if (StoresVec.size() > Lane)
4309 continue;
4310 // Skip if in different BBs.
4311 if (!StoresVec.empty() &&
4312 SI->getParent() != StoresVec.back()->getParent())
4313 continue;
4314 // Make sure that the stores are of the same type.
4315 if (!StoresVec.empty() &&
4316 SI->getValueOperand()->getType() !=
4317 StoresVec.back()->getValueOperand()->getType())
4318 continue;
4319 StoresVec.push_back(SI);
4320 }
4321 }
4322 return PtrToStoresMap;
4323 }
4324
4325 bool BoUpSLP::CanFormVector(const SmallVector<StoreInst *, 4> &StoresVec,
4326 OrdersType &ReorderIndices) const {
4327   // We check whether the stores in StoresVec can form a vector by sorting them
4328   // and checking whether they are consecutive.
4329
4330 // To avoid calling getPointersDiff() while sorting we create a vector of
4331 // pairs {store, offset from first} and sort this instead.
4332 SmallVector<std::pair<StoreInst *, int>, 4> StoreOffsetVec(StoresVec.size());
4333 StoreInst *S0 = StoresVec[0];
4334 StoreOffsetVec[0] = {S0, 0};
4335 Type *S0Ty = S0->getValueOperand()->getType();
4336 Value *S0Ptr = S0->getPointerOperand();
4337 for (unsigned Idx : seq<unsigned>(1, StoresVec.size())) {
4338 StoreInst *SI = StoresVec[Idx];
4339 Optional<int> Diff =
4340 getPointersDiff(S0Ty, S0Ptr, SI->getValueOperand()->getType(),
4341 SI->getPointerOperand(), *DL, *SE,
4342 /*StrictCheck=*/true);
4343 // We failed to compare the pointers so just abandon this StoresVec.
4344 if (!Diff)
4345 return false;
4346 StoreOffsetVec[Idx] = {StoresVec[Idx], *Diff};
4347 }
4348
4349 // Sort the vector based on the pointers. We create a copy because we may
4350 // need the original later for calculating the reorder (shuffle) indices.
4351 stable_sort(StoreOffsetVec, [](const std::pair<StoreInst *, int> &Pair1,
4352 const std::pair<StoreInst *, int> &Pair2) {
4353 int Offset1 = Pair1.second;
4354 int Offset2 = Pair2.second;
4355 return Offset1 < Offset2;
4356 });
4357
4358 // Check if the stores are consecutive by checking if their difference is 1.
4359 for (unsigned Idx : seq<unsigned>(1, StoreOffsetVec.size()))
4360 if (StoreOffsetVec[Idx].second != StoreOffsetVec[Idx-1].second + 1)
4361 return false;
4362
4363 // Calculate the shuffle indices according to their offset against the sorted
4364 // StoreOffsetVec.
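  // Illustrative example: for StoresVec = {S0 @ offset 0, S1 @ offset 2,
  // S2 @ offset 1} the sorted vector is {S0, S2, S1}, so ReorderIndices
  // becomes {0, 2, 1}.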
4365 ReorderIndices.reserve(StoresVec.size());
4366 for (StoreInst *SI : StoresVec) {
4367 unsigned Idx = find_if(StoreOffsetVec,
4368 [SI](const std::pair<StoreInst *, int> &Pair) {
4369 return Pair.first == SI;
4370 }) -
4371 StoreOffsetVec.begin();
4372 ReorderIndices.push_back(Idx);
4373 }
4374 // Identity order (e.g., {0,1,2,3}) is modeled as an empty OrdersType in
4375 // reorderTopToBottom() and reorderBottomToTop(), so we are following the
4376 // same convention here.
4377 auto IsIdentityOrder = [](const OrdersType &Order) {
4378 for (unsigned Idx : seq<unsigned>(0, Order.size()))
4379 if (Idx != Order[Idx])
4380 return false;
4381 return true;
4382 };
4383 if (IsIdentityOrder(ReorderIndices))
4384 ReorderIndices.clear();
4385
4386 return true;
4387 }
4388
4389 #ifndef NDEBUG
4390 LLVM_DUMP_METHOD static void dumpOrder(const BoUpSLP::OrdersType &Order) {
4391 for (unsigned Idx : Order)
4392 dbgs() << Idx << ", ";
4393 dbgs() << "\n";
4394 }
4395 #endif
4396
4397 SmallVector<BoUpSLP::OrdersType, 1>
4398 BoUpSLP::findExternalStoreUsersReorderIndices(TreeEntry *TE) const {
4399 unsigned NumLanes = TE->Scalars.size();
4400
4401 DenseMap<Value *, SmallVector<StoreInst *, 4>> PtrToStoresMap =
4402 collectUserStores(TE);
4403
4404 // Holds the reorder indices for each candidate store vector that is a user of
4405 // the current TreeEntry.
4406 SmallVector<OrdersType, 1> ExternalReorderIndices;
4407
4408 // Now inspect the stores collected per pointer and look for vectorization
4409 // candidates. For each candidate calculate the reorder index vector and push
4410   // it into `ExternalReorderIndices`.
4411 for (const auto &Pair : PtrToStoresMap) {
4412 auto &StoresVec = Pair.second;
4413 // If we have fewer than NumLanes stores, then we can't form a vector.
4414 if (StoresVec.size() != NumLanes)
4415 continue;
4416
4417 // If the stores are not consecutive then abandon this StoresVec.
4418 OrdersType ReorderIndices;
4419 if (!CanFormVector(StoresVec, ReorderIndices))
4420 continue;
4421
4422 // We now know that the scalars in StoresVec can form a vector instruction,
4423 // so set the reorder indices.
4424 ExternalReorderIndices.push_back(ReorderIndices);
4425 }
4426 return ExternalReorderIndices;
4427 }
4428
4429 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
4430 const SmallDenseSet<Value *> &UserIgnoreLst) {
4431 deleteTree();
4432 UserIgnoreList = &UserIgnoreLst;
4433 if (!allSameType(Roots))
4434 return;
4435 buildTree_rec(Roots, 0, EdgeInfo());
4436 }
4437
4438 void BoUpSLP::buildTree(ArrayRef<Value *> Roots) {
4439 deleteTree();
4440 if (!allSameType(Roots))
4441 return;
4442 buildTree_rec(Roots, 0, EdgeInfo());
4443 }
4444
4445 /// \return true if the specified list of values has only one instruction that
4446 /// requires scheduling, false otherwise.
4447 #ifndef NDEBUG
4448 static bool needToScheduleSingleInstruction(ArrayRef<Value *> VL) {
4449 Value *NeedsScheduling = nullptr;
4450 for (Value *V : VL) {
4451 if (doesNotNeedToBeScheduled(V))
4452 continue;
4453 if (!NeedsScheduling) {
4454 NeedsScheduling = V;
4455 continue;
4456 }
4457 return false;
4458 }
4459 return NeedsScheduling;
4460 }
4461 #endif
4462
4463 /// Generates a key/subkey pair for the given value to provide effective
4464 /// sorting of the values and better detection of vectorizable value sequences.
4465 /// The keys can be used for sorting the values themselves, and the subkeys for
4466 /// sorting within value subgroups.
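/// For example (illustrative): two simple loads in the same block get the same
/// key (both are loads), and the caller-provided LoadsSubkeyGenerator is
/// expected to give loads from nearby addresses the same subkey, so such loads
/// end up adjacent after sorting and are more likely to form a bundle.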
4467 static std::pair<size_t, size_t> generateKeySubkey(
4468 Value *V, const TargetLibraryInfo *TLI,
4469 function_ref<hash_code(size_t, LoadInst *)> LoadsSubkeyGenerator,
4470 bool AllowAlternate) {
4471 hash_code Key = hash_value(V->getValueID() + 2);
4472 hash_code SubKey = hash_value(0);
4473 // Sort the loads by the distance between the pointers.
4474 if (auto *LI = dyn_cast<LoadInst>(V)) {
4475 Key = hash_combine(hash_value(Instruction::Load), Key);
4476 if (LI->isSimple())
4477 SubKey = hash_value(LoadsSubkeyGenerator(Key, LI));
4478 else
4479 SubKey = hash_value(LI);
4480 } else if (isVectorLikeInstWithConstOps(V)) {
4481 // Sort extracts by the vector operands.
4482 if (isa<ExtractElementInst, UndefValue>(V))
4483 Key = hash_value(Value::UndefValueVal + 1);
4484 if (auto *EI = dyn_cast<ExtractElementInst>(V)) {
4485 if (!isUndefVector(EI->getVectorOperand()) &&
4486 !isa<UndefValue>(EI->getIndexOperand()))
4487 SubKey = hash_value(EI->getVectorOperand());
4488 }
4489 } else if (auto *I = dyn_cast<Instruction>(V)) {
4490 // Sort other instructions just by the opcodes except for CMPInst.
4491 // For CMP also sort by the predicate kind.
4492 if ((isa<BinaryOperator>(I) || isa<CastInst>(I)) &&
4493 isValidForAlternation(I->getOpcode())) {
4494 if (AllowAlternate)
4495 Key = hash_value(isa<BinaryOperator>(I) ? 1 : 0);
4496 else
4497 Key = hash_combine(hash_value(I->getOpcode()), Key);
4498 SubKey = hash_combine(
4499 hash_value(I->getOpcode()), hash_value(I->getType()),
4500 hash_value(isa<BinaryOperator>(I)
4501 ? I->getType()
4502 : cast<CastInst>(I)->getOperand(0)->getType()));
4503 // For casts, look through the only operand to improve compile time.
4504 if (isa<CastInst>(I)) {
4505 std::pair<size_t, size_t> OpVals =
4506 generateKeySubkey(I->getOperand(0), TLI, LoadsSubkeyGenerator,
4507                               /*AllowAlternate=*/true);
4508 Key = hash_combine(OpVals.first, Key);
4509 SubKey = hash_combine(OpVals.first, SubKey);
4510 }
4511 } else if (auto *CI = dyn_cast<CmpInst>(I)) {
4512 CmpInst::Predicate Pred = CI->getPredicate();
4513 if (CI->isCommutative())
4514 Pred = std::min(Pred, CmpInst::getInversePredicate(Pred));
4515 CmpInst::Predicate SwapPred = CmpInst::getSwappedPredicate(Pred);
4516 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Pred),
4517 hash_value(SwapPred),
4518 hash_value(CI->getOperand(0)->getType()));
4519 } else if (auto *Call = dyn_cast<CallInst>(I)) {
4520 Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, TLI);
4521 if (isTriviallyVectorizable(ID)) {
4522 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(ID));
4523 } else if (!VFDatabase(*Call).getMappings(*Call).empty()) {
4524 SubKey = hash_combine(hash_value(I->getOpcode()),
4525 hash_value(Call->getCalledFunction()));
4526 } else {
4527 Key = hash_combine(hash_value(Call), Key);
4528 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Call));
4529 }
4530 for (const CallBase::BundleOpInfo &Op : Call->bundle_op_infos())
4531 SubKey = hash_combine(hash_value(Op.Begin), hash_value(Op.End),
4532 hash_value(Op.Tag), SubKey);
4533 } else if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) {
4534 if (Gep->getNumOperands() == 2 && isa<ConstantInt>(Gep->getOperand(1)))
4535 SubKey = hash_value(Gep->getPointerOperand());
4536 else
4537 SubKey = hash_value(Gep);
4538 } else if (BinaryOperator::isIntDivRem(I->getOpcode()) &&
4539 !isa<ConstantInt>(I->getOperand(1))) {
4540 // Do not try to vectorize instructions with potentially high cost.
4541 SubKey = hash_value(I);
4542 } else {
4543 SubKey = hash_value(I->getOpcode());
4544 }
4545 Key = hash_combine(hash_value(I->getParent()), Key);
4546 }
4547 return std::make_pair(Key, SubKey);
4548 }
4549
4550 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
4551 const EdgeInfo &UserTreeIdx) {
4552 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
4553
4554 SmallVector<int> ReuseShuffleIndicies;
4555 SmallVector<Value *> UniqueValues;
4556 auto &&TryToFindDuplicates = [&VL, &ReuseShuffleIndicies, &UniqueValues,
4557 &UserTreeIdx,
4558 this](const InstructionsState &S) {
4559 // Check that every instruction appears once in this bundle.
4560 DenseMap<Value *, unsigned> UniquePositions;
4561 for (Value *V : VL) {
4562 if (isConstant(V)) {
4563 ReuseShuffleIndicies.emplace_back(
4564 isa<UndefValue>(V) ? UndefMaskElem : UniqueValues.size());
4565 UniqueValues.emplace_back(V);
4566 continue;
4567 }
4568 auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
4569 ReuseShuffleIndicies.emplace_back(Res.first->second);
4570 if (Res.second)
4571 UniqueValues.emplace_back(V);
4572 }
4573 size_t NumUniqueScalarValues = UniqueValues.size();
4574 if (NumUniqueScalarValues == VL.size()) {
4575 ReuseShuffleIndicies.clear();
4576 } else {
4577 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n");
4578 if (NumUniqueScalarValues <= 1 ||
4579 (UniquePositions.size() == 1 && all_of(UniqueValues,
4580 [](Value *V) {
4581 return isa<UndefValue>(V) ||
4582 !isConstant(V);
4583 })) ||
4584 !llvm::isPowerOf2_32(NumUniqueScalarValues)) {
4585 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
4586 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
4587 return false;
4588 }
4589 VL = UniqueValues;
4590 }
4591 return true;
4592 };
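  // Illustrative example: for VL = {%a, %b, %a, %b} the lambda above shrinks
  // VL to UniqueValues = {%a, %b} and records ReuseShuffleIndicies =
  // {0, 1, 0, 1}, i.e. the shuffle that rebuilds the original bundle from the
  // unique values.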
4593
4594 InstructionsState S = getSameOpcode(VL);
4595
4596 // Gather if we hit the RecursionMaxDepth, unless this is a load (or z/sext of
4597 // a load), in which case peek through to include it in the tree, without
4598 // ballooning over-budget.
4599 if (Depth >= RecursionMaxDepth &&
4600 !(S.MainOp && isa<Instruction>(S.MainOp) && S.MainOp == S.AltOp &&
4601 VL.size() >= 4 &&
4602 (match(S.MainOp, m_Load(m_Value())) || all_of(VL, [&S](const Value *I) {
4603 return match(I,
4604 m_OneUse(m_ZExtOrSExt(m_OneUse(m_Load(m_Value()))))) &&
4605 cast<Instruction>(I)->getOpcode() ==
4606 cast<Instruction>(S.MainOp)->getOpcode();
4607 })))) {
4608 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
4609 if (TryToFindDuplicates(S))
4610 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4611 ReuseShuffleIndicies);
4612 return;
4613 }
4614
4615   // Don't handle scalable vectors.
4616 if (S.getOpcode() == Instruction::ExtractElement &&
4617 isa<ScalableVectorType>(
4618 cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) {
4619 LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n");
4620 if (TryToFindDuplicates(S))
4621 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4622 ReuseShuffleIndicies);
4623 return;
4624 }
4625
4626 // Don't handle vectors.
4627 if (S.OpValue->getType()->isVectorTy() &&
4628 !isa<InsertElementInst>(S.OpValue)) {
4629 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
4630 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
4631 return;
4632 }
4633
4634 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
4635 if (SI->getValueOperand()->getType()->isVectorTy()) {
4636 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
4637 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
4638 return;
4639 }
4640
4641 // If all of the operands are identical or constant we have a simple solution.
4642 // If we deal with insert/extract instructions, they all must have constant
4643 // indices, otherwise we should gather them, not try to vectorize.
4644   // If this is an alternate op node with 2 elements and gathered operands - do
4645   // not vectorize.
4646 auto &&NotProfitableForVectorization = [&S, this,
4647 Depth](ArrayRef<Value *> VL) {
4648 if (!S.getOpcode() || !S.isAltShuffle() || VL.size() > 2)
4649 return false;
4650 if (VectorizableTree.size() < MinTreeSize)
4651 return false;
4652 if (Depth >= RecursionMaxDepth - 1)
4653 return true;
4654 // Check if all operands are extracts, part of vector node or can build a
4655 // regular vectorize node.
4656 SmallVector<unsigned, 2> InstsCount(VL.size(), 0);
4657 for (Value *V : VL) {
4658 auto *I = cast<Instruction>(V);
4659 InstsCount.push_back(count_if(I->operand_values(), [](Value *Op) {
4660 return isa<Instruction>(Op) || isVectorLikeInstWithConstOps(Op);
4661 }));
4662 }
4663 bool IsCommutative = isCommutative(S.MainOp) || isCommutative(S.AltOp);
4664 if ((IsCommutative &&
4665 std::accumulate(InstsCount.begin(), InstsCount.end(), 0) < 2) ||
4666 (!IsCommutative &&
4667 all_of(InstsCount, [](unsigned ICnt) { return ICnt < 2; })))
4668 return true;
4669 assert(VL.size() == 2 && "Expected only 2 alternate op instructions.");
4670 SmallVector<SmallVector<std::pair<Value *, Value *>>> Candidates;
4671 auto *I1 = cast<Instruction>(VL.front());
4672 auto *I2 = cast<Instruction>(VL.back());
4673 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op)
4674 Candidates.emplace_back().emplace_back(I1->getOperand(Op),
4675 I2->getOperand(Op));
4676 if (static_cast<unsigned>(count_if(
4677 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) {
4678 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat);
4679 })) >= S.MainOp->getNumOperands() / 2)
4680 return false;
4681 if (S.MainOp->getNumOperands() > 2)
4682 return true;
4683 if (IsCommutative) {
4684 // Check permuted operands.
4685 Candidates.clear();
4686 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op)
4687 Candidates.emplace_back().emplace_back(I1->getOperand(Op),
4688 I2->getOperand((Op + 1) % E));
4689 if (any_of(
4690 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) {
4691 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat);
4692 }))
4693 return false;
4694 }
4695 return true;
4696 };
4697 SmallVector<unsigned> SortedIndices;
4698 BasicBlock *BB = nullptr;
4699 bool IsScatterVectorizeUserTE =
4700 UserTreeIdx.UserTE &&
4701 UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize;
4702 bool AreAllSameInsts =
4703 (S.getOpcode() && allSameBlock(VL)) ||
4704 (S.OpValue->getType()->isPointerTy() && IsScatterVectorizeUserTE &&
4705 VL.size() > 2 &&
4706 all_of(VL,
4707 [&BB](Value *V) {
4708 auto *I = dyn_cast<GetElementPtrInst>(V);
4709 if (!I)
4710 return doesNotNeedToBeScheduled(V);
4711 if (!BB)
4712 BB = I->getParent();
4713 return BB == I->getParent() && I->getNumOperands() == 2;
4714 }) &&
4715 BB &&
4716 sortPtrAccesses(VL, UserTreeIdx.UserTE->getMainOp()->getType(), *DL, *SE,
4717 SortedIndices));
4718 if (allConstant(VL) || isSplat(VL) || !AreAllSameInsts ||
4719 (isa<InsertElementInst, ExtractValueInst, ExtractElementInst>(
4720 S.OpValue) &&
4721 !all_of(VL, isVectorLikeInstWithConstOps)) ||
4722 NotProfitableForVectorization(VL)) {
4723 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O, small shuffle. \n");
4724 if (TryToFindDuplicates(S))
4725 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4726 ReuseShuffleIndicies);
4727 return;
4728 }
4729
4730 // We now know that this is a vector of instructions of the same type from
4731 // the same block.
4732
4733 // Don't vectorize ephemeral values.
4734 if (!EphValues.empty()) {
4735 for (Value *V : VL) {
4736 if (EphValues.count(V)) {
4737 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
4738 << ") is ephemeral.\n");
4739 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
4740 return;
4741 }
4742 }
4743 }
4744
4745 // Check if this is a duplicate of another entry.
4746 if (TreeEntry *E = getTreeEntry(S.OpValue)) {
4747 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n");
4748 if (!E->isSame(VL)) {
4749 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
4750 if (TryToFindDuplicates(S))
4751 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4752 ReuseShuffleIndicies);
4753 return;
4754 }
4755     // Record the reuse of the tree node. FIXME: currently this is only used to
4756 // properly draw the graph rather than for the actual vectorization.
4757 E->UserTreeIndices.push_back(UserTreeIdx);
4758 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue
4759 << ".\n");
4760 return;
4761 }
4762
4763 // Check that none of the instructions in the bundle are already in the tree.
4764 for (Value *V : VL) {
4765 if (!IsScatterVectorizeUserTE && !isa<Instruction>(V))
4766 continue;
4767 if (getTreeEntry(V)) {
4768 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
4769 << ") is already in tree.\n");
4770 if (TryToFindDuplicates(S))
4771 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4772 ReuseShuffleIndicies);
4773 return;
4774 }
4775 }
4776
4777 // The reduction nodes (stored in UserIgnoreList) also should stay scalar.
4778 if (UserIgnoreList && !UserIgnoreList->empty()) {
4779 for (Value *V : VL) {
4780 if (UserIgnoreList && UserIgnoreList->contains(V)) {
4781 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
4782 if (TryToFindDuplicates(S))
4783 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4784 ReuseShuffleIndicies);
4785 return;
4786 }
4787 }
4788 }
4789
4790   // Special processing for sorted pointers for ScatterVectorize node with
4791   // constant indices only.
4792 if (AreAllSameInsts && !(S.getOpcode() && allSameBlock(VL)) &&
4793 UserTreeIdx.UserTE &&
4794 UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize) {
4795 assert(S.OpValue->getType()->isPointerTy() &&
4796 count_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }) >=
4797 2 &&
4798 "Expected pointers only.");
4799 // Reset S to make it GetElementPtr kind of node.
4800 const auto *It = find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); });
4801 assert(It != VL.end() && "Expected at least one GEP.");
4802 S = getSameOpcode(*It);
4803 }
4804
4805 // Check that all of the users of the scalars that we want to vectorize are
4806 // schedulable.
4807 auto *VL0 = cast<Instruction>(S.OpValue);
4808 BB = VL0->getParent();
4809
4810 if (!DT->isReachableFromEntry(BB)) {
4811 // Don't go into unreachable blocks. They may contain instructions with
4812 // dependency cycles which confuse the final scheduling.
4813 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
4814 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
4815 return;
4816 }
4817
4818 // Don't go into catchswitch blocks, which can happen with PHIs.
4819 // Such blocks can only have PHIs and the catchswitch. There is no
4820 // place to insert a shuffle if we need to, so just avoid that issue.
4821 if (isa<CatchSwitchInst>(BB->getTerminator())) {
4822 LLVM_DEBUG(dbgs() << "SLP: bundle in catchswitch block.\n");
4823 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
4824 return;
4825 }
4826
4827 // Check that every instruction appears once in this bundle.
4828 if (!TryToFindDuplicates(S))
4829 return;
4830
4831 auto &BSRef = BlocksSchedules[BB];
4832 if (!BSRef)
4833 BSRef = std::make_unique<BlockScheduling>(BB);
4834
4835 BlockScheduling &BS = *BSRef;
4836
4837 Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S);
4838 #ifdef EXPENSIVE_CHECKS
4839 // Make sure we didn't break any internal invariants
4840 BS.verify();
4841 #endif
4842 if (!Bundle) {
4843 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
4844 assert((!BS.getScheduleData(VL0) ||
4845 !BS.getScheduleData(VL0)->isPartOfBundle()) &&
4846 "tryScheduleBundle should cancelScheduling on failure");
4847 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4848 ReuseShuffleIndicies);
4849 return;
4850 }
4851 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
4852
4853 unsigned ShuffleOrOp = S.isAltShuffle() ?
4854 (unsigned) Instruction::ShuffleVector : S.getOpcode();
4855 switch (ShuffleOrOp) {
4856 case Instruction::PHI: {
4857 auto *PH = cast<PHINode>(VL0);
4858
4859 // Check for terminator values (e.g. invoke).
4860 for (Value *V : VL)
4861 for (Value *Incoming : cast<PHINode>(V)->incoming_values()) {
4862 Instruction *Term = dyn_cast<Instruction>(Incoming);
4863 if (Term && Term->isTerminator()) {
4864 LLVM_DEBUG(dbgs()
4865 << "SLP: Need to swizzle PHINodes (terminator use).\n");
4866 BS.cancelScheduling(VL, VL0);
4867 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4868 ReuseShuffleIndicies);
4869 return;
4870 }
4871 }
4872
4873 TreeEntry *TE =
4874 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies);
4875 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
4876
4877 // Keeps the reordered operands to avoid code duplication.
4878 SmallVector<ValueList, 2> OperandsVec;
4879 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) {
4880 if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) {
4881 ValueList Operands(VL.size(), PoisonValue::get(PH->getType()));
4882 TE->setOperand(I, Operands);
4883 OperandsVec.push_back(Operands);
4884 continue;
4885 }
4886 ValueList Operands;
4887 // Prepare the operand vector.
4888 for (Value *V : VL)
4889 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(
4890 PH->getIncomingBlock(I)));
4891 TE->setOperand(I, Operands);
4892 OperandsVec.push_back(Operands);
4893 }
4894 for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx)
4895 buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx});
4896 return;
4897 }
4898 case Instruction::ExtractValue:
4899 case Instruction::ExtractElement: {
4900 OrdersType CurrentOrder;
4901 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder);
4902 if (Reuse) {
4903 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
4904 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
4905 ReuseShuffleIndicies);
4906 // This is a special case, as it does not gather, but at the same time
4907 // we are not extending buildTree_rec() towards the operands.
4908 ValueList Op0;
4909 Op0.assign(VL.size(), VL0->getOperand(0));
4910 VectorizableTree.back()->setOperand(0, Op0);
4911 return;
4912 }
4913 if (!CurrentOrder.empty()) {
4914 LLVM_DEBUG({
4915 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
4916 "with order";
4917 for (unsigned Idx : CurrentOrder)
4918 dbgs() << " " << Idx;
4919 dbgs() << "\n";
4920 });
4921 fixupOrderingIndices(CurrentOrder);
4922 // Insert new order with initial value 0, if it does not exist,
4923 // otherwise return the iterator to the existing one.
4924 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
4925 ReuseShuffleIndicies, CurrentOrder);
4926 // This is a special case, as it does not gather, but at the same time
4927 // we are not extending buildTree_rec() towards the operands.
4928 ValueList Op0;
4929 Op0.assign(VL.size(), VL0->getOperand(0));
4930 VectorizableTree.back()->setOperand(0, Op0);
4931 return;
4932 }
4933 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
4934 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4935 ReuseShuffleIndicies);
4936 BS.cancelScheduling(VL, VL0);
4937 return;
4938 }
4939 case Instruction::InsertElement: {
4940 assert(ReuseShuffleIndicies.empty() && "All inserts should be unique");
4941
4942 // Check that we have a buildvector and not a shuffle of 2 or more
4943 // different vectors.
4944 ValueSet SourceVectors;
4945 for (Value *V : VL) {
4946 SourceVectors.insert(cast<Instruction>(V)->getOperand(0));
4947 assert(getInsertIndex(V) != None && "Non-constant or undef index?");
4948 }
4949
4950 if (count_if(VL, [&SourceVectors](Value *V) {
4951 return !SourceVectors.contains(V);
4952 }) >= 2) {
4953 // Found 2nd source vector - cancel.
4954 LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with "
4955 "different source vectors.\n");
4956 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
4957 BS.cancelScheduling(VL, VL0);
4958 return;
4959 }
4960
4961 auto OrdCompare = [](const std::pair<int, int> &P1,
4962 const std::pair<int, int> &P2) {
4963 return P1.first > P2.first;
4964 };
4965 PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>,
4966 decltype(OrdCompare)>
4967 Indices(OrdCompare);
4968 for (int I = 0, E = VL.size(); I < E; ++I) {
4969 unsigned Idx = *getInsertIndex(VL[I]);
4970 Indices.emplace(Idx, I);
4971 }
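      // Illustrative example of the computation below: for lanes inserting at
      // indices {2, 0, 1}, the min-heap pops (0,lane1), (1,lane2), (2,lane0)
      // and produces CurrentOrder = {2, 0, 1}; an identity result is cleared
      // to the empty order.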
4972 OrdersType CurrentOrder(VL.size(), VL.size());
4973 bool IsIdentity = true;
4974 for (int I = 0, E = VL.size(); I < E; ++I) {
4975 CurrentOrder[Indices.top().second] = I;
4976 IsIdentity &= Indices.top().second == I;
4977 Indices.pop();
4978 }
4979 if (IsIdentity)
4980 CurrentOrder.clear();
4981 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
4982 None, CurrentOrder);
4983 LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n");
4984
4985 constexpr int NumOps = 2;
4986 ValueList VectorOperands[NumOps];
4987 for (int I = 0; I < NumOps; ++I) {
4988 for (Value *V : VL)
4989 VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I));
4990
4991 TE->setOperand(I, VectorOperands[I]);
4992 }
4993 buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1});
4994 return;
4995 }
4996 case Instruction::Load: {
4997 // Check that a vectorized load would load the same memory as a scalar
4998 // load. For example, we don't want to vectorize loads that are smaller
4999 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
5000 // treats loading/storing it as an i8 struct. If we vectorize loads/stores
5001 // from such a struct, we read/write packed bits disagreeing with the
5002 // unvectorized version.
5003 SmallVector<Value *> PointerOps;
5004 OrdersType CurrentOrder;
5005 TreeEntry *TE = nullptr;
5006 switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, *LI, CurrentOrder,
5007 PointerOps)) {
5008 case LoadsState::Vectorize:
5009 if (CurrentOrder.empty()) {
5010           // Original loads are consecutive and do not require reordering.
5011 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5012 ReuseShuffleIndicies);
5013 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
5014 } else {
5015 fixupOrderingIndices(CurrentOrder);
5016 // Need to reorder.
5017 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5018 ReuseShuffleIndicies, CurrentOrder);
5019 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
5020 }
5021 TE->setOperandsInOrder();
5022 break;
5023 case LoadsState::ScatterVectorize:
5024 // Vectorizing non-consecutive loads with `llvm.masked.gather`.
5025 TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S,
5026 UserTreeIdx, ReuseShuffleIndicies);
5027 TE->setOperandsInOrder();
5028 buildTree_rec(PointerOps, Depth + 1, {TE, 0});
5029 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n");
5030 break;
5031 case LoadsState::Gather:
5032 BS.cancelScheduling(VL, VL0);
5033 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5034 ReuseShuffleIndicies);
5035 #ifndef NDEBUG
5036 Type *ScalarTy = VL0->getType();
5037 if (DL->getTypeSizeInBits(ScalarTy) !=
5038 DL->getTypeAllocSizeInBits(ScalarTy))
5039 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
5040 else if (any_of(VL, [](Value *V) {
5041 return !cast<LoadInst>(V)->isSimple();
5042 }))
5043 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
5044 else
5045 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
5046 #endif // NDEBUG
5047 break;
5048 }
5049 return;
5050 }
5051 case Instruction::ZExt:
5052 case Instruction::SExt:
5053 case Instruction::FPToUI:
5054 case Instruction::FPToSI:
5055 case Instruction::FPExt:
5056 case Instruction::PtrToInt:
5057 case Instruction::IntToPtr:
5058 case Instruction::SIToFP:
5059 case Instruction::UIToFP:
5060 case Instruction::Trunc:
5061 case Instruction::FPTrunc:
5062 case Instruction::BitCast: {
5063 Type *SrcTy = VL0->getOperand(0)->getType();
5064 for (Value *V : VL) {
5065 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType();
5066 if (Ty != SrcTy || !isValidElementType(Ty)) {
5067 BS.cancelScheduling(VL, VL0);
5068 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5069 ReuseShuffleIndicies);
5070 LLVM_DEBUG(dbgs()
5071 << "SLP: Gathering casts with different src types.\n");
5072 return;
5073 }
5074 }
5075 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5076 ReuseShuffleIndicies);
5077 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");
5078
5079 TE->setOperandsInOrder();
5080 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
5081 ValueList Operands;
5082 // Prepare the operand vector.
5083 for (Value *V : VL)
5084 Operands.push_back(cast<Instruction>(V)->getOperand(i));
5085
5086 buildTree_rec(Operands, Depth + 1, {TE, i});
5087 }
5088 return;
5089 }
5090 case Instruction::ICmp:
5091 case Instruction::FCmp: {
5092 // Check that all of the compares have the same predicate.
5093 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
5094 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0);
5095 Type *ComparedTy = VL0->getOperand(0)->getType();
5096 for (Value *V : VL) {
5097 CmpInst *Cmp = cast<CmpInst>(V);
5098 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) ||
5099 Cmp->getOperand(0)->getType() != ComparedTy) {
5100 BS.cancelScheduling(VL, VL0);
5101 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5102 ReuseShuffleIndicies);
5103 LLVM_DEBUG(dbgs()
5104 << "SLP: Gathering cmp with different predicate.\n");
5105 return;
5106 }
5107 }
5108
5109 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5110 ReuseShuffleIndicies);
5111 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n");
5112
5113 ValueList Left, Right;
5114 if (cast<CmpInst>(VL0)->isCommutative()) {
5115 // Commutative predicate - collect + sort operands of the instructions
5116 // so that each side is more likely to have the same opcode.
5117 assert(P0 == SwapP0 && "Commutative Predicate mismatch");
5118 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
5119 } else {
5120 // Collect operands - commute if it uses the swapped predicate.
5121 for (Value *V : VL) {
5122 auto *Cmp = cast<CmpInst>(V);
5123 Value *LHS = Cmp->getOperand(0);
5124 Value *RHS = Cmp->getOperand(1);
5125 if (Cmp->getPredicate() != P0)
5126 std::swap(LHS, RHS);
5127 Left.push_back(LHS);
5128 Right.push_back(RHS);
5129 }
5130 }
5131 TE->setOperand(0, Left);
5132 TE->setOperand(1, Right);
5133 buildTree_rec(Left, Depth + 1, {TE, 0});
5134 buildTree_rec(Right, Depth + 1, {TE, 1});
5135 return;
5136 }
5137 case Instruction::Select:
5138 case Instruction::FNeg:
5139 case Instruction::Add:
5140 case Instruction::FAdd:
5141 case Instruction::Sub:
5142 case Instruction::FSub:
5143 case Instruction::Mul:
5144 case Instruction::FMul:
5145 case Instruction::UDiv:
5146 case Instruction::SDiv:
5147 case Instruction::FDiv:
5148 case Instruction::URem:
5149 case Instruction::SRem:
5150 case Instruction::FRem:
5151 case Instruction::Shl:
5152 case Instruction::LShr:
5153 case Instruction::AShr:
5154 case Instruction::And:
5155 case Instruction::Or:
5156 case Instruction::Xor: {
5157 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5158 ReuseShuffleIndicies);
5159 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n");
5160
5161 // Sort operands of the instructions so that each side is more likely to
5162 // have the same opcode.
5163 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
5164 ValueList Left, Right;
5165 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
5166 TE->setOperand(0, Left);
5167 TE->setOperand(1, Right);
5168 buildTree_rec(Left, Depth + 1, {TE, 0});
5169 buildTree_rec(Right, Depth + 1, {TE, 1});
5170 return;
5171 }
5172
5173 TE->setOperandsInOrder();
5174 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
5175 ValueList Operands;
5176 // Prepare the operand vector.
5177 for (Value *V : VL)
5178 Operands.push_back(cast<Instruction>(V)->getOperand(i));
5179
5180 buildTree_rec(Operands, Depth + 1, {TE, i});
5181 }
5182 return;
5183 }
5184 case Instruction::GetElementPtr: {
5185 // We don't combine GEPs with complicated (nested) indexing.
5186 for (Value *V : VL) {
5187 auto *I = dyn_cast<GetElementPtrInst>(V);
5188 if (!I)
5189 continue;
5190 if (I->getNumOperands() != 2) {
5191 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
5192 BS.cancelScheduling(VL, VL0);
5193 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5194 ReuseShuffleIndicies);
5195 return;
5196 }
5197 }
5198
5199 // We can't combine several GEPs into one vector if they operate on
5200 // different types.
5201 Type *Ty0 = cast<GEPOperator>(VL0)->getSourceElementType();
5202 for (Value *V : VL) {
5203 auto *GEP = dyn_cast<GEPOperator>(V);
5204 if (!GEP)
5205 continue;
5206 Type *CurTy = GEP->getSourceElementType();
5207 if (Ty0 != CurTy) {
5208 LLVM_DEBUG(dbgs()
5209 << "SLP: not-vectorizable GEP (different types).\n");
5210 BS.cancelScheduling(VL, VL0);
5211 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5212 ReuseShuffleIndicies);
5213 return;
5214 }
5215 }
5216
5217 // We don't combine GEPs with non-constant indexes.
5218 Type *Ty1 = VL0->getOperand(1)->getType();
5219 for (Value *V : VL) {
5220 auto *I = dyn_cast<GetElementPtrInst>(V);
5221 if (!I)
5222 continue;
5223 auto *Op = I->getOperand(1);
5224 if ((!IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) ||
5225 (Op->getType() != Ty1 &&
5226 ((IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) ||
5227 Op->getType()->getScalarSizeInBits() >
5228 DL->getIndexSizeInBits(
5229 V->getType()->getPointerAddressSpace())))) {
5230 LLVM_DEBUG(dbgs()
5231 << "SLP: not-vectorizable GEP (non-constant indexes).\n");
5232 BS.cancelScheduling(VL, VL0);
5233 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5234 ReuseShuffleIndicies);
5235 return;
5236 }
5237 }
5238
5239 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5240 ReuseShuffleIndicies);
5241 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
5242 SmallVector<ValueList, 2> Operands(2);
5243 // Prepare the operand vector for pointer operands.
5244 for (Value *V : VL) {
5245 auto *GEP = dyn_cast<GetElementPtrInst>(V);
5246 if (!GEP) {
5247 Operands.front().push_back(V);
5248 continue;
5249 }
5250 Operands.front().push_back(GEP->getPointerOperand());
5251 }
5252 TE->setOperand(0, Operands.front());
5253 // Need to cast all indices to the same type before vectorization to
5254       // avoid a crash.
5255 // Required to be able to find correct matches between different gather
5256 // nodes and reuse the vectorized values rather than trying to gather them
5257 // again.
5258 int IndexIdx = 1;
5259 Type *VL0Ty = VL0->getOperand(IndexIdx)->getType();
5260 Type *Ty = all_of(VL,
5261 [VL0Ty, IndexIdx](Value *V) {
5262 auto *GEP = dyn_cast<GetElementPtrInst>(V);
5263 if (!GEP)
5264 return true;
5265 return VL0Ty == GEP->getOperand(IndexIdx)->getType();
5266 })
5267 ? VL0Ty
5268 : DL->getIndexType(cast<GetElementPtrInst>(VL0)
5269 ->getPointerOperandType()
5270 ->getScalarType());
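      // Illustrative example: for {gep %p, i32 1; gep %q, i64 2} the index
      // types differ, so Ty falls back to the pointer index type and the
      // constant indices are cast to it in the loop below.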
5271 // Prepare the operand vector.
5272 for (Value *V : VL) {
5273 auto *I = dyn_cast<GetElementPtrInst>(V);
5274 if (!I) {
5275 Operands.back().push_back(
5276 ConstantInt::get(Ty, 0, /*isSigned=*/false));
5277 continue;
5278 }
5279 auto *Op = I->getOperand(IndexIdx);
5280 auto *CI = dyn_cast<ConstantInt>(Op);
5281 if (!CI)
5282 Operands.back().push_back(Op);
5283 else
5284 Operands.back().push_back(ConstantExpr::getIntegerCast(
5285 CI, Ty, CI->getValue().isSignBitSet()));
5286 }
5287 TE->setOperand(IndexIdx, Operands.back());
5288
5289 for (unsigned I = 0, Ops = Operands.size(); I < Ops; ++I)
5290 buildTree_rec(Operands[I], Depth + 1, {TE, I});
5291 return;
5292 }
5293 case Instruction::Store: {
5294 // Check if the stores are consecutive or if we need to swizzle them.
5295 llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType();
5296 // Avoid types that are padded when being allocated as scalars, while
5297 // being packed together in a vector (such as i1).
5298 if (DL->getTypeSizeInBits(ScalarTy) !=
5299 DL->getTypeAllocSizeInBits(ScalarTy)) {
5300 BS.cancelScheduling(VL, VL0);
5301 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5302 ReuseShuffleIndicies);
5303 LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n");
5304 return;
5305 }
5306 // Make sure all stores in the bundle are simple - we can't vectorize
5307 // atomic or volatile stores.
5308 SmallVector<Value *, 4> PointerOps(VL.size());
5309 ValueList Operands(VL.size());
5310 auto POIter = PointerOps.begin();
5311 auto OIter = Operands.begin();
5312 for (Value *V : VL) {
5313 auto *SI = cast<StoreInst>(V);
5314 if (!SI->isSimple()) {
5315 BS.cancelScheduling(VL, VL0);
5316 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5317 ReuseShuffleIndicies);
5318 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n");
5319 return;
5320 }
5321 *POIter = SI->getPointerOperand();
5322 *OIter = SI->getValueOperand();
5323 ++POIter;
5324 ++OIter;
5325 }
5326
5327 OrdersType CurrentOrder;
5328 // Check the order of pointer operands.
5329 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) {
5330 Value *Ptr0;
5331 Value *PtrN;
5332 if (CurrentOrder.empty()) {
5333 Ptr0 = PointerOps.front();
5334 PtrN = PointerOps.back();
5335 } else {
5336 Ptr0 = PointerOps[CurrentOrder.front()];
5337 PtrN = PointerOps[CurrentOrder.back()];
5338 }
5339 Optional<int> Dist =
5340 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE);
5341 // Check that the sorted pointer operands are consecutive.
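        // Illustrative example: for 4 stores to %p+0 .. %p+3 the distance
        // between the first and last sorted pointer is 3 == VL.size() - 1,
        // so the bundle is treated as consecutive.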
5342 if (static_cast<unsigned>(*Dist) == VL.size() - 1) {
5343 if (CurrentOrder.empty()) {
5344             // Original stores are consecutive and do not require reordering.
5345 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
5346 UserTreeIdx, ReuseShuffleIndicies);
5347 TE->setOperandsInOrder();
5348 buildTree_rec(Operands, Depth + 1, {TE, 0});
5349 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
5350 } else {
5351 fixupOrderingIndices(CurrentOrder);
5352 TreeEntry *TE =
5353 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5354 ReuseShuffleIndicies, CurrentOrder);
5355 TE->setOperandsInOrder();
5356 buildTree_rec(Operands, Depth + 1, {TE, 0});
5357 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n");
5358 }
5359 return;
5360 }
5361 }
5362
5363 BS.cancelScheduling(VL, VL0);
5364 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5365 ReuseShuffleIndicies);
5366 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
5367 return;
5368 }
5369 case Instruction::Call: {
5370 // Check if the calls are all to the same vectorizable intrinsic or
5371 // library function.
5372 CallInst *CI = cast<CallInst>(VL0);
5373 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
5374
5375 VFShape Shape = VFShape::get(
5376 *CI, ElementCount::getFixed(static_cast<unsigned int>(VL.size())),
5377 false /*HasGlobalPred*/);
5378 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
5379
5380 if (!VecFunc && !isTriviallyVectorizable(ID)) {
5381 BS.cancelScheduling(VL, VL0);
5382 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5383 ReuseShuffleIndicies);
5384 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
5385 return;
5386 }
5387 Function *F = CI->getCalledFunction();
5388 unsigned NumArgs = CI->arg_size();
5389 SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr);
5390 for (unsigned j = 0; j != NumArgs; ++j)
5391 if (isVectorIntrinsicWithScalarOpAtArg(ID, j))
5392 ScalarArgs[j] = CI->getArgOperand(j);
5393 for (Value *V : VL) {
5394 CallInst *CI2 = dyn_cast<CallInst>(V);
5395 if (!CI2 || CI2->getCalledFunction() != F ||
5396 getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
5397 (VecFunc &&
5398 VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) ||
5399 !CI->hasIdenticalOperandBundleSchema(*CI2)) {
5400 BS.cancelScheduling(VL, VL0);
5401 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5402 ReuseShuffleIndicies);
5403 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V
5404 << "\n");
5405 return;
5406 }
5407         // Some intrinsics have scalar arguments, and these must be the same for
5408         // the calls to be vectorized.
5409 for (unsigned j = 0; j != NumArgs; ++j) {
5410 if (isVectorIntrinsicWithScalarOpAtArg(ID, j)) {
5411 Value *A1J = CI2->getArgOperand(j);
5412 if (ScalarArgs[j] != A1J) {
5413 BS.cancelScheduling(VL, VL0);
5414 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5415 ReuseShuffleIndicies);
5416 LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
5417 << " argument " << ScalarArgs[j] << "!=" << A1J
5418 << "\n");
5419 return;
5420 }
5421 }
5422 }
5423 // Verify that the bundle operands are identical between the two calls.
5424 if (CI->hasOperandBundles() &&
5425 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
5426 CI->op_begin() + CI->getBundleOperandsEndIndex(),
5427 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
5428 BS.cancelScheduling(VL, VL0);
5429 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5430 ReuseShuffleIndicies);
5431 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
5432 << *CI << "!=" << *V << '\n');
5433 return;
5434 }
5435 }
5436
5437 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5438 ReuseShuffleIndicies);
5439 TE->setOperandsInOrder();
5440 for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
5441         // For scalar operands there is no need to create an entry since there is
5442         // nothing to vectorize.
5443 if (isVectorIntrinsicWithScalarOpAtArg(ID, i))
5444 continue;
5445 ValueList Operands;
5446 // Prepare the operand vector.
5447 for (Value *V : VL) {
5448 auto *CI2 = cast<CallInst>(V);
5449 Operands.push_back(CI2->getArgOperand(i));
5450 }
5451 buildTree_rec(Operands, Depth + 1, {TE, i});
5452 }
5453 return;
5454 }
5455 case Instruction::ShuffleVector: {
5456 // If this is not an alternate sequence of opcode like add-sub
5457 // then do not vectorize this instruction.
5458 if (!S.isAltShuffle()) {
5459 BS.cancelScheduling(VL, VL0);
5460 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5461 ReuseShuffleIndicies);
5462 LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
5463 return;
5464 }
5465 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
5466 ReuseShuffleIndicies);
5467 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
5468
5469 // Reorder operands if reordering would enable vectorization.
5470 auto *CI = dyn_cast<CmpInst>(VL0);
5471 if (isa<BinaryOperator>(VL0) || CI) {
5472 ValueList Left, Right;
5473 if (!CI || all_of(VL, [](Value *V) {
5474 return cast<CmpInst>(V)->isCommutative();
5475 })) {
5476 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
5477 } else {
5478 CmpInst::Predicate P0 = CI->getPredicate();
5479 CmpInst::Predicate AltP0 = cast<CmpInst>(S.AltOp)->getPredicate();
5480 assert(P0 != AltP0 &&
5481 "Expected different main/alternate predicates.");
5482 CmpInst::Predicate AltP0Swapped = CmpInst::getSwappedPredicate(AltP0);
5483 Value *BaseOp0 = VL0->getOperand(0);
5484 Value *BaseOp1 = VL0->getOperand(1);
5485 // Collect operands - commute if it uses the swapped predicate or
5486 // alternate operation.
5487 for (Value *V : VL) {
5488 auto *Cmp = cast<CmpInst>(V);
5489 Value *LHS = Cmp->getOperand(0);
5490 Value *RHS = Cmp->getOperand(1);
5491 CmpInst::Predicate CurrentPred = Cmp->getPredicate();
5492 if (P0 == AltP0Swapped) {
5493 if (CI != Cmp && S.AltOp != Cmp &&
5494 ((P0 == CurrentPred &&
5495 !areCompatibleCmpOps(BaseOp0, BaseOp1, LHS, RHS)) ||
5496 (AltP0 == CurrentPred &&
5497 areCompatibleCmpOps(BaseOp0, BaseOp1, LHS, RHS))))
5498 std::swap(LHS, RHS);
5499 } else if (P0 != CurrentPred && AltP0 != CurrentPred) {
5500 std::swap(LHS, RHS);
5501 }
5502 Left.push_back(LHS);
5503 Right.push_back(RHS);
5504 }
5505 }
5506 TE->setOperand(0, Left);
5507 TE->setOperand(1, Right);
5508 buildTree_rec(Left, Depth + 1, {TE, 0});
5509 buildTree_rec(Right, Depth + 1, {TE, 1});
5510 return;
5511 }
5512
5513 TE->setOperandsInOrder();
5514 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
5515 ValueList Operands;
5516 // Prepare the operand vector.
5517 for (Value *V : VL)
5518 Operands.push_back(cast<Instruction>(V)->getOperand(i));
5519
5520 buildTree_rec(Operands, Depth + 1, {TE, i});
5521 }
5522 return;
5523 }
5524 default:
5525 BS.cancelScheduling(VL, VL0);
5526 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
5527 ReuseShuffleIndicies);
5528 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
5529 return;
5530 }
5531 }
5532
5533 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
5534 unsigned N = 1;
5535 Type *EltTy = T;
5536
5537 while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) ||
5538 isa<VectorType>(EltTy)) {
5539 if (auto *ST = dyn_cast<StructType>(EltTy)) {
5540 // Check that struct is homogeneous.
5541 for (const auto *Ty : ST->elements())
5542 if (Ty != *ST->element_begin())
5543 return 0;
5544 N *= ST->getNumElements();
5545 EltTy = *ST->element_begin();
5546 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) {
5547 N *= AT->getNumElements();
5548 EltTy = AT->getElementType();
5549 } else {
5550 auto *VT = cast<FixedVectorType>(EltTy);
5551 N *= VT->getNumElements();
5552 EltTy = VT->getElementType();
5553 }
5554 }
5555
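  // Illustrative example: T = {[4 x float]} flattens to N = 4 with
  // EltTy = float; the aggregate can then be treated as a <4 x float>
  // if the size checks below also pass.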
5556 if (!isValidElementType(EltTy))
5557 return 0;
5558 uint64_t VTSize = DL.getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N));
5559 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T))
5560 return 0;
5561 return N;
5562 }
5563
5564 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
5565 SmallVectorImpl<unsigned> &CurrentOrder) const {
5566 const auto *It = find_if(VL, [](Value *V) {
5567 return isa<ExtractElementInst, ExtractValueInst>(V);
5568 });
5569 assert(It != VL.end() && "Expected at least one extract instruction.");
5570 auto *E0 = cast<Instruction>(*It);
5571 assert(all_of(VL,
5572 [](Value *V) {
5573 return isa<UndefValue, ExtractElementInst, ExtractValueInst>(
5574 V);
5575 }) &&
5576 "Invalid opcode");
5577 // Check if all of the extracts come from the same vector and from the
5578 // correct offset.
5579 Value *Vec = E0->getOperand(0);
5580
5581 CurrentOrder.clear();
5582
5583 // We have to extract from a vector/aggregate with the same number of elements.
5584 unsigned NElts;
5585 if (E0->getOpcode() == Instruction::ExtractValue) {
5586 const DataLayout &DL = E0->getModule()->getDataLayout();
5587 NElts = canMapToVector(Vec->getType(), DL);
5588 if (!NElts)
5589 return false;
5590 // Check if load can be rewritten as load of vector.
5591 LoadInst *LI = dyn_cast<LoadInst>(Vec);
5592 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
5593 return false;
5594 } else {
5595 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
5596 }
5597
5598 if (NElts != VL.size())
5599 return false;
5600
5601 // Check that all of the indices extract from the correct offset.
5602 bool ShouldKeepOrder = true;
5603 unsigned E = VL.size();
5604 // Assign to all items the initial value E so we can check if the extract
5605 // instruction index was used already.
5606 // Also, later we can check that all the indices are used and we have a
5607 // consecutive access in the extract instructions, by checking that no
5608 // element of CurrentOrder still has value E.
5609 CurrentOrder.assign(E, E);
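// Worked example (illustrative): extracts with indices {1, 0, 2, 3} make the
// loop below fill CurrentOrder = {1, 0, 2, 3}; ShouldKeepOrder becomes false,
// so the function returns false while leaving the complete, non-identity
// order for the caller.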
5610 unsigned I = 0;
5611 for (; I < E; ++I) {
5612 auto *Inst = dyn_cast<Instruction>(VL[I]);
5613 if (!Inst)
5614 continue;
5615 if (Inst->getOperand(0) != Vec)
5616 break;
5617 if (auto *EE = dyn_cast<ExtractElementInst>(Inst))
5618 if (isa<UndefValue>(EE->getIndexOperand()))
5619 continue;
5620 Optional<unsigned> Idx = getExtractIndex(Inst);
5621 if (!Idx)
5622 break;
5623 const unsigned ExtIdx = *Idx;
5624 if (ExtIdx != I) {
5625 if (ExtIdx >= E || CurrentOrder[ExtIdx] != E)
5626 break;
5627 ShouldKeepOrder = false;
5628 CurrentOrder[ExtIdx] = I;
5629 } else {
5630 if (CurrentOrder[I] != E)
5631 break;
5632 CurrentOrder[I] = I;
5633 }
5634 }
5635 if (I < E) {
5636 CurrentOrder.clear();
5637 return false;
5638 }
5639 if (ShouldKeepOrder)
5640 CurrentOrder.clear();
5641
5642 return ShouldKeepOrder;
5643 }
5644
5645 bool BoUpSLP::areAllUsersVectorized(Instruction *I,
5646 ArrayRef<Value *> VectorizedVals) const {
5647 return (I->hasOneUse() && is_contained(VectorizedVals, I)) ||
5648 all_of(I->users(), [this](User *U) {
5649 return ScalarToTreeEntry.count(U) > 0 ||
5650 isVectorLikeInstWithConstOps(U) ||
5651 (isa<ExtractElementInst>(U) && MustGather.contains(U));
5652 });
5653 }
5654
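/// Descriptive comment (added for clarity, not in the original source):
/// computes the cost of widening \p CI into a vector intrinsic call and the
/// cost of the matching vector library call; the second value falls back to
/// the intrinsic cost when no usable vector library function exists.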
5655 static std::pair<InstructionCost, InstructionCost>
5656 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy,
5657 TargetTransformInfo *TTI, TargetLibraryInfo *TLI) {
5658 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
5659
5660 // Calculate the cost of the scalar and vector calls.
5661 SmallVector<Type *, 4> VecTys;
5662 for (Use &Arg : CI->args())
5663 VecTys.push_back(
5664 FixedVectorType::get(Arg->getType(), VecTy->getNumElements()));
5665 FastMathFlags FMF;
5666 if (auto *FPCI = dyn_cast<FPMathOperator>(CI))
5667 FMF = FPCI->getFastMathFlags();
5668 SmallVector<const Value *> Arguments(CI->args());
5669 IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF,
5670 dyn_cast<IntrinsicInst>(CI));
5671 auto IntrinsicCost =
5672 TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput);
5673
5674 auto Shape = VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
5675 VecTy->getNumElements())),
5676 false /*HasGlobalPred*/);
5677 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
5678 auto LibCost = IntrinsicCost;
5679 if (!CI->isNoBuiltin() && VecFunc) {
5680 // Calculate the cost of the vector library call.
5681 // If the corresponding vector call is cheaper, return its cost.
5682 LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys,
5683 TTI::TCK_RecipThroughput);
5684 }
5685 return {IntrinsicCost, LibCost};
5686 }
5687
5688 /// Compute the cost of creating a vector of type \p VecTy containing the
5689 /// extracted values from \p VL.
5690 static InstructionCost
5691 computeExtractCost(ArrayRef<Value *> VL, FixedVectorType *VecTy,
5692 TargetTransformInfo::ShuffleKind ShuffleKind,
5693 ArrayRef<int> Mask, TargetTransformInfo &TTI) {
5694 unsigned NumOfParts = TTI.getNumberOfParts(VecTy);
5695
5696 if (ShuffleKind != TargetTransformInfo::SK_PermuteSingleSrc || !NumOfParts ||
5697 VecTy->getNumElements() < NumOfParts)
5698 return TTI.getShuffleCost(ShuffleKind, VecTy, Mask);
5699
5700 bool AllConsecutive = true;
5701 unsigned EltsPerVector = VecTy->getNumElements() / NumOfParts;
5702 unsigned Idx = -1;
5703 InstructionCost Cost = 0;
5704
5705 // Process extracts in blocks of EltsPerVector to check if the source vector
5706 // operand can be re-used directly. If not, add the cost of creating a shuffle
5707 // to extract the values into a vector register.
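// Example (illustrative): if <8 x i32> splits into 2 register-sized parts on
// the target, EltsPerVector = 4, so indices are checked in blocks of 4; a
// block whose extracts are not consecutive adds one SK_PermuteSingleSrc
// shuffle to the cost.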
5708 SmallVector<int> RegMask(EltsPerVector, UndefMaskElem);
5709 for (auto *V : VL) {
5710 ++Idx;
5711
5712 // Reached the start of a new vector register.
5713 if (Idx % EltsPerVector == 0) {
5714 RegMask.assign(EltsPerVector, UndefMaskElem);
5715 AllConsecutive = true;
5716 continue;
5717 }
5718
5719 // Need to exclude undefs from analysis.
5720 if (isa<UndefValue>(V) || Mask[Idx] == UndefMaskElem)
5721 continue;
5722
5723 // Check whether all extracts for a vector register on the target directly
5724 // extract values in order.
5725 unsigned CurrentIdx = *getExtractIndex(cast<Instruction>(V));
5726 if (!isa<UndefValue>(VL[Idx - 1]) && Mask[Idx - 1] != UndefMaskElem) {
5727 unsigned PrevIdx = *getExtractIndex(cast<Instruction>(VL[Idx - 1]));
5728 AllConsecutive &= PrevIdx + 1 == CurrentIdx &&
5729 CurrentIdx % EltsPerVector == Idx % EltsPerVector;
5730 RegMask[Idx % EltsPerVector] = CurrentIdx % EltsPerVector;
5731 }
5732
5733 if (AllConsecutive)
5734 continue;
5735
5736 // Skip all indices, except for the last index per vector block.
5737 if ((Idx + 1) % EltsPerVector != 0 && Idx + 1 != VL.size())
5738 continue;
5739
5740 // If we have a series of extracts which are not consecutive and hence
5741 // cannot re-use the source vector register directly, compute the shuffle
5742 // cost to extract the vector with EltsPerVector elements.
5743 Cost += TTI.getShuffleCost(
5744 TargetTransformInfo::SK_PermuteSingleSrc,
5745 FixedVectorType::get(VecTy->getElementType(), EltsPerVector), RegMask);
5746 }
5747 return Cost;
5748 }
5749
5750 /// Build a shuffle mask for shuffle graph entries and, optionally, collect the
5751 /// lists of the main and alternate operations' operands.
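/// For example (illustrative), for VL = {add, sub, add, sub} with Sz = 4, no
/// reordering, and IsAltOp matching the subs, the resulting Mask is
/// {0, 4 + 1, 2, 4 + 3}: even lanes come from the main-op vector and odd
/// lanes from the alternate-op vector.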
5752 static void
5753 buildShuffleEntryMask(ArrayRef<Value *> VL, ArrayRef<unsigned> ReorderIndices,
5754 ArrayRef<int> ReusesIndices,
5755 const function_ref<bool(Instruction *)> IsAltOp,
5756 SmallVectorImpl<int> &Mask,
5757 SmallVectorImpl<Value *> *OpScalars = nullptr,
5758 SmallVectorImpl<Value *> *AltScalars = nullptr) {
5759 unsigned Sz = VL.size();
5760 Mask.assign(Sz, UndefMaskElem);
5761 SmallVector<int> OrderMask;
5762 if (!ReorderIndices.empty())
5763 inversePermutation(ReorderIndices, OrderMask);
5764 for (unsigned I = 0; I < Sz; ++I) {
5765 unsigned Idx = I;
5766 if (!ReorderIndices.empty())
5767 Idx = OrderMask[I];
5768 auto *OpInst = cast<Instruction>(VL[Idx]);
5769 if (IsAltOp(OpInst)) {
5770 Mask[I] = Sz + Idx;
5771 if (AltScalars)
5772 AltScalars->push_back(OpInst);
5773 } else {
5774 Mask[I] = Idx;
5775 if (OpScalars)
5776 OpScalars->push_back(OpInst);
5777 }
5778 }
5779 if (!ReusesIndices.empty()) {
5780 SmallVector<int> NewMask(ReusesIndices.size(), UndefMaskElem);
5781 transform(ReusesIndices, NewMask.begin(), [&Mask](int Idx) {
5782 return Idx != UndefMaskElem ? Mask[Idx] : UndefMaskElem;
5783 });
5784 Mask.swap(NewMask);
5785 }
5786 }
5787
5788 /// Checks if the specified instruction \p I is an alternate operation for the
5789 /// given \p MainOp and \p AltOp instructions.
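/// For example (illustrative), with MainOp = fadd and AltOp = fsub, any fsub
/// in the bundle is classified as the alternate operation; compares are
/// handled specially because swapped predicates must also be considered.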
5790 static bool isAlternateInstruction(const Instruction *I,
5791 const Instruction *MainOp,
5792 const Instruction *AltOp) {
5793 if (auto *CI0 = dyn_cast<CmpInst>(MainOp)) {
5794 auto *AltCI0 = cast<CmpInst>(AltOp);
5795 auto *CI = cast<CmpInst>(I);
5796 CmpInst::Predicate P0 = CI0->getPredicate();
5797 CmpInst::Predicate AltP0 = AltCI0->getPredicate();
5798 assert(P0 != AltP0 && "Expected different main/alternate predicates.");
5799 CmpInst::Predicate AltP0Swapped = CmpInst::getSwappedPredicate(AltP0);
5800 CmpInst::Predicate CurrentPred = CI->getPredicate();
5801 if (P0 == AltP0Swapped)
5802 return I == AltCI0 ||
5803 (I != MainOp &&
5804 !areCompatibleCmpOps(CI0->getOperand(0), CI0->getOperand(1),
5805 CI->getOperand(0), CI->getOperand(1)));
5806 return AltP0 == CurrentPred || AltP0Swapped == CurrentPred;
5807 }
5808 return I->getOpcode() == AltOp->getOpcode();
5809 }
5810
5811 InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
5812 ArrayRef<Value *> VectorizedVals) {
5813 ArrayRef<Value*> VL = E->Scalars;
5814
5815 Type *ScalarTy = VL[0]->getType();
5816 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
5817 ScalarTy = SI->getValueOperand()->getType();
5818 else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0]))
5819 ScalarTy = CI->getOperand(0)->getType();
5820 else if (auto *IE = dyn_cast<InsertElementInst>(VL[0]))
5821 ScalarTy = IE->getOperand(1)->getType();
5822 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
5823 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
5824
5825 // If we have computed a smaller type for the expression, update VecTy so
5826 // that the costs will be accurate.
5827 if (MinBWs.count(VL[0]))
5828 VecTy = FixedVectorType::get(
5829 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size());
5830 unsigned EntryVF = E->getVectorFactor();
5831 auto *FinalVecTy = FixedVectorType::get(VecTy->getElementType(), EntryVF);
5832
5833 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
5834 // FIXME: it tries to fix a problem with MSVC buildbots.
5835 TargetTransformInfo &TTIRef = *TTI;
5836 auto &&AdjustExtractsCost = [this, &TTIRef, CostKind, VL, VecTy,
5837 VectorizedVals, E](InstructionCost &Cost) {
5838 DenseMap<Value *, int> ExtractVectorsTys;
5839 SmallPtrSet<Value *, 4> CheckedExtracts;
5840 for (auto *V : VL) {
5841 if (isa<UndefValue>(V))
5842 continue;
5843 // If all users of instruction are going to be vectorized and this
5844 // instruction itself is not going to be vectorized, consider this
5845 // instruction as dead and remove its cost from the final cost of the
5846 // vectorized tree.
5847 // Also, avoid adjusting the cost for extractelements with multiple uses
5848 // in different graph entries.
5849 const TreeEntry *VE = getTreeEntry(V);
5850 if (!CheckedExtracts.insert(V).second ||
5851 !areAllUsersVectorized(cast<Instruction>(V), VectorizedVals) ||
5852 (VE && VE != E))
5853 continue;
5854 auto *EE = cast<ExtractElementInst>(V);
5855 Optional<unsigned> EEIdx = getExtractIndex(EE);
5856 if (!EEIdx)
5857 continue;
5858 unsigned Idx = *EEIdx;
5859 if (TTIRef.getNumberOfParts(VecTy) !=
5860 TTIRef.getNumberOfParts(EE->getVectorOperandType())) {
5861 auto It =
5862 ExtractVectorsTys.try_emplace(EE->getVectorOperand(), Idx).first;
5863 It->getSecond() = std::min<int>(It->second, Idx);
5864 }
5865 // Take credit for instruction that will become dead.
5866 if (EE->hasOneUse()) {
5867 Instruction *Ext = EE->user_back();
5868 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
5869 all_of(Ext->users(),
5870 [](User *U) { return isa<GetElementPtrInst>(U); })) {
5871 // Use getExtractWithExtendCost() to calculate the cost of
5872 // extractelement/ext pair.
5873 Cost -=
5874 TTIRef.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(),
5875 EE->getVectorOperandType(), Idx);
5876 // Add back the cost of s|zext which is subtracted separately.
5877 Cost += TTIRef.getCastInstrCost(
5878 Ext->getOpcode(), Ext->getType(), EE->getType(),
5879 TTI::getCastContextHint(Ext), CostKind, Ext);
5880 continue;
5881 }
5882 }
5883 Cost -= TTIRef.getVectorInstrCost(Instruction::ExtractElement,
5884 EE->getVectorOperandType(), Idx);
5885 }
5886 // Add a cost for subvector extracts/inserts if required.
5887 for (const auto &Data : ExtractVectorsTys) {
5888 auto *EEVTy = cast<FixedVectorType>(Data.first->getType());
5889 unsigned NumElts = VecTy->getNumElements();
5890 if (Data.second % NumElts == 0)
5891 continue;
5892 if (TTIRef.getNumberOfParts(EEVTy) > TTIRef.getNumberOfParts(VecTy)) {
5893 unsigned Idx = (Data.second / NumElts) * NumElts;
5894 unsigned EENumElts = EEVTy->getNumElements();
5895 if (Idx + NumElts <= EENumElts) {
5896 Cost +=
5897 TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
5898 EEVTy, None, Idx, VecTy);
5899 } else {
5900 // Need to round up the subvector type vectorization factor to avoid a
5901 // crash in cost model functions. Make SubVT so that Idx + VF of SubVT
5902 // <= EENumElts.
5903 auto *SubVT =
5904 FixedVectorType::get(VecTy->getElementType(), EENumElts - Idx);
5905 Cost +=
5906 TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
5907 EEVTy, None, Idx, SubVT);
5908 }
5909 } else {
5910 Cost += TTIRef.getShuffleCost(TargetTransformInfo::SK_InsertSubvector,
5911 VecTy, None, 0, EEVTy);
5912 }
5913 }
5914 };
5915 if (E->State == TreeEntry::NeedToGather) {
5916 if (allConstant(VL))
5917 return 0;
5918 if (isa<InsertElementInst>(VL[0]))
5919 return InstructionCost::getInvalid();
5920 SmallVector<int> Mask;
5921 SmallVector<const TreeEntry *> Entries;
5922 Optional<TargetTransformInfo::ShuffleKind> Shuffle =
5923 isGatherShuffledEntry(E, Mask, Entries);
5924 if (Shuffle) {
5925 InstructionCost GatherCost = 0;
5926 if (ShuffleVectorInst::isIdentityMask(Mask)) {
5927 // Perfect match in the graph, will reuse the previously vectorized
5928 // node. Cost is 0.
5929 LLVM_DEBUG(
5930 dbgs()
5931 << "SLP: perfect diamond match for gather bundle that starts with "
5932 << *VL.front() << ".\n");
5933 if (NeedToShuffleReuses)
5934 GatherCost =
5935 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
5936 FinalVecTy, E->ReuseShuffleIndices);
5937 } else {
5938 LLVM_DEBUG(dbgs() << "SLP: shuffled " << Entries.size()
5939 << " entries for bundle that starts with "
5940 << *VL.front() << ".\n");
5941 // Detected that instead of gather we can emit a shuffle of single/two
5942 // previously vectorized nodes. Add the cost of the permutation rather
5943 // than gather.
5944 ::addMask(Mask, E->ReuseShuffleIndices);
5945 GatherCost = TTI->getShuffleCost(*Shuffle, FinalVecTy, Mask);
5946 }
5947 return GatherCost;
5948 }
5949 if ((E->getOpcode() == Instruction::ExtractElement ||
5950 all_of(E->Scalars,
5951 [](Value *V) {
5952 return isa<ExtractElementInst, UndefValue>(V);
5953 })) &&
5954 allSameType(VL)) {
5955 // Check that gather of extractelements can be represented as just a
5956 // shuffle of a single/two vectors the scalars are extracted from.
5957 SmallVector<int> Mask;
5958 Optional<TargetTransformInfo::ShuffleKind> ShuffleKind =
5959 isFixedVectorShuffle(VL, Mask);
5960 if (ShuffleKind) {
5961 // Found a bunch of extractelement instructions that must be gathered
5962 // into a vector and can be represented as a permutation of elements in a
5963 // single input vector or of 2 input vectors.
5964 InstructionCost Cost =
5965 computeExtractCost(VL, VecTy, *ShuffleKind, Mask, *TTI);
5966 AdjustExtractsCost(Cost);
5967 if (NeedToShuffleReuses)
5968 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
5969 FinalVecTy, E->ReuseShuffleIndices);
5970 return Cost;
5971 }
5972 }
5973 if (isSplat(VL)) {
5974 // Found the broadcasting of the single scalar, calculate the cost as the
5975 // broadcast.
5976 assert(VecTy == FinalVecTy &&
5977 "No reused scalars expected for broadcast.");
5978 return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy,
5979 /*Mask=*/None, /*Index=*/0,
5980 /*SubTp=*/nullptr, /*Args=*/VL[0]);
5981 }
5982 InstructionCost ReuseShuffleCost = 0;
5983 if (NeedToShuffleReuses)
5984 ReuseShuffleCost = TTI->getShuffleCost(
5985 TTI::SK_PermuteSingleSrc, FinalVecTy, E->ReuseShuffleIndices);
5986 // Improve gather cost for gather of loads, if we can group some of the
5987 // loads into vector loads.
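// For example (illustrative): when gathering 8 loads of which 4 form a
// consecutive slice, the block below charges one vector load for that slice
// plus gathers for the rest, and credits back the scalar costs of the loads
// that became part of the vector load.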
5988 if (VL.size() > 2 && E->getOpcode() == Instruction::Load &&
5989 !E->isAltShuffle()) {
5990 BoUpSLP::ValueSet VectorizedLoads;
5991 unsigned StartIdx = 0;
5992 unsigned VF = VL.size() / 2;
5993 unsigned VectorizedCnt = 0;
5994 unsigned ScatterVectorizeCnt = 0;
5995 const unsigned Sz = DL->getTypeSizeInBits(E->getMainOp()->getType());
5996 for (unsigned MinVF = getMinVF(2 * Sz); VF >= MinVF; VF /= 2) {
5997 for (unsigned Cnt = StartIdx, End = VL.size(); Cnt + VF <= End;
5998 Cnt += VF) {
5999 ArrayRef<Value *> Slice = VL.slice(Cnt, VF);
6000 if (!VectorizedLoads.count(Slice.front()) &&
6001 !VectorizedLoads.count(Slice.back()) && allSameBlock(Slice)) {
6002 SmallVector<Value *> PointerOps;
6003 OrdersType CurrentOrder;
6004 LoadsState LS =
6005 canVectorizeLoads(Slice, Slice.front(), *TTI, *DL, *SE, *LI,
6006 CurrentOrder, PointerOps);
6007 switch (LS) {
6008 case LoadsState::Vectorize:
6009 case LoadsState::ScatterVectorize:
6010 // Mark the vectorized loads so that we don't vectorize them
6011 // again.
6012 if (LS == LoadsState::Vectorize)
6013 ++VectorizedCnt;
6014 else
6015 ++ScatterVectorizeCnt;
6016 VectorizedLoads.insert(Slice.begin(), Slice.end());
6017 // If we vectorized the initial block, no need to try to vectorize it
6018 // again.
6019 if (Cnt == StartIdx)
6020 StartIdx += VF;
6021 break;
6022 case LoadsState::Gather:
6023 break;
6024 }
6025 }
6026 }
6027 // Check if the whole array was vectorized already - exit.
6028 if (StartIdx >= VL.size())
6029 break;
6030 // Found vectorizable parts - exit.
6031 if (!VectorizedLoads.empty())
6032 break;
6033 }
6034 if (!VectorizedLoads.empty()) {
6035 InstructionCost GatherCost = 0;
6036 unsigned NumParts = TTI->getNumberOfParts(VecTy);
6037 bool NeedInsertSubvectorAnalysis =
6038 !NumParts || (VL.size() / VF) > NumParts;
6039 // Get the cost for gathered loads.
6040 for (unsigned I = 0, End = VL.size(); I < End; I += VF) {
6041 if (VectorizedLoads.contains(VL[I]))
6042 continue;
6043 GatherCost += getGatherCost(VL.slice(I, VF));
6044 }
6045 // The cost for vectorized loads.
6046 InstructionCost ScalarsCost = 0;
6047 for (Value *V : VectorizedLoads) {
6048 auto *LI = cast<LoadInst>(V);
6049 ScalarsCost += TTI->getMemoryOpCost(
6050 Instruction::Load, LI->getType(), LI->getAlign(),
6051 LI->getPointerAddressSpace(), CostKind, LI);
6052 }
6053 auto *LI = cast<LoadInst>(E->getMainOp());
6054 auto *LoadTy = FixedVectorType::get(LI->getType(), VF);
6055 Align Alignment = LI->getAlign();
6056 GatherCost +=
6057 VectorizedCnt *
6058 TTI->getMemoryOpCost(Instruction::Load, LoadTy, Alignment,
6059 LI->getPointerAddressSpace(), CostKind, LI);
6060 GatherCost += ScatterVectorizeCnt *
6061 TTI->getGatherScatterOpCost(
6062 Instruction::Load, LoadTy, LI->getPointerOperand(),
6063 /*VariableMask=*/false, Alignment, CostKind, LI);
6064 if (NeedInsertSubvectorAnalysis) {
6065 // Add the cost for the subvectors insert.
6066 for (int I = VF, E = VL.size(); I < E; I += VF)
6067 GatherCost += TTI->getShuffleCost(TTI::SK_InsertSubvector, VecTy,
6068 None, I, LoadTy);
6069 }
6070 return ReuseShuffleCost + GatherCost - ScalarsCost;
6071 }
6072 }
6073 return ReuseShuffleCost + getGatherCost(VL);
6074 }
6075 InstructionCost CommonCost = 0;
6076 SmallVector<int> Mask;
6077 if (!E->ReorderIndices.empty()) {
6078 SmallVector<int> NewMask;
6079 if (E->getOpcode() == Instruction::Store) {
6080 // For stores the order is actually a mask.
6081 NewMask.resize(E->ReorderIndices.size());
6082 copy(E->ReorderIndices, NewMask.begin());
6083 } else {
6084 inversePermutation(E->ReorderIndices, NewMask);
6085 }
6086 ::addMask(Mask, NewMask);
6087 }
6088 if (NeedToShuffleReuses)
6089 ::addMask(Mask, E->ReuseShuffleIndices);
6090 if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask))
6091 CommonCost =
6092 TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask);
6093 assert((E->State == TreeEntry::Vectorize ||
6094 E->State == TreeEntry::ScatterVectorize) &&
6095 "Unhandled state");
6096 assert(E->getOpcode() &&
6097 ((allSameType(VL) && allSameBlock(VL)) ||
6098 (E->getOpcode() == Instruction::GetElementPtr &&
6099 E->getMainOp()->getType()->isPointerTy())) &&
6100 "Invalid VL");
6101 Instruction *VL0 = E->getMainOp();
6102 unsigned ShuffleOrOp =
6103 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
6104 switch (ShuffleOrOp) {
6105 case Instruction::PHI:
6106 return 0;
6107
6108 case Instruction::ExtractValue:
6109 case Instruction::ExtractElement: {
6110 // The common cost of removing ExtractElement/ExtractValue instructions +
6111 // the cost of shuffles, if required to reshuffle the original vector.
6112 if (NeedToShuffleReuses) {
6113 unsigned Idx = 0;
6114 for (unsigned I : E->ReuseShuffleIndices) {
6115 if (ShuffleOrOp == Instruction::ExtractElement) {
6116 auto *EE = cast<ExtractElementInst>(VL[I]);
6117 CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
6118 EE->getVectorOperandType(),
6119 *getExtractIndex(EE));
6120 } else {
6121 CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
6122 VecTy, Idx);
6123 ++Idx;
6124 }
6125 }
6126 Idx = EntryVF;
6127 for (Value *V : VL) {
6128 if (ShuffleOrOp == Instruction::ExtractElement) {
6129 auto *EE = cast<ExtractElementInst>(V);
6130 CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
6131 EE->getVectorOperandType(),
6132 *getExtractIndex(EE));
6133 } else {
6134 --Idx;
6135 CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
6136 VecTy, Idx);
6137 }
6138 }
6139 }
6140 if (ShuffleOrOp == Instruction::ExtractValue) {
6141 for (unsigned I = 0, E = VL.size(); I < E; ++I) {
6142 auto *EI = cast<Instruction>(VL[I]);
6143 // Take credit for instruction that will become dead.
6144 if (EI->hasOneUse()) {
6145 Instruction *Ext = EI->user_back();
6146 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
6147 all_of(Ext->users(),
6148 [](User *U) { return isa<GetElementPtrInst>(U); })) {
6149 // Use getExtractWithExtendCost() to calculate the cost of
6150 // extractelement/ext pair.
6151 CommonCost -= TTI->getExtractWithExtendCost(
6152 Ext->getOpcode(), Ext->getType(), VecTy, I);
6153 // Add back the cost of s|zext which is subtracted separately.
6154 CommonCost += TTI->getCastInstrCost(
6155 Ext->getOpcode(), Ext->getType(), EI->getType(),
6156 TTI::getCastContextHint(Ext), CostKind, Ext);
6157 continue;
6158 }
6159 }
6160 CommonCost -=
6161 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, I);
6162 }
6163 } else {
6164 AdjustExtractsCost(CommonCost);
6165 }
6166 return CommonCost;
6167 }
6168 case Instruction::InsertElement: {
6169 assert(E->ReuseShuffleIndices.empty() &&
6170 "Unique insertelements only are expected.");
6171 auto *SrcVecTy = cast<FixedVectorType>(VL0->getType());
6172 unsigned const NumElts = SrcVecTy->getNumElements();
6173 unsigned const NumScalars = VL.size();
6174
6175 unsigned NumOfParts = TTI->getNumberOfParts(SrcVecTy);
6176
6177 unsigned OffsetBeg = *getInsertIndex(VL.front());
6178 unsigned OffsetEnd = OffsetBeg;
6179 for (Value *V : VL.drop_front()) {
6180 unsigned Idx = *getInsertIndex(V);
6181 if (OffsetBeg > Idx)
6182 OffsetBeg = Idx;
6183 else if (OffsetEnd < Idx)
6184 OffsetEnd = Idx;
6185 }
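// Example (illustrative): for insert indices {2, 3, 4, 5} the loop above
// yields OffsetBeg = 2 and OffsetEnd = 5, so only that window of the
// destination vector is considered when costing the insert below.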
6186 unsigned VecScalarsSz = PowerOf2Ceil(NumElts);
6187 if (NumOfParts > 0)
6188 VecScalarsSz = PowerOf2Ceil((NumElts + NumOfParts - 1) / NumOfParts);
6189 unsigned VecSz =
6190 (1 + OffsetEnd / VecScalarsSz - OffsetBeg / VecScalarsSz) *
6191 VecScalarsSz;
6192 unsigned Offset = VecScalarsSz * (OffsetBeg / VecScalarsSz);
6193 unsigned InsertVecSz = std::min<unsigned>(
6194 PowerOf2Ceil(OffsetEnd - OffsetBeg + 1),
6195 ((OffsetEnd - OffsetBeg + VecScalarsSz) / VecScalarsSz) *
6196 VecScalarsSz);
6197 bool IsWholeSubvector =
6198 OffsetBeg == Offset && ((OffsetEnd + 1) % VecScalarsSz == 0);
6199 // Check if we can safely insert a subvector. If it is not possible, just
6200 // generate a whole-sized vector and shuffle the source vector and the new
6201 // subvector.
6202 if (OffsetBeg + InsertVecSz > VecSz) {
6203 // Align OffsetBeg to generate correct mask.
6204 OffsetBeg = alignDown(OffsetBeg, VecSz, Offset);
6205 InsertVecSz = VecSz;
6206 }
6207
6208 APInt DemandedElts = APInt::getZero(NumElts);
6209 // TODO: Add support for Instruction::InsertValue.
6210 SmallVector<int> Mask;
6211 if (!E->ReorderIndices.empty()) {
6212 inversePermutation(E->ReorderIndices, Mask);
6213 Mask.append(InsertVecSz - Mask.size(), UndefMaskElem);
6214 } else {
6215 Mask.assign(VecSz, UndefMaskElem);
6216 std::iota(Mask.begin(), std::next(Mask.begin(), InsertVecSz), 0);
6217 }
6218 bool IsIdentity = true;
6219 SmallVector<int> PrevMask(InsertVecSz, UndefMaskElem);
6220 Mask.swap(PrevMask);
6221 for (unsigned I = 0; I < NumScalars; ++I) {
6222 unsigned InsertIdx = *getInsertIndex(VL[PrevMask[I]]);
6223 DemandedElts.setBit(InsertIdx);
6224 IsIdentity &= InsertIdx - OffsetBeg == I;
6225 Mask[InsertIdx - OffsetBeg] = I;
6226 }
6227 assert(Offset < NumElts && "Failed to find vector index offset");
6228
6229 InstructionCost Cost = 0;
6230 Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts,
6231 /*Insert*/ true, /*Extract*/ false);
6232
6233 // First cost - resize to actual vector size if not identity shuffle or
6234 // need to shift the vector.
6235 // Do not calculate the cost if the actual size is the register size and
6236 // we can merge this shuffle with the following SK_Select.
6237 auto *InsertVecTy =
6238 FixedVectorType::get(SrcVecTy->getElementType(), InsertVecSz);
6239 if (!IsIdentity)
6240 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
6241 InsertVecTy, Mask);
6242 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) {
6243 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0));
6244 }));
6245 // Second cost - permutation with subvector, if some elements are from the
6246 // initial vector or inserting a subvector.
6247 // TODO: Implement the analysis of the FirstInsert->getOperand(0)
6248 // subvector of ActualVecTy.
6249 if (!isUndefVector(FirstInsert->getOperand(0)) && NumScalars != NumElts &&
6250 !IsWholeSubvector) {
6251 if (InsertVecSz != VecSz) {
6252 auto *ActualVecTy =
6253 FixedVectorType::get(SrcVecTy->getElementType(), VecSz);
6254 Cost += TTI->getShuffleCost(TTI::SK_InsertSubvector, ActualVecTy,
6255 None, OffsetBeg - Offset, InsertVecTy);
6256 } else {
6257 for (unsigned I = 0, End = OffsetBeg - Offset; I < End; ++I)
6258 Mask[I] = I;
6259 for (unsigned I = OffsetBeg - Offset, End = OffsetEnd - Offset;
6260 I <= End; ++I)
6261 if (Mask[I] != UndefMaskElem)
6262 Mask[I] = I + VecSz;
6263 for (unsigned I = OffsetEnd + 1 - Offset; I < VecSz; ++I)
6264 Mask[I] = I;
6265 Cost += TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, InsertVecTy, Mask);
6266 }
6267 }
6268 return Cost;
6269 }
6270 case Instruction::ZExt:
6271 case Instruction::SExt:
6272 case Instruction::FPToUI:
6273 case Instruction::FPToSI:
6274 case Instruction::FPExt:
6275 case Instruction::PtrToInt:
6276 case Instruction::IntToPtr:
6277 case Instruction::SIToFP:
6278 case Instruction::UIToFP:
6279 case Instruction::Trunc:
6280 case Instruction::FPTrunc:
6281 case Instruction::BitCast: {
6282 Type *SrcTy = VL0->getOperand(0)->getType();
6283 InstructionCost ScalarEltCost =
6284 TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy,
6285 TTI::getCastContextHint(VL0), CostKind, VL0);
6286 if (NeedToShuffleReuses) {
6287 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
6288 }
6289
6290 // Calculate the cost of this instruction.
6291 InstructionCost ScalarCost = VL.size() * ScalarEltCost;
6292
6293 auto *SrcVecTy = FixedVectorType::get(SrcTy, VL.size());
6294 InstructionCost VecCost = 0;
6295 // Check if the values are candidates to demote.
6296 if (!MinBWs.count(VL0) || VecTy != SrcVecTy) {
6297 VecCost = CommonCost + TTI->getCastInstrCost(
6298 E->getOpcode(), VecTy, SrcVecTy,
6299 TTI::getCastContextHint(VL0), CostKind, VL0);
6300 }
6301 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
6302 return VecCost - ScalarCost;
6303 }
6304 case Instruction::FCmp:
6305 case Instruction::ICmp:
6306 case Instruction::Select: {
6307 // Calculate the cost of this instruction.
6308 InstructionCost ScalarEltCost =
6309 TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, Builder.getInt1Ty(),
6310 CmpInst::BAD_ICMP_PREDICATE, CostKind, VL0);
6311 if (NeedToShuffleReuses) {
6312 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
6313 }
6314 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size());
6315 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
6316
6317 // Check if all entries in VL are either compares or selects with compares
6318 // as their condition, all sharing the same predicate.
6319 CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE;
6320 bool First = true;
6321 for (auto *V : VL) {
6322 CmpInst::Predicate CurrentPred;
6323 auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value());
6324 if ((!match(V, m_Select(MatchCmp, m_Value(), m_Value())) &&
6325 !match(V, MatchCmp)) ||
6326 (!First && VecPred != CurrentPred)) {
6327 VecPred = CmpInst::BAD_ICMP_PREDICATE;
6328 break;
6329 }
6330 First = false;
6331 VecPred = CurrentPred;
6332 }
6333
6334 InstructionCost VecCost = TTI->getCmpSelInstrCost(
6335 E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0);
6336 // Check if it is possible and profitable to use min/max for selects in
6337 // VL.
6338 //
6339 auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL);
6340 if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) {
6341 IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy,
6342 {VecTy, VecTy});
6343 InstructionCost IntrinsicCost =
6344 TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
6345 // If the selects are the only uses of the compares, they will be dead
6346 // and we can adjust the cost by removing their cost.
6347 if (IntrinsicAndUse.second)
6348 IntrinsicCost -= TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy,
6349 MaskTy, VecPred, CostKind);
6350 VecCost = std::min(VecCost, IntrinsicCost);
6351 }
6352 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
6353 return CommonCost + VecCost - ScalarCost;
6354 }
6355 case Instruction::FNeg:
6356 case Instruction::Add:
6357 case Instruction::FAdd:
6358 case Instruction::Sub:
6359 case Instruction::FSub:
6360 case Instruction::Mul:
6361 case Instruction::FMul:
6362 case Instruction::UDiv:
6363 case Instruction::SDiv:
6364 case Instruction::FDiv:
6365 case Instruction::URem:
6366 case Instruction::SRem:
6367 case Instruction::FRem:
6368 case Instruction::Shl:
6369 case Instruction::LShr:
6370 case Instruction::AShr:
6371 case Instruction::And:
6372 case Instruction::Or:
6373 case Instruction::Xor: {
6374 // Certain instructions can be cheaper to vectorize if they have a
6375 // constant second vector operand.
6376 TargetTransformInfo::OperandValueKind Op1VK =
6377 TargetTransformInfo::OK_AnyValue;
6378 TargetTransformInfo::OperandValueKind Op2VK =
6379 TargetTransformInfo::OK_UniformConstantValue;
6380 TargetTransformInfo::OperandValueProperties Op1VP =
6381 TargetTransformInfo::OP_None;
6382 TargetTransformInfo::OperandValueProperties Op2VP =
6383 TargetTransformInfo::OP_PowerOf2;
6384
6385 // If all operands are exactly the same ConstantInt then set the
6386 // operand kind to OK_UniformConstantValue.
6387 // If instead not all operands are constants, then set the operand kind
6388 // to OK_AnyValue. If all operands are constants but not the same,
6389 // then set the operand kind to OK_NonUniformConstantValue.
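// Example (illustrative): if every second operand is the constant 4, the kind
// stays OK_UniformConstantValue with OP_PowerOf2; constants {4, 8} give
// OK_NonUniformConstantValue; any non-constant operand downgrades the kind to
// OK_AnyValue.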
6390 ConstantInt *CInt0 = nullptr;
6391 for (unsigned i = 0, e = VL.size(); i < e; ++i) {
6392 const Instruction *I = cast<Instruction>(VL[i]);
6393 unsigned OpIdx = isa<BinaryOperator>(I) ? 1 : 0;
6394 ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx));
6395 if (!CInt) {
6396 Op2VK = TargetTransformInfo::OK_AnyValue;
6397 Op2VP = TargetTransformInfo::OP_None;
6398 break;
6399 }
6400 if (Op2VP == TargetTransformInfo::OP_PowerOf2 &&
6401 !CInt->getValue().isPowerOf2())
6402 Op2VP = TargetTransformInfo::OP_None;
6403 if (i == 0) {
6404 CInt0 = CInt;
6405 continue;
6406 }
6407 if (CInt0 != CInt)
6408 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
6409 }
6410
6411 SmallVector<const Value *, 4> Operands(VL0->operand_values());
6412 InstructionCost ScalarEltCost =
6413 TTI->getArithmeticInstrCost(E->getOpcode(), ScalarTy, CostKind, Op1VK,
6414 Op2VK, Op1VP, Op2VP, Operands, VL0);
6415 if (NeedToShuffleReuses) {
6416 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
6417 }
6418 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
6419 InstructionCost VecCost =
6420 TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind, Op1VK,
6421 Op2VK, Op1VP, Op2VP, Operands, VL0);
6422 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
6423 return CommonCost + VecCost - ScalarCost;
6424 }
6425 case Instruction::GetElementPtr: {
6426 TargetTransformInfo::OperandValueKind Op1VK =
6427 TargetTransformInfo::OK_AnyValue;
6428 TargetTransformInfo::OperandValueKind Op2VK =
6429 any_of(VL,
6430 [](Value *V) {
6431 return isa<GetElementPtrInst>(V) &&
6432 !isConstant(
6433 cast<GetElementPtrInst>(V)->getOperand(1));
6434 })
6435 ? TargetTransformInfo::OK_AnyValue
6436 : TargetTransformInfo::OK_UniformConstantValue;
6437
6438 InstructionCost ScalarEltCost = TTI->getArithmeticInstrCost(
6439 Instruction::Add, ScalarTy, CostKind, Op1VK, Op2VK);
6440 if (NeedToShuffleReuses) {
6441 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
6442 }
6443 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
6444 InstructionCost VecCost = TTI->getArithmeticInstrCost(
6445 Instruction::Add, VecTy, CostKind, Op1VK, Op2VK);
6446 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
6447 return CommonCost + VecCost - ScalarCost;
6448 }
6449 case Instruction::Load: {
6450 // Cost of wide load - cost of scalar loads.
6451 Align Alignment = cast<LoadInst>(VL0)->getAlign();
6452 InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
6453 Instruction::Load, ScalarTy, Alignment, 0, CostKind, VL0);
6454 if (NeedToShuffleReuses) {
6455 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
6456 }
6457 InstructionCost ScalarLdCost = VecTy->getNumElements() * ScalarEltCost;
6458 InstructionCost VecLdCost;
6459 if (E->State == TreeEntry::Vectorize) {
6460 VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, Alignment, 0,
6461 CostKind, VL0);
6462 } else {
6463 assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState");
6464 Align CommonAlignment = Alignment;
6465 for (Value *V : VL)
6466 CommonAlignment =
6467 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign());
6468 VecLdCost = TTI->getGatherScatterOpCost(
6469 Instruction::Load, VecTy, cast<LoadInst>(VL0)->getPointerOperand(),
6470 /*VariableMask=*/false, CommonAlignment, CostKind, VL0);
6471 }
6472 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecLdCost, ScalarLdCost));
6473 return CommonCost + VecLdCost - ScalarLdCost;
6474 }
6475 case Instruction::Store: {
6476 // We know that we can merge the stores. Calculate the cost.
6477 bool IsReorder = !E->ReorderIndices.empty();
6478 auto *SI =
6479 cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0);
6480 Align Alignment = SI->getAlign();
6481 InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
6482 Instruction::Store, ScalarTy, Alignment, 0, CostKind, VL0);
6483 InstructionCost ScalarStCost = VecTy->getNumElements() * ScalarEltCost;
6484 InstructionCost VecStCost = TTI->getMemoryOpCost(
6485 Instruction::Store, VecTy, Alignment, 0, CostKind, VL0);
6486 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecStCost, ScalarStCost));
6487 return CommonCost + VecStCost - ScalarStCost;
6488 }
6489 case Instruction::Call: {
6490 CallInst *CI = cast<CallInst>(VL0);
6491 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6492
6493 // Calculate the cost of the scalar and vector calls.
6494 IntrinsicCostAttributes CostAttrs(ID, *CI, 1);
6495 InstructionCost ScalarEltCost =
6496 TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
6497 if (NeedToShuffleReuses) {
6498 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
6499 }
6500 InstructionCost ScalarCallCost = VecTy->getNumElements() * ScalarEltCost;
6501
6502 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
6503 InstructionCost VecCallCost =
6504 std::min(VecCallCosts.first, VecCallCosts.second);
6505
6506 LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
6507 << " (" << VecCallCost << "-" << ScalarCallCost << ")"
6508 << " for " << *CI << "\n");
6509
6510 return CommonCost + VecCallCost - ScalarCallCost;
6511 }
6512 case Instruction::ShuffleVector: {
6513 assert(E->isAltShuffle() &&
6514 ((Instruction::isBinaryOp(E->getOpcode()) &&
6515 Instruction::isBinaryOp(E->getAltOpcode())) ||
6516 (Instruction::isCast(E->getOpcode()) &&
6517 Instruction::isCast(E->getAltOpcode())) ||
6518 (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) &&
6519 "Invalid Shuffle Vector Operand");
6520 InstructionCost ScalarCost = 0;
6521 if (NeedToShuffleReuses) {
6522 for (unsigned Idx : E->ReuseShuffleIndices) {
6523 Instruction *I = cast<Instruction>(VL[Idx]);
6524 CommonCost -= TTI->getInstructionCost(I, CostKind);
6525 }
6526 for (Value *V : VL) {
6527 Instruction *I = cast<Instruction>(V);
6528 CommonCost += TTI->getInstructionCost(I, CostKind);
6529 }
6530 }
6531 for (Value *V : VL) {
6532 Instruction *I = cast<Instruction>(V);
6533 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
6534 ScalarCost += TTI->getInstructionCost(I, CostKind);
6535 }
6536 // VecCost is equal to the sum of the cost of creating 2 vectors
6537 // and the cost of creating the shuffle.
6538 InstructionCost VecCost = 0;
6539 // Try to find the previous shuffle node with the same operands and same
6540 // main/alternate ops.
6541 auto &&TryFindNodeWithEqualOperands = [this, E]() {
6542 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
6543 if (TE.get() == E)
6544 break;
6545 if (TE->isAltShuffle() &&
6546 ((TE->getOpcode() == E->getOpcode() &&
6547 TE->getAltOpcode() == E->getAltOpcode()) ||
6548 (TE->getOpcode() == E->getAltOpcode() &&
6549 TE->getAltOpcode() == E->getOpcode())) &&
6550 TE->hasEqualOperands(*E))
6551 return true;
6552 }
6553 return false;
6554 };
6555 if (TryFindNodeWithEqualOperands()) {
6556 LLVM_DEBUG({
6557 dbgs() << "SLP: diamond match for alternate node found.\n";
6558 E->dump();
6559 });
6560 // No need to add new vector costs here since we're going to reuse
6561 // same main/alternate vector ops, just do different shuffling.
6562 } else if (Instruction::isBinaryOp(E->getOpcode())) {
6563 VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind);
6564 VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy,
6565 CostKind);
6566 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) {
6567 VecCost = TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy,
6568 Builder.getInt1Ty(),
6569 CI0->getPredicate(), CostKind, VL0);
6570 VecCost += TTI->getCmpSelInstrCost(
6571 E->getOpcode(), ScalarTy, Builder.getInt1Ty(),
6572 cast<CmpInst>(E->getAltOp())->getPredicate(), CostKind,
6573 E->getAltOp());
6574 } else {
6575 Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType();
6576 Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType();
6577 auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size());
6578 auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size());
6579 VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty,
6580 TTI::CastContextHint::None, CostKind);
6581 VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty,
6582 TTI::CastContextHint::None, CostKind);
6583 }
6584
6585 if (E->ReuseShuffleIndices.empty()) {
6586 CommonCost =
6587 TTI->getShuffleCost(TargetTransformInfo::SK_Select, FinalVecTy);
6588 } else {
6589 SmallVector<int> Mask;
6590 buildShuffleEntryMask(
6591 E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices,
6592 [E](Instruction *I) {
6593 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
6594 return I->getOpcode() == E->getAltOpcode();
6595 },
6596 Mask);
6597 CommonCost = TTI->getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc,
6598 FinalVecTy, Mask);
6599 }
6600 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
6601 return CommonCost + VecCost - ScalarCost;
6602 }
6603 default:
6604 llvm_unreachable("Unknown instruction");
6605 }
6606 }
6607
6608 bool BoUpSLP::isFullyVectorizableTinyTree(bool ForReduction) const {
6609 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
6610 << VectorizableTree.size() << " is fully vectorizable.\n");
6611
6612 auto &&AreVectorizableGathers = [this](const TreeEntry *TE, unsigned Limit) {
6613 SmallVector<int> Mask;
6614 return TE->State == TreeEntry::NeedToGather &&
6615 !any_of(TE->Scalars,
6616 [this](Value *V) { return EphValues.contains(V); }) &&
6617 (allConstant(TE->Scalars) || isSplat(TE->Scalars) ||
6618 TE->Scalars.size() < Limit ||
6619 ((TE->getOpcode() == Instruction::ExtractElement ||
6620 all_of(TE->Scalars,
6621 [](Value *V) {
6622 return isa<ExtractElementInst, UndefValue>(V);
6623 })) &&
6624 isFixedVectorShuffle(TE->Scalars, Mask)) ||
6625 (TE->State == TreeEntry::NeedToGather &&
6626 TE->getOpcode() == Instruction::Load && !TE->isAltShuffle()));
6627 };
6628
6629 // We only handle trees of heights 1 and 2.
6630 if (VectorizableTree.size() == 1 &&
6631 (VectorizableTree[0]->State == TreeEntry::Vectorize ||
6632 (ForReduction &&
6633 AreVectorizableGathers(VectorizableTree[0].get(),
6634 VectorizableTree[0]->Scalars.size()) &&
6635 VectorizableTree[0]->getVectorFactor() > 2)))
6636 return true;
6637
6638 if (VectorizableTree.size() != 2)
6639 return false;
6640
6641 // Handle splat and all-constants stores. Also try to vectorize tiny trees
6642 // whose second gather node has fewer scalar operands than the initial tree
6643 // element (it may be profitable to shuffle the second gather), or whose
6644 // scalars are extractelements that form a shuffle.
6645 SmallVector<int> Mask;
6646 if (VectorizableTree[0]->State == TreeEntry::Vectorize &&
6647 AreVectorizableGathers(VectorizableTree[1].get(),
6648 VectorizableTree[0]->Scalars.size()))
6649 return true;
6650
6651 // Gathering cost would be too much for tiny trees.
6652 if (VectorizableTree[0]->State == TreeEntry::NeedToGather ||
6653 (VectorizableTree[1]->State == TreeEntry::NeedToGather &&
6654 VectorizableTree[0]->State != TreeEntry::ScatterVectorize))
6655 return false;
6656
6657 return true;
6658 }
6659
6660 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts,
6661 TargetTransformInfo *TTI,
6662 bool MustMatchOrInst) {
6663 // Look past the root to find a source value. Arbitrarily follow the
6664 // path through operand 0 of any 'or'. Also, peek through optional
6665 // shift-left-by-multiple-of-8-bits.
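// For example (illustrative), a root of roughly the form
//   or (shl (zext i8 %hi to i32), 8), (zext i8 %lo to i32)
// is walked down through operand 0 of the 'or' and the byte-aligned 'shl'
// until the zext of the (expected) load is reached.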
6666 Value *ZextLoad = Root;
6667 const APInt *ShAmtC;
6668 bool FoundOr = false;
6669 while (!isa<ConstantExpr>(ZextLoad) &&
6670 (match(ZextLoad, m_Or(m_Value(), m_Value())) ||
6671 (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) &&
6672 ShAmtC->urem(8) == 0))) {
6673 auto *BinOp = cast<BinaryOperator>(ZextLoad);
6674 ZextLoad = BinOp->getOperand(0);
6675 if (BinOp->getOpcode() == Instruction::Or)
6676 FoundOr = true;
6677 }
6678 // Check if the input is an extended load of the required or/shift expression.
6679 Value *Load;
6680 if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root ||
6681 !match(ZextLoad, m_ZExt(m_Value(Load))) || !isa<LoadInst>(Load))
6682 return false;
6683
6684 // Require that the total load bit width is a legal integer type.
6685 // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target.
6686 // But <16 x i8> --> i128 is not, so the backend probably can't reduce it.
6687 Type *SrcTy = Load->getType();
6688 unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts;
6689 if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth)))
6690 return false;
6691
6692 // Everything matched - assume that we can fold the whole sequence using
6693 // load combining.
6694 LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at "
6695 << *(cast<Instruction>(Root)) << "\n");
6696
6697 return true;
6698 }
6699
6700 bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const {
6701 if (RdxKind != RecurKind::Or)
6702 return false;
6703
6704 unsigned NumElts = VectorizableTree[0]->Scalars.size();
6705 Value *FirstReduced = VectorizableTree[0]->Scalars[0];
6706 return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI,
6707 /* MatchOr */ false);
6708 }
6709
6710 bool BoUpSLP::isLoadCombineCandidate() const {
6711 // Peek through a final sequence of stores and check if all operations are
6712 // likely to be load-combined.
6713 unsigned NumElts = VectorizableTree[0]->Scalars.size();
6714 for (Value *Scalar : VectorizableTree[0]->Scalars) {
6715 Value *X;
6716 if (!match(Scalar, m_Store(m_Value(X), m_Value())) ||
6717 !isLoadCombineCandidateImpl(X, NumElts, TTI, /* MatchOr */ true))
6718 return false;
6719 }
6720 return true;
6721 }
6722
6723 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const {
6724 // No need to vectorize inserts of gathered values.
6725 if (VectorizableTree.size() == 2 &&
6726 isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) &&
6727 VectorizableTree[1]->State == TreeEntry::NeedToGather &&
6728 (VectorizableTree[1]->getVectorFactor() <= 2 ||
6729 !(isSplat(VectorizableTree[1]->Scalars) ||
6730 allConstant(VectorizableTree[1]->Scalars))))
6731 return true;
6732
6733 // We can vectorize the tree if its size is greater than or equal to the
6734 // minimum size specified by the MinTreeSize command line option.
6735 if (VectorizableTree.size() >= MinTreeSize)
6736 return false;
6737
6738 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
6739 // can vectorize it if we can prove it fully vectorizable.
6740 if (isFullyVectorizableTinyTree(ForReduction))
6741 return false;
6742
6743 assert(VectorizableTree.empty()
6744 ? ExternalUses.empty()
6745 : true && "We shouldn't have any external users");
6746
6747 // Otherwise, we can't vectorize the tree. It is both tiny and not fully
6748 // vectorizable.
6749 return true;
6750 }
6751
6752 InstructionCost BoUpSLP::getSpillCost() const {
6753 // Walk from the bottom of the tree to the top, tracking which values are
6754 // live. When we see a call instruction that is not part of our tree,
6755 // query TTI to see if there is a cost to keeping values live over it
6756 // (for example, if spills and fills are required).
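// For example (illustrative): if two tree scalars are live across a call that
// is not part of the tree, the target-reported cost of keeping the
// corresponding vector values live over that call is added per call below.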
6757 unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
6758 InstructionCost Cost = 0;
6759
6760 SmallPtrSet<Instruction*, 4> LiveValues;
6761 Instruction *PrevInst = nullptr;
6762
6763 // The entries in VectorizableTree are not necessarily ordered by their
6764 // position in basic blocks. Collect them and order them by dominance so later
6765 // instructions are guaranteed to be visited first. For instructions in
6766 // different basic blocks, we only scan to the beginning of the block, so
6767 // their order does not matter, as long as all instructions in a basic block
6768 // are grouped together. Using dominance ensures a deterministic order.
6769 SmallVector<Instruction *, 16> OrderedScalars;
6770 for (const auto &TEPtr : VectorizableTree) {
6771 Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]);
6772 if (!Inst)
6773 continue;
6774 OrderedScalars.push_back(Inst);
6775 }
6776 llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) {
6777 auto *NodeA = DT->getNode(A->getParent());
6778 auto *NodeB = DT->getNode(B->getParent());
6779 assert(NodeA && "Should only process reachable instructions");
6780 assert(NodeB && "Should only process reachable instructions");
6781 assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
6782 "Different nodes should have different DFS numbers");
6783 if (NodeA != NodeB)
6784 return NodeA->getDFSNumIn() < NodeB->getDFSNumIn();
6785 return B->comesBefore(A);
6786 });
6787
6788 for (Instruction *Inst : OrderedScalars) {
6789 if (!PrevInst) {
6790 PrevInst = Inst;
6791 continue;
6792 }
6793
6794 // Update LiveValues.
6795 LiveValues.erase(PrevInst);
6796 for (auto &J : PrevInst->operands()) {
6797 if (isa<Instruction>(&*J) && getTreeEntry(&*J))
6798 LiveValues.insert(cast<Instruction>(&*J));
6799 }
6800
6801 LLVM_DEBUG({
6802 dbgs() << "SLP: #LV: " << LiveValues.size();
6803 for (auto *X : LiveValues)
6804 dbgs() << " " << X->getName();
6805 dbgs() << ", Looking at ";
6806 Inst->dump();
6807 });
6808
6809 // Now find the sequence of instructions between PrevInst and Inst.
6810 unsigned NumCalls = 0;
6811 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
6812 PrevInstIt =
6813 PrevInst->getIterator().getReverse();
6814 while (InstIt != PrevInstIt) {
6815 if (PrevInstIt == PrevInst->getParent()->rend()) {
6816 PrevInstIt = Inst->getParent()->rbegin();
6817 continue;
6818 }
6819
6820 // Debug information does not impact spill cost.
6821 if ((isa<CallInst>(&*PrevInstIt) &&
6822 !isa<DbgInfoIntrinsic>(&*PrevInstIt)) &&
6823 &*PrevInstIt != PrevInst)
6824 NumCalls++;
6825
6826 ++PrevInstIt;
6827 }
6828
6829 if (NumCalls) {
6830 SmallVector<Type*, 4> V;
6831 for (auto *II : LiveValues) {
6832 auto *ScalarTy = II->getType();
6833 if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy))
6834 ScalarTy = VectorTy->getElementType();
6835 V.push_back(FixedVectorType::get(ScalarTy, BundleWidth));
6836 }
6837 Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
6838 }
6839
6840 PrevInst = Inst;
6841 }
6842
6843 return Cost;
6844 }
6845
6846 /// Check if two insertelement instructions are from the same buildvector.
6847 static bool areTwoInsertFromSameBuildVector(InsertElementInst *VU,
6848 InsertElementInst *V) {
6849 // Instructions must be from the same basic blocks.
6850 if (VU->getParent() != V->getParent())
6851 return false;
6852 // Checks if 2 insertelements are from the same buildvector.
6853 if (VU->getType() != V->getType())
6854 return false;
6855 // Multiple used inserts are separate nodes.
6856 if (!VU->hasOneUse() && !V->hasOneUse())
6857 return false;
6858 auto *IE1 = VU;
6859 auto *IE2 = V;
6860 unsigned Idx1 = *getInsertIndex(IE1);
6861 unsigned Idx2 = *getInsertIndex(IE2);
6862 // Go through the vector operand of insertelement instructions trying to find
6863 // either VU as the original vector for IE2 or V as the original vector for
6864 // IE1.
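// For example (illustrative), in the chain
//   %v1 = insertelement <4 x float> undef, float %a, i32 0
//   %v2 = insertelement <4 x float> %v1,   float %b, i32 1
// walking %v2 back through its vector operand reaches %v1, so the two inserts
// are recognized as the same buildvector (given %v1 has a single use).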
6865 do {
6866 if (IE2 == VU)
6867 return VU->hasOneUse();
6868 if (IE1 == V)
6869 return V->hasOneUse();
6870 if (IE1) {
6871 if ((IE1 != VU && !IE1->hasOneUse()) ||
6872 getInsertIndex(IE1).value_or(Idx2) == Idx2)
6873 IE1 = nullptr;
6874 else
6875 IE1 = dyn_cast<InsertElementInst>(IE1->getOperand(0));
6876 }
6877 if (IE2) {
6878 if ((IE2 != V && !IE2->hasOneUse()) ||
6879 getInsertIndex(IE2).value_or(Idx1) == Idx1)
6880 IE2 = nullptr;
6881 else
6882 IE2 = dyn_cast<InsertElementInst>(IE2->getOperand(0));
6883 }
6884 } while (IE1 || IE2);
6885 return false;
6886 }
6887
6888 /// Checks if the \p IE1 instruction is followed by the \p IE2 instruction in
6889 /// the buildvector sequence.
6890 static bool isFirstInsertElement(const InsertElementInst *IE1,
6891 const InsertElementInst *IE2) {
6892 if (IE1 == IE2)
6893 return false;
6894 const auto *I1 = IE1;
6895 const auto *I2 = IE2;
6896 const InsertElementInst *PrevI1;
6897 const InsertElementInst *PrevI2;
6898 unsigned Idx1 = *getInsertIndex(IE1);
6899 unsigned Idx2 = *getInsertIndex(IE2);
6900 do {
6901 if (I2 == IE1)
6902 return true;
6903 if (I1 == IE2)
6904 return false;
6905 PrevI1 = I1;
6906 PrevI2 = I2;
6907 if (I1 && (I1 == IE1 || I1->hasOneUse()) &&
6908 getInsertIndex(I1).value_or(Idx2) != Idx2)
6909 I1 = dyn_cast<InsertElementInst>(I1->getOperand(0));
6910 if (I2 && ((I2 == IE2 || I2->hasOneUse())) &&
6911 getInsertIndex(I2).value_or(Idx1) != Idx1)
6912 I2 = dyn_cast<InsertElementInst>(I2->getOperand(0));
6913 } while ((I1 && PrevI1 != I1) || (I2 && PrevI2 != I2));
6914 llvm_unreachable("Two different buildvectors not expected.");
6915 }
6916
6917 namespace {
6918 /// Returns incoming Value *, if the requested type is Value * too, or a default
6919 /// value otherwise.
6920 struct ValueSelect {
6921 template <typename U>
6922 static typename std::enable_if<std::is_same<Value *, U>::value, Value *>::type
6923 get(Value *V) {
6924 return V;
6925 }
6926 template <typename U>
6927 static typename std::enable_if<!std::is_same<Value *, U>::value, U>::type
6928 get(Value *) {
6929 return U();
6930 }
6931 };
6932 } // namespace
6933
6934 /// Does the analysis of the provided shuffle masks and performs the requested
6935 /// actions on the vectors with the given shuffle masks. It tries to do it in
6936 /// several steps.
6937 /// 1. If the Base vector is not an undef vector, resize the very first mask to
6938 /// have a common VF and perform the action for 2 input vectors (including the
6939 /// non-undef Base). Other shuffle masks are combined with the result of the
6940 /// first stage and processed as a shuffle of 2 vectors.
6941 /// 2. If the Base is an undef vector and there is only 1 shuffle mask, perform
6942 /// the action only for 1 vector with the given mask, if it is not the identity
6943 /// mask.
6944 /// 3. If > 2 masks are used, perform the remaining shuffle actions for 2
6945 /// vectors, combining the masks properly between the steps.
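/// For example (illustrative), with an undef Base and a single shuffle mask
/// {0, 1, 2, 3}, ResizeAction reports an identity mask and the input vector is
/// returned unchanged, so no shuffle action is performed (case 2 above).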
6946 template <typename T>
6947 static T *performExtractsShuffleAction(
6948 MutableArrayRef<std::pair<T *, SmallVector<int>>> ShuffleMask, Value *Base,
6949 function_ref<unsigned(T *)> GetVF,
6950 function_ref<std::pair<T *, bool>(T *, ArrayRef<int>)> ResizeAction,
6951 function_ref<T *(ArrayRef<int>, ArrayRef<T *>)> Action) {
6952 assert(!ShuffleMask.empty() && "Empty list of shuffles for inserts.");
6953 SmallVector<int> Mask(ShuffleMask.begin()->second);
6954 auto VMIt = std::next(ShuffleMask.begin());
6955 T *Prev = nullptr;
6956 bool IsBaseNotUndef = !isUndefVector(Base);
6957 if (IsBaseNotUndef) {
6958 // Base is not undef, need to combine it with the next subvectors.
6959 std::pair<T *, bool> Res = ResizeAction(ShuffleMask.begin()->first, Mask);
6960 for (unsigned Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
6961 if (Mask[Idx] == UndefMaskElem)
6962 Mask[Idx] = Idx;
6963 else
6964 Mask[Idx] = (Res.second ? Idx : Mask[Idx]) + VF;
6965 }
6966 auto *V = ValueSelect::get<T *>(Base);
6967 (void)V;
6968 assert((!V || GetVF(V) == Mask.size()) &&
6969 "Expected base vector of VF number of elements.");
6970 Prev = Action(Mask, {nullptr, Res.first});
6971 } else if (ShuffleMask.size() == 1) {
6972 // Base is undef and only 1 vector is shuffled - perform the action only for
6973 // a single vector, if the mask is not the identity mask.
6974 std::pair<T *, bool> Res = ResizeAction(ShuffleMask.begin()->first, Mask);
6975 if (Res.second)
6976 // Identity mask is found.
6977 Prev = Res.first;
6978 else
6979 Prev = Action(Mask, {ShuffleMask.begin()->first});
6980 } else {
6981 // Base is undef and at least 2 input vectors are shuffled - perform 2-vector
6982 // shuffles step by step, combining the shuffles between the steps.
6983 unsigned Vec1VF = GetVF(ShuffleMask.begin()->first);
6984 unsigned Vec2VF = GetVF(VMIt->first);
6985 if (Vec1VF == Vec2VF) {
6986 // No need to resize the input vectors since they are of the same size; we
6987 // can shuffle them directly.
6988 ArrayRef<int> SecMask = VMIt->second;
6989 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) {
6990 if (SecMask[I] != UndefMaskElem) {
6991 assert(Mask[I] == UndefMaskElem && "Multiple uses of scalars.");
6992 Mask[I] = SecMask[I] + Vec1VF;
6993 }
6994 }
6995 Prev = Action(Mask, {ShuffleMask.begin()->first, VMIt->first});
6996 } else {
6997 // Vectors of different sizes - resize and reshuffle.
6998 std::pair<T *, bool> Res1 =
6999 ResizeAction(ShuffleMask.begin()->first, Mask);
7000 std::pair<T *, bool> Res2 = ResizeAction(VMIt->first, VMIt->second);
7001 ArrayRef<int> SecMask = VMIt->second;
7002 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) {
7003 if (Mask[I] != UndefMaskElem) {
7004 assert(SecMask[I] == UndefMaskElem && "Multiple uses of scalars.");
7005 if (Res1.second)
7006 Mask[I] = I;
7007 } else if (SecMask[I] != UndefMaskElem) {
7008 assert(Mask[I] == UndefMaskElem && "Multiple uses of scalars.");
7009 Mask[I] = (Res2.second ? I : SecMask[I]) + VF;
7010 }
7011 }
7012 Prev = Action(Mask, {Res1.first, Res2.first});
7013 }
7014 VMIt = std::next(VMIt);
7015 }
7016 // Perform requested actions for the remaining masks/vectors.
7017 for (auto E = ShuffleMask.end(); VMIt != E; ++VMIt) {
7018 // Shuffle other input vectors, if any.
7019 std::pair<T *, bool> Res = ResizeAction(VMIt->first, VMIt->second);
7020 ArrayRef<int> SecMask = VMIt->second;
7021 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) {
7022 if (SecMask[I] != UndefMaskElem) {
7023 assert((Mask[I] == UndefMaskElem || IsBaseNotUndef) &&
7024 "Multiple uses of scalars.");
7025 Mask[I] = (Res.second ? I : SecMask[I]) + VF;
7026 } else if (Mask[I] != UndefMaskElem) {
7027 Mask[I] = I;
7028 }
7029 }
7030 Prev = Action(Mask, {Prev, Res.first});
7031 }
7032 return Prev;
7033 }
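// A small worked example of the mask combining above, with hypothetical
// values and an undef Base, where both inputs V1 and V2 have VF == 4:
//   Mask (for V1)    = {0, -1, 2, -1}
//   SecMask (for V2) = {-1, 1, -1, 3}
// Since the vector factors match, SecMask is folded into Mask by offsetting
// the V2 lanes by Vec1VF, giving {0, 5, 2, 7}, and a single two-source Action
// is invoked on {V1, V2}.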
7034
7035 InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
7036 InstructionCost Cost = 0;
7037 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
7038 << VectorizableTree.size() << ".\n");
7039
7040 unsigned BundleWidth = VectorizableTree[0]->Scalars.size();
7041
7042 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
7043 TreeEntry &TE = *VectorizableTree[I];
7044
7045 InstructionCost C = getEntryCost(&TE, VectorizedVals);
7046 Cost += C;
7047 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
7048 << " for bundle that starts with " << *TE.Scalars[0]
7049 << ".\n"
7050 << "SLP: Current total cost = " << Cost << "\n");
7051 }
7052
7053 SmallPtrSet<Value *, 16> ExtractCostCalculated;
7054 InstructionCost ExtractCost = 0;
7055 SmallVector<MapVector<const TreeEntry *, SmallVector<int>>> ShuffleMasks;
7056 SmallVector<std::pair<Value *, const TreeEntry *>> FirstUsers;
7057 SmallVector<APInt> DemandedElts;
7058 for (ExternalUser &EU : ExternalUses) {
7059 // We only add extract cost once for the same scalar.
7060 if (!isa_and_nonnull<InsertElementInst>(EU.User) &&
7061 !ExtractCostCalculated.insert(EU.Scalar).second)
7062 continue;
7063
7064 // Uses by ephemeral values are free (because the ephemeral value will be
7065 // removed prior to code generation, and so the extraction will be
7066 // removed as well).
7067 if (EphValues.count(EU.User))
7068 continue;
7069
7070 // No extract cost for vector "scalar"
7071 if (isa<FixedVectorType>(EU.Scalar->getType()))
7072 continue;
7073
7074 // The cost for external uses was already counted when we tried to adjust the
7075 // cost for extractelements, no need to add it again.
7076 if (isa<ExtractElementInst>(EU.Scalar))
7077 continue;
7078
7079 // If the found user is an insertelement, do not calculate the extract cost
7080 // but try to detect it as a final shuffled/identity match.
7081 if (auto *VU = dyn_cast_or_null<InsertElementInst>(EU.User)) {
7082 if (auto *FTy = dyn_cast<FixedVectorType>(VU->getType())) {
7083 Optional<unsigned> InsertIdx = getInsertIndex(VU);
7084 if (InsertIdx) {
7085 const TreeEntry *ScalarTE = getTreeEntry(EU.Scalar);
7086 auto *It =
7087 find_if(FirstUsers,
7088 [VU](const std::pair<Value *, const TreeEntry *> &Pair) {
7089 return areTwoInsertFromSameBuildVector(
7090 VU, cast<InsertElementInst>(Pair.first));
7091 });
7092 int VecId = -1;
7093 if (It == FirstUsers.end()) {
7094 (void)ShuffleMasks.emplace_back();
7095 SmallVectorImpl<int> &Mask = ShuffleMasks.back()[ScalarTE];
7096 if (Mask.empty())
7097 Mask.assign(FTy->getNumElements(), UndefMaskElem);
7098 // Find the insertvector, vectorized in tree, if any.
7099 Value *Base = VU;
7100 while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) {
7101 if (IEBase != EU.User &&
7102 (!IEBase->hasOneUse() ||
7103 getInsertIndex(IEBase).value_or(*InsertIdx) == *InsertIdx))
7104 break;
7105 // Build the mask for the vectorized insertelement instructions.
7106 if (const TreeEntry *E = getTreeEntry(IEBase)) {
7107 VU = IEBase;
7108 do {
7109 IEBase = cast<InsertElementInst>(Base);
7110 int Idx = *getInsertIndex(IEBase);
7111 assert(Mask[Idx] == UndefMaskElem &&
7112 "InsertElementInstruction used already.");
7113 Mask[Idx] = Idx;
7114 Base = IEBase->getOperand(0);
7115 } while (E == getTreeEntry(Base));
7116 break;
7117 }
7118 Base = cast<InsertElementInst>(Base)->getOperand(0);
7119 }
7120 FirstUsers.emplace_back(VU, ScalarTE);
7121 DemandedElts.push_back(APInt::getZero(FTy->getNumElements()));
7122 VecId = FirstUsers.size() - 1;
7123 } else {
7124 if (isFirstInsertElement(VU, cast<InsertElementInst>(It->first)))
7125 It->first = VU;
7126 VecId = std::distance(FirstUsers.begin(), It);
7127 }
7128 int InIdx = *InsertIdx;
7129 SmallVectorImpl<int> &Mask = ShuffleMasks[VecId][ScalarTE];
7130 if (Mask.empty())
7131 Mask.assign(FTy->getNumElements(), UndefMaskElem);
7132 Mask[InIdx] = EU.Lane;
7133 DemandedElts[VecId].setBit(InIdx);
7134 continue;
7135 }
7136 }
7137 }
7138
7139 // If we plan to rewrite the tree in a smaller type, we will need to sign
7140 // extend the extracted value back to the original type. Here, we account
7141 // for the extract and the added cost of the sign extend if needed.
7142 auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth);
7143 auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
7144 if (MinBWs.count(ScalarRoot)) {
7145 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
7146 auto Extend =
7147 MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
7148 VecTy = FixedVectorType::get(MinTy, BundleWidth);
7149 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
7150 VecTy, EU.Lane);
7151 } else {
7152 ExtractCost +=
7153 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
7154 }
7155 }
7156
7157 InstructionCost SpillCost = getSpillCost();
7158 Cost += SpillCost + ExtractCost;
7159 auto &&ResizeToVF = [this, &Cost](const TreeEntry *TE, ArrayRef<int> Mask) {
7160 InstructionCost C = 0;
7161 unsigned VF = Mask.size();
7162 unsigned VecVF = TE->getVectorFactor();
7163 if (VF != VecVF &&
7164 (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); }) ||
7165 (all_of(Mask,
7166 [VF](int Idx) { return Idx < 2 * static_cast<int>(VF); }) &&
7167 !ShuffleVectorInst::isIdentityMask(Mask)))) {
7168 SmallVector<int> OrigMask(VecVF, UndefMaskElem);
7169 std::copy(Mask.begin(), std::next(Mask.begin(), std::min(VF, VecVF)),
7170 OrigMask.begin());
7171 C = TTI->getShuffleCost(
7172 TTI::SK_PermuteSingleSrc,
7173 FixedVectorType::get(TE->getMainOp()->getType(), VecVF), OrigMask);
7174 LLVM_DEBUG(
7175 dbgs() << "SLP: Adding cost " << C
7176 << " for final shuffle of insertelement external users.\n";
7177 TE->dump(); dbgs() << "SLP: Current total cost = " << Cost << "\n");
7178 Cost += C;
7179 return std::make_pair(TE, true);
7180 }
7181 return std::make_pair(TE, false);
7182 };
7183 // Calculate the cost of the reshuffled vectors, if any.
7184 for (int I = 0, E = FirstUsers.size(); I < E; ++I) {
7185 Value *Base = cast<Instruction>(FirstUsers[I].first)->getOperand(0);
7186 unsigned VF = ShuffleMasks[I].begin()->second.size();
7187 auto *FTy = FixedVectorType::get(
7188 cast<VectorType>(FirstUsers[I].first->getType())->getElementType(), VF);
7189 auto Vector = ShuffleMasks[I].takeVector();
7190 auto &&EstimateShufflesCost = [this, FTy,
7191 &Cost](ArrayRef<int> Mask,
7192 ArrayRef<const TreeEntry *> TEs) {
7193 assert((TEs.size() == 1 || TEs.size() == 2) &&
7194 "Expected exactly 1 or 2 tree entries.");
7195 if (TEs.size() == 1) {
7196 int Limit = 2 * Mask.size();
7197 if (!all_of(Mask, [Limit](int Idx) { return Idx < Limit; }) ||
7198 !ShuffleVectorInst::isIdentityMask(Mask)) {
7199 InstructionCost C =
7200 TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FTy, Mask);
7201 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
7202 << " for final shuffle of insertelement "
7203 "external users.\n";
7204 TEs.front()->dump();
7205 dbgs() << "SLP: Current total cost = " << Cost << "\n");
7206 Cost += C;
7207 }
7208 } else {
7209 InstructionCost C =
7210 TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, FTy, Mask);
7211 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
7212 << " for final shuffle of vector node and external "
7213 "insertelement users.\n";
7214 if (TEs.front()) { TEs.front()->dump(); } TEs.back()->dump();
7215 dbgs() << "SLP: Current total cost = " << Cost << "\n");
7216 Cost += C;
7217 }
7218 return TEs.back();
7219 };
7220 (void)performExtractsShuffleAction<const TreeEntry>(
7221 makeMutableArrayRef(Vector.data(), Vector.size()), Base,
7222 [](const TreeEntry *E) { return E->getVectorFactor(); }, ResizeToVF,
7223 EstimateShufflesCost);
7224 InstructionCost InsertCost = TTI->getScalarizationOverhead(
7225 cast<FixedVectorType>(FirstUsers[I].first->getType()), DemandedElts[I],
7226 /*Insert*/ true, /*Extract*/ false);
7227 Cost -= InsertCost;
7228 }
7229
7230 #ifndef NDEBUG
7231 SmallString<256> Str;
7232 {
7233 raw_svector_ostream OS(Str);
7234 OS << "SLP: Spill Cost = " << SpillCost << ".\n"
7235 << "SLP: Extract Cost = " << ExtractCost << ".\n"
7236 << "SLP: Total Cost = " << Cost << ".\n";
7237 }
7238 LLVM_DEBUG(dbgs() << Str);
7239 if (ViewSLPTree)
7240 ViewGraph(this, "SLP" + F->getName(), false, Str);
7241 #endif
7242
7243 return Cost;
7244 }
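// Summary of how the pieces above combine (illustrative numbers only):
//   Cost = sum of per-entry costs + SpillCost + ExtractCost
//          + final shuffle costs for insertelement users
//          - scalarization overhead already covered by those insertelements.
// For example, entry costs summing to -6 with no spills, an extract cost of 2
// and an insert overhead of 1 give a total of -5, i.e. the tree is considered
// profitable to vectorize.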
7245
7246 Optional<TargetTransformInfo::ShuffleKind>
7247 BoUpSLP::isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
7248 SmallVectorImpl<const TreeEntry *> &Entries) {
7249 // TODO: currently checking only for Scalars in the tree entry, need to count
7250 // reused elements too for better cost estimation.
7251 Mask.assign(TE->Scalars.size(), UndefMaskElem);
7252 Entries.clear();
7253 // Build a map from values to the gather tree entries containing them.
7254 DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>> ValueToTEs;
7255 for (const std::unique_ptr<TreeEntry> &EntryPtr : VectorizableTree) {
7256 if (EntryPtr.get() == TE)
7257 break;
7258 if (EntryPtr->State != TreeEntry::NeedToGather)
7259 continue;
7260 for (Value *V : EntryPtr->Scalars)
7261 ValueToTEs.try_emplace(V).first->getSecond().insert(EntryPtr.get());
7262 }
7263 // Find all tree entries used by the gathered values. If no common entries
7264 // are found - this is not a shuffle.
7265 // Here we build a set of tree nodes for each gathered value and try to
7266 // find the intersection between these sets. If we have at least one common
7267 // tree node for each gathered value - we have just a permutation of a
7268 // single vector. If we have 2 different sets, we're in a situation where we
7269 // have a permutation of 2 input vectors.
7270 SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs;
7271 DenseMap<Value *, int> UsedValuesEntry;
7272 for (Value *V : TE->Scalars) {
7273 if (isa<UndefValue>(V))
7274 continue;
7275 // Build a list of tree entries where V is used.
7276 SmallPtrSet<const TreeEntry *, 4> VToTEs;
7277 auto It = ValueToTEs.find(V);
7278 if (It != ValueToTEs.end())
7279 VToTEs = It->second;
7280 if (const TreeEntry *VTE = getTreeEntry(V))
7281 VToTEs.insert(VTE);
7282 if (VToTEs.empty())
7283 return None;
7284 if (UsedTEs.empty()) {
7285 // The first iteration, just insert the list of nodes to vector.
7286 UsedTEs.push_back(VToTEs);
7287 } else {
7288 // Need to check if there are any previously used tree nodes which use V.
7289 // If there are no such nodes, consider that we have another input
7290 // vector.
7291 SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs);
7292 unsigned Idx = 0;
7293 for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) {
7294 // Do we have a non-empty intersection of previously listed tree entries
7295 // and tree entries using current V?
7296 set_intersect(VToTEs, Set);
7297 if (!VToTEs.empty()) {
7298 // Yes, write the new subset and continue analysis for the next
7299 // scalar.
7300 Set.swap(VToTEs);
7301 break;
7302 }
7303 VToTEs = SavedVToTEs;
7304 ++Idx;
7305 }
7306 // No non-empty intersection found - need to add a second set of possible
7307 // source vectors.
7308 if (Idx == UsedTEs.size()) {
7309 // If the number of input vectors is greater than 2 - not a permutation,
7310 // fall back to the regular gather.
7311 if (UsedTEs.size() == 2)
7312 return None;
7313 UsedTEs.push_back(SavedVToTEs);
7314 Idx = UsedTEs.size() - 1;
7315 }
7316 UsedValuesEntry.try_emplace(V, Idx);
7317 }
7318 }
7319
7320 if (UsedTEs.empty()) {
7321 assert(all_of(TE->Scalars, UndefValue::classof) &&
7322 "Expected vector of undefs only.");
7323 return None;
7324 }
7325
7326 unsigned VF = 0;
7327 if (UsedTEs.size() == 1) {
7328 // Try to find the perfect match in another gather node at first.
7329 auto It = find_if(UsedTEs.front(), [TE](const TreeEntry *EntryPtr) {
7330 return EntryPtr->isSame(TE->Scalars);
7331 });
7332 if (It != UsedTEs.front().end()) {
7333 Entries.push_back(*It);
7334 std::iota(Mask.begin(), Mask.end(), 0);
7335 return TargetTransformInfo::SK_PermuteSingleSrc;
7336 }
7337 // No perfect match, just shuffle, so choose the first tree node.
7338 Entries.push_back(*UsedTEs.front().begin());
7339 } else {
7340 // Try to find nodes with the same vector factor.
7341 assert(UsedTEs.size() == 2 && "Expected at max 2 permuted entries.");
7342 DenseMap<int, const TreeEntry *> VFToTE;
7343 for (const TreeEntry *TE : UsedTEs.front())
7344 VFToTE.try_emplace(TE->getVectorFactor(), TE);
7345 for (const TreeEntry *TE : UsedTEs.back()) {
7346 auto It = VFToTE.find(TE->getVectorFactor());
7347 if (It != VFToTE.end()) {
7348 VF = It->first;
7349 Entries.push_back(It->second);
7350 Entries.push_back(TE);
7351 break;
7352 }
7353 }
7354 // No 2 source vectors with the same vector factor - give up and do regular
7355 // gather.
7356 if (Entries.empty())
7357 return None;
7358 }
7359
7360 // Build a shuffle mask for better cost estimation and vector emission.
7361 for (int I = 0, E = TE->Scalars.size(); I < E; ++I) {
7362 Value *V = TE->Scalars[I];
7363 if (isa<UndefValue>(V))
7364 continue;
7365 unsigned Idx = UsedValuesEntry.lookup(V);
7366 const TreeEntry *VTE = Entries[Idx];
7367 int FoundLane = VTE->findLaneForValue(V);
7368 Mask[I] = Idx * VF + FoundLane;
7369 // Extra check required by isSingleSourceMaskImpl function (called by
7370 // ShuffleVectorInst::isSingleSourceMask).
7371 if (Mask[I] >= 2 * E)
7372 return None;
7373 }
7374 switch (Entries.size()) {
7375 case 1:
7376 return TargetTransformInfo::SK_PermuteSingleSrc;
7377 case 2:
7378 return TargetTransformInfo::SK_PermuteTwoSrc;
7379 default:
7380 break;
7381 }
7382 return None;
7383 }
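// Hypothetical example: a gather node with scalars {%a, %e, %c, %g}, where
// {%a, %b, %c, %d} and {%e, %f, %g, %h} are two previously built entries with
// vector factor 4. Entries would then hold both nodes and Mask would be
// {0, 4, 2, 6}, so the gather can be modeled as a single SK_PermuteTwoSrc
// shuffle instead of a sequence of scalar inserts.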
7384
7385 InstructionCost BoUpSLP::getGatherCost(FixedVectorType *Ty,
7386 const APInt &ShuffledIndices,
7387 bool NeedToShuffle) const {
7388 InstructionCost Cost =
7389 TTI->getScalarizationOverhead(Ty, ~ShuffledIndices, /*Insert*/ true,
7390 /*Extract*/ false);
7391 if (NeedToShuffle)
7392 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty);
7393 return Cost;
7394 }
7395
7396 InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const {
7397 // Find the type of the operands in VL.
7398 Type *ScalarTy = VL[0]->getType();
7399 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
7400 ScalarTy = SI->getValueOperand()->getType();
7401 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
7402 bool DuplicateNonConst = false;
7403 // Find the cost of inserting/extracting values from the vector.
7404 // Check if the same elements are inserted several times and count them as
7405 // shuffle candidates.
7406 APInt ShuffledElements = APInt::getZero(VL.size());
7407 DenseSet<Value *> UniqueElements;
7408 // Iterate in reverse order to consider the insert elements with the higher cost first.
7409 for (unsigned I = VL.size(); I > 0; --I) {
7410 unsigned Idx = I - 1;
7411 // No need to shuffle duplicates for constants.
7412 if (isConstant(VL[Idx])) {
7413 ShuffledElements.setBit(Idx);
7414 continue;
7415 }
7416 if (!UniqueElements.insert(VL[Idx]).second) {
7417 DuplicateNonConst = true;
7418 ShuffledElements.setBit(Idx);
7419 }
7420 }
7421 return getGatherCost(VecTy, ShuffledElements, DuplicateNonConst);
7422 }
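// Hypothetical example: for VL = {%x, 2.0, %y, %x}, scanning from the back
// records %x and %y (lanes 3 and 2) as unique, while the constant at lane 1
// and the repeated %x at lane 0 get their bits set in ShuffledElements. The
// returned cost then covers inserting lanes 2 and 3 plus one
// SK_PermuteSingleSrc shuffle, because DuplicateNonConst is true.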
7423
7424 // Perform operand reordering on the instructions in VL and return the reordered
7425 // operands in Left and Right.
7426 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
7427 SmallVectorImpl<Value *> &Left,
7428 SmallVectorImpl<Value *> &Right,
7429 const DataLayout &DL,
7430 ScalarEvolution &SE,
7431 const BoUpSLP &R) {
7432 if (VL.empty())
7433 return;
7434 VLOperands Ops(VL, DL, SE, R);
7435 // Reorder the operands in place.
7436 Ops.reorder();
7437 Left = Ops.getVL(0);
7438 Right = Ops.getVL(1);
7439 }
7440
7441 void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) {
7442 // Get the basic block this bundle is in. All instructions in the bundle
7443 // should be in this block (except for extractelement-like instructions with
7444 // constant indices).
7445 auto *Front = E->getMainOp();
7446 auto *BB = Front->getParent();
7447 assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool {
7448 if (E->getOpcode() == Instruction::GetElementPtr &&
7449 !isa<GetElementPtrInst>(V))
7450 return true;
7451 auto *I = cast<Instruction>(V);
7452 return !E->isOpcodeOrAlt(I) || I->getParent() == BB ||
7453 isVectorLikeInstWithConstOps(I);
7454 }));
7455
7456 auto &&FindLastInst = [E, Front, this, &BB]() {
7457 Instruction *LastInst = Front;
7458 for (Value *V : E->Scalars) {
7459 auto *I = dyn_cast<Instruction>(V);
7460 if (!I)
7461 continue;
7462 if (LastInst->getParent() == I->getParent()) {
7463 if (LastInst->comesBefore(I))
7464 LastInst = I;
7465 continue;
7466 }
7467 assert(isVectorLikeInstWithConstOps(LastInst) &&
7468 isVectorLikeInstWithConstOps(I) &&
7469 "Expected vector-like insts only.");
7470 if (!DT->isReachableFromEntry(LastInst->getParent())) {
7471 LastInst = I;
7472 continue;
7473 }
7474 if (!DT->isReachableFromEntry(I->getParent()))
7475 continue;
7476 auto *NodeA = DT->getNode(LastInst->getParent());
7477 auto *NodeB = DT->getNode(I->getParent());
7478 assert(NodeA && "Should only process reachable instructions");
7479 assert(NodeB && "Should only process reachable instructions");
7480 assert((NodeA == NodeB) ==
7481 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
7482 "Different nodes should have different DFS numbers");
7483 if (NodeA->getDFSNumIn() < NodeB->getDFSNumIn())
7484 LastInst = I;
7485 }
7486 BB = LastInst->getParent();
7487 return LastInst;
7488 };
7489
7490 auto &&FindFirstInst = [E, Front]() {
7491 Instruction *FirstInst = Front;
7492 for (Value *V : E->Scalars) {
7493 auto *I = dyn_cast<Instruction>(V);
7494 if (!I)
7495 continue;
7496 if (I->comesBefore(FirstInst))
7497 FirstInst = I;
7498 }
7499 return FirstInst;
7500 };
7501
7502 // Set the insert point to the beginning of the basic block if the entry
7503 // should not be scheduled.
7504 if (E->State != TreeEntry::NeedToGather &&
7505 doesNotNeedToSchedule(E->Scalars)) {
7506 Instruction *InsertInst;
7507 if (all_of(E->Scalars, isUsedOutsideBlock))
7508 InsertInst = FindLastInst();
7509 else
7510 InsertInst = FindFirstInst();
7511 // If the instruction is PHI, set the insert point after all the PHIs.
7512 if (isa<PHINode>(InsertInst))
7513 InsertInst = BB->getFirstNonPHI();
7514 BasicBlock::iterator InsertPt = InsertInst->getIterator();
7515 Builder.SetInsertPoint(BB, InsertPt);
7516 Builder.SetCurrentDebugLocation(Front->getDebugLoc());
7517 return;
7518 }
7519
7520 // The last instruction in the bundle in program order.
7521 Instruction *LastInst = nullptr;
7522
7523 // Find the last instruction. The common case should be that BB has been
7524 // scheduled, and the last instruction is VL.back(). So we start with
7525 // VL.back() and iterate over schedule data until we reach the end of the
7526 // bundle. The end of the bundle is marked by null ScheduleData.
7527 if (BlocksSchedules.count(BB)) {
7528 Value *V = E->isOneOf(E->Scalars.back());
7529 if (doesNotNeedToBeScheduled(V))
7530 V = *find_if_not(E->Scalars, doesNotNeedToBeScheduled);
7531 auto *Bundle = BlocksSchedules[BB]->getScheduleData(V);
7532 if (Bundle && Bundle->isPartOfBundle())
7533 for (; Bundle; Bundle = Bundle->NextInBundle)
7534 if (Bundle->OpValue == Bundle->Inst)
7535 LastInst = Bundle->Inst;
7536 }
7537
7538 // LastInst can still be null at this point if there's either not an entry
7539 // for BB in BlocksSchedules or there's no ScheduleData available for
7540 // VL.back(). This can be the case if buildTree_rec aborts for various
7541 // reasons (e.g., the maximum recursion depth is reached, the maximum region
7542 // size is reached, etc.). ScheduleData is initialized in the scheduling
7543 // "dry-run".
7544 //
7545 // If this happens, we can still find the last instruction by brute force. We
7546 // iterate forwards from Front (inclusive) until we either see all
7547 // instructions in the bundle or reach the end of the block. If Front is the
7548 // last instruction in program order, LastInst will be set to Front, and we
7549 // will visit all the remaining instructions in the block.
7550 //
7551 // One of the reasons we exit early from buildTree_rec is to place an upper
7552 // bound on compile-time. Thus, taking an additional compile-time hit here is
7553 // not ideal. However, this should be exceedingly rare since it requires that
7554 // we both exit early from buildTree_rec and that the bundle be out-of-order
7555 // (causing us to iterate all the way to the end of the block).
7556 if (!LastInst) {
7557 LastInst = FindLastInst();
7558 // If the instruction is PHI, set the insert point after all the PHIs.
7559 if (isa<PHINode>(LastInst))
7560 LastInst = BB->getFirstNonPHI()->getPrevNode();
7561 }
7562 assert(LastInst && "Failed to find last instruction in bundle");
7563
7564 // Set the insertion point after the last instruction in the bundle. Set the
7565 // debug location to Front.
7566 Builder.SetInsertPoint(BB, std::next(LastInst->getIterator()));
7567 Builder.SetCurrentDebugLocation(Front->getDebugLoc());
7568 }
7569
7570 Value *BoUpSLP::gather(ArrayRef<Value *> VL) {
7571 // List of instructions/lanes from the current block and/or the blocks which
7572 // are part of the current loop. These instructions will be inserted at the
7573 // end to make it possible to optimize loops and hoist invariant instructions
7574 // out of the loop's body with better chances for success.
7575 SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts;
7576 SmallSet<int, 4> PostponedIndices;
7577 Loop *L = LI->getLoopFor(Builder.GetInsertBlock());
7578 auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) {
7579 SmallPtrSet<BasicBlock *, 4> Visited;
7580 while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second)
7581 InsertBB = InsertBB->getSinglePredecessor();
7582 return InsertBB && InsertBB == InstBB;
7583 };
7584 for (int I = 0, E = VL.size(); I < E; ++I) {
7585 if (auto *Inst = dyn_cast<Instruction>(VL[I]))
7586 if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) ||
7587 getTreeEntry(Inst) || (L && (L->contains(Inst)))) &&
7588 PostponedIndices.insert(I).second)
7589 PostponedInsts.emplace_back(Inst, I);
7590 }
7591
7592 auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) {
7593 Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos));
7594 auto *InsElt = dyn_cast<InsertElementInst>(Vec);
7595 if (!InsElt)
7596 return Vec;
7597 GatherShuffleSeq.insert(InsElt);
7598 CSEBlocks.insert(InsElt->getParent());
7599 // Add to our 'need-to-extract' list.
7600 if (TreeEntry *Entry = getTreeEntry(V)) {
7601 // Find which lane we need to extract.
7602 unsigned FoundLane = Entry->findLaneForValue(V);
7603 ExternalUses.emplace_back(V, InsElt, FoundLane);
7604 }
7605 return Vec;
7606 };
7607 Value *Val0 =
7608 isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0];
7609 FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size());
7610 Value *Vec = PoisonValue::get(VecTy);
7611 SmallVector<int> NonConsts;
7612 // Insert constant values first.
7613 for (int I = 0, E = VL.size(); I < E; ++I) {
7614 if (PostponedIndices.contains(I))
7615 continue;
7616 if (!isConstant(VL[I])) {
7617 NonConsts.push_back(I);
7618 continue;
7619 }
7620 Vec = CreateInsertElement(Vec, VL[I], I);
7621 }
7622 // Insert non-constant values.
7623 for (int I : NonConsts)
7624 Vec = CreateInsertElement(Vec, VL[I], I);
7625 // Append the instructions which are, or may be, part of the loop at the end
7626 // to make it possible to hoist the non-loop-based instructions.
7627 for (const std::pair<Value *, unsigned> &Pair : PostponedInsts)
7628 Vec = CreateInsertElement(Vec, Pair.first, Pair.second);
7629
7630 return Vec;
7631 }
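// Sketch of the emitted sequence for a hypothetical bundle, following the
// constants-first / postponed-last policy above:
//   %v0 = insertelement <4 x i32> poison, i32 7, i32 1       ; constant lane
//   %v1 = insertelement <4 x i32> %v0, i32 %a, i32 0         ; non-constant
//   %v2 = insertelement <4 x i32> %v1, i32 %b, i32 2
//   %v3 = insertelement <4 x i32> %v2, i32 %loopval, i32 3   ; postponed (in-loop)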
7632
7633 namespace {
7634 /// Merges shuffle masks and emits final shuffle instruction, if required.
7635 class ShuffleInstructionBuilder {
7636 IRBuilderBase &Builder;
7637 const unsigned VF = 0;
7638 bool IsFinalized = false;
7639 SmallVector<int, 4> Mask;
7640 /// Holds all of the instructions that we gathered.
7641 SetVector<Instruction *> &GatherShuffleSeq;
7642 /// A list of blocks that we are going to CSE.
7643 SetVector<BasicBlock *> &CSEBlocks;
7644
7645 public:
7646   ShuffleInstructionBuilder(IRBuilderBase &Builder, unsigned VF,
7647 SetVector<Instruction *> &GatherShuffleSeq,
7648 SetVector<BasicBlock *> &CSEBlocks)
7649 : Builder(Builder), VF(VF), GatherShuffleSeq(GatherShuffleSeq),
7650 CSEBlocks(CSEBlocks) {}
7651
7652 /// Adds a mask, inverting it before applying.
7653   void addInversedMask(ArrayRef<unsigned> SubMask) {
7654 if (SubMask.empty())
7655 return;
7656 SmallVector<int, 4> NewMask;
7657 inversePermutation(SubMask, NewMask);
7658 addMask(NewMask);
7659 }
7660
7661 /// Adds a mask, merging it into the single accumulated mask.
7662   void addMask(ArrayRef<unsigned> SubMask) {
7663 SmallVector<int, 4> NewMask(SubMask.begin(), SubMask.end());
7664 addMask(NewMask);
7665 }
7666
7667   void addMask(ArrayRef<int> SubMask) { ::addMask(Mask, SubMask); }
7668
7669   Value *finalize(Value *V) {
7670 IsFinalized = true;
7671 unsigned ValueVF = cast<FixedVectorType>(V->getType())->getNumElements();
7672 if (VF == ValueVF && Mask.empty())
7673 return V;
7674 SmallVector<int, 4> NormalizedMask(VF, UndefMaskElem);
7675 std::iota(NormalizedMask.begin(), NormalizedMask.end(), 0);
7676 addMask(NormalizedMask);
7677
7678 if (VF == ValueVF && ShuffleVectorInst::isIdentityMask(Mask))
7679 return V;
7680 Value *Vec = Builder.CreateShuffleVector(V, Mask, "shuffle");
7681 if (auto *I = dyn_cast<Instruction>(Vec)) {
7682 GatherShuffleSeq.insert(I);
7683 CSEBlocks.insert(I->getParent());
7684 }
7685 return Vec;
7686 }
7687
7688   ~ShuffleInstructionBuilder() {
7689 assert((IsFinalized || Mask.empty()) &&
7690 "Shuffle construction must be finalized.");
7691 }
7692 };
7693 } // namespace
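// Mask bookkeeping example for the builder above (hypothetical reorder): for
// ReorderIndices = {2, 0, 1}, addInversedMask first builds the inverse
// permutation {1, 2, 0} and merges it into the accumulated Mask; finalize()
// then emits a shufflevector only if the merged mask is not an identity mask
// of the requested VF.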
7694
7695 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
7696 const unsigned VF = VL.size();
7697 InstructionsState S = getSameOpcode(VL);
7698 // Special processing for GEPs bundle, which may include non-gep values.
7699 if (!S.getOpcode() && VL.front()->getType()->isPointerTy()) {
7700 const auto *It =
7701 find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); });
7702 if (It != VL.end())
7703 S = getSameOpcode(*It);
7704 }
7705 if (S.getOpcode()) {
7706 if (TreeEntry *E = getTreeEntry(S.OpValue))
7707 if (E->isSame(VL)) {
7708 Value *V = vectorizeTree(E);
7709 if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) {
7710 if (!E->ReuseShuffleIndices.empty()) {
7711 // Reshuffle to get only unique values.
7712 // If some of the scalars are duplicated in the vectorization tree
7713 // entry, we do not vectorize them but instead generate a mask for
7714 // the reuses. But if there are several users of the same entry,
7715 // they may have different vectorization factors. This is especially
7716 // important for PHI nodes. In this case, we need to adapt the
7717 // resulting instruction for the user vectorization factor and have
7718 // to reshuffle it again to take only unique elements of the vector.
7719 // Without this code the function would incorrectly return a reduced vector
7720 // instruction with the same elements, not with the unique ones.
7721
7722 // block:
7723 // %phi = phi <2 x > { .., %entry} {%shuffle, %block}
7724 // %2 = shuffle <2 x > %phi, poison, <4 x > <1, 1, 0, 0>
7725 // ... (use %2)
7726 // %shuffle = shuffle <2 x> %2, poison, <2 x> {2, 0}
7727 // br %block
7728 SmallVector<int> UniqueIdxs(VF, UndefMaskElem);
7729 SmallSet<int, 4> UsedIdxs;
7730 int Pos = 0;
7731 int Sz = VL.size();
7732 for (int Idx : E->ReuseShuffleIndices) {
7733 if (Idx != Sz && Idx != UndefMaskElem &&
7734 UsedIdxs.insert(Idx).second)
7735 UniqueIdxs[Idx] = Pos;
7736 ++Pos;
7737 }
7738 assert(VF >= UsedIdxs.size() && "Expected vectorization factor "
7739 "less than original vector size.");
7740 UniqueIdxs.append(VF - UsedIdxs.size(), UndefMaskElem);
7741 V = Builder.CreateShuffleVector(V, UniqueIdxs, "shrink.shuffle");
7742 } else {
7743 assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() &&
7744 "Expected vectorization factor less "
7745 "than original vector size.");
7746 SmallVector<int> UniformMask(VF, 0);
7747 std::iota(UniformMask.begin(), UniformMask.end(), 0);
7748 V = Builder.CreateShuffleVector(V, UniformMask, "shrink.shuffle");
7749 }
7750 if (auto *I = dyn_cast<Instruction>(V)) {
7751 GatherShuffleSeq.insert(I);
7752 CSEBlocks.insert(I->getParent());
7753 }
7754 }
7755 return V;
7756 }
7757 }
7758
7759 // Can't vectorize this, so simply build a new vector with each lane
7760 // corresponding to the requested value.
7761 return createBuildVector(VL);
7762 }
7763 Value *BoUpSLP::createBuildVector(ArrayRef<Value *> VL) {
7764 assert(any_of(VectorizableTree,
7765 [VL](const std::unique_ptr<TreeEntry> &TE) {
7766 return TE->State == TreeEntry::NeedToGather && TE->isSame(VL);
7767 }) &&
7768 "Non-matching gather node.");
7769 unsigned VF = VL.size();
7770 // Exploit possible reuse of values across lanes.
7771 SmallVector<int> ReuseShuffleIndicies;
7772 SmallVector<Value *> UniqueValues;
7773 if (VL.size() > 2) {
7774 DenseMap<Value *, unsigned> UniquePositions;
7775 unsigned NumValues =
7776 std::distance(VL.begin(), find_if(reverse(VL), [](Value *V) {
7777 return !isa<UndefValue>(V);
7778 }).base());
7779 VF = std::max<unsigned>(VF, PowerOf2Ceil(NumValues));
7780 int UniqueVals = 0;
7781 for (Value *V : VL.drop_back(VL.size() - VF)) {
7782 if (isa<UndefValue>(V)) {
7783 ReuseShuffleIndicies.emplace_back(UndefMaskElem);
7784 continue;
7785 }
7786 if (isConstant(V)) {
7787 ReuseShuffleIndicies.emplace_back(UniqueValues.size());
7788 UniqueValues.emplace_back(V);
7789 continue;
7790 }
7791 auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
7792 ReuseShuffleIndicies.emplace_back(Res.first->second);
7793 if (Res.second) {
7794 UniqueValues.emplace_back(V);
7795 ++UniqueVals;
7796 }
7797 }
7798 if (UniqueVals == 1 && UniqueValues.size() == 1) {
7799 // Emit pure splat vector.
7800 ReuseShuffleIndicies.append(VF - ReuseShuffleIndicies.size(),
7801 UndefMaskElem);
7802 } else if (UniqueValues.size() >= VF - 1 || UniqueValues.size() <= 1) {
7803 if (UniqueValues.empty()) {
7804 assert(all_of(VL, UndefValue::classof) && "Expected list of undefs.");
7805 NumValues = VF;
7806 }
7807 ReuseShuffleIndicies.clear();
7808 UniqueValues.clear();
7809 UniqueValues.append(VL.begin(), std::next(VL.begin(), NumValues));
7810 }
7811 UniqueValues.append(VF - UniqueValues.size(),
7812 PoisonValue::get(VL[0]->getType()));
7813 VL = UniqueValues;
7814 }
7815
7816 ShuffleInstructionBuilder ShuffleBuilder(Builder, VF, GatherShuffleSeq,
7817 CSEBlocks);
7818 Value *Vec = gather(VL);
7819 if (!ReuseShuffleIndicies.empty()) {
7820 ShuffleBuilder.addMask(ReuseShuffleIndicies);
7821 Vec = ShuffleBuilder.finalize(Vec);
7822 }
7823 return Vec;
7824 }
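// Hypothetical example of the reuse handling above: for VL = {%a, %b, %a, %b}
// the unique values {%a, %b} are padded with poison up to VF = 4,
// ReuseShuffleIndicies becomes {0, 1, 0, 1}, and finalize() emits a single
// shufflevector that rebroadcasts the two live lanes into all four positions.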
7825
7826 Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
7827 IRBuilder<>::InsertPointGuard Guard(Builder);
7828
7829 if (E->VectorizedValue) {
7830 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
7831 return E->VectorizedValue;
7832 }
7833
7834 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
7835 unsigned VF = E->getVectorFactor();
7836 ShuffleInstructionBuilder ShuffleBuilder(Builder, VF, GatherShuffleSeq,
7837 CSEBlocks);
7838 if (E->State == TreeEntry::NeedToGather) {
7839 if (E->getMainOp())
7840 setInsertPointAfterBundle(E);
7841 Value *Vec;
7842 SmallVector<int> Mask;
7843 SmallVector<const TreeEntry *> Entries;
7844 Optional<TargetTransformInfo::ShuffleKind> Shuffle =
7845 isGatherShuffledEntry(E, Mask, Entries);
7846 if (Shuffle) {
7847 assert((Entries.size() == 1 || Entries.size() == 2) &&
7848 "Expected shuffle of 1 or 2 entries.");
7849 Vec = Builder.CreateShuffleVector(Entries.front()->VectorizedValue,
7850 Entries.back()->VectorizedValue, Mask);
7851 if (auto *I = dyn_cast<Instruction>(Vec)) {
7852 GatherShuffleSeq.insert(I);
7853 CSEBlocks.insert(I->getParent());
7854 }
7855 } else {
7856 Vec = gather(E->Scalars);
7857 }
7858 if (NeedToShuffleReuses) {
7859 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
7860 Vec = ShuffleBuilder.finalize(Vec);
7861 }
7862 E->VectorizedValue = Vec;
7863 return Vec;
7864 }
7865
7866 assert((E->State == TreeEntry::Vectorize ||
7867 E->State == TreeEntry::ScatterVectorize) &&
7868 "Unhandled state");
7869 unsigned ShuffleOrOp =
7870 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
7871 Instruction *VL0 = E->getMainOp();
7872 Type *ScalarTy = VL0->getType();
7873 if (auto *Store = dyn_cast<StoreInst>(VL0))
7874 ScalarTy = Store->getValueOperand()->getType();
7875 else if (auto *IE = dyn_cast<InsertElementInst>(VL0))
7876 ScalarTy = IE->getOperand(1)->getType();
7877 auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size());
7878 switch (ShuffleOrOp) {
7879 case Instruction::PHI: {
7880 assert((E->ReorderIndices.empty() ||
7881 E != VectorizableTree.front().get() ||
7882 !E->UserTreeIndices.empty()) &&
7883 "PHI reordering is free.");
7884 auto *PH = cast<PHINode>(VL0);
7885 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
7886 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
7887 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
7888 Value *V = NewPhi;
7889
7890 // Adjust insertion point once all PHI's have been generated.
7891 Builder.SetInsertPoint(&*PH->getParent()->getFirstInsertionPt());
7892 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
7893
7894 ShuffleBuilder.addInversedMask(E->ReorderIndices);
7895 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
7896 V = ShuffleBuilder.finalize(V);
7897
7898 E->VectorizedValue = V;
7899
7900 // PHINodes may have multiple entries from the same block. We want to
7901 // visit every block once.
7902 SmallPtrSet<BasicBlock*, 4> VisitedBBs;
7903
7904 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
7905 ValueList Operands;
7906 BasicBlock *IBB = PH->getIncomingBlock(i);
7907
7908 if (!VisitedBBs.insert(IBB).second) {
7909 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
7910 continue;
7911 }
7912
7913 Builder.SetInsertPoint(IBB->getTerminator());
7914 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
7915 Value *Vec = vectorizeTree(E->getOperand(i));
7916 NewPhi->addIncoming(Vec, IBB);
7917 }
7918
7919 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
7920 "Invalid number of incoming values");
7921 return V;
7922 }
7923
7924 case Instruction::ExtractElement: {
7925 Value *V = E->getSingleOperand(0);
7926 Builder.SetInsertPoint(VL0);
7927 ShuffleBuilder.addInversedMask(E->ReorderIndices);
7928 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
7929 V = ShuffleBuilder.finalize(V);
7930 E->VectorizedValue = V;
7931 return V;
7932 }
7933 case Instruction::ExtractValue: {
7934 auto *LI = cast<LoadInst>(E->getSingleOperand(0));
7935 Builder.SetInsertPoint(LI);
7936 auto *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
7937 Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
7938 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign());
7939 Value *NewV = propagateMetadata(V, E->Scalars);
7940 ShuffleBuilder.addInversedMask(E->ReorderIndices);
7941 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
7942 NewV = ShuffleBuilder.finalize(NewV);
7943 E->VectorizedValue = NewV;
7944 return NewV;
7945 }
7946 case Instruction::InsertElement: {
7947 assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique");
7948 Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back()));
7949 Value *V = vectorizeTree(E->getOperand(1));
7950
7951 // Create InsertVector shuffle if necessary
7952 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) {
7953 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0));
7954 }));
7955 const unsigned NumElts =
7956 cast<FixedVectorType>(FirstInsert->getType())->getNumElements();
7957 const unsigned NumScalars = E->Scalars.size();
7958
7959 unsigned Offset = *getInsertIndex(VL0);
7960 assert(Offset < NumElts && "Failed to find vector index offset");
7961
7962 // Create shuffle to resize vector
7963 SmallVector<int> Mask;
7964 if (!E->ReorderIndices.empty()) {
7965 inversePermutation(E->ReorderIndices, Mask);
7966 Mask.append(NumElts - NumScalars, UndefMaskElem);
7967 } else {
7968 Mask.assign(NumElts, UndefMaskElem);
7969 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0);
7970 }
7971 // Create InsertVector shuffle if necessary
7972 bool IsIdentity = true;
7973 SmallVector<int> PrevMask(NumElts, UndefMaskElem);
7974 Mask.swap(PrevMask);
7975 for (unsigned I = 0; I < NumScalars; ++I) {
7976 Value *Scalar = E->Scalars[PrevMask[I]];
7977 unsigned InsertIdx = *getInsertIndex(Scalar);
7978 IsIdentity &= InsertIdx - Offset == I;
7979 Mask[InsertIdx - Offset] = I;
7980 }
7981 if (!IsIdentity || NumElts != NumScalars) {
7982 V = Builder.CreateShuffleVector(V, Mask);
7983 if (auto *I = dyn_cast<Instruction>(V)) {
7984 GatherShuffleSeq.insert(I);
7985 CSEBlocks.insert(I->getParent());
7986 }
7987 }
7988
7989 if ((!IsIdentity || Offset != 0 ||
7990 !isUndefVector(FirstInsert->getOperand(0))) &&
7991 NumElts != NumScalars) {
7992 SmallVector<int> InsertMask(NumElts);
7993 std::iota(InsertMask.begin(), InsertMask.end(), 0);
7994 for (unsigned I = 0; I < NumElts; I++) {
7995 if (Mask[I] != UndefMaskElem)
7996 InsertMask[Offset + I] = NumElts + I;
7997 }
7998
7999 V = Builder.CreateShuffleVector(
8000 FirstInsert->getOperand(0), V, InsertMask,
8001 cast<Instruction>(E->Scalars.back())->getName());
8002 if (auto *I = dyn_cast<Instruction>(V)) {
8003 GatherShuffleSeq.insert(I);
8004 CSEBlocks.insert(I->getParent());
8005 }
8006 }
8007
8008 ++NumVectorInstructions;
8009 E->VectorizedValue = V;
8010 return V;
8011 }
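// Worked example for the InsertElement lowering above (hypothetical): 4
// scalars inserted at indices 2..5 of an 8-wide destination give Offset = 2
// and a rebased Mask of {0, 1, 2, 3, undef, ...}. Since NumElts (8) !=
// NumScalars (4), the vectorized operand is first widened by a shufflevector
// and then blended into FirstInsert's source vector with
// InsertMask = {0, 1, 8, 9, 10, 11, 6, 7}.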
8012 case Instruction::ZExt:
8013 case Instruction::SExt:
8014 case Instruction::FPToUI:
8015 case Instruction::FPToSI:
8016 case Instruction::FPExt:
8017 case Instruction::PtrToInt:
8018 case Instruction::IntToPtr:
8019 case Instruction::SIToFP:
8020 case Instruction::UIToFP:
8021 case Instruction::Trunc:
8022 case Instruction::FPTrunc:
8023 case Instruction::BitCast: {
8024 setInsertPointAfterBundle(E);
8025
8026 Value *InVec = vectorizeTree(E->getOperand(0));
8027
8028 if (E->VectorizedValue) {
8029 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
8030 return E->VectorizedValue;
8031 }
8032
8033 auto *CI = cast<CastInst>(VL0);
8034 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
8035 ShuffleBuilder.addInversedMask(E->ReorderIndices);
8036 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
8037 V = ShuffleBuilder.finalize(V);
8038
8039 E->VectorizedValue = V;
8040 ++NumVectorInstructions;
8041 return V;
8042 }
8043 case Instruction::FCmp:
8044 case Instruction::ICmp: {
8045 setInsertPointAfterBundle(E);
8046
8047 Value *L = vectorizeTree(E->getOperand(0));
8048 Value *R = vectorizeTree(E->getOperand(1));
8049
8050 if (E->VectorizedValue) {
8051 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
8052 return E->VectorizedValue;
8053 }
8054
8055 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
8056 Value *V = Builder.CreateCmp(P0, L, R);
8057 propagateIRFlags(V, E->Scalars, VL0);
8058 ShuffleBuilder.addInversedMask(E->ReorderIndices);
8059 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
8060 V = ShuffleBuilder.finalize(V);
8061
8062 E->VectorizedValue = V;
8063 ++NumVectorInstructions;
8064 return V;
8065 }
8066 case Instruction::Select: {
8067 setInsertPointAfterBundle(E);
8068
8069 Value *Cond = vectorizeTree(E->getOperand(0));
8070 Value *True = vectorizeTree(E->getOperand(1));
8071 Value *False = vectorizeTree(E->getOperand(2));
8072
8073 if (E->VectorizedValue) {
8074 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
8075 return E->VectorizedValue;
8076 }
8077
8078 Value *V = Builder.CreateSelect(Cond, True, False);
8079 ShuffleBuilder.addInversedMask(E->ReorderIndices);
8080 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
8081 V = ShuffleBuilder.finalize(V);
8082
8083 E->VectorizedValue = V;
8084 ++NumVectorInstructions;
8085 return V;
8086 }
8087 case Instruction::FNeg: {
8088 setInsertPointAfterBundle(E);
8089
8090 Value *Op = vectorizeTree(E->getOperand(0));
8091
8092 if (E->VectorizedValue) {
8093 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
8094 return E->VectorizedValue;
8095 }
8096
8097 Value *V = Builder.CreateUnOp(
8098 static_cast<Instruction::UnaryOps>(E->getOpcode()), Op);
8099 propagateIRFlags(V, E->Scalars, VL0);
8100 if (auto *I = dyn_cast<Instruction>(V))
8101 V = propagateMetadata(I, E->Scalars);
8102
8103 ShuffleBuilder.addInversedMask(E->ReorderIndices);
8104 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
8105 V = ShuffleBuilder.finalize(V);
8106
8107 E->VectorizedValue = V;
8108 ++NumVectorInstructions;
8109
8110 return V;
8111 }
8112 case Instruction::Add:
8113 case Instruction::FAdd:
8114 case Instruction::Sub:
8115 case Instruction::FSub:
8116 case Instruction::Mul:
8117 case Instruction::FMul:
8118 case Instruction::UDiv:
8119 case Instruction::SDiv:
8120 case Instruction::FDiv:
8121 case Instruction::URem:
8122 case Instruction::SRem:
8123 case Instruction::FRem:
8124 case Instruction::Shl:
8125 case Instruction::LShr:
8126 case Instruction::AShr:
8127 case Instruction::And:
8128 case Instruction::Or:
8129 case Instruction::Xor: {
8130 setInsertPointAfterBundle(E);
8131
8132 Value *LHS = vectorizeTree(E->getOperand(0));
8133 Value *RHS = vectorizeTree(E->getOperand(1));
8134
8135 if (E->VectorizedValue) {
8136 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
8137 return E->VectorizedValue;
8138 }
8139
8140 Value *V = Builder.CreateBinOp(
8141 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS,
8142 RHS);
8143 propagateIRFlags(V, E->Scalars, VL0);
8144 if (auto *I = dyn_cast<Instruction>(V))
8145 V = propagateMetadata(I, E->Scalars);
8146
8147 ShuffleBuilder.addInversedMask(E->ReorderIndices);
8148 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
8149 V = ShuffleBuilder.finalize(V);
8150
8151 E->VectorizedValue = V;
8152 ++NumVectorInstructions;
8153
8154 return V;
8155 }
8156 case Instruction::Load: {
8157 // Loads are inserted at the head of the tree because we don't want to
8158 // sink them all the way down past store instructions.
8159 setInsertPointAfterBundle(E);
8160
8161 LoadInst *LI = cast<LoadInst>(VL0);
8162 Instruction *NewLI;
8163 unsigned AS = LI->getPointerAddressSpace();
8164 Value *PO = LI->getPointerOperand();
8165 if (E->State == TreeEntry::Vectorize) {
8166 Value *VecPtr = Builder.CreateBitCast(PO, VecTy->getPointerTo(AS));
8167 NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign());
8168
8169 // The pointer operand uses an in-tree scalar, so we add the new BitCast
8170 // or LoadInst to the ExternalUses list to make sure that an extract will
8171 // be generated in the future.
8172 if (TreeEntry *Entry = getTreeEntry(PO)) {
8173 // Find which lane we need to extract.
8174 unsigned FoundLane = Entry->findLaneForValue(PO);
8175 ExternalUses.emplace_back(
8176 PO, PO != VecPtr ? cast<User>(VecPtr) : NewLI, FoundLane);
8177 }
8178 } else {
8179 assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state");
8180 Value *VecPtr = vectorizeTree(E->getOperand(0));
8181 // Use the minimum alignment of the gathered loads.
8182 Align CommonAlignment = LI->getAlign();
8183 for (Value *V : E->Scalars)
8184 CommonAlignment =
8185 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign());
8186 NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment);
8187 }
8188 Value *V = propagateMetadata(NewLI, E->Scalars);
8189
8190 ShuffleBuilder.addInversedMask(E->ReorderIndices);
8191 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
8192 V = ShuffleBuilder.finalize(V);
8193 E->VectorizedValue = V;
8194 ++NumVectorInstructions;
8195 return V;
8196 }
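// The two load strategies above, sketched as IR with hypothetical operands:
//   TreeEntry::Vectorize (consecutive loads):
//     %vp = bitcast i32* %p to <4 x i32>*
//     %v  = load <4 x i32>, <4 x i32>* %vp, align 4
//   TreeEntry::ScatterVectorize (vectorizable but non-consecutive loads):
//     %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(
//              <4 x i32*> %ptrs, i32 4, <4 x i1> <i1 true, i1 true, i1 true,
//              i1 true>, <4 x i32> undef)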
8197 case Instruction::Store: {
8198 auto *SI = cast<StoreInst>(VL0);
8199 unsigned AS = SI->getPointerAddressSpace();
8200
8201 setInsertPointAfterBundle(E);
8202
8203 Value *VecValue = vectorizeTree(E->getOperand(0));
8204 ShuffleBuilder.addMask(E->ReorderIndices);
8205 VecValue = ShuffleBuilder.finalize(VecValue);
8206
8207 Value *ScalarPtr = SI->getPointerOperand();
8208 Value *VecPtr = Builder.CreateBitCast(
8209 ScalarPtr, VecValue->getType()->getPointerTo(AS));
8210 StoreInst *ST =
8211 Builder.CreateAlignedStore(VecValue, VecPtr, SI->getAlign());
8212
8213 // The pointer operand uses an in-tree scalar, so add the new BitCast or
8214 // StoreInst to ExternalUses to make sure that an extract will be
8215 // generated in the future.
8216 if (TreeEntry *Entry = getTreeEntry(ScalarPtr)) {
8217 // Find which lane we need to extract.
8218 unsigned FoundLane = Entry->findLaneForValue(ScalarPtr);
8219 ExternalUses.push_back(ExternalUser(
8220 ScalarPtr, ScalarPtr != VecPtr ? cast<User>(VecPtr) : ST,
8221 FoundLane));
8222 }
8223
8224 Value *V = propagateMetadata(ST, E->Scalars);
8225
8226 E->VectorizedValue = V;
8227 ++NumVectorInstructions;
8228 return V;
8229 }
8230 case Instruction::GetElementPtr: {
8231 auto *GEP0 = cast<GetElementPtrInst>(VL0);
8232 setInsertPointAfterBundle(E);
8233
8234 Value *Op0 = vectorizeTree(E->getOperand(0));
8235
8236 SmallVector<Value *> OpVecs;
8237 for (int J = 1, N = GEP0->getNumOperands(); J < N; ++J) {
8238 Value *OpVec = vectorizeTree(E->getOperand(J));
8239 OpVecs.push_back(OpVec);
8240 }
8241
8242 Value *V = Builder.CreateGEP(GEP0->getSourceElementType(), Op0, OpVecs);
8243 if (Instruction *I = dyn_cast<GetElementPtrInst>(V)) {
8244 SmallVector<Value *> GEPs;
8245 for (Value *V : E->Scalars) {
8246 if (isa<GetElementPtrInst>(V))
8247 GEPs.push_back(V);
8248 }
8249 V = propagateMetadata(I, GEPs);
8250 }
8251
8252 ShuffleBuilder.addInversedMask(E->ReorderIndices);
8253 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
8254 V = ShuffleBuilder.finalize(V);
8255
8256 E->VectorizedValue = V;
8257 ++NumVectorInstructions;
8258
8259 return V;
8260 }
8261 case Instruction::Call: {
8262 CallInst *CI = cast<CallInst>(VL0);
8263 setInsertPointAfterBundle(E);
8264
8265 Intrinsic::ID IID = Intrinsic::not_intrinsic;
8266 if (Function *FI = CI->getCalledFunction())
8267 IID = FI->getIntrinsicID();
8268
8269 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8270
8271 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
8272 bool UseIntrinsic = ID != Intrinsic::not_intrinsic &&
8273 VecCallCosts.first <= VecCallCosts.second;
8274
8275 Value *ScalarArg = nullptr;
8276 std::vector<Value *> OpVecs;
8277 SmallVector<Type *, 2> TysForDecl =
8278 {FixedVectorType::get(CI->getType(), E->Scalars.size())};
8279 for (int j = 0, e = CI->arg_size(); j < e; ++j) {
8280 ValueList OpVL;
8281 // Some intrinsics have scalar arguments. Such arguments should not be
8282 // vectorized.
8283 if (UseIntrinsic && isVectorIntrinsicWithScalarOpAtArg(IID, j)) {
8284 CallInst *CEI = cast<CallInst>(VL0);
8285 ScalarArg = CEI->getArgOperand(j);
8286 OpVecs.push_back(CEI->getArgOperand(j));
8287 if (isVectorIntrinsicWithOverloadTypeAtArg(IID, j))
8288 TysForDecl.push_back(ScalarArg->getType());
8289 continue;
8290 }
8291
8292 Value *OpVec = vectorizeTree(E->getOperand(j));
8293 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
8294 OpVecs.push_back(OpVec);
8295 if (isVectorIntrinsicWithOverloadTypeAtArg(IID, j))
8296 TysForDecl.push_back(OpVec->getType());
8297 }
8298
8299 Function *CF;
8300 if (!UseIntrinsic) {
8301 VFShape Shape =
8302 VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
8303 VecTy->getNumElements())),
8304 false /*HasGlobalPred*/);
8305 CF = VFDatabase(*CI).getVectorizedFunction(Shape);
8306 } else {
8307 CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl);
8308 }
8309
8310 SmallVector<OperandBundleDef, 1> OpBundles;
8311 CI->getOperandBundlesAsDefs(OpBundles);
8312 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);
8313
8314 // The scalar argument uses an in-tree scalar so we add the new vectorized
8315 // call to ExternalUses list to make sure that an extract will be
8316 // generated in the future.
8317 if (ScalarArg) {
8318 if (TreeEntry *Entry = getTreeEntry(ScalarArg)) {
8319 // Find which lane we need to extract.
8320 unsigned FoundLane = Entry->findLaneForValue(ScalarArg);
8321 ExternalUses.push_back(
8322 ExternalUser(ScalarArg, cast<User>(V), FoundLane));
8323 }
8324 }
8325
8326 propagateIRFlags(V, E->Scalars, VL0);
8327 ShuffleBuilder.addInversedMask(E->ReorderIndices);
8328 ShuffleBuilder.addMask(E->ReuseShuffleIndices);
8329 V = ShuffleBuilder.finalize(V);
8330
8331 E->VectorizedValue = V;
8332 ++NumVectorInstructions;
8333 return V;
8334 }
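// For instance (hypothetical bundle): vectorizing four llvm.powi.f32.i32
// calls keeps the exponent operand scalar, since the exponent is reported as
// a scalar operand, so the emitted call would look roughly like
//   %r = call <4 x float> @llvm.powi.v4f32.i32(<4 x float> %base, i32 %exp)
// assuming the intrinsic cost does not exceed the library-call alternative.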
8335 case Instruction::ShuffleVector: {
8336 assert(E->isAltShuffle() &&
8337 ((Instruction::isBinaryOp(E->getOpcode()) &&
8338 Instruction::isBinaryOp(E->getAltOpcode())) ||
8339 (Instruction::isCast(E->getOpcode()) &&
8340 Instruction::isCast(E->getAltOpcode())) ||
8341 (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) &&
8342 "Invalid Shuffle Vector Operand");
8343
8344 Value *LHS = nullptr, *RHS = nullptr;
8345 if (Instruction::isBinaryOp(E->getOpcode()) || isa<CmpInst>(VL0)) {
8346 setInsertPointAfterBundle(E);
8347 LHS = vectorizeTree(E->getOperand(0));
8348 RHS = vectorizeTree(E->getOperand(1));
8349 } else {
8350 setInsertPointAfterBundle(E);
8351 LHS = vectorizeTree(E->getOperand(0));
8352 }
8353
8354 if (E->VectorizedValue) {
8355 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
8356 return E->VectorizedValue;
8357 }
8358
8359 Value *V0, *V1;
8360 if (Instruction::isBinaryOp(E->getOpcode())) {
8361 V0 = Builder.CreateBinOp(
8362 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
8363 V1 = Builder.CreateBinOp(
8364 static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS);
8365 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) {
8366 V0 = Builder.CreateCmp(CI0->getPredicate(), LHS, RHS);
8367 auto *AltCI = cast<CmpInst>(E->getAltOp());
8368 CmpInst::Predicate AltPred = AltCI->getPredicate();
8369 V1 = Builder.CreateCmp(AltPred, LHS, RHS);
8370 } else {
8371 V0 = Builder.CreateCast(
8372 static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy);
8373 V1 = Builder.CreateCast(
8374 static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy);
8375 }
8376 // Add V0 and V1 to later analysis to try to find and remove matching
8377 // instruction, if any.
8378 for (Value *V : {V0, V1}) {
8379 if (auto *I = dyn_cast<Instruction>(V)) {
8380 GatherShuffleSeq.insert(I);
8381 CSEBlocks.insert(I->getParent());
8382 }
8383 }
8384
8385 // Create shuffle to take alternate operations from the vector.
8386 // Also, gather up main and alt scalar ops to propagate IR flags to
8387 // each vector operation.
8388 ValueList OpScalars, AltScalars;
8389 SmallVector<int> Mask;
8390 buildShuffleEntryMask(
8391 E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices,
8392 [E](Instruction *I) {
8393 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
8394 return isAlternateInstruction(I, E->getMainOp(), E->getAltOp());
8395 },
8396 Mask, &OpScalars, &AltScalars);
8397
8398 propagateIRFlags(V0, OpScalars);
8399 propagateIRFlags(V1, AltScalars);
8400
8401 Value *V = Builder.CreateShuffleVector(V0, V1, Mask);
8402 if (auto *I = dyn_cast<Instruction>(V)) {
8403 V = propagateMetadata(I, E->Scalars);
8404 GatherShuffleSeq.insert(I);
8405 CSEBlocks.insert(I->getParent());
8406 }
8407 V = ShuffleBuilder.finalize(V);
8408
8409 E->VectorizedValue = V;
8410 ++NumVectorInstructions;
8411
8412 return V;
8413 }
8414 default:
8415 llvm_unreachable("unknown inst");
8416 }
8417 return nullptr;
8418 }
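// A concrete sketch of the alternate-opcode (ShuffleVector) case above: a
// hypothetical bundle {a0+b0, a1-b1, a2+b2, a3-b3} would be emitted as
//   %add = add <4 x i32> %a, %b
//   %sub = sub <4 x i32> %a, %b
//   %res = shufflevector <4 x i32> %add, <4 x i32> %sub,
//                        <4 x i32> <i32 0, i32 5, i32 2, i32 7>
// where the mask selects even lanes from the add and odd lanes from the sub.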
8419
8420 Value *BoUpSLP::vectorizeTree() {
8421 ExtraValueToDebugLocsMap ExternallyUsedValues;
8422 return vectorizeTree(ExternallyUsedValues);
8423 }
8424
8425 namespace {
8426 /// Data type for handling buildvector sequences with the reused scalars from
8427 /// other tree entries.
8428 struct ShuffledInsertData {
8429 /// List of insertelements to be replaced by shuffles.
8430 SmallVector<InsertElementInst *> InsertElements;
8431 /// The parent vectors and shuffle mask for the given list of inserts.
8432 MapVector<Value *, SmallVector<int>> ValueMasks;
8433 };
8434 } // namespace
8435
8436 Value *
8437 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
8438 // All blocks must be scheduled before any instructions are inserted.
8439 for (auto &BSIter : BlocksSchedules) {
8440 scheduleBlock(BSIter.second.get());
8441 }
8442
8443 Builder.SetInsertPoint(&F->getEntryBlock().front());
8444 auto *VectorRoot = vectorizeTree(VectorizableTree[0].get());
8445
8446 // If the vectorized tree can be rewritten in a smaller type, we truncate the
8447 // vectorized root. InstCombine will then rewrite the entire expression. We
8448 // sign extend the extracted values below.
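// For instance (illustrative), if the root bundle was vectorized as
// <4 x i32> but MinBWs records that only 8 bits are needed, we emit
//   %trunc = trunc <4 x i32> %root to <4 x i8>
// and the lanes extracted below are sign- or zero-extended back to i32.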
8449 auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
8450 if (MinBWs.count(ScalarRoot)) {
8451 if (auto *I = dyn_cast<Instruction>(VectorRoot)) {
8452 // If the root is a PHI node, insert the truncation after all PHI nodes
8453 // in its block; otherwise insert it right after the root instruction.
8454 if (isa<PHINode>(I))
8455 Builder.SetInsertPoint(&*I->getParent()->getFirstInsertionPt());
8456 else
8457 Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
8458 }
8459 auto BundleWidth = VectorizableTree[0]->Scalars.size();
8460 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
8461 auto *VecTy = FixedVectorType::get(MinTy, BundleWidth);
8462 auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
8463 VectorizableTree[0]->VectorizedValue = Trunc;
8464 }
8465
8466 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
8467 << " values .\n");
8468
8469 SmallVector<ShuffledInsertData> ShuffledInserts;
8470 // Maps vector instruction to original insertelement instruction
8471 DenseMap<Value *, InsertElementInst *> VectorToInsertElement;
8472 // Extract all of the elements with the external uses.
8473 for (const auto &ExternalUse : ExternalUses) {
8474 Value *Scalar = ExternalUse.Scalar;
8475 llvm::User *User = ExternalUse.User;
8476
8477 // Skip users that we have already RAUWed. This happens when one instruction
8478 // has multiple uses of the same value.
8479 if (User && !is_contained(Scalar->users(), User))
8480 continue;
8481 TreeEntry *E = getTreeEntry(Scalar);
8482 assert(E && "Invalid scalar");
8483 assert(E->State != TreeEntry::NeedToGather &&
8484 "Extracting from a gather list");
8485 // Non-instruction pointers are not deleted, just skip them.
8486 if (E->getOpcode() == Instruction::GetElementPtr &&
8487 !isa<GetElementPtrInst>(Scalar))
8488 continue;
8489
8490 Value *Vec = E->VectorizedValue;
8491 assert(Vec && "Can't find vectorizable value");
8492
8493 Value *Lane = Builder.getInt32(ExternalUse.Lane);
8494 auto ExtractAndExtendIfNeeded = [&](Value *Vec) {
8495 if (Scalar->getType() != Vec->getType()) {
8496 Value *Ex;
8497 // "Reuse" the existing extract to improve final codegen.
8498 if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) {
8499 Ex = Builder.CreateExtractElement(ES->getOperand(0),
8500 ES->getOperand(1));
8501 } else {
8502 Ex = Builder.CreateExtractElement(Vec, Lane);
8503 }
8504 // If necessary, sign-extend or zero-extend ScalarRoot
8505 // to the larger type.
8506 if (!MinBWs.count(ScalarRoot))
8507 return Ex;
8508 if (MinBWs[ScalarRoot].second)
8509 return Builder.CreateSExt(Ex, Scalar->getType());
8510 return Builder.CreateZExt(Ex, Scalar->getType());
8511 }
8512 assert(isa<FixedVectorType>(Scalar->getType()) &&
8513 isa<InsertElementInst>(Scalar) &&
8514 "In-tree scalar of vector type is not insertelement?");
8515 auto *IE = cast<InsertElementInst>(Scalar);
8516 VectorToInsertElement.try_emplace(Vec, IE);
8517 return Vec;
8518 };
8519 // If User == nullptr, the Scalar is used as extra arg. Generate
8520 // ExtractElement instruction and update the record for this scalar in
8521 // ExternallyUsedValues.
8522 if (!User) {
8523 assert(ExternallyUsedValues.count(Scalar) &&
8524 "Scalar with nullptr as an external user must be registered in "
8525 "ExternallyUsedValues map");
8526 if (auto *VecI = dyn_cast<Instruction>(Vec)) {
8527 Builder.SetInsertPoint(VecI->getParent(),
8528 std::next(VecI->getIterator()));
8529 } else {
8530 Builder.SetInsertPoint(&F->getEntryBlock().front());
8531 }
8532 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
8533 CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
8534 auto &NewInstLocs = ExternallyUsedValues[NewInst];
8535 auto It = ExternallyUsedValues.find(Scalar);
8536 assert(It != ExternallyUsedValues.end() &&
8537 "Externally used scalar is not found in ExternallyUsedValues");
8538 NewInstLocs.append(It->second);
8539 ExternallyUsedValues.erase(Scalar);
8540 // Required to update internally referenced instructions.
8541 Scalar->replaceAllUsesWith(NewInst);
8542 continue;
8543 }
8544
8545 if (auto *VU = dyn_cast<InsertElementInst>(User)) {
8546 // Skip if the scalar is another vector op or Vec is not an instruction.
8547 if (!Scalar->getType()->isVectorTy() && isa<Instruction>(Vec)) {
8548 if (auto *FTy = dyn_cast<FixedVectorType>(User->getType())) {
8549 Optional<unsigned> InsertIdx = getInsertIndex(VU);
8550 if (InsertIdx) {
8551 // Need to use original vector, if the root is truncated.
8552 if (MinBWs.count(Scalar) &&
8553 VectorizableTree[0]->VectorizedValue == Vec)
8554 Vec = VectorRoot;
8555 auto *It =
8556 find_if(ShuffledInserts, [VU](const ShuffledInsertData &Data) {
8557 // Checks if 2 insertelements are from the same buildvector.
8558 InsertElementInst *VecInsert = Data.InsertElements.front();
8559 return areTwoInsertFromSameBuildVector(VU, VecInsert);
8560 });
8561 unsigned Idx = *InsertIdx;
8562 if (It == ShuffledInserts.end()) {
8563 (void)ShuffledInserts.emplace_back();
8564 It = std::next(ShuffledInserts.begin(),
8565 ShuffledInserts.size() - 1);
8566 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec];
8567 if (Mask.empty())
8568 Mask.assign(FTy->getNumElements(), UndefMaskElem);
8569 // Find the insertvector, vectorized in tree, if any.
8570 Value *Base = VU;
8571 while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) {
8572 if (IEBase != User &&
8573 (!IEBase->hasOneUse() ||
8574 getInsertIndex(IEBase).value_or(Idx) == Idx))
8575 break;
8576 // Build the mask for the vectorized insertelement instructions.
8577 if (const TreeEntry *E = getTreeEntry(IEBase)) {
8578 do {
8579 IEBase = cast<InsertElementInst>(Base);
8580 int IEIdx = *getInsertIndex(IEBase);
8581 assert(Mask[Idx] == UndefMaskElem &&
8582 "InsertElementInstruction used already.");
8583 Mask[IEIdx] = IEIdx;
8584 Base = IEBase->getOperand(0);
8585 } while (E == getTreeEntry(Base));
8586 break;
8587 }
8588 Base = cast<InsertElementInst>(Base)->getOperand(0);
8589 // After vectorization the def-use chain has changed, so we need to
8590 // look through the original insertelement instructions if they were
8591 // replaced by vector instructions.
8592 auto It = VectorToInsertElement.find(Base);
8593 if (It != VectorToInsertElement.end())
8594 Base = It->second;
8595 }
8596 }
8597 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec];
8598 if (Mask.empty())
8599 Mask.assign(FTy->getNumElements(), UndefMaskElem);
8600 Mask[Idx] = ExternalUse.Lane;
8601 It->InsertElements.push_back(cast<InsertElementInst>(User));
8602 continue;
8603 }
8604 }
8605 }
8606 }
8607
8608 // Generate extracts for out-of-tree users.
8609 // Find the insertion point for the extractelement lane.
8610 if (auto *VecI = dyn_cast<Instruction>(Vec)) {
8611 if (PHINode *PH = dyn_cast<PHINode>(User)) {
8612 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
8613 if (PH->getIncomingValue(i) == Scalar) {
8614 Instruction *IncomingTerminator =
8615 PH->getIncomingBlock(i)->getTerminator();
8616 if (isa<CatchSwitchInst>(IncomingTerminator)) {
8617 Builder.SetInsertPoint(VecI->getParent(),
8618 std::next(VecI->getIterator()));
8619 } else {
8620 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
8621 }
8622 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
8623 CSEBlocks.insert(PH->getIncomingBlock(i));
8624 PH->setOperand(i, NewInst);
8625 }
8626 }
8627 } else {
8628 Builder.SetInsertPoint(cast<Instruction>(User));
8629 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
8630 CSEBlocks.insert(cast<Instruction>(User)->getParent());
8631 User->replaceUsesOfWith(Scalar, NewInst);
8632 }
8633 } else {
8634 Builder.SetInsertPoint(&F->getEntryBlock().front());
8635 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
8636 CSEBlocks.insert(&F->getEntryBlock());
8637 User->replaceUsesOfWith(Scalar, NewInst);
8638 }
8639
8640 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
8641 }
8642
8643 // Checks if the mask is an identity mask.
8644 auto &&IsIdentityMask = [](ArrayRef<int> Mask, FixedVectorType *VecTy) {
8645 int Limit = Mask.size();
8646 return VecTy->getNumElements() == Mask.size() &&
8647 all_of(Mask, [Limit](int Idx) { return Idx < Limit; }) &&
8648 ShuffleVectorInst::isIdentityMask(Mask);
8649 };
8650 // Tries to combine two different masks into a single one.
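// For example (illustrative), Mask = <2, 3, 0, 1> combined with
// ExtMask = <1, undef, 3, 0> yields <3, undef, 1, 2>, i.e. the external
// mask is applied on top of the existing one.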
8651 auto &&CombineMasks = [](SmallVectorImpl<int> &Mask, ArrayRef<int> ExtMask) {
8652 SmallVector<int> NewMask(ExtMask.size(), UndefMaskElem);
8653 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) {
8654 if (ExtMask[I] == UndefMaskElem)
8655 continue;
8656 NewMask[I] = Mask[ExtMask[I]];
8657 }
8658 Mask.swap(NewMask);
8659 };
8660 // Peek through shuffles, trying to simplify the final shuffle code.
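// For example (illustrative), if V is
//   %v = shufflevector <4 x i32> %x, <4 x i32> poison,
//                      <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// and Mask is <2, 3, 0, 1>, the shuffle can be folded away: V becomes %x
// and Mask becomes <3, 2, 0, 1>.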
8661 auto &&PeekThroughShuffles =
8662 [&IsIdentityMask, &CombineMasks](Value *&V, SmallVectorImpl<int> &Mask,
8663 bool CheckForLengthChange = false) {
8664 while (auto *SV = dyn_cast<ShuffleVectorInst>(V)) {
8665 // Exit if not a fixed vector type or changing size shuffle.
8666 if (!isa<FixedVectorType>(SV->getType()) ||
8667 (CheckForLengthChange && SV->changesLength()))
8668 break;
8669 // Exit if the identity or broadcast mask is found.
8670 if (IsIdentityMask(Mask, cast<FixedVectorType>(SV->getType())) ||
8671 SV->isZeroEltSplat())
8672 break;
8673 bool IsOp1Undef = isUndefVector(SV->getOperand(0));
8674 bool IsOp2Undef = isUndefVector(SV->getOperand(1));
8675 if (!IsOp1Undef && !IsOp2Undef)
8676 break;
8677 SmallVector<int> ShuffleMask(SV->getShuffleMask().begin(),
8678 SV->getShuffleMask().end());
8679 CombineMasks(ShuffleMask, Mask);
8680 Mask.swap(ShuffleMask);
8681 if (IsOp2Undef)
8682 V = SV->getOperand(0);
8683 else
8684 V = SV->getOperand(1);
8685 }
8686 };
8687 // Smart shuffle instruction emission, walks through shuffles trees and
8688 // tries to find the best matching vector for the actual shuffle
8689 // instruction.
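// For example (illustrative), with two <4 x i32> inputs and
// Mask = <0, 4, 1, 5>, indices below VF select from V1 and the rest from
// V2, so after peeking through both operands a single shufflevector of the
// (possibly simplified) operands is emitted.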
8690 auto &&CreateShuffle = [this, &IsIdentityMask, &PeekThroughShuffles,
8691 &CombineMasks](Value *V1, Value *V2,
8692 ArrayRef<int> Mask) -> Value * {
8693 assert(V1 && "Expected at least one vector value.");
8694 if (V2 && !isUndefVector(V2)) {
8695 // Peek through shuffles.
8696 Value *Op1 = V1;
8697 Value *Op2 = V2;
8698 int VF =
8699 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
8700 SmallVector<int> CombinedMask1(Mask.size(), UndefMaskElem);
8701 SmallVector<int> CombinedMask2(Mask.size(), UndefMaskElem);
8702 for (int I = 0, E = Mask.size(); I < E; ++I) {
8703 if (Mask[I] < VF)
8704 CombinedMask1[I] = Mask[I];
8705 else
8706 CombinedMask2[I] = Mask[I] - VF;
8707 }
8708 Value *PrevOp1;
8709 Value *PrevOp2;
8710 do {
8711 PrevOp1 = Op1;
8712 PrevOp2 = Op2;
8713 PeekThroughShuffles(Op1, CombinedMask1, /*CheckForLengthChange=*/true);
8714 PeekThroughShuffles(Op2, CombinedMask2, /*CheckForLengthChange=*/true);
8715 // Check if we have 2 resizing shuffles - need to peek through operands
8716 // again.
8717 if (auto *SV1 = dyn_cast<ShuffleVectorInst>(Op1))
8718 if (auto *SV2 = dyn_cast<ShuffleVectorInst>(Op2))
8719 if (SV1->getOperand(0)->getType() ==
8720 SV2->getOperand(0)->getType() &&
8721 SV1->getOperand(0)->getType() != SV1->getType() &&
8722 isUndefVector(SV1->getOperand(1)) &&
8723 isUndefVector(SV2->getOperand(1))) {
8724 Op1 = SV1->getOperand(0);
8725 Op2 = SV2->getOperand(0);
8726 SmallVector<int> ShuffleMask1(SV1->getShuffleMask().begin(),
8727 SV1->getShuffleMask().end());
8728 CombineMasks(ShuffleMask1, CombinedMask1);
8729 CombinedMask1.swap(ShuffleMask1);
8730 SmallVector<int> ShuffleMask2(SV2->getShuffleMask().begin(),
8731 SV2->getShuffleMask().end());
8732 CombineMasks(ShuffleMask2, CombinedMask2);
8733 CombinedMask2.swap(ShuffleMask2);
8734 }
8735 } while (PrevOp1 != Op1 || PrevOp2 != Op2);
8736 VF = cast<VectorType>(Op1->getType())
8737 ->getElementCount()
8738 .getKnownMinValue();
8739 for (int I = 0, E = Mask.size(); I < E; ++I) {
8740 if (CombinedMask2[I] != UndefMaskElem) {
8741 assert(CombinedMask1[I] == UndefMaskElem &&
8742 "Expected undefined mask element");
8743 CombinedMask1[I] = CombinedMask2[I] + (Op1 == Op2 ? 0 : VF);
8744 }
8745 }
8746 Value *Vec = Builder.CreateShuffleVector(
8747 Op1, Op1 == Op2 ? PoisonValue::get(Op1->getType()) : Op2,
8748 CombinedMask1);
8749 if (auto *I = dyn_cast<Instruction>(Vec)) {
8750 GatherShuffleSeq.insert(I);
8751 CSEBlocks.insert(I->getParent());
8752 }
8753 return Vec;
8754 }
8755 if (isa<PoisonValue>(V1))
8756 return PoisonValue::get(FixedVectorType::get(
8757 cast<VectorType>(V1->getType())->getElementType(), Mask.size()));
8758 Value *Op = V1;
8759 SmallVector<int> CombinedMask(Mask.begin(), Mask.end());
8760 PeekThroughShuffles(Op, CombinedMask);
8761 if (!isa<FixedVectorType>(Op->getType()) ||
8762 !IsIdentityMask(CombinedMask, cast<FixedVectorType>(Op->getType()))) {
8763 Value *Vec = Builder.CreateShuffleVector(Op, CombinedMask);
8764 if (auto *I = dyn_cast<Instruction>(Vec)) {
8765 GatherShuffleSeq.insert(I);
8766 CSEBlocks.insert(I->getParent());
8767 }
8768 return Vec;
8769 }
8770 return Op;
8771 };
8772
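// Resizes Vec to match the number of elements in Mask when they differ.
// For example (illustrative), an <8 x i32> vector used by the 4-element mask
// <1, 0, undef, 2> is first narrowed with the resize mask <0, 1, 2, undef>,
// which keeps lanes 0..2 in place; the original mask can then still be
// applied to the narrowed vector.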
8773 auto &&ResizeToVF = [&CreateShuffle](Value *Vec, ArrayRef<int> Mask) {
8774 unsigned VF = Mask.size();
8775 unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements();
8776 if (VF != VecVF) {
8777 if (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); })) {
8778 Vec = CreateShuffle(Vec, nullptr, Mask);
8779 return std::make_pair(Vec, true);
8780 }
8781 SmallVector<int> ResizeMask(VF, UndefMaskElem);
8782 for (unsigned I = 0; I < VF; ++I) {
8783 if (Mask[I] != UndefMaskElem)
8784 ResizeMask[Mask[I]] = Mask[I];
8785 }
8786 Vec = CreateShuffle(Vec, nullptr, ResizeMask);
8787 }
8788
8789 return std::make_pair(Vec, false);
8790 };
8791 // Perform shuffling of the vectorize tree entries for better handling of
8792 // external extracts.
8793 for (int I = 0, E = ShuffledInserts.size(); I < E; ++I) {
8794 // Find the first and the last instruction in the list of insertelements.
8795 sort(ShuffledInserts[I].InsertElements, isFirstInsertElement);
8796 InsertElementInst *FirstInsert = ShuffledInserts[I].InsertElements.front();
8797 InsertElementInst *LastInsert = ShuffledInserts[I].InsertElements.back();
8798 Builder.SetInsertPoint(LastInsert);
8799 auto Vector = ShuffledInserts[I].ValueMasks.takeVector();
8800 Value *NewInst = performExtractsShuffleAction<Value>(
8801 makeMutableArrayRef(Vector.data(), Vector.size()),
8802 FirstInsert->getOperand(0),
8803 [](Value *Vec) {
8804 return cast<VectorType>(Vec->getType())
8805 ->getElementCount()
8806 .getKnownMinValue();
8807 },
8808 ResizeToVF,
8809 [FirstInsert, &CreateShuffle](ArrayRef<int> Mask,
8810 ArrayRef<Value *> Vals) {
8811 assert((Vals.size() == 1 || Vals.size() == 2) &&
8812 "Expected exactly 1 or 2 input values.");
8813 if (Vals.size() == 1) {
8814 // Do not create shuffle if the mask is a simple identity
8815 // non-resizing mask.
8816 if (Mask.size() != cast<FixedVectorType>(Vals.front()->getType())
8817 ->getNumElements() ||
8818 !ShuffleVectorInst::isIdentityMask(Mask))
8819 return CreateShuffle(Vals.front(), nullptr, Mask);
8820 return Vals.front();
8821 }
8822 return CreateShuffle(Vals.front() ? Vals.front()
8823 : FirstInsert->getOperand(0),
8824 Vals.back(), Mask);
8825 });
8826 auto It = ShuffledInserts[I].InsertElements.rbegin();
8827 // Rebuild buildvector chain.
8828 InsertElementInst *II = nullptr;
8829 if (It != ShuffledInserts[I].InsertElements.rend())
8830 II = *It;
8831 SmallVector<Instruction *> Inserts;
8832 while (It != ShuffledInserts[I].InsertElements.rend()) {
8833 assert(II && "Must be an insertelement instruction.");
8834 if (*It == II)
8835 ++It;
8836 else
8837 Inserts.push_back(cast<Instruction>(II));
8838 II = dyn_cast<InsertElementInst>(II->getOperand(0));
8839 }
8840 for (Instruction *II : reverse(Inserts)) {
8841 II->replaceUsesOfWith(II->getOperand(0), NewInst);
8842 if (auto *NewI = dyn_cast<Instruction>(NewInst))
8843 if (II->getParent() == NewI->getParent() && II->comesBefore(NewI))
8844 II->moveAfter(NewI);
8845 NewInst = II;
8846 }
8847 LastInsert->replaceAllUsesWith(NewInst);
8848 for (InsertElementInst *IE : reverse(ShuffledInserts[I].InsertElements)) {
8849 IE->replaceUsesOfWith(IE->getOperand(0),
8850 PoisonValue::get(IE->getOperand(0)->getType()));
8851 IE->replaceUsesOfWith(IE->getOperand(1),
8852 PoisonValue::get(IE->getOperand(1)->getType()));
8853 eraseInstruction(IE);
8854 }
8855 CSEBlocks.insert(LastInsert->getParent());
8856 }
8857
8858 // For each vectorized value:
8859 for (auto &TEPtr : VectorizableTree) {
8860 TreeEntry *Entry = TEPtr.get();
8861
8862 // No need to handle users of gathered values.
8863 if (Entry->State == TreeEntry::NeedToGather)
8864 continue;
8865
8866 assert(Entry->VectorizedValue && "Can't find vectorizable value");
8867
8868 // For each lane:
8869 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
8870 Value *Scalar = Entry->Scalars[Lane];
8871
8872 if (Entry->getOpcode() == Instruction::GetElementPtr &&
8873 !isa<GetElementPtrInst>(Scalar))
8874 continue;
8875 #ifndef NDEBUG
8876 Type *Ty = Scalar->getType();
8877 if (!Ty->isVoidTy()) {
8878 for (User *U : Scalar->users()) {
8879 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
8880
8881 // It is legal to delete users in the ignorelist.
8882 assert((getTreeEntry(U) ||
8883 (UserIgnoreList && UserIgnoreList->contains(U)) ||
8884 (isa_and_nonnull<Instruction>(U) &&
8885 isDeleted(cast<Instruction>(U)))) &&
8886 "Deleting out-of-tree value");
8887 }
8888 }
8889 #endif
8890 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
8891 eraseInstruction(cast<Instruction>(Scalar));
8892 }
8893 }
8894
8895 Builder.ClearInsertionPoint();
8896 InstrElementSize.clear();
8897
8898 return VectorizableTree[0]->VectorizedValue;
8899 }
8900
8901 void BoUpSLP::optimizeGatherSequence() {
8902 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherShuffleSeq.size()
8903 << " gather sequences instructions.\n");
8904 // LICM InsertElementInst sequences.
8905 for (Instruction *I : GatherShuffleSeq) {
8906 if (isDeleted(I))
8907 continue;
8908
8909 // Check if this block is inside a loop.
8910 Loop *L = LI->getLoopFor(I->getParent());
8911 if (!L)
8912 continue;
8913
8914 // Check if it has a preheader.
8915 BasicBlock *PreHeader = L->getLoopPreheader();
8916 if (!PreHeader)
8917 continue;
8918
8919 // If the vector or the element that we insert into it are
8920 // instructions that are defined in this basic block then we can't
8921 // hoist this instruction.
8922 if (any_of(I->operands(), [L](Value *V) {
8923 auto *OpI = dyn_cast<Instruction>(V);
8924 return OpI && L->contains(OpI);
8925 }))
8926 continue;
8927
8928 // We can hoist this instruction. Move it to the pre-header.
8929 I->moveBefore(PreHeader->getTerminator());
8930 }
8931
8932 // Make a list of all reachable blocks in our CSE queue.
8933 SmallVector<const DomTreeNode *, 8> CSEWorkList;
8934 CSEWorkList.reserve(CSEBlocks.size());
8935 for (BasicBlock *BB : CSEBlocks)
8936 if (DomTreeNode *N = DT->getNode(BB)) {
8937 assert(DT->isReachableFromEntry(N));
8938 CSEWorkList.push_back(N);
8939 }
8940
8941 // Sort blocks by domination. This ensures we visit a block after all blocks
8942 // dominating it are visited.
8943 llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) {
8944 assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) &&
8945 "Different nodes should have different DFS numbers");
8946 return A->getDFSNumIn() < B->getDFSNumIn();
8947 });
8948
8949 // Less defined shuffles can be replaced by the more defined copies.
8950 // Between two shuffles one is less defined if it has the same vector operands
8951 // and its mask indices are the same as in the first one or undefs. E.g.
8952 // shuffle %0, poison, <0, 0, 0, undef> is less defined than shuffle %0,
8953 // poison, <0, 0, 0, 0>.
8954 auto &&IsIdenticalOrLessDefined = [this](Instruction *I1, Instruction *I2,
8955 SmallVectorImpl<int> &NewMask) {
8956 if (I1->getType() != I2->getType())
8957 return false;
8958 auto *SI1 = dyn_cast<ShuffleVectorInst>(I1);
8959 auto *SI2 = dyn_cast<ShuffleVectorInst>(I2);
8960 if (!SI1 || !SI2)
8961 return I1->isIdenticalTo(I2);
8962 if (SI1->isIdenticalTo(SI2))
8963 return true;
8964 for (int I = 0, E = SI1->getNumOperands(); I < E; ++I)
8965 if (SI1->getOperand(I) != SI2->getOperand(I))
8966 return false;
8967 // Check if the second instruction is more defined than the first one.
8968 NewMask.assign(SI2->getShuffleMask().begin(), SI2->getShuffleMask().end());
8969 ArrayRef<int> SM1 = SI1->getShuffleMask();
8970 // Count trailing undefs in the mask to check the final number of used
8971 // registers.
8972 unsigned LastUndefsCnt = 0;
8973 for (int I = 0, E = NewMask.size(); I < E; ++I) {
8974 if (SM1[I] == UndefMaskElem)
8975 ++LastUndefsCnt;
8976 else
8977 LastUndefsCnt = 0;
8978 if (NewMask[I] != UndefMaskElem && SM1[I] != UndefMaskElem &&
8979 NewMask[I] != SM1[I])
8980 return false;
8981 if (NewMask[I] == UndefMaskElem)
8982 NewMask[I] = SM1[I];
8983 }
8984 // Check if the last undefs actually change the final number of used vector
8985 // registers.
8986 return SM1.size() - LastUndefsCnt > 1 &&
8987 TTI->getNumberOfParts(SI1->getType()) ==
8988 TTI->getNumberOfParts(
8989 FixedVectorType::get(SI1->getType()->getElementType(),
8990 SM1.size() - LastUndefsCnt));
8991 };
8992 // Perform O(N^2) search over the gather/shuffle sequences and merge identical
8993 // instructions. TODO: We can further optimize this scan if we split the
8994 // instructions into different buckets based on the insert lane.
8995 SmallVector<Instruction *, 16> Visited;
8996 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
8997 assert(*I &&
8998 (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
8999 "Worklist not sorted properly!");
9000 BasicBlock *BB = (*I)->getBlock();
9001 // For all instructions in blocks containing gather sequences:
9002 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
9003 if (isDeleted(&In))
9004 continue;
9005 if (!isa<InsertElementInst>(&In) && !isa<ExtractElementInst>(&In) &&
9006 !isa<ShuffleVectorInst>(&In) && !GatherShuffleSeq.contains(&In))
9007 continue;
9008
9009 // Check if we can replace this instruction with any of the
9010 // visited instructions.
9011 bool Replaced = false;
9012 for (Instruction *&V : Visited) {
9013 SmallVector<int> NewMask;
9014 if (IsIdenticalOrLessDefined(&In, V, NewMask) &&
9015 DT->dominates(V->getParent(), In.getParent())) {
9016 In.replaceAllUsesWith(V);
9017 eraseInstruction(&In);
9018 if (auto *SI = dyn_cast<ShuffleVectorInst>(V))
9019 if (!NewMask.empty())
9020 SI->setShuffleMask(NewMask);
9021 Replaced = true;
9022 break;
9023 }
9024 if (isa<ShuffleVectorInst>(In) && isa<ShuffleVectorInst>(V) &&
9025 GatherShuffleSeq.contains(V) &&
9026 IsIdenticalOrLessDefined(V, &In, NewMask) &&
9027 DT->dominates(In.getParent(), V->getParent())) {
9028 In.moveAfter(V);
9029 V->replaceAllUsesWith(&In);
9030 eraseInstruction(V);
9031 if (auto *SI = dyn_cast<ShuffleVectorInst>(&In))
9032 if (!NewMask.empty())
9033 SI->setShuffleMask(NewMask);
9034 V = &In;
9035 Replaced = true;
9036 break;
9037 }
9038 }
9039 if (!Replaced) {
9040 assert(!is_contained(Visited, &In));
9041 Visited.push_back(&In);
9042 }
9043 }
9044 }
9045 CSEBlocks.clear();
9046 GatherShuffleSeq.clear();
9047 }
9048
9049 BoUpSLP::ScheduleData *
9050 BoUpSLP::BlockScheduling::buildBundle(ArrayRef<Value *> VL) {
9051 ScheduleData *Bundle = nullptr;
9052 ScheduleData *PrevInBundle = nullptr;
9053 for (Value *V : VL) {
9054 if (doesNotNeedToBeScheduled(V))
9055 continue;
9056 ScheduleData *BundleMember = getScheduleData(V);
9057 assert(BundleMember &&
9058 "no ScheduleData for bundle member "
9059 "(maybe not in same basic block)");
9060 assert(BundleMember->isSchedulingEntity() &&
9061 "bundle member already part of other bundle");
9062 if (PrevInBundle) {
9063 PrevInBundle->NextInBundle = BundleMember;
9064 } else {
9065 Bundle = BundleMember;
9066 }
9067
9068 // Group the instructions to a bundle.
9069 BundleMember->FirstInBundle = Bundle;
9070 PrevInBundle = BundleMember;
9071 }
9072 assert(Bundle && "Failed to find schedule bundle");
9073 return Bundle;
9074 }
9075
9076 // Groups the instructions into a bundle (which is then a single scheduling entity)
9077 // and schedules instructions until the bundle gets ready.
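// For example (illustrative), when vectorizing two adjacent stores the two
// StoreInsts form one bundle; the bundle becomes ready once every in-region
// instruction it depends on has been scheduled.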
9078 Optional<BoUpSLP::ScheduleData *>
9079 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
9080 const InstructionsState &S) {
9081 // No need to schedule PHIs, insertelement, extractelement and extractvalue
9082 // instructions.
9083 if (isa<PHINode>(S.OpValue) || isVectorLikeInstWithConstOps(S.OpValue) ||
9084 doesNotNeedToSchedule(VL))
9085 return nullptr;
9086
9087 // Initialize the instruction bundle.
9088 Instruction *OldScheduleEnd = ScheduleEnd;
9089 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n");
9090
9091 auto TryScheduleBundleImpl = [this, OldScheduleEnd, SLP](bool ReSchedule,
9092 ScheduleData *Bundle) {
9093 // The scheduling region got new instructions at the lower end (or it is a
9094 // new region for the first bundle). This makes it necessary to
9095 // recalculate all dependencies.
9096 // It is seldom that this needs to be done a second time after adding the
9097 // initial bundle to the region.
9098 if (ScheduleEnd != OldScheduleEnd) {
9099 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode())
9100 doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); });
9101 ReSchedule = true;
9102 }
9103 if (Bundle) {
9104 LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle
9105 << " in block " << BB->getName() << "\n");
9106 calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP);
9107 }
9108
9109 if (ReSchedule) {
9110 resetSchedule();
9111 initialFillReadyList(ReadyInsts);
9112 }
9113
9114 // Now try to schedule the new bundle or (if no bundle) just calculate
9115 // dependencies. As soon as the bundle is "ready" it means that there are no
9116 // cyclic dependencies and we can schedule it. Note that it's important that we
9117 // don't "schedule" the bundle yet (see cancelScheduling).
9118 while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) &&
9119 !ReadyInsts.empty()) {
9120 ScheduleData *Picked = ReadyInsts.pop_back_val();
9121 assert(Picked->isSchedulingEntity() && Picked->isReady() &&
9122 "must be ready to schedule");
9123 schedule(Picked, ReadyInsts);
9124 }
9125 };
9126
9127 // Make sure that the scheduling region contains all
9128 // instructions of the bundle.
9129 for (Value *V : VL) {
9130 if (doesNotNeedToBeScheduled(V))
9131 continue;
9132 if (!extendSchedulingRegion(V, S)) {
9133 // If the scheduling region got new instructions at the lower end (or it
9134 // is a new region for the first bundle), it is necessary to recalculate
9135 // all dependencies.
9136 // Otherwise the compiler may crash trying to incorrectly calculate
9137 // dependencies and emit instruction in the wrong order at the actual
9138 // scheduling.
9139 TryScheduleBundleImpl(/*ReSchedule=*/false, nullptr);
9140 return None;
9141 }
9142 }
9143
9144 bool ReSchedule = false;
9145 for (Value *V : VL) {
9146 if (doesNotNeedToBeScheduled(V))
9147 continue;
9148 ScheduleData *BundleMember = getScheduleData(V);
9149 assert(BundleMember &&
9150 "no ScheduleData for bundle member (maybe not in same basic block)");
9151
9152 // Make sure we don't leave the pieces of the bundle in the ready list when
9153 // the whole bundle might not be ready.
9154 ReadyInsts.remove(BundleMember);
9155
9156 if (!BundleMember->IsScheduled)
9157 continue;
9158 // A bundle member was scheduled as single instruction before and now
9159 // needs to be scheduled as part of the bundle. We just get rid of the
9160 // existing schedule.
9161 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
9162 << " was already scheduled\n");
9163 ReSchedule = true;
9164 }
9165
9166 auto *Bundle = buildBundle(VL);
9167 TryScheduleBundleImpl(ReSchedule, Bundle);
9168 if (!Bundle->isReady()) {
9169 cancelScheduling(VL, S.OpValue);
9170 return None;
9171 }
9172 return Bundle;
9173 }
9174
9175 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
9176 Value *OpValue) {
9177 if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue) ||
9178 doesNotNeedToSchedule(VL))
9179 return;
9180
9181 if (doesNotNeedToBeScheduled(OpValue))
9182 OpValue = *find_if_not(VL, doesNotNeedToBeScheduled);
9183 ScheduleData *Bundle = getScheduleData(OpValue);
9184 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
9185 assert(!Bundle->IsScheduled &&
9186 "Can't cancel bundle which is already scheduled");
9187 assert(Bundle->isSchedulingEntity() &&
9188 (Bundle->isPartOfBundle() || needToScheduleSingleInstruction(VL)) &&
9189 "tried to unbundle something which is not a bundle");
9190
9191 // Remove the bundle from the ready list.
9192 if (Bundle->isReady())
9193 ReadyInsts.remove(Bundle);
9194
9195 // Un-bundle: make single instructions out of the bundle.
9196 ScheduleData *BundleMember = Bundle;
9197 while (BundleMember) {
9198 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
9199 BundleMember->FirstInBundle = BundleMember;
9200 ScheduleData *Next = BundleMember->NextInBundle;
9201 BundleMember->NextInBundle = nullptr;
9202 BundleMember->TE = nullptr;
9203 if (BundleMember->unscheduledDepsInBundle() == 0) {
9204 ReadyInsts.insert(BundleMember);
9205 }
9206 BundleMember = Next;
9207 }
9208 }
9209
9210 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
9211 // Allocate a new ScheduleData for the instruction.
9212 if (ChunkPos >= ChunkSize) {
9213 ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize));
9214 ChunkPos = 0;
9215 }
9216 return &(ScheduleDataChunks.back()[ChunkPos++]);
9217 }
9218
9219 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V,
9220 const InstructionsState &S) {
9221 if (getScheduleData(V, isOneOf(S, V)))
9222 return true;
9223 Instruction *I = dyn_cast<Instruction>(V);
9224 assert(I && "bundle member must be an instruction");
9225 assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) &&
9226 !doesNotNeedToBeScheduled(I) &&
9227 "phi nodes/insertelements/extractelements/extractvalues don't need to "
9228 "be scheduled");
9229 auto &&CheckScheduleForI = [this, &S](Instruction *I) -> bool {
9230 ScheduleData *ISD = getScheduleData(I);
9231 if (!ISD)
9232 return false;
9233 assert(isInSchedulingRegion(ISD) &&
9234 "ScheduleData not in scheduling region");
9235 ScheduleData *SD = allocateScheduleDataChunks();
9236 SD->Inst = I;
9237 SD->init(SchedulingRegionID, S.OpValue);
9238 ExtraScheduleDataMap[I][S.OpValue] = SD;
9239 return true;
9240 };
9241 if (CheckScheduleForI(I))
9242 return true;
9243 if (!ScheduleStart) {
9244 // It's the first instruction in the new region.
9245 initScheduleData(I, I->getNextNode(), nullptr, nullptr);
9246 ScheduleStart = I;
9247 ScheduleEnd = I->getNextNode();
9248 if (isOneOf(S, I) != I)
9249 CheckScheduleForI(I);
9250 assert(ScheduleEnd && "tried to vectorize a terminator?");
9251 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
9252 return true;
9253 }
9254 // Search up and down at the same time, because we don't know if the new
9255 // instruction is above or below the existing scheduling region.
9256 BasicBlock::reverse_iterator UpIter =
9257 ++ScheduleStart->getIterator().getReverse();
9258 BasicBlock::reverse_iterator UpperEnd = BB->rend();
9259 BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
9260 BasicBlock::iterator LowerEnd = BB->end();
9261 while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I &&
9262 &*DownIter != I) {
9263 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
9264 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
9265 return false;
9266 }
9267
9268 ++UpIter;
9269 ++DownIter;
9270 }
9271 if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) {
9272 assert(I->getParent() == ScheduleStart->getParent() &&
9273 "Instruction is in wrong basic block.");
9274 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
9275 ScheduleStart = I;
9276 if (isOneOf(S, I) != I)
9277 CheckScheduleForI(I);
9278 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I
9279 << "\n");
9280 return true;
9281 }
9282 assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) &&
9283 "Expected to reach top of the basic block or instruction down the "
9284 "lower end.");
9285 assert(I->getParent() == ScheduleEnd->getParent() &&
9286 "Instruction is in wrong basic block.");
9287 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
9288 nullptr);
9289 ScheduleEnd = I->getNextNode();
9290 if (isOneOf(S, I) != I)
9291 CheckScheduleForI(I);
9292 assert(ScheduleEnd && "tried to vectorize a terminator?");
9293 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n");
9294 return true;
9295 }
9296
9297 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
9298 Instruction *ToI,
9299 ScheduleData *PrevLoadStore,
9300 ScheduleData *NextLoadStore) {
9301 ScheduleData *CurrentLoadStore = PrevLoadStore;
9302 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
9303 // No need to allocate data for non-schedulable instructions.
9304 if (doesNotNeedToBeScheduled(I))
9305 continue;
9306 ScheduleData *SD = ScheduleDataMap.lookup(I);
9307 if (!SD) {
9308 SD = allocateScheduleDataChunks();
9309 ScheduleDataMap[I] = SD;
9310 SD->Inst = I;
9311 }
9312 assert(!isInSchedulingRegion(SD) &&
9313 "new ScheduleData already in scheduling region");
9314 SD->init(SchedulingRegionID, I);
9315
9316 if (I->mayReadOrWriteMemory() &&
9317 (!isa<IntrinsicInst>(I) ||
9318 (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect &&
9319 cast<IntrinsicInst>(I)->getIntrinsicID() !=
9320 Intrinsic::pseudoprobe))) {
9321 // Update the linked list of memory accessing instructions.
9322 if (CurrentLoadStore) {
9323 CurrentLoadStore->NextLoadStore = SD;
9324 } else {
9325 FirstLoadStoreInRegion = SD;
9326 }
9327 CurrentLoadStore = SD;
9328 }
9329
9330 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
9331 match(I, m_Intrinsic<Intrinsic::stackrestore>()))
9332 RegionHasStackSave = true;
9333 }
9334 if (NextLoadStore) {
9335 if (CurrentLoadStore)
9336 CurrentLoadStore->NextLoadStore = NextLoadStore;
9337 } else {
9338 LastLoadStoreInRegion = CurrentLoadStore;
9339 }
9340 }
9341
9342 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
9343 bool InsertInReadyList,
9344 BoUpSLP *SLP) {
9345 assert(SD->isSchedulingEntity());
9346
9347 SmallVector<ScheduleData *, 10> WorkList;
9348 WorkList.push_back(SD);
9349
9350 while (!WorkList.empty()) {
9351 ScheduleData *SD = WorkList.pop_back_val();
9352 for (ScheduleData *BundleMember = SD; BundleMember;
9353 BundleMember = BundleMember->NextInBundle) {
9354 assert(isInSchedulingRegion(BundleMember));
9355 if (BundleMember->hasValidDependencies())
9356 continue;
9357
9358 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember
9359 << "\n");
9360 BundleMember->Dependencies = 0;
9361 BundleMember->resetUnscheduledDeps();
9362
9363 // Handle def-use chain dependencies.
9364 if (BundleMember->OpValue != BundleMember->Inst) {
9365 if (ScheduleData *UseSD = getScheduleData(BundleMember->Inst)) {
9366 BundleMember->Dependencies++;
9367 ScheduleData *DestBundle = UseSD->FirstInBundle;
9368 if (!DestBundle->IsScheduled)
9369 BundleMember->incrementUnscheduledDeps(1);
9370 if (!DestBundle->hasValidDependencies())
9371 WorkList.push_back(DestBundle);
9372 }
9373 } else {
9374 for (User *U : BundleMember->Inst->users()) {
9375 if (ScheduleData *UseSD = getScheduleData(cast<Instruction>(U))) {
9376 BundleMember->Dependencies++;
9377 ScheduleData *DestBundle = UseSD->FirstInBundle;
9378 if (!DestBundle->IsScheduled)
9379 BundleMember->incrementUnscheduledDeps(1);
9380 if (!DestBundle->hasValidDependencies())
9381 WorkList.push_back(DestBundle);
9382 }
9383 }
9384 }
9385
9386 auto makeControlDependent = [&](Instruction *I) {
9387 auto *DepDest = getScheduleData(I);
9388 assert(DepDest && "must be in schedule window");
9389 DepDest->ControlDependencies.push_back(BundleMember);
9390 BundleMember->Dependencies++;
9391 ScheduleData *DestBundle = DepDest->FirstInBundle;
9392 if (!DestBundle->IsScheduled)
9393 BundleMember->incrementUnscheduledDeps(1);
9394 if (!DestBundle->hasValidDependencies())
9395 WorkList.push_back(DestBundle);
9396 };
9397
9398 // Any instruction which isn't safe to speculate at the beginning of the
9399 // block is control dependent on any early exit or non-willreturn call
9400 // that precedes it.
9401 if (!isGuaranteedToTransferExecutionToSuccessor(BundleMember->Inst)) {
9402 for (Instruction *I = BundleMember->Inst->getNextNode();
9403 I != ScheduleEnd; I = I->getNextNode()) {
9404 if (isSafeToSpeculativelyExecute(I, &*BB->begin()))
9405 continue;
9406
9407 // Add the dependency
9408 makeControlDependent(I);
9409
9410 if (!isGuaranteedToTransferExecutionToSuccessor(I))
9411 // Everything past here must be control dependent on I.
9412 break;
9413 }
9414 }
9415
9416 if (RegionHasStackSave) {
9417 // If we have an inalloca alloca instruction, it needs to be scheduled
9418 // after any preceding stacksave. We also need to prevent any alloca
9419 // from reordering above a preceding stackrestore.
9420 if (match(BundleMember->Inst, m_Intrinsic<Intrinsic::stacksave>()) ||
9421 match(BundleMember->Inst, m_Intrinsic<Intrinsic::stackrestore>())) {
9422 for (Instruction *I = BundleMember->Inst->getNextNode();
9423 I != ScheduleEnd; I = I->getNextNode()) {
9424 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
9425 match(I, m_Intrinsic<Intrinsic::stackrestore>()))
9426 // Any allocas past here must be control dependent on I, and I
9427 // must be memory dependent on BundleMember->Inst.
9428 break;
9429
9430 if (!isa<AllocaInst>(I))
9431 continue;
9432
9433 // Add the dependency
9434 makeControlDependent(I);
9435 }
9436 }
9437
9438 // In addition to the cases handled just above, we need to prevent
9439 // allocas from moving below a stacksave. The stackrestore case
9440 // is currently believed to be unnecessarily conservative.
9441 if (isa<AllocaInst>(BundleMember->Inst)) {
9442 for (Instruction *I = BundleMember->Inst->getNextNode();
9443 I != ScheduleEnd; I = I->getNextNode()) {
9444 if (!match(I, m_Intrinsic<Intrinsic::stacksave>()) &&
9445 !match(I, m_Intrinsic<Intrinsic::stackrestore>()))
9446 continue;
9447
9448 // Add the dependency
9449 makeControlDependent(I);
9450 break;
9451 }
9452 }
9453 }
9454
9455 // Handle the memory dependencies (if any).
9456 ScheduleData *DepDest = BundleMember->NextLoadStore;
9457 if (!DepDest)
9458 continue;
9459 Instruction *SrcInst = BundleMember->Inst;
9460 assert(SrcInst->mayReadOrWriteMemory() &&
9461 "NextLoadStore list for non memory effecting bundle?");
9462 MemoryLocation SrcLoc = getLocation(SrcInst);
9463 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
9464 unsigned numAliased = 0;
9465 unsigned DistToSrc = 1;
9466
9467 for ( ; DepDest; DepDest = DepDest->NextLoadStore) {
9468 assert(isInSchedulingRegion(DepDest));
9469
9470 // We have two limits to reduce the complexity:
9471 // 1) AliasedCheckLimit: It's a small limit to reduce calls to
9472 // SLP->isAliased (which is the expensive part in this loop).
9473 // 2) MaxMemDepDistance: It's for very large blocks and it aborts
9474 // the whole loop (even if the loop is fast, it's quadratic).
9475 // It's important for the loop break condition (see below) to
9476 // check this limit even between two read-only instructions.
9477 if (DistToSrc >= MaxMemDepDistance ||
9478 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
9479 (numAliased >= AliasedCheckLimit ||
9480 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
9481
9482 // We increment the counter only if the locations are aliased
9483 // (instead of counting all alias checks). This gives a better
9484 // balance between reduced runtime and accurate dependencies.
9485 numAliased++;
9486
9487 DepDest->MemoryDependencies.push_back(BundleMember);
9488 BundleMember->Dependencies++;
9489 ScheduleData *DestBundle = DepDest->FirstInBundle;
9490 if (!DestBundle->IsScheduled) {
9491 BundleMember->incrementUnscheduledDeps(1);
9492 }
9493 if (!DestBundle->hasValidDependencies()) {
9494 WorkList.push_back(DestBundle);
9495 }
9496 }
9497
9498 // Example, explaining the loop break condition: Let's assume our
9499 // starting instruction is i0 and MaxMemDepDistance = 3.
9500 //
9501 // +--------v--v--v
9502 // i0,i1,i2,i3,i4,i5,i6,i7,i8
9503 // +--------^--^--^
9504 //
9505 // MaxMemDepDistance let us stop alias-checking at i3 and we add
9506 // dependencies from i0 to i3,i4,.. (even if they are not aliased).
9507 // Previously we already added dependencies from i3 to i6,i7,i8
9508 // (because of MaxMemDepDistance). As we added a dependency from
9509 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
9510 // and we can abort this loop at i6.
9511 if (DistToSrc >= 2 * MaxMemDepDistance)
9512 break;
9513 DistToSrc++;
9514 }
9515 }
9516 if (InsertInReadyList && SD->isReady()) {
9517 ReadyInsts.insert(SD);
9518 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst
9519 << "\n");
9520 }
9521 }
9522 }
9523
9524 void BoUpSLP::BlockScheduling::resetSchedule() {
9525 assert(ScheduleStart &&
9526 "tried to reset schedule on block which has not been scheduled");
9527 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
9528 doForAllOpcodes(I, [&](ScheduleData *SD) {
9529 assert(isInSchedulingRegion(SD) &&
9530 "ScheduleData not in scheduling region");
9531 SD->IsScheduled = false;
9532 SD->resetUnscheduledDeps();
9533 });
9534 }
9535 ReadyInsts.clear();
9536 }
9537
9538 void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
9539 if (!BS->ScheduleStart)
9540 return;
9541
9542 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
9543
9544 // A key point - if we got here, pre-scheduling was able to find a valid
9545 // scheduling of the sub-graph of the scheduling window which consists
9546 // of all vector bundles and their transitive users. As such, we do not
9547 // need to reschedule anything *outside of* that subgraph.
9548
9549 BS->resetSchedule();
9550
9551 // For the real scheduling we use a more sophisticated ready-list: it is
9552 // sorted by the original instruction location. This lets the final schedule
9553 // be as close as possible to the original instruction order.
9554 // WARNING: If changing this order causes a correctness issue, that means
9555 // there is some missing dependence edge in the schedule data graph.
9556 struct ScheduleDataCompare {
9557 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
9558 return SD2->SchedulingPriority < SD1->SchedulingPriority;
9559 }
9560 };
9561 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
9562
9563 // Ensure that all dependency data is updated (for nodes in the sub-graph)
9564 // and fill the ready-list with initial instructions.
9565 int Idx = 0;
9566 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
9567 I = I->getNextNode()) {
9568 BS->doForAllOpcodes(I, [this, &Idx, BS](ScheduleData *SD) {
9569 TreeEntry *SDTE = getTreeEntry(SD->Inst);
9570 (void)SDTE;
9571 assert((isVectorLikeInstWithConstOps(SD->Inst) ||
9572 SD->isPartOfBundle() ==
9573 (SDTE && !doesNotNeedToSchedule(SDTE->Scalars))) &&
9574 "scheduler and vectorizer bundle mismatch");
9575 SD->FirstInBundle->SchedulingPriority = Idx++;
9576
9577 if (SD->isSchedulingEntity() && SD->isPartOfBundle())
9578 BS->calculateDependencies(SD, false, this);
9579 });
9580 }
9581 BS->initialFillReadyList(ReadyInsts);
9582
9583 Instruction *LastScheduledInst = BS->ScheduleEnd;
9584
9585 // Do the "real" scheduling.
9586 while (!ReadyInsts.empty()) {
9587 ScheduleData *picked = *ReadyInsts.begin();
9588 ReadyInsts.erase(ReadyInsts.begin());
9589
9590 // Move the scheduled instruction(s) to their dedicated places, if not
9591 // there yet.
9592 for (ScheduleData *BundleMember = picked; BundleMember;
9593 BundleMember = BundleMember->NextInBundle) {
9594 Instruction *pickedInst = BundleMember->Inst;
9595 if (pickedInst->getNextNode() != LastScheduledInst)
9596 pickedInst->moveBefore(LastScheduledInst);
9597 LastScheduledInst = pickedInst;
9598 }
9599
9600 BS->schedule(picked, ReadyInsts);
9601 }
9602
9603 // Check that we didn't break any of our invariants.
9604 #ifdef EXPENSIVE_CHECKS
9605 BS->verify();
9606 #endif
9607
9608 #if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
9609 // Check that all schedulable entities got scheduled
9610 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; I = I->getNextNode()) {
9611 BS->doForAllOpcodes(I, [&](ScheduleData *SD) {
9612 if (SD->isSchedulingEntity() && SD->hasValidDependencies()) {
9613 assert(SD->IsScheduled && "must be scheduled at this point");
9614 }
9615 });
9616 }
9617 #endif
9618
9619 // Avoid duplicate scheduling of the block.
9620 BS->ScheduleStart = nullptr;
9621 }
9622
9623 unsigned BoUpSLP::getVectorElementSize(Value *V) {
9624 // If V is a store, just return the width of the stored value (or value
9625 // truncated just before storing) without traversing the expression tree.
9626 // This is the common case.
9627 if (auto *Store = dyn_cast<StoreInst>(V))
9628 return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
9629
9630 if (auto *IEI = dyn_cast<InsertElementInst>(V))
9631 return getVectorElementSize(IEI->getOperand(1));
9632
9633 auto E = InstrElementSize.find(V);
9634 if (E != InstrElementSize.end())
9635 return E->second;
9636
9637 // If V is not a store, we can traverse the expression tree to find loads
9638 // that feed it. The type of the loaded value may indicate a more suitable
9639 // width than V's type. We want to base the vector element size on the width
9640 // of memory operations where possible.
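// For example (illustrative), for
//   %a = load i16, ptr %p
//   %x = sext i16 %a to i32
//   %s = add i32 %x, %y
// a query for %s walks down to the load and returns 16 rather than 32.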
9641 SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist;
9642 SmallPtrSet<Instruction *, 16> Visited;
9643 if (auto *I = dyn_cast<Instruction>(V)) {
9644 Worklist.emplace_back(I, I->getParent());
9645 Visited.insert(I);
9646 }
9647
9648 // Traverse the expression tree in bottom-up order looking for loads. If we
9649 // encounter an instruction we don't yet handle, we give up.
9650 auto Width = 0u;
9651 while (!Worklist.empty()) {
9652 Instruction *I;
9653 BasicBlock *Parent;
9654 std::tie(I, Parent) = Worklist.pop_back_val();
9655
9656 // We should only be looking at scalar instructions here. If the current
9657 // instruction has a vector type, skip.
9658 auto *Ty = I->getType();
9659 if (isa<VectorType>(Ty))
9660 continue;
9661
9662 // If the current instruction is a load, extractelement or extractvalue,
9663 // update Width to reflect the width of its scalar type.
9664 if (isa<LoadInst>(I) || isa<ExtractElementInst>(I) ||
9665 isa<ExtractValueInst>(I))
9666 Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty));
9667
9668 // Otherwise, we need to visit the operands of the instruction. We only
9669 // handle the interesting cases from buildTree here. If an operand is an
9670 // instruction we haven't yet visited and from the same basic block as the
9671 // user or the use is a PHI node, we add it to the worklist.
9672 else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
9673 isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I) ||
9674 isa<UnaryOperator>(I)) {
9675 for (Use &U : I->operands())
9676 if (auto *J = dyn_cast<Instruction>(U.get()))
9677 if (Visited.insert(J).second &&
9678 (isa<PHINode>(I) || J->getParent() == Parent))
9679 Worklist.emplace_back(J, J->getParent());
9680 } else {
9681 break;
9682 }
9683 }
9684
9685 // If we didn't encounter a memory access in the expression tree, or if we
9686 // gave up for some reason, just return the width of V. Otherwise, return the
9687 // maximum width we found.
9688 if (!Width) {
9689 if (auto *CI = dyn_cast<CmpInst>(V))
9690 V = CI->getOperand(0);
9691 Width = DL->getTypeSizeInBits(V->getType());
9692 }
9693
9694 for (Instruction *I : Visited)
9695 InstrElementSize[I] = Width;
9696
9697 return Width;
9698 }
9699
9700 // Determine if a value V in a vectorizable expression Expr can be demoted to a
9701 // smaller type with a truncation. We collect the values that will be demoted
9702 // in ToDemote and additional roots that require investigating in Roots.
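// For example (illustrative, assuming each value has a single use inside the
// expression), in
//   %a = zext i8 %x to i32
//   %b = zext i8 %y to i32
//   %s = add i32 %a, %b
// the add and both zexts can be demoted to a narrower type, while a trunc
// operand would push its source value onto Roots for further investigation.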
9703 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
9704 SmallVectorImpl<Value *> &ToDemote,
9705 SmallVectorImpl<Value *> &Roots) {
9706 // We can always demote constants.
9707 if (isa<Constant>(V)) {
9708 ToDemote.push_back(V);
9709 return true;
9710 }
9711
9712 // If the value is not an instruction in the expression with only one use, it
9713 // cannot be demoted.
9714 auto *I = dyn_cast<Instruction>(V);
9715 if (!I || !I->hasOneUse() || !Expr.count(I))
9716 return false;
9717
9718 switch (I->getOpcode()) {
9719
9720 // We can always demote truncations and extensions. Since truncations can
9721 // seed additional demotion, we save the truncated value.
9722 case Instruction::Trunc:
9723 Roots.push_back(I->getOperand(0));
9724 break;
9725 case Instruction::ZExt:
9726 case Instruction::SExt:
9727 if (isa<ExtractElementInst>(I->getOperand(0)) ||
9728 isa<InsertElementInst>(I->getOperand(0)))
9729 return false;
9730 break;
9731
9732 // We can demote certain binary operations if we can demote both of their
9733 // operands.
9734 case Instruction::Add:
9735 case Instruction::Sub:
9736 case Instruction::Mul:
9737 case Instruction::And:
9738 case Instruction::Or:
9739 case Instruction::Xor:
9740 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
9741 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
9742 return false;
9743 break;
9744
9745 // We can demote selects if we can demote their true and false values.
9746 case Instruction::Select: {
9747 SelectInst *SI = cast<SelectInst>(I);
9748 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
9749 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
9750 return false;
9751 break;
9752 }
9753
9754 // We can demote phis if we can demote all their incoming operands. Note that
9755 // we don't need to worry about cycles since we ensure single use above.
9756 case Instruction::PHI: {
9757 PHINode *PN = cast<PHINode>(I);
9758 for (Value *IncValue : PN->incoming_values())
9759 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
9760 return false;
9761 break;
9762 }
9763
9764 // Otherwise, conservatively give up.
9765 default:
9766 return false;
9767 }
9768
9769 // Record the value that we can demote.
9770 ToDemote.push_back(V);
9771 return true;
9772 }
9773
9774 void BoUpSLP::computeMinimumValueSizes() {
9775 // If there are no external uses, the expression tree must be rooted by a
9776 // store. We can't demote in-memory values, so there is nothing to do here.
9777 if (ExternalUses.empty())
9778 return;
9779
9780 // We only attempt to truncate integer expressions.
9781 auto &TreeRoot = VectorizableTree[0]->Scalars;
9782 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
9783 if (!TreeRootIT)
9784 return;
9785
9786 // If the expression is not rooted by a store, these roots should have
9787 // external uses. We will rely on InstCombine to rewrite the expression in
9788 // the narrower type. However, InstCombine only rewrites single-use values.
9789 // This means that if a tree entry other than a root is used externally, it
9790 // must have multiple uses and InstCombine will not rewrite it. The code
9791 // below ensures that only the roots are used externally.
9792 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
9793 for (auto &EU : ExternalUses)
9794 if (!Expr.erase(EU.Scalar))
9795 return;
9796 if (!Expr.empty())
9797 return;
9798
9799 // Collect the scalar values of the vectorizable expression. We will use this
9800 // context to determine which values can be demoted. If we see a truncation,
9801 // we mark it as seeding another demotion.
9802 for (auto &EntryPtr : VectorizableTree)
9803 Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end());
9804
9805 // Ensure the roots of the vectorizable tree don't form a cycle. They must
9806 // have a single external user that is not in the vectorizable tree.
9807 for (auto *Root : TreeRoot)
9808 if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
9809 return;
9810
9811 // Conservatively determine if we can actually truncate the roots of the
9812 // expression. Collect the values that can be demoted in ToDemote and
9813 // additional roots that require investigating in Roots.
9814 SmallVector<Value *, 32> ToDemote;
9815 SmallVector<Value *, 4> Roots;
9816 for (auto *Root : TreeRoot)
9817 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
9818 return;
9819
9820 // The maximum bit width required to represent all the values that can be
9821 // demoted without loss of precision. It would be safe to truncate the roots
9822 // of the expression to this width.
9823 auto MaxBitWidth = 8u;
9824
9825 // We first check if all the bits of the roots are demanded. If they're not,
9826 // we can truncate the roots to this narrower type.
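// For example (illustrative), if a root's only user is
//   %t = and i32 %root, 65535
// DemandedBits reports 16 demanded bits, so MaxBitWidth becomes 16.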
9827 for (auto *Root : TreeRoot) {
9828 auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
9829 MaxBitWidth = std::max<unsigned>(
9830 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
9831 }
9832
9833 // True if the roots can be zero-extended back to their original type, rather
9834 // than sign-extended. We know that if the leading bits are not demanded, we
9835 // can safely zero-extend. So we initialize IsKnownPositive to True.
9836 bool IsKnownPositive = true;
9837
9838 // If all the bits of the roots are demanded, we can try a little harder to
9839 // compute a narrower type. This can happen, for example, if the roots are
9840 // getelementptr indices. InstCombine promotes these indices to the pointer
9841 // width. Thus, all their bits are technically demanded even though the
9842 // address computation might be vectorized in a smaller type.
9843 //
9844 // We start by looking at each entry that can be demoted. We compute the
9845 // maximum bit width required to store the scalar by using ValueTracking to
9846 // compute the number of high-order bits we can truncate.
9847 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
9848 llvm::all_of(TreeRoot, [](Value *R) {
9849 assert(R->hasOneUse() && "Root should have only one use!");
9850 return isa<GetElementPtrInst>(R->user_back());
9851 })) {
9852 MaxBitWidth = 8u;
9853
9854 // Determine if the sign bit of all the roots is known to be zero. If not,
9855 // IsKnownPositive is set to False.
9856 IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
9857 KnownBits Known = computeKnownBits(R, *DL);
9858 return Known.isNonNegative();
9859 });
9860
9861 // Determine the maximum number of bits required to store the scalar
9862 // values.
9863 for (auto *Scalar : ToDemote) {
9864 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
9865 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
9866 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
9867 }
9868
9869 // If we can't prove that the sign bit is zero, we must add one to the
9870 // maximum bit width to account for the unknown sign bit. This preserves
9871 // the existing sign bit so we can safely sign-extend the root back to the
9872 // original type. Otherwise, if we know the sign bit is zero, we will
9873 // zero-extend the root instead.
9874 //
9875 // FIXME: This is somewhat suboptimal, as there will be cases where adding
9876 // one to the maximum bit width will yield a larger-than-necessary
9877 // type. In general, we need to add an extra bit only if we can't
9878 // prove that the upper bit of the original type is equal to the
9879 // upper bit of the proposed smaller type. If these two bits are the
9880 // same (either zero or one) we know that sign-extending from the
9881 // smaller type will result in the same value. Here, since we can't
9882 // yet prove this, we are just making the proposed smaller type
9883 // larger to ensure correctness.
9884 if (!IsKnownPositive)
9885 ++MaxBitWidth;
9886 }
9887
9888 // Round MaxBitWidth up to the next power-of-two.
9889 if (!isPowerOf2_64(MaxBitWidth))
9890 MaxBitWidth = NextPowerOf2(MaxBitWidth);
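  // Worked example (editorial illustration with hypothetical values): if the
  // roots are i32 getelementptr indices and ValueTracking reports 20 sign
  // bits for every demoted scalar, MaxBitWidth becomes 32 - 20 = 12; if the
  // sign bit is not known to be zero it is bumped to 13, and the rounding
  // above then yields a proposed width of 16, which is narrower than 32 and
  // therefore allows the demotion below.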
9891
9892 // If the maximum bit width we compute is less than the width of the roots'
9893 // type, we can proceed with the narrowing. Otherwise, do nothing.
9894 if (MaxBitWidth >= TreeRootIT->getBitWidth())
9895 return;
9896
9897 // If we can truncate the root, we must collect additional values that might
9898 // be demoted as a result. That is, those seeded by truncations we will
9899 // modify.
9900 while (!Roots.empty())
9901 collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
9902
9903 // Finally, map the values we can demote to the maximum bit width we computed.
9904 for (auto *Scalar : ToDemote)
9905 MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
9906 }
9907
9908 namespace {
9909
9910 /// The SLPVectorizer Pass.
9911 struct SLPVectorizer : public FunctionPass {
9912 SLPVectorizerPass Impl;
9913
9914 /// Pass identification, replacement for typeid
9915 static char ID;
9916
9917   explicit SLPVectorizer() : FunctionPass(ID) {
9918 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
9919 }
9920
9921   bool doInitialization(Module &M) override { return false; }
9922
9923   bool runOnFunction(Function &F) override {
9924 if (skipFunction(F))
9925 return false;
9926
9927 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
9928 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
9929 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
9930 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
9931 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
9932 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
9933 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
9934 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
9935 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
9936 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
9937
9938 return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
9939 }
9940
9941   void getAnalysisUsage(AnalysisUsage &AU) const override {
9942 FunctionPass::getAnalysisUsage(AU);
9943 AU.addRequired<AssumptionCacheTracker>();
9944 AU.addRequired<ScalarEvolutionWrapperPass>();
9945 AU.addRequired<AAResultsWrapperPass>();
9946 AU.addRequired<TargetTransformInfoWrapperPass>();
9947 AU.addRequired<LoopInfoWrapperPass>();
9948 AU.addRequired<DominatorTreeWrapperPass>();
9949 AU.addRequired<DemandedBitsWrapperPass>();
9950 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
9951 AU.addRequired<InjectTLIMappingsLegacy>();
9952 AU.addPreserved<LoopInfoWrapperPass>();
9953 AU.addPreserved<DominatorTreeWrapperPass>();
9954 AU.addPreserved<AAResultsWrapperPass>();
9955 AU.addPreserved<GlobalsAAWrapperPass>();
9956 AU.setPreservesCFG();
9957 }
9958 };
9959
9960 } // end anonymous namespace
9961
9962 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
9963 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
9964 auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
9965 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
9966 auto *AA = &AM.getResult<AAManager>(F);
9967 auto *LI = &AM.getResult<LoopAnalysis>(F);
9968 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
9969 auto *AC = &AM.getResult<AssumptionAnalysis>(F);
9970 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
9971 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
9972
9973 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
9974 if (!Changed)
9975 return PreservedAnalyses::all();
9976
9977 PreservedAnalyses PA;
9978 PA.preserveSet<CFGAnalyses>();
9979 return PA;
9980 }
9981
9982 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
9983 TargetTransformInfo *TTI_,
9984 TargetLibraryInfo *TLI_, AAResults *AA_,
9985 LoopInfo *LI_, DominatorTree *DT_,
9986 AssumptionCache *AC_, DemandedBits *DB_,
9987 OptimizationRemarkEmitter *ORE_) {
9988 if (!RunSLPVectorization)
9989 return false;
9990 SE = SE_;
9991 TTI = TTI_;
9992 TLI = TLI_;
9993 AA = AA_;
9994 LI = LI_;
9995 DT = DT_;
9996 AC = AC_;
9997 DB = DB_;
9998 DL = &F.getParent()->getDataLayout();
9999
10000 Stores.clear();
10001 GEPs.clear();
10002 bool Changed = false;
10003
10004   // If the target claims to have no vector registers, don't attempt
10005 // vectorization.
10006 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) {
10007 LLVM_DEBUG(
10008 dbgs() << "SLP: Didn't find any vector registers for target, abort.\n");
10009 return false;
10010 }
10011
10012 // Don't vectorize when the attribute NoImplicitFloat is used.
10013 if (F.hasFnAttribute(Attribute::NoImplicitFloat))
10014 return false;
10015
10016 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
10017
10018 // Use the bottom up slp vectorizer to construct chains that start with
10019 // store instructions.
10020 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);
10021
10022 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
10023 // delete instructions.
10024
10025 // Update DFS numbers now so that we can use them for ordering.
10026 DT->updateDFSNumbers();
10027
10028 // Scan the blocks in the function in post order.
10029 for (auto BB : post_order(&F.getEntryBlock())) {
10030 // Start new block - clear the list of reduction roots.
10031 R.clearReductionData();
10032 collectSeedInstructions(BB);
10033
10034 // Vectorize trees that end at stores.
10035 if (!Stores.empty()) {
10036 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
10037 << " underlying objects.\n");
10038 Changed |= vectorizeStoreChains(R);
10039 }
10040
10041 // Vectorize trees that end at reductions.
10042 Changed |= vectorizeChainsInBlock(BB, R);
10043
10044 // Vectorize the index computations of getelementptr instructions. This
10045 // is primarily intended to catch gather-like idioms ending at
10046 // non-consecutive loads.
10047 if (!GEPs.empty()) {
10048 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
10049 << " underlying objects.\n");
10050 Changed |= vectorizeGEPIndices(BB, R);
10051 }
10052 }
10053
10054 if (Changed) {
10055 R.optimizeGatherSequence();
10056 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
10057 }
10058 return Changed;
10059 }
10060
10061 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
10062 unsigned Idx, unsigned MinVF) {
10063 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size()
10064 << "\n");
10065 const unsigned Sz = R.getVectorElementSize(Chain[0]);
10066 unsigned VF = Chain.size();
10067
10068 if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF)
10069 return false;
10070
10071 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx
10072 << "\n");
10073
10074 R.buildTree(Chain);
10075 if (R.isTreeTinyAndNotFullyVectorizable())
10076 return false;
10077 if (R.isLoadCombineCandidate())
10078 return false;
10079 R.reorderTopToBottom();
10080 R.reorderBottomToTop();
10081 R.buildExternalUses();
10082
10083 R.computeMinimumValueSizes();
10084
10085 InstructionCost Cost = R.getTreeCost();
10086
10087   LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF = " << VF << "\n");
10088 if (Cost < -SLPCostThreshold) {
10089 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n");
10090
10091 using namespace ore;
10092
10093 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
10094 cast<StoreInst>(Chain[0]))
10095 << "Stores SLP vectorized with cost " << NV("Cost", Cost)
10096 << " and with tree size "
10097 << NV("TreeSize", R.getTreeSize()));
10098
10099 R.vectorizeTree();
10100 return true;
10101 }
10102
10103 return false;
10104 }
10105
10106 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
10107 BoUpSLP &R) {
10108 // We may run into multiple chains that merge into a single chain. We mark the
10109 // stores that we vectorized so that we don't visit the same store twice.
10110 BoUpSLP::ValueSet VectorizedStores;
10111 bool Changed = false;
10112
10113 int E = Stores.size();
10114 SmallBitVector Tails(E, false);
10115 int MaxIter = MaxStoreLookup.getValue();
10116 SmallVector<std::pair<int, int>, 16> ConsecutiveChain(
10117 E, std::make_pair(E, INT_MAX));
10118 SmallVector<SmallBitVector, 4> CheckedPairs(E, SmallBitVector(E, false));
10119 int IterCnt;
10120 auto &&FindConsecutiveAccess = [this, &Stores, &Tails, &IterCnt, MaxIter,
10121 &CheckedPairs,
10122 &ConsecutiveChain](int K, int Idx) {
10123 if (IterCnt >= MaxIter)
10124 return true;
10125 if (CheckedPairs[Idx].test(K))
10126 return ConsecutiveChain[K].second == 1 &&
10127 ConsecutiveChain[K].first == Idx;
10128 ++IterCnt;
10129 CheckedPairs[Idx].set(K);
10130 CheckedPairs[K].set(Idx);
10131 Optional<int> Diff = getPointersDiff(
10132 Stores[K]->getValueOperand()->getType(), Stores[K]->getPointerOperand(),
10133 Stores[Idx]->getValueOperand()->getType(),
10134 Stores[Idx]->getPointerOperand(), *DL, *SE, /*StrictCheck=*/true);
10135 if (!Diff || *Diff == 0)
10136 return false;
10137 int Val = *Diff;
10138 if (Val < 0) {
10139 if (ConsecutiveChain[Idx].second > -Val) {
10140 Tails.set(K);
10141 ConsecutiveChain[Idx] = std::make_pair(K, -Val);
10142 }
10143 return false;
10144 }
10145 if (ConsecutiveChain[K].second <= Val)
10146 return false;
10147
10148 Tails.set(Idx);
10149 ConsecutiveChain[K] = std::make_pair(Idx, Val);
10150 return Val == 1;
10151 };
10152 // Do a quadratic search on all of the given stores in reverse order and find
10153 // all of the pairs of stores that follow each other.
10154 for (int Idx = E - 1; Idx >= 0; --Idx) {
10155 // If a store has multiple consecutive store candidates, search according
10156 // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ...
10157     // This is because pairing with the immediately succeeding or preceding
10158     // candidate usually gives the best chance of finding an SLP vectorization opportunity.
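    // For example (editorial illustration): with E = 5 and Idx = 2 the loop
    // below probes the stores at indices 1, 3, 0, 4 in that order, stopping
    // early once a store exactly one element away is found or the iteration
    // budget MaxIter is exhausted.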
10159 const int MaxLookDepth = std::max(E - Idx, Idx + 1);
10160 IterCnt = 0;
10161 for (int Offset = 1, F = MaxLookDepth; Offset < F; ++Offset)
10162 if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) ||
10163 (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx)))
10164 break;
10165 }
10166
10167 // Tracks if we tried to vectorize stores starting from the given tail
10168 // already.
10169 SmallBitVector TriedTails(E, false);
10170 // For stores that start but don't end a link in the chain:
10171 for (int Cnt = E; Cnt > 0; --Cnt) {
10172 int I = Cnt - 1;
10173 if (ConsecutiveChain[I].first == E || Tails.test(I))
10174 continue;
10175 // We found a store instr that starts a chain. Now follow the chain and try
10176 // to vectorize it.
10177 BoUpSLP::ValueList Operands;
10178 // Collect the chain into a list.
10179 while (I != E && !VectorizedStores.count(Stores[I])) {
10180 Operands.push_back(Stores[I]);
10181 Tails.set(I);
10182 if (ConsecutiveChain[I].second != 1) {
10183 // Mark the new end in the chain and go back, if required. It might be
10184 // required if the original stores come in reversed order, for example.
10185 if (ConsecutiveChain[I].first != E &&
10186 Tails.test(ConsecutiveChain[I].first) && !TriedTails.test(I) &&
10187 !VectorizedStores.count(Stores[ConsecutiveChain[I].first])) {
10188 TriedTails.set(I);
10189 Tails.reset(ConsecutiveChain[I].first);
10190 if (Cnt < ConsecutiveChain[I].first + 2)
10191 Cnt = ConsecutiveChain[I].first + 2;
10192 }
10193 break;
10194 }
10195 // Move to the next value in the chain.
10196 I = ConsecutiveChain[I].first;
10197 }
10198 assert(!Operands.empty() && "Expected non-empty list of stores.");
10199
10200 unsigned MaxVecRegSize = R.getMaxVecRegSize();
10201 unsigned EltSize = R.getVectorElementSize(Operands[0]);
10202 unsigned MaxElts = llvm::PowerOf2Floor(MaxVecRegSize / EltSize);
10203
10204 unsigned MaxVF = std::min(R.getMaximumVF(EltSize, Instruction::Store),
10205 MaxElts);
10206 auto *Store = cast<StoreInst>(Operands[0]);
10207 Type *StoreTy = Store->getValueOperand()->getType();
10208 Type *ValueTy = StoreTy;
10209 if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand()))
10210 ValueTy = Trunc->getSrcTy();
10211 unsigned MinVF = TTI->getStoreMinimumVF(
10212 R.getMinVF(DL->getTypeSizeInBits(ValueTy)), StoreTy, ValueTy);
10213
10214 // FIXME: Is division-by-2 the correct step? Should we assert that the
10215 // register size is a power-of-2?
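    // Illustrative walk-through (hypothetical sizes): with MaxVF = 8 and
    // MinVF = 2 the loop below tries slices of 8, then 4, then 2 consecutive
    // stores, advancing Cnt past every slice that was vectorized and bumping
    // StartIdx so an already-vectorized prefix is not revisited.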
10216 unsigned StartIdx = 0;
10217 for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) {
10218 for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) {
10219 ArrayRef<Value *> Slice = makeArrayRef(Operands).slice(Cnt, Size);
10220 if (!VectorizedStores.count(Slice.front()) &&
10221 !VectorizedStores.count(Slice.back()) &&
10222 vectorizeStoreChain(Slice, R, Cnt, MinVF)) {
10223 // Mark the vectorized stores so that we don't vectorize them again.
10224 VectorizedStores.insert(Slice.begin(), Slice.end());
10225 Changed = true;
10226           // If we vectorized the initial block, there is no need to try to
10227           // vectorize it again.
10228 if (Cnt == StartIdx)
10229 StartIdx += Size;
10230 Cnt += Size;
10231 continue;
10232 }
10233 ++Cnt;
10234 }
10235 // Check if the whole array was vectorized already - exit.
10236 if (StartIdx >= Operands.size())
10237 break;
10238 }
10239 }
10240
10241 return Changed;
10242 }
10243
10244 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
10245 // Initialize the collections. We will make a single pass over the block.
10246 Stores.clear();
10247 GEPs.clear();
10248
10249 // Visit the store and getelementptr instructions in BB and organize them in
10250 // Stores and GEPs according to the underlying objects of their pointer
10251 // operands.
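  // For instance (editorial illustration): stores to a[i], a[i+1] and a[i+2]
  // all share the underlying object 'a', so they land in the same Stores
  // bucket and are later examined together for consecutive accesses by
  // vectorizeStoreChains.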
10252 for (Instruction &I : *BB) {
10253 // Ignore store instructions that are volatile or have a pointer operand
10254 // that doesn't point to a scalar type.
10255 if (auto *SI = dyn_cast<StoreInst>(&I)) {
10256 if (!SI->isSimple())
10257 continue;
10258 if (!isValidElementType(SI->getValueOperand()->getType()))
10259 continue;
10260 Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI);
10261 }
10262
10263 // Ignore getelementptr instructions that have more than one index, a
10264 // constant index, or a pointer operand that doesn't point to a scalar
10265 // type.
10266 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
10267 auto Idx = GEP->idx_begin()->get();
10268 if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
10269 continue;
10270 if (!isValidElementType(Idx->getType()))
10271 continue;
10272 if (GEP->getType()->isVectorTy())
10273 continue;
10274 GEPs[GEP->getPointerOperand()].push_back(GEP);
10275 }
10276 }
10277 }
10278
10279 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
10280 if (!A || !B)
10281 return false;
10282 if (isa<InsertElementInst>(A) || isa<InsertElementInst>(B))
10283 return false;
10284 Value *VL[] = {A, B};
10285 return tryToVectorizeList(VL, R);
10286 }
10287
10288 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
10289 bool LimitForRegisterSize) {
10290 if (VL.size() < 2)
10291 return false;
10292
10293 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = "
10294 << VL.size() << ".\n");
10295
10296 // Check that all of the parts are instructions of the same type,
10297 // we permit an alternate opcode via InstructionsState.
10298 InstructionsState S = getSameOpcode(VL);
10299 if (!S.getOpcode())
10300 return false;
10301
10302 Instruction *I0 = cast<Instruction>(S.OpValue);
10303   // Make sure invalid types (including vector types) are rejected before
10304   // determining the vectorization factor for scalar instructions.
10305 for (Value *V : VL) {
10306 Type *Ty = V->getType();
10307 if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) {
10308       // NOTE: the following will give the user an internal LLVM type name,
10309       // which may not be useful.
10310 R.getORE()->emit([&]() {
10311 std::string type_str;
10312 llvm::raw_string_ostream rso(type_str);
10313 Ty->print(rso);
10314 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0)
10315 << "Cannot SLP vectorize list: type "
10316 << rso.str() + " is unsupported by vectorizer";
10317 });
10318 return false;
10319 }
10320 }
10321
10322 unsigned Sz = R.getVectorElementSize(I0);
10323 unsigned MinVF = R.getMinVF(Sz);
10324 unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
10325 MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF);
10326 if (MaxVF < 2) {
10327 R.getORE()->emit([&]() {
10328 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0)
10329 << "Cannot SLP vectorize list: vectorization factor "
10330 << "less than 2 is not supported";
10331 });
10332 return false;
10333 }
10334
10335 bool Changed = false;
10336 bool CandidateFound = false;
10337 InstructionCost MinCost = SLPCostThreshold.getValue();
10338 Type *ScalarTy = VL[0]->getType();
10339 if (auto *IE = dyn_cast<InsertElementInst>(VL[0]))
10340 ScalarTy = IE->getOperand(1)->getType();
10341
10342 unsigned NextInst = 0, MaxInst = VL.size();
10343 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) {
10344     // No actual vectorization should happen if the number of parts is the
10345     // same as the provided vectorization factor (i.e. the scalar type is
10346     // used for the vector code during codegen).
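    // E.g. (editorial illustration): a <4 x double> request on a target whose
    // widest legal vector holds a single double is legalized into four
    // single-element parts, so VF equals the number of parts and no real
    // vector code would be produced for that factor.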
10347 auto *VecTy = FixedVectorType::get(ScalarTy, VF);
10348 if (TTI->getNumberOfParts(VecTy) == VF)
10349 continue;
10350 for (unsigned I = NextInst; I < MaxInst; ++I) {
10351 unsigned OpsWidth = 0;
10352
10353 if (I + VF > MaxInst)
10354 OpsWidth = MaxInst - I;
10355 else
10356 OpsWidth = VF;
10357
10358 if (!isPowerOf2_32(OpsWidth))
10359 continue;
10360
10361 if ((LimitForRegisterSize && OpsWidth < MaxVF) ||
10362 (VF > MinVF && OpsWidth <= VF / 2) || (VF == MinVF && OpsWidth < 2))
10363 break;
10364
10365 ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
10366 // Check that a previous iteration of this loop did not delete the Value.
10367 if (llvm::any_of(Ops, [&R](Value *V) {
10368 auto *I = dyn_cast<Instruction>(V);
10369 return I && R.isDeleted(I);
10370 }))
10371 continue;
10372
10373 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
10374 << "\n");
10375
10376 R.buildTree(Ops);
10377 if (R.isTreeTinyAndNotFullyVectorizable())
10378 continue;
10379 R.reorderTopToBottom();
10380 R.reorderBottomToTop(!isa<InsertElementInst>(Ops.front()));
10381 R.buildExternalUses();
10382
10383 R.computeMinimumValueSizes();
10384 InstructionCost Cost = R.getTreeCost();
10385 CandidateFound = true;
10386 MinCost = std::min(MinCost, Cost);
10387
10388 if (Cost < -SLPCostThreshold) {
10389 LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
10390 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
10391 cast<Instruction>(Ops[0]))
10392 << "SLP vectorized with cost " << ore::NV("Cost", Cost)
10393 << " and with tree size "
10394 << ore::NV("TreeSize", R.getTreeSize()));
10395
10396 R.vectorizeTree();
10397 // Move to the next bundle.
10398 I += VF - 1;
10399 NextInst = I + 1;
10400 Changed = true;
10401 }
10402 }
10403 }
10404
10405 if (!Changed && CandidateFound) {
10406 R.getORE()->emit([&]() {
10407 return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0)
10408 << "List vectorization was possible but not beneficial with cost "
10409 << ore::NV("Cost", MinCost) << " >= "
10410 << ore::NV("Treshold", -SLPCostThreshold);
10411 });
10412 } else if (!Changed) {
10413 R.getORE()->emit([&]() {
10414 return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0)
10415 << "Cannot SLP vectorize list: vectorization was impossible"
10416 << " with available vectorization factors";
10417 });
10418 }
10419 return Changed;
10420 }
10421
10422 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
10423 if (!I)
10424 return false;
10425
10426 if ((!isa<BinaryOperator>(I) && !isa<CmpInst>(I)) ||
10427 isa<VectorType>(I->getType()))
10428 return false;
10429
10430 Value *P = I->getParent();
10431
10432 // Vectorize in current basic block only.
10433 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
10434 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
10435 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
10436 return false;
10437
10438 // First collect all possible candidates
10439 SmallVector<std::pair<Value *, Value *>, 4> Candidates;
10440 Candidates.emplace_back(Op0, Op1);
10441
10442 auto *A = dyn_cast<BinaryOperator>(Op0);
10443 auto *B = dyn_cast<BinaryOperator>(Op1);
10444 // Try to skip B.
10445 if (A && B && B->hasOneUse()) {
10446 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
10447 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
10448 if (B0 && B0->getParent() == P)
10449 Candidates.emplace_back(A, B0);
10450 if (B1 && B1->getParent() == P)
10451 Candidates.emplace_back(A, B1);
10452 }
10453 // Try to skip A.
10454 if (B && A && A->hasOneUse()) {
10455 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
10456 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
10457 if (A0 && A0->getParent() == P)
10458 Candidates.emplace_back(A0, B);
10459 if (A1 && A1->getParent() == P)
10460 Candidates.emplace_back(A1, B);
10461 }
10462
10463 if (Candidates.size() == 1)
10464 return tryToVectorizePair(Op0, Op1, R);
10465
10466 // We have multiple options. Try to pick the single best.
10467 Optional<int> BestCandidate = R.findBestRootPair(Candidates);
10468 if (!BestCandidate)
10469 return false;
10470 return tryToVectorizePair(Candidates[*BestCandidate].first,
10471 Candidates[*BestCandidate].second, R);
10472 }
10473
10474 namespace {
10475
10476 /// Model horizontal reductions.
10477 ///
10478 /// A horizontal reduction is a tree of reduction instructions that has values
10479 /// that can be put into a vector as its leaves. For example:
10480 ///
10481 /// mul mul mul mul
10482 /// \ / \ /
10483 /// + +
10484 /// \ /
10485 /// +
10486 /// This tree has "mul" as its leaf values and "+" as its reduction
10487 /// instructions. A reduction can feed into a store or a binary operation
10488 /// feeding a phi.
10489 /// ...
10490 /// \ /
10491 /// +
10492 /// |
10493 /// phi +=
10494 ///
10495 /// Or:
10496 /// ...
10497 /// \ /
10498 /// +
10499 /// |
10500 /// *p =
10501 ///
10502 class HorizontalReduction {
10503 using ReductionOpsType = SmallVector<Value *, 16>;
10504 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>;
10505 ReductionOpsListType ReductionOps;
10506 /// List of possibly reduced values.
10507 SmallVector<SmallVector<Value *>> ReducedVals;
10508 /// Maps reduced value to the corresponding reduction operation.
10509 DenseMap<Value *, SmallVector<Instruction *>> ReducedValsToOps;
10510 // Use map vector to make stable output.
10511 MapVector<Instruction *, Value *> ExtraArgs;
10512 WeakTrackingVH ReductionRoot;
10513 /// The type of reduction operation.
10514 RecurKind RdxKind;
10515
10516   static bool isCmpSelMinMax(Instruction *I) {
10517 return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) &&
10518 RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I));
10519 }
10520
10521 // And/or are potentially poison-safe logical patterns like:
10522 // select x, y, false
10523 // select x, true, y
10524   static bool isBoolLogicOp(Instruction *I) {
10525 return match(I, m_LogicalAnd(m_Value(), m_Value())) ||
10526 match(I, m_LogicalOr(m_Value(), m_Value()));
10527 }
10528
10529 /// Checks if instruction is associative and can be vectorized.
10530   static bool isVectorizable(RecurKind Kind, Instruction *I) {
10531 if (Kind == RecurKind::None)
10532 return false;
10533
10534 // Integer ops that map to select instructions or intrinsics are fine.
10535 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) ||
10536 isBoolLogicOp(I))
10537 return true;
10538
10539 if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) {
10540 // FP min/max are associative except for NaN and -0.0. We do not
10541 // have to rule out -0.0 here because the intrinsic semantics do not
10542 // specify a fixed result for it.
10543 return I->getFastMathFlags().noNaNs();
10544 }
10545
10546 return I->isAssociative();
10547 }
10548
10549   static Value *getRdxOperand(Instruction *I, unsigned Index) {
10550 // Poison-safe 'or' takes the form: select X, true, Y
10551 // To make that work with the normal operand processing, we skip the
10552 // true value operand.
10553 // TODO: Change the code and data structures to handle this without a hack.
10554 if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1)
10555 return I->getOperand(2);
10556 return I->getOperand(Index);
10557 }
10558
10559 /// Creates reduction operation with the current opcode.
10560   static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS,
10561 Value *RHS, const Twine &Name, bool UseSelect) {
10562 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind);
10563 switch (Kind) {
10564 case RecurKind::Or:
10565 if (UseSelect &&
10566 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType()))
10567 return Builder.CreateSelect(LHS, Builder.getTrue(), RHS, Name);
10568 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
10569 Name);
10570 case RecurKind::And:
10571 if (UseSelect &&
10572 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType()))
10573 return Builder.CreateSelect(LHS, RHS, Builder.getFalse(), Name);
10574 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
10575 Name);
10576 case RecurKind::Add:
10577 case RecurKind::Mul:
10578 case RecurKind::Xor:
10579 case RecurKind::FAdd:
10580 case RecurKind::FMul:
10581 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
10582 Name);
10583 case RecurKind::FMax:
10584 return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS);
10585 case RecurKind::FMin:
10586 return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS);
10587 case RecurKind::SMax:
10588 if (UseSelect) {
10589 Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name);
10590 return Builder.CreateSelect(Cmp, LHS, RHS, Name);
10591 }
10592 return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS);
10593 case RecurKind::SMin:
10594 if (UseSelect) {
10595 Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name);
10596 return Builder.CreateSelect(Cmp, LHS, RHS, Name);
10597 }
10598 return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS);
10599 case RecurKind::UMax:
10600 if (UseSelect) {
10601 Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name);
10602 return Builder.CreateSelect(Cmp, LHS, RHS, Name);
10603 }
10604 return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS);
10605 case RecurKind::UMin:
10606 if (UseSelect) {
10607 Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name);
10608 return Builder.CreateSelect(Cmp, LHS, RHS, Name);
10609 }
10610 return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS);
10611 default:
10612 llvm_unreachable("Unknown reduction operation.");
10613 }
10614 }
10615
10616 /// Creates reduction operation with the current opcode with the IR flags
10617 /// from \p ReductionOps, dropping nuw/nsw flags.
10618   static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS,
10619 Value *RHS, const Twine &Name,
10620 const ReductionOpsListType &ReductionOps) {
10621 bool UseSelect = ReductionOps.size() == 2 ||
10622 // Logical or/and.
10623 (ReductionOps.size() == 1 &&
10624 isa<SelectInst>(ReductionOps.front().front()));
10625 assert((!UseSelect || ReductionOps.size() != 2 ||
10626 isa<SelectInst>(ReductionOps[1][0])) &&
10627 "Expected cmp + select pairs for reduction");
10628 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect);
10629 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) {
10630 if (auto *Sel = dyn_cast<SelectInst>(Op)) {
10631 propagateIRFlags(Sel->getCondition(), ReductionOps[0], nullptr,
10632 /*IncludeWrapFlags=*/false);
10633 propagateIRFlags(Op, ReductionOps[1], nullptr,
10634 /*IncludeWrapFlags=*/false);
10635 return Op;
10636 }
10637 }
10638 propagateIRFlags(Op, ReductionOps[0], nullptr, /*IncludeWrapFlags=*/false);
10639 return Op;
10640 }
10641
10642   static RecurKind getRdxKind(Value *V) {
10643 auto *I = dyn_cast<Instruction>(V);
10644 if (!I)
10645 return RecurKind::None;
10646 if (match(I, m_Add(m_Value(), m_Value())))
10647 return RecurKind::Add;
10648 if (match(I, m_Mul(m_Value(), m_Value())))
10649 return RecurKind::Mul;
10650 if (match(I, m_And(m_Value(), m_Value())) ||
10651 match(I, m_LogicalAnd(m_Value(), m_Value())))
10652 return RecurKind::And;
10653 if (match(I, m_Or(m_Value(), m_Value())) ||
10654 match(I, m_LogicalOr(m_Value(), m_Value())))
10655 return RecurKind::Or;
10656 if (match(I, m_Xor(m_Value(), m_Value())))
10657 return RecurKind::Xor;
10658 if (match(I, m_FAdd(m_Value(), m_Value())))
10659 return RecurKind::FAdd;
10660 if (match(I, m_FMul(m_Value(), m_Value())))
10661 return RecurKind::FMul;
10662
10663 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value())))
10664 return RecurKind::FMax;
10665 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
10666 return RecurKind::FMin;
10667
10668 // This matches either cmp+select or intrinsics. SLP is expected to handle
10669 // either form.
10670 // TODO: If we are canonicalizing to intrinsics, we can remove several
10671 // special-case paths that deal with selects.
10672 if (match(I, m_SMax(m_Value(), m_Value())))
10673 return RecurKind::SMax;
10674 if (match(I, m_SMin(m_Value(), m_Value())))
10675 return RecurKind::SMin;
10676 if (match(I, m_UMax(m_Value(), m_Value())))
10677 return RecurKind::UMax;
10678 if (match(I, m_UMin(m_Value(), m_Value())))
10679 return RecurKind::UMin;
10680
10681 if (auto *Select = dyn_cast<SelectInst>(I)) {
10682 // Try harder: look for min/max pattern based on instructions producing
10683 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2).
10684 // During the intermediate stages of SLP, it's very common to have
10685 // pattern like this (since optimizeGatherSequence is run only once
10686 // at the end):
10687 // %1 = extractelement <2 x i32> %a, i32 0
10688 // %2 = extractelement <2 x i32> %a, i32 1
10689 // %cond = icmp sgt i32 %1, %2
10690 // %3 = extractelement <2 x i32> %a, i32 0
10691 // %4 = extractelement <2 x i32> %a, i32 1
10692 // %select = select i1 %cond, i32 %3, i32 %4
10693 CmpInst::Predicate Pred;
10694 Instruction *L1;
10695 Instruction *L2;
10696
10697 Value *LHS = Select->getTrueValue();
10698 Value *RHS = Select->getFalseValue();
10699 Value *Cond = Select->getCondition();
10700
10701 // TODO: Support inverse predicates.
10702 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) {
10703 if (!isa<ExtractElementInst>(RHS) ||
10704 !L2->isIdenticalTo(cast<Instruction>(RHS)))
10705 return RecurKind::None;
10706 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) {
10707 if (!isa<ExtractElementInst>(LHS) ||
10708 !L1->isIdenticalTo(cast<Instruction>(LHS)))
10709 return RecurKind::None;
10710 } else {
10711 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS))
10712 return RecurKind::None;
10713 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) ||
10714 !L1->isIdenticalTo(cast<Instruction>(LHS)) ||
10715 !L2->isIdenticalTo(cast<Instruction>(RHS)))
10716 return RecurKind::None;
10717 }
10718
10719 switch (Pred) {
10720 default:
10721 return RecurKind::None;
10722 case CmpInst::ICMP_SGT:
10723 case CmpInst::ICMP_SGE:
10724 return RecurKind::SMax;
10725 case CmpInst::ICMP_SLT:
10726 case CmpInst::ICMP_SLE:
10727 return RecurKind::SMin;
10728 case CmpInst::ICMP_UGT:
10729 case CmpInst::ICMP_UGE:
10730 return RecurKind::UMax;
10731 case CmpInst::ICMP_ULT:
10732 case CmpInst::ICMP_ULE:
10733 return RecurKind::UMin;
10734 }
10735 }
10736 return RecurKind::None;
10737 }
10738
10739 /// Get the index of the first operand.
10740   static unsigned getFirstOperandIndex(Instruction *I) {
10741 return isCmpSelMinMax(I) ? 1 : 0;
10742 }
10743
10744 /// Total number of operands in the reduction operation.
10745   static unsigned getNumberOfOperands(Instruction *I) {
10746 return isCmpSelMinMax(I) ? 3 : 2;
10747 }
10748
10749 /// Checks if the instruction is in basic block \p BB.
10750 /// For a cmp+sel min/max reduction check that both ops are in \p BB.
10751   static bool hasSameParent(Instruction *I, BasicBlock *BB) {
10752 if (isCmpSelMinMax(I) || (isBoolLogicOp(I) && isa<SelectInst>(I))) {
10753 auto *Sel = cast<SelectInst>(I);
10754 auto *Cmp = dyn_cast<Instruction>(Sel->getCondition());
10755 return Sel->getParent() == BB && Cmp && Cmp->getParent() == BB;
10756 }
10757 return I->getParent() == BB;
10758 }
10759
10760 /// Expected number of uses for reduction operations/reduced values.
10761   static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) {
10762 if (IsCmpSelMinMax) {
10763 // SelectInst must be used twice while the condition op must have single
10764 // use only.
10765 if (auto *Sel = dyn_cast<SelectInst>(I))
10766 return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse();
10767 return I->hasNUses(2);
10768 }
10769
10770 // Arithmetic reduction operation must be used once only.
10771 return I->hasOneUse();
10772 }
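  // For instance (editorial illustration): in a chained smax reduction
  //   %c = icmp sgt i32 %x, %acc
  //   %s = select i1 %c, i32 %x, i32 %acc   ; feeds the next icmp and select
  // the select has exactly two uses and its condition exactly one, which is
  // what hasRequiredNumberOfUses above demands for cmp+select min/max chains.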
10773
10774 /// Initializes the list of reduction operations.
10775   void initReductionOps(Instruction *I) {
10776 if (isCmpSelMinMax(I))
10777 ReductionOps.assign(2, ReductionOpsType());
10778 else
10779 ReductionOps.assign(1, ReductionOpsType());
10780 }
10781
10782 /// Add all reduction operations for the reduction instruction \p I.
10783   void addReductionOps(Instruction *I) {
10784 if (isCmpSelMinMax(I)) {
10785 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition());
10786 ReductionOps[1].emplace_back(I);
10787 } else {
10788 ReductionOps[0].emplace_back(I);
10789 }
10790 }
10791
10792   static Value *getLHS(RecurKind Kind, Instruction *I) {
10793 if (Kind == RecurKind::None)
10794 return nullptr;
10795 return I->getOperand(getFirstOperandIndex(I));
10796 }
10797   static Value *getRHS(RecurKind Kind, Instruction *I) {
10798 if (Kind == RecurKind::None)
10799 return nullptr;
10800 return I->getOperand(getFirstOperandIndex(I) + 1);
10801 }
10802
10803 public:
10804 HorizontalReduction() = default;
10805
10806 /// Try to find a reduction tree.
10807   bool matchAssociativeReduction(PHINode *Phi, Instruction *Inst,
10808 ScalarEvolution &SE, const DataLayout &DL,
10809 const TargetLibraryInfo &TLI) {
10810 assert((!Phi || is_contained(Phi->operands(), Inst)) &&
10811 "Phi needs to use the binary operator");
10812 assert((isa<BinaryOperator>(Inst) || isa<SelectInst>(Inst) ||
10813 isa<IntrinsicInst>(Inst)) &&
10814 "Expected binop, select, or intrinsic for reduction matching");
10815 RdxKind = getRdxKind(Inst);
10816
10817     // We could have an initial reduction that is not an add.
10818 // r *= v1 + v2 + v3 + v4
10819 // In such a case start looking for a tree rooted in the first '+'.
10820 if (Phi) {
10821 if (getLHS(RdxKind, Inst) == Phi) {
10822 Phi = nullptr;
10823 Inst = dyn_cast<Instruction>(getRHS(RdxKind, Inst));
10824 if (!Inst)
10825 return false;
10826 RdxKind = getRdxKind(Inst);
10827 } else if (getRHS(RdxKind, Inst) == Phi) {
10828 Phi = nullptr;
10829 Inst = dyn_cast<Instruction>(getLHS(RdxKind, Inst));
10830 if (!Inst)
10831 return false;
10832 RdxKind = getRdxKind(Inst);
10833 }
10834 }
10835
10836 if (!isVectorizable(RdxKind, Inst))
10837 return false;
10838
10839 // Analyze "regular" integer/FP types for reductions - no target-specific
10840 // types or pointers.
10841 Type *Ty = Inst->getType();
10842 if (!isValidElementType(Ty) || Ty->isPointerTy())
10843 return false;
10844
10845     // Though the ultimate reduction may have multiple uses, its condition
10846     // must have only a single use.
10847 if (auto *Sel = dyn_cast<SelectInst>(Inst))
10848 if (!Sel->getCondition()->hasOneUse())
10849 return false;
10850
10851 ReductionRoot = Inst;
10852
10853 // Iterate through all the operands of the possible reduction tree and
10854 // gather all the reduced values, sorting them by their value id.
10855 BasicBlock *BB = Inst->getParent();
10856 bool IsCmpSelMinMax = isCmpSelMinMax(Inst);
10857 SmallVector<Instruction *> Worklist(1, Inst);
10858 // Checks if the operands of the \p TreeN instruction are also reduction
10859 // operations or should be treated as reduced values or an extra argument,
10860 // which is not part of the reduction.
10861 auto &&CheckOperands = [this, IsCmpSelMinMax,
10862 BB](Instruction *TreeN,
10863 SmallVectorImpl<Value *> &ExtraArgs,
10864 SmallVectorImpl<Value *> &PossibleReducedVals,
10865 SmallVectorImpl<Instruction *> &ReductionOps) {
10866 for (int I = getFirstOperandIndex(TreeN),
10867 End = getNumberOfOperands(TreeN);
10868 I < End; ++I) {
10869 Value *EdgeVal = getRdxOperand(TreeN, I);
10870 ReducedValsToOps[EdgeVal].push_back(TreeN);
10871 auto *EdgeInst = dyn_cast<Instruction>(EdgeVal);
10872 // Edge has wrong parent - mark as an extra argument.
10873 if (EdgeInst && !isVectorLikeInstWithConstOps(EdgeInst) &&
10874 !hasSameParent(EdgeInst, BB)) {
10875 ExtraArgs.push_back(EdgeVal);
10876 continue;
10877 }
10878 // If the edge is not an instruction, or it is different from the main
10879 // reduction opcode or has too many uses - possible reduced value.
10880 if (!EdgeInst || getRdxKind(EdgeInst) != RdxKind ||
10881 IsCmpSelMinMax != isCmpSelMinMax(EdgeInst) ||
10882 !hasRequiredNumberOfUses(IsCmpSelMinMax, EdgeInst) ||
10883 !isVectorizable(getRdxKind(EdgeInst), EdgeInst)) {
10884 PossibleReducedVals.push_back(EdgeVal);
10885 continue;
10886 }
10887 ReductionOps.push_back(EdgeInst);
10888 }
10889 };
10890 // Try to regroup reduced values so that it gets more profitable to try to
10891 // reduce them. Values are grouped by their value ids, instructions - by
10892 // instruction op id and/or alternate op id, plus do extra analysis for
10893     // loads (grouping them by the distance between pointers) and cmp
10894 // instructions (grouping them by the predicate).
10895 MapVector<size_t, MapVector<size_t, MapVector<Value *, unsigned>>>
10896 PossibleReducedVals;
10897 initReductionOps(Inst);
10898 while (!Worklist.empty()) {
10899 Instruction *TreeN = Worklist.pop_back_val();
10900 SmallVector<Value *> Args;
10901 SmallVector<Value *> PossibleRedVals;
10902 SmallVector<Instruction *> PossibleReductionOps;
10903 CheckOperands(TreeN, Args, PossibleRedVals, PossibleReductionOps);
10904 // If too many extra args - mark the instruction itself as a reduction
10905 // value, not a reduction operation.
10906 if (Args.size() < 2) {
10907 addReductionOps(TreeN);
10908 // Add extra args.
10909 if (!Args.empty()) {
10910 assert(Args.size() == 1 && "Expected only single argument.");
10911 ExtraArgs[TreeN] = Args.front();
10912 }
10913 // Add reduction values. The values are sorted for better vectorization
10914 // results.
10915 for (Value *V : PossibleRedVals) {
10916 size_t Key, Idx;
10917 std::tie(Key, Idx) = generateKeySubkey(
10918 V, &TLI,
10919 [&PossibleReducedVals, &DL, &SE](size_t Key, LoadInst *LI) {
10920 auto It = PossibleReducedVals.find(Key);
10921 if (It != PossibleReducedVals.end()) {
10922 for (const auto &LoadData : It->second) {
10923 auto *RLI = cast<LoadInst>(LoadData.second.front().first);
10924 if (getPointersDiff(RLI->getType(),
10925 RLI->getPointerOperand(), LI->getType(),
10926 LI->getPointerOperand(), DL, SE,
10927 /*StrictCheck=*/true))
10928 return hash_value(RLI->getPointerOperand());
10929 }
10930 }
10931 return hash_value(LI->getPointerOperand());
10932 },
10933 /*AllowAlternate=*/false);
10934 ++PossibleReducedVals[Key][Idx]
10935 .insert(std::make_pair(V, 0))
10936 .first->second;
10937 }
10938 Worklist.append(PossibleReductionOps.rbegin(),
10939 PossibleReductionOps.rend());
10940 } else {
10941 size_t Key, Idx;
10942 std::tie(Key, Idx) = generateKeySubkey(
10943 TreeN, &TLI,
10944 [&PossibleReducedVals, &DL, &SE](size_t Key, LoadInst *LI) {
10945 auto It = PossibleReducedVals.find(Key);
10946 if (It != PossibleReducedVals.end()) {
10947 for (const auto &LoadData : It->second) {
10948 auto *RLI = cast<LoadInst>(LoadData.second.front().first);
10949 if (getPointersDiff(RLI->getType(), RLI->getPointerOperand(),
10950 LI->getType(), LI->getPointerOperand(),
10951 DL, SE, /*StrictCheck=*/true))
10952 return hash_value(RLI->getPointerOperand());
10953 }
10954 }
10955 return hash_value(LI->getPointerOperand());
10956 },
10957 /*AllowAlternate=*/false);
10958 ++PossibleReducedVals[Key][Idx]
10959 .insert(std::make_pair(TreeN, 0))
10960 .first->second;
10961 }
10962 }
10963 auto PossibleReducedValsVect = PossibleReducedVals.takeVector();
10964     // Sort values by the total number of value kinds so that the reduction
10965     // starts from the longest possible sequences of reduced values.
10966 for (auto &PossibleReducedVals : PossibleReducedValsVect) {
10967 auto PossibleRedVals = PossibleReducedVals.second.takeVector();
10968 SmallVector<SmallVector<Value *>> PossibleRedValsVect;
10969 for (auto It = PossibleRedVals.begin(), E = PossibleRedVals.end();
10970 It != E; ++It) {
10971 PossibleRedValsVect.emplace_back();
10972 auto RedValsVect = It->second.takeVector();
10973 stable_sort(RedValsVect, llvm::less_second());
10974 for (const std::pair<Value *, unsigned> &Data : RedValsVect)
10975 PossibleRedValsVect.back().append(Data.second, Data.first);
10976 }
10977 stable_sort(PossibleRedValsVect, [](const auto &P1, const auto &P2) {
10978 return P1.size() > P2.size();
10979 });
10980 ReducedVals.emplace_back();
10981 for (ArrayRef<Value *> Data : PossibleRedValsVect)
10982 ReducedVals.back().append(Data.rbegin(), Data.rend());
10983 }
10984     // Sort the reduced values by the number of same/alternate opcodes and/or
10985     // pointer operands.
10986 stable_sort(ReducedVals, [](ArrayRef<Value *> P1, ArrayRef<Value *> P2) {
10987 return P1.size() > P2.size();
10988 });
10989 return true;
10990 }
10991
10992 /// Attempt to vectorize the tree found by matchAssociativeReduction.
10993   Value *tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
10994 constexpr int ReductionLimit = 4;
10995 constexpr unsigned RegMaxNumber = 4;
10996 constexpr unsigned RedValsMaxNumber = 128;
10997 // If there are a sufficient number of reduction values, reduce
10998 // to a nearby power-of-2. We can safely generate oversized
10999 // vectors and rely on the backend to split them to legal sizes.
11000 unsigned NumReducedVals = std::accumulate(
11001 ReducedVals.begin(), ReducedVals.end(), 0,
11002 [](int Num, ArrayRef<Value *> Vals) { return Num + Vals.size(); });
11003 if (NumReducedVals < ReductionLimit)
11004 return nullptr;
11005
11006 IRBuilder<> Builder(cast<Instruction>(ReductionRoot));
11007
11008     // Track the reduced values in case they are replaced by extractelement
11009     // instructions because of the vectorization.
11010 DenseMap<Value *, WeakTrackingVH> TrackedVals;
11011 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
11012 // The same extra argument may be used several times, so log each attempt
11013 // to use it.
11014 for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) {
11015 assert(Pair.first && "DebugLoc must be set.");
11016 ExternallyUsedValues[Pair.second].push_back(Pair.first);
11017 TrackedVals.try_emplace(Pair.second, Pair.second);
11018 }
11019
11020 // The compare instruction of a min/max is the insertion point for new
11021 // instructions and may be replaced with a new compare instruction.
11022 auto &&GetCmpForMinMaxReduction = [](Instruction *RdxRootInst) {
11023 assert(isa<SelectInst>(RdxRootInst) &&
11024 "Expected min/max reduction to have select root instruction");
11025 Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition();
11026 assert(isa<Instruction>(ScalarCond) &&
11027 "Expected min/max reduction to have compare condition");
11028 return cast<Instruction>(ScalarCond);
11029 };
11030
11031 // The reduction root is used as the insertion point for new instructions,
11032 // so set it as externally used to prevent it from being deleted.
11033 ExternallyUsedValues[ReductionRoot];
11034 SmallDenseSet<Value *> IgnoreList;
11035 for (ReductionOpsType &RdxOps : ReductionOps)
11036 for (Value *RdxOp : RdxOps) {
11037 if (!RdxOp)
11038 continue;
11039 IgnoreList.insert(RdxOp);
11040 }
11041 bool IsCmpSelMinMax = isCmpSelMinMax(cast<Instruction>(ReductionRoot));
11042
11043 // Need to track reduced vals, they may be changed during vectorization of
11044 // subvectors.
11045 for (ArrayRef<Value *> Candidates : ReducedVals)
11046 for (Value *V : Candidates)
11047 TrackedVals.try_emplace(V, V);
11048
11049 DenseMap<Value *, unsigned> VectorizedVals;
11050 Value *VectorizedTree = nullptr;
11051 bool CheckForReusedReductionOps = false;
11052 // Try to vectorize elements based on their type.
11053 for (unsigned I = 0, E = ReducedVals.size(); I < E; ++I) {
11054 ArrayRef<Value *> OrigReducedVals = ReducedVals[I];
11055 InstructionsState S = getSameOpcode(OrigReducedVals);
11056 SmallVector<Value *> Candidates;
11057 DenseMap<Value *, Value *> TrackedToOrig;
11058 for (unsigned Cnt = 0, Sz = OrigReducedVals.size(); Cnt < Sz; ++Cnt) {
11059 Value *RdxVal = TrackedVals.find(OrigReducedVals[Cnt])->second;
11060         // Check if the reduction value was not overridden by the extractelement
11061 // instruction because of the vectorization and exclude it, if it is not
11062 // compatible with other values.
11063 if (auto *Inst = dyn_cast<Instruction>(RdxVal))
11064 if (isVectorLikeInstWithConstOps(Inst) &&
11065 (!S.getOpcode() || !S.isOpcodeOrAlt(Inst)))
11066 continue;
11067 Candidates.push_back(RdxVal);
11068 TrackedToOrig.try_emplace(RdxVal, OrigReducedVals[Cnt]);
11069 }
11070 bool ShuffledExtracts = false;
11071 // Try to handle shuffled extractelements.
11072 if (S.getOpcode() == Instruction::ExtractElement && !S.isAltShuffle() &&
11073 I + 1 < E) {
11074 InstructionsState NextS = getSameOpcode(ReducedVals[I + 1]);
11075 if (NextS.getOpcode() == Instruction::ExtractElement &&
11076 !NextS.isAltShuffle()) {
11077 SmallVector<Value *> CommonCandidates(Candidates);
11078 for (Value *RV : ReducedVals[I + 1]) {
11079 Value *RdxVal = TrackedVals.find(RV)->second;
11080             // Check if the reduction value was not overridden by the
11081 // extractelement instruction because of the vectorization and
11082 // exclude it, if it is not compatible with other values.
11083 if (auto *Inst = dyn_cast<Instruction>(RdxVal))
11084 if (!NextS.getOpcode() || !NextS.isOpcodeOrAlt(Inst))
11085 continue;
11086 CommonCandidates.push_back(RdxVal);
11087 TrackedToOrig.try_emplace(RdxVal, RV);
11088 }
11089 SmallVector<int> Mask;
11090 if (isFixedVectorShuffle(CommonCandidates, Mask)) {
11091 ++I;
11092 Candidates.swap(CommonCandidates);
11093 ShuffledExtracts = true;
11094 }
11095 }
11096 }
11097 unsigned NumReducedVals = Candidates.size();
11098 if (NumReducedVals < ReductionLimit)
11099 continue;
11100
11101 unsigned MaxVecRegSize = V.getMaxVecRegSize();
11102 unsigned EltSize = V.getVectorElementSize(Candidates[0]);
11103 unsigned MaxElts = RegMaxNumber * PowerOf2Floor(MaxVecRegSize / EltSize);
11104
11105 unsigned ReduxWidth = std::min<unsigned>(
11106 PowerOf2Floor(NumReducedVals), std::max(RedValsMaxNumber, MaxElts));
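      // Illustrative numbers (hypothetical): with 10 compatible reduced
      // values, a 256-bit register and 32-bit elements, PowerOf2Floor(10) = 8
      // and MaxElts = 4 * 8 = 32, so the first attempt below uses
      // ReduxWidth = 8 and the remaining two values are folded into the
      // scalar tail reduction emitted after this loop.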
11107 unsigned Start = 0;
11108 unsigned Pos = Start;
11109 // Restarts vectorization attempt with lower vector factor.
11110 unsigned PrevReduxWidth = ReduxWidth;
11111 bool CheckForReusedReductionOpsLocal = false;
11112 auto &&AdjustReducedVals = [&Pos, &Start, &ReduxWidth, NumReducedVals,
11113 &CheckForReusedReductionOpsLocal,
11114 &PrevReduxWidth, &V,
11115 &IgnoreList](bool IgnoreVL = false) {
11116 bool IsAnyRedOpGathered = !IgnoreVL && V.isAnyGathered(IgnoreList);
11117 if (!CheckForReusedReductionOpsLocal && PrevReduxWidth == ReduxWidth) {
11118           // Check if any of the reduction ops are gathered. If so, it is worth
11119           // trying again with a smaller number of reduction ops.
11120 CheckForReusedReductionOpsLocal |= IsAnyRedOpGathered;
11121 }
11122 ++Pos;
11123 if (Pos < NumReducedVals - ReduxWidth + 1)
11124 return IsAnyRedOpGathered;
11125 Pos = Start;
11126 ReduxWidth /= 2;
11127 return IsAnyRedOpGathered;
11128 };
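      // E.g. (editorial illustration): with NumReducedVals = 8 and
      // ReduxWidth = 8 there is a single full-width window at Pos = 0; if it
      // is rejected, the lambda above resets Pos and halves ReduxWidth, so
      // width-4 windows starting at positions 0 through 4 are tried next.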
11129 while (Pos < NumReducedVals - ReduxWidth + 1 &&
11130 ReduxWidth >= ReductionLimit) {
11131 // Dependency in tree of the reduction ops - drop this attempt, try
11132 // later.
11133 if (CheckForReusedReductionOpsLocal && PrevReduxWidth != ReduxWidth &&
11134 Start == 0) {
11135 CheckForReusedReductionOps = true;
11136 break;
11137 }
11138 PrevReduxWidth = ReduxWidth;
11139 ArrayRef<Value *> VL(std::next(Candidates.begin(), Pos), ReduxWidth);
11140         // Already analyzed - skip.
11141 if (V.areAnalyzedReductionVals(VL)) {
11142 (void)AdjustReducedVals(/*IgnoreVL=*/true);
11143 continue;
11144 }
11145 // Early exit if any of the reduction values were deleted during
11146 // previous vectorization attempts.
11147 if (any_of(VL, [&V](Value *RedVal) {
11148 auto *RedValI = dyn_cast<Instruction>(RedVal);
11149 if (!RedValI)
11150 return false;
11151 return V.isDeleted(RedValI);
11152 }))
11153 break;
11154 V.buildTree(VL, IgnoreList);
11155 if (V.isTreeTinyAndNotFullyVectorizable(/*ForReduction=*/true)) {
11156 if (!AdjustReducedVals())
11157 V.analyzedReductionVals(VL);
11158 continue;
11159 }
11160 if (V.isLoadCombineReductionCandidate(RdxKind)) {
11161 if (!AdjustReducedVals())
11162 V.analyzedReductionVals(VL);
11163 continue;
11164 }
11165 V.reorderTopToBottom();
11166 // No need to reorder the root node at all.
11167 V.reorderBottomToTop(/*IgnoreReorder=*/true);
11168 // Keep extracted other reduction values, if they are used in the
11169 // vectorization trees.
11170 BoUpSLP::ExtraValueToDebugLocsMap LocalExternallyUsedValues(
11171 ExternallyUsedValues);
11172 for (unsigned Cnt = 0, Sz = ReducedVals.size(); Cnt < Sz; ++Cnt) {
11173 if (Cnt == I || (ShuffledExtracts && Cnt == I - 1))
11174 continue;
11175 for_each(ReducedVals[Cnt],
11176 [&LocalExternallyUsedValues, &TrackedVals](Value *V) {
11177 if (isa<Instruction>(V))
11178 LocalExternallyUsedValues[TrackedVals[V]];
11179 });
11180 }
11181 // Number of uses of the candidates in the vector of values.
11182 SmallDenseMap<Value *, unsigned> NumUses;
11183 for (unsigned Cnt = 0; Cnt < Pos; ++Cnt) {
11184 Value *V = Candidates[Cnt];
11185 if (NumUses.count(V) > 0)
11186 continue;
11187 NumUses[V] = std::count(VL.begin(), VL.end(), V);
11188 }
11189 for (unsigned Cnt = Pos + ReduxWidth; Cnt < NumReducedVals; ++Cnt) {
11190 Value *V = Candidates[Cnt];
11191 if (NumUses.count(V) > 0)
11192 continue;
11193 NumUses[V] = std::count(VL.begin(), VL.end(), V);
11194 }
11195 // Gather externally used values.
11196 SmallPtrSet<Value *, 4> Visited;
11197 for (unsigned Cnt = 0; Cnt < Pos; ++Cnt) {
11198 Value *V = Candidates[Cnt];
11199 if (!Visited.insert(V).second)
11200 continue;
11201 unsigned NumOps = VectorizedVals.lookup(V) + NumUses[V];
11202 if (NumOps != ReducedValsToOps.find(V)->second.size())
11203 LocalExternallyUsedValues[V];
11204 }
11205 for (unsigned Cnt = Pos + ReduxWidth; Cnt < NumReducedVals; ++Cnt) {
11206 Value *V = Candidates[Cnt];
11207 if (!Visited.insert(V).second)
11208 continue;
11209 unsigned NumOps = VectorizedVals.lookup(V) + NumUses[V];
11210 if (NumOps != ReducedValsToOps.find(V)->second.size())
11211 LocalExternallyUsedValues[V];
11212 }
11213 V.buildExternalUses(LocalExternallyUsedValues);
11214
11215 V.computeMinimumValueSizes();
11216
11217 // Intersect the fast-math-flags from all reduction operations.
11218 FastMathFlags RdxFMF;
11219 RdxFMF.set();
11220 for (Value *U : IgnoreList)
11221 if (auto *FPMO = dyn_cast<FPMathOperator>(U))
11222 RdxFMF &= FPMO->getFastMathFlags();
11223 // Estimate cost.
11224 InstructionCost TreeCost = V.getTreeCost(VL);
11225 InstructionCost ReductionCost =
11226 getReductionCost(TTI, VL, ReduxWidth, RdxFMF);
11227 InstructionCost Cost = TreeCost + ReductionCost;
11228 if (!Cost.isValid()) {
11229 LLVM_DEBUG(dbgs() << "Encountered invalid baseline cost.\n");
11230 return nullptr;
11231 }
11232 if (Cost >= -SLPCostThreshold) {
11233 V.getORE()->emit([&]() {
11234 return OptimizationRemarkMissed(
11235 SV_NAME, "HorSLPNotBeneficial",
11236 ReducedValsToOps.find(VL[0])->second.front())
11237 << "Vectorizing horizontal reduction is possible "
11238 << "but not beneficial with cost " << ore::NV("Cost", Cost)
11239 << " and threshold "
11240 << ore::NV("Threshold", -SLPCostThreshold);
11241 });
11242 if (!AdjustReducedVals())
11243 V.analyzedReductionVals(VL);
11244 continue;
11245 }
11246
11247 LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
11248 << Cost << ". (HorRdx)\n");
11249 V.getORE()->emit([&]() {
11250 return OptimizationRemark(
11251 SV_NAME, "VectorizedHorizontalReduction",
11252 ReducedValsToOps.find(VL[0])->second.front())
11253 << "Vectorized horizontal reduction with cost "
11254 << ore::NV("Cost", Cost) << " and with tree size "
11255 << ore::NV("TreeSize", V.getTreeSize());
11256 });
11257
11258 Builder.setFastMathFlags(RdxFMF);
11259
11260 // Vectorize a tree.
11261 Value *VectorizedRoot = V.vectorizeTree(LocalExternallyUsedValues);
11262
11263 // Emit a reduction. If the root is a select (min/max idiom), the insert
11264 // point is the compare condition of that select.
11265 Instruction *RdxRootInst = cast<Instruction>(ReductionRoot);
11266 if (IsCmpSelMinMax)
11267 Builder.SetInsertPoint(GetCmpForMinMaxReduction(RdxRootInst));
11268 else
11269 Builder.SetInsertPoint(RdxRootInst);
11270
11271 // To prevent poison from leaking across what used to be sequential,
11272 // safe, scalar boolean logic operations, the reduction operand must be
11273 // frozen.
11274 if (isa<SelectInst>(RdxRootInst) && isBoolLogicOp(RdxRootInst))
11275 VectorizedRoot = Builder.CreateFreeze(VectorizedRoot);
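        // One possible shape of the result (editorial illustration): a chain
        // of scalar "select %c, %x, false" ops is poison-safe because it
        // short-circuits, so the vectorized operand is frozen first, e.g.
        //   %f = freeze <4 x i1> %vroot
        //   %r = call i1 @llvm.vector.reduce.and.v4i1(<4 x i1> %f)
        // to keep poison in one lane from leaking into the reduced value.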
11276
11277 Value *ReducedSubTree =
11278 emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);
11279
11280 if (!VectorizedTree) {
11281 // Initialize the final value in the reduction.
11282 VectorizedTree = ReducedSubTree;
11283 } else {
11284 // Update the final value in the reduction.
11285 Builder.SetCurrentDebugLocation(
11286 cast<Instruction>(ReductionOps.front().front())->getDebugLoc());
11287 VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
11288 ReducedSubTree, "op.rdx", ReductionOps);
11289 }
11290 // Count vectorized reduced values to exclude them from final reduction.
11291 for (Value *V : VL)
11292 ++VectorizedVals.try_emplace(TrackedToOrig.find(V)->second, 0)
11293 .first->getSecond();
11294 Pos += ReduxWidth;
11295 Start = Pos;
11296 ReduxWidth = PowerOf2Floor(NumReducedVals - Pos);
11297 }
11298 }
11299 if (VectorizedTree) {
11300 // Finish the reduction.
11301       // Need to add the extra arguments and the possible reduction values
11302       // that were not vectorized.
11303 // Try to avoid dependencies between the scalar remainders after
11304 // reductions.
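// FinalGen below pairs up the remaining (instruction, value) entries and
// emits one scalar reduction op per pair, carrying an odd leftover entry
// forward, so each call roughly halves the list; e.g. 5 leftover values
// become 3, then 2, then 1 across the calls made by the loop further down.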
11305 auto &&FinalGen =
11306 [this, &Builder,
11307 &TrackedVals](ArrayRef<std::pair<Instruction *, Value *>> InstVals) {
11308 unsigned Sz = InstVals.size();
11309 SmallVector<std::pair<Instruction *, Value *>> ExtraReds(Sz / 2 +
11310 Sz % 2);
11311 for (unsigned I = 0, E = (Sz / 2) * 2; I < E; I += 2) {
11312 Instruction *RedOp = InstVals[I + 1].first;
11313 Builder.SetCurrentDebugLocation(RedOp->getDebugLoc());
11314 Value *RdxVal1 = InstVals[I].second;
11315 Value *StableRdxVal1 = RdxVal1;
11316 auto It1 = TrackedVals.find(RdxVal1);
11317 if (It1 != TrackedVals.end())
11318 StableRdxVal1 = It1->second;
11319 Value *RdxVal2 = InstVals[I + 1].second;
11320 Value *StableRdxVal2 = RdxVal2;
11321 auto It2 = TrackedVals.find(RdxVal2);
11322 if (It2 != TrackedVals.end())
11323 StableRdxVal2 = It2->second;
11324 Value *ExtraRed = createOp(Builder, RdxKind, StableRdxVal1,
11325 StableRdxVal2, "op.rdx", ReductionOps);
11326 ExtraReds[I / 2] = std::make_pair(InstVals[I].first, ExtraRed);
11327 }
11328 if (Sz % 2 == 1)
11329 ExtraReds[Sz / 2] = InstVals.back();
11330 return ExtraReds;
11331 };
11332 SmallVector<std::pair<Instruction *, Value *>> ExtraReductions;
11333 SmallPtrSet<Value *, 8> Visited;
11334 for (ArrayRef<Value *> Candidates : ReducedVals) {
11335 for (Value *RdxVal : Candidates) {
11336 if (!Visited.insert(RdxVal).second)
11337 continue;
11338 unsigned NumOps = VectorizedVals.lookup(RdxVal);
11339 for (Instruction *RedOp :
11340 makeArrayRef(ReducedValsToOps.find(RdxVal)->second)
11341 .drop_back(NumOps))
11342 ExtraReductions.emplace_back(RedOp, RdxVal);
11343 }
11344 }
11345 for (auto &Pair : ExternallyUsedValues) {
11346 // Add each externally used value to the final reduction.
11347 for (auto *I : Pair.second)
11348 ExtraReductions.emplace_back(I, Pair.first);
11349 }
11350 // Iterate through all not-vectorized reduction values/extra arguments.
11351 while (ExtraReductions.size() > 1) {
11352 SmallVector<std::pair<Instruction *, Value *>> NewReds =
11353 FinalGen(ExtraReductions);
11354 ExtraReductions.swap(NewReds);
11355 }
11356 // Final reduction.
11357 if (ExtraReductions.size() == 1) {
11358 Instruction *RedOp = ExtraReductions.back().first;
11359 Builder.SetCurrentDebugLocation(RedOp->getDebugLoc());
11360 Value *RdxVal = ExtraReductions.back().second;
11361 Value *StableRdxVal = RdxVal;
11362 auto It = TrackedVals.find(RdxVal);
11363 if (It != TrackedVals.end())
11364 StableRdxVal = It->second;
11365 VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
11366 StableRdxVal, "op.rdx", ReductionOps);
11367 }
11368
11369 ReductionRoot->replaceAllUsesWith(VectorizedTree);
11370
11371 // The original scalar reduction is expected to have no remaining
11372 // uses outside the reduction tree itself. Assert that we got this
11373 // correct, replace internal uses with undef, and mark for eventual
11374 // deletion.
11375 #ifndef NDEBUG
11376 SmallSet<Value *, 4> IgnoreSet;
11377 for (ArrayRef<Value *> RdxOps : ReductionOps)
11378 IgnoreSet.insert(RdxOps.begin(), RdxOps.end());
11379 #endif
11380 for (ArrayRef<Value *> RdxOps : ReductionOps) {
11381 for (Value *Ignore : RdxOps) {
11382 if (!Ignore)
11383 continue;
11384 #ifndef NDEBUG
11385 for (auto *U : Ignore->users()) {
11386 assert(IgnoreSet.count(U) &&
11387 "All users must be either in the reduction ops list.");
11388 }
11389 #endif
11390 if (!Ignore->use_empty()) {
11391 Value *Undef = UndefValue::get(Ignore->getType());
11392 Ignore->replaceAllUsesWith(Undef);
11393 }
11394 V.eraseInstruction(cast<Instruction>(Ignore));
11395 }
11396 }
11397 } else if (!CheckForReusedReductionOps) {
11398 for (ReductionOpsType &RdxOps : ReductionOps)
11399 for (Value *RdxOp : RdxOps)
11400 V.analyzedReductionRoot(cast<Instruction>(RdxOp));
11401 }
11402 return VectorizedTree;
11403 }
11404
11405 private:
11406 /// Calculate the cost of a reduction.
11407 InstructionCost getReductionCost(TargetTransformInfo *TTI,
11408 ArrayRef<Value *> ReducedVals,
11409 unsigned ReduxWidth, FastMathFlags FMF) {
11410 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
11411 Value *FirstReducedVal = ReducedVals.front();
11412 Type *ScalarTy = FirstReducedVal->getType();
11413 FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth);
11414 InstructionCost VectorCost = 0, ScalarCost;
11415 // If all of the reduced values are constant, the vector cost is 0, since
11416 // the reduction value can be calculated at compile time.
11417 bool AllConsts = all_of(ReducedVals, isConstant);
11418 switch (RdxKind) {
11419 case RecurKind::Add:
11420 case RecurKind::Mul:
11421 case RecurKind::Or:
11422 case RecurKind::And:
11423 case RecurKind::Xor:
11424 case RecurKind::FAdd:
11425 case RecurKind::FMul: {
11426 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind);
11427 if (!AllConsts)
11428 VectorCost =
11429 TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF, CostKind);
11430 ScalarCost = TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy, CostKind);
11431 break;
11432 }
11433 case RecurKind::FMax:
11434 case RecurKind::FMin: {
11435 auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy);
11436 if (!AllConsts) {
11437 auto *VecCondTy =
11438 cast<VectorType>(CmpInst::makeCmpResultType(VectorTy));
11439 VectorCost =
11440 TTI->getMinMaxReductionCost(VectorTy, VecCondTy,
11441 /*IsUnsigned=*/false, CostKind);
11442 }
11443 CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind);
11444 ScalarCost = TTI->getCmpSelInstrCost(Instruction::FCmp, ScalarTy,
11445 SclCondTy, RdxPred, CostKind) +
11446 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
11447 SclCondTy, RdxPred, CostKind);
11448 break;
11449 }
11450 case RecurKind::SMax:
11451 case RecurKind::SMin:
11452 case RecurKind::UMax:
11453 case RecurKind::UMin: {
11454 auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy);
11455 if (!AllConsts) {
11456 auto *VecCondTy =
11457 cast<VectorType>(CmpInst::makeCmpResultType(VectorTy));
11458 bool IsUnsigned =
11459 RdxKind == RecurKind::UMax || RdxKind == RecurKind::UMin;
11460 VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy,
11461 IsUnsigned, CostKind);
11462 }
11463 CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind);
11464 ScalarCost = TTI->getCmpSelInstrCost(Instruction::ICmp, ScalarTy,
11465 SclCondTy, RdxPred, CostKind) +
11466 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
11467 SclCondTy, RdxPred, CostKind);
11468 break;
11469 }
11470 default:
11471 llvm_unreachable("Expected arithmetic or min/max reduction operation");
11472 }
11473
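// The returned cost models replacing (ReduxWidth - 1) scalar reduction ops
// with a single vector reduction; e.g. for a 4-wide integer add reduction
// this compares the cost of one vector reduce against three scalar adds.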
11474 // Scalar cost is repeated for N-1 elements.
11475 ScalarCost *= (ReduxWidth - 1);
11476 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost
11477 << " for reduction that starts with " << *FirstReducedVal
11478 << " (It is a splitting reduction)\n");
11479 return VectorCost - ScalarCost;
11480 }
11481
11482 /// Emit a horizontal reduction of the vectorized value.
11483 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder,
11484 unsigned ReduxWidth, const TargetTransformInfo *TTI) {
11485 assert(VectorizedValue && "Need to have a vectorized tree node");
11486 assert(isPowerOf2_32(ReduxWidth) &&
11487 "We only handle power-of-two reductions for now");
11488 assert(RdxKind != RecurKind::FMulAdd &&
11489 "A call to the llvm.fmuladd intrinsic is not handled yet");
11490
11491 ++NumVectorInstructions;
11492 return createSimpleTargetReduction(Builder, TTI, VectorizedValue, RdxKind);
11493 }
11494 };
11495
11496 } // end anonymous namespace
11497
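/// Return the total number of scalar elements in the aggregate that
/// \p InsertInst builds, walking nested struct/array/vector types. For
/// example, the homogeneous aggregate type {<2 x float>, <2 x float>}
/// yields 4. Returns None for non-homogeneous aggregates.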
11498 static Optional<unsigned> getAggregateSize(Instruction *InsertInst) {
11499 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst))
11500 return cast<FixedVectorType>(IE->getType())->getNumElements();
11501
11502 unsigned AggregateSize = 1;
11503 auto *IV = cast<InsertValueInst>(InsertInst);
11504 Type *CurrentType = IV->getType();
11505 do {
11506 if (auto *ST = dyn_cast<StructType>(CurrentType)) {
11507 for (auto *Elt : ST->elements())
11508 if (Elt != ST->getElementType(0)) // check homogeneity
11509 return None;
11510 AggregateSize *= ST->getNumElements();
11511 CurrentType = ST->getElementType(0);
11512 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
11513 AggregateSize *= AT->getNumElements();
11514 CurrentType = AT->getElementType();
11515 } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) {
11516 AggregateSize *= VT->getNumElements();
11517 return AggregateSize;
11518 } else if (CurrentType->isSingleValueType()) {
11519 return AggregateSize;
11520 } else {
11521 return None;
11522 }
11523 } while (true);
11524 }
11525
11526 static void findBuildAggregate_rec(Instruction *LastInsertInst,
11527 TargetTransformInfo *TTI,
11528 SmallVectorImpl<Value *> &BuildVectorOpds,
11529 SmallVectorImpl<Value *> &InsertElts,
11530 unsigned OperandOffset) {
11531 do {
11532 Value *InsertedOperand = LastInsertInst->getOperand(1);
11533 Optional<unsigned> OperandIndex =
11534 getInsertIndex(LastInsertInst, OperandOffset);
11535 if (!OperandIndex)
11536 return;
11537 if (isa<InsertElementInst>(InsertedOperand) ||
11538 isa<InsertValueInst>(InsertedOperand)) {
11539 findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI,
11540 BuildVectorOpds, InsertElts, *OperandIndex);
11541
11542 } else {
11543 BuildVectorOpds[*OperandIndex] = InsertedOperand;
11544 InsertElts[*OperandIndex] = LastInsertInst;
11545 }
11546 LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0));
11547 } while (LastInsertInst != nullptr &&
11548 (isa<InsertValueInst>(LastInsertInst) ||
11549 isa<InsertElementInst>(LastInsertInst)) &&
11550 LastInsertInst->hasOneUse());
11551 }
11552
11553 /// Recognize construction of vectors like
11554 /// %ra = insertelement <4 x float> poison, float %s0, i32 0
11555 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1
11556 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2
11557 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3
11558 /// starting from the last insertelement or insertvalue instruction.
11559 ///
11560 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>},
11561 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on.
11562 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples.
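/// An analogous insertvalue chain over a homogeneous aggregate, e.g.
///   %ra = insertvalue [4 x float] poison, float %s0, 0
///   %rb = insertvalue [4 x float] %ra, float %s1, 1
/// (illustrative; continued through index 3), is recognized the same way.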
11563 ///
11564 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type.
11565 ///
11566 /// \return true if it matches.
11567 static bool findBuildAggregate(Instruction *LastInsertInst,
11568 TargetTransformInfo *TTI,
11569 SmallVectorImpl<Value *> &BuildVectorOpds,
11570 SmallVectorImpl<Value *> &InsertElts) {
11571
11572 assert((isa<InsertElementInst>(LastInsertInst) ||
11573 isa<InsertValueInst>(LastInsertInst)) &&
11574 "Expected insertelement or insertvalue instruction!");
11575
11576 assert((BuildVectorOpds.empty() && InsertElts.empty()) &&
11577 "Expected empty result vectors!");
11578
11579 Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst);
11580 if (!AggregateSize)
11581 return false;
11582 BuildVectorOpds.resize(*AggregateSize);
11583 InsertElts.resize(*AggregateSize);
11584
11585 findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 0);
11586 llvm::erase_value(BuildVectorOpds, nullptr);
11587 llvm::erase_value(InsertElts, nullptr);
11588 if (BuildVectorOpds.size() >= 2)
11589 return true;
11590
11591 return false;
11592 }
11593
11594 /// Try and get a reduction value from a phi node.
11595 ///
11596 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions
11597 /// if they come from either \p ParentBB or a containing loop latch.
11598 ///
11599 /// \returns A candidate reduction value if possible, or \code nullptr \endcode
11600 /// if not possible.
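/// For example, for a canonical reduction phi (value and block names here
/// are illustrative):
///   %sum = phi i32 [ 0, %entry ], [ %sum.next, %loop ]
/// where %loop is the loop latch, %sum.next is returned as the candidate
/// reduction value.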
11601 static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
11602 BasicBlock *ParentBB, LoopInfo *LI) {
11603 // There are situations where the reduction value is not dominated by the
11604 // reduction phi. Vectorizing such cases has been reported to cause
11605 // miscompiles. See PR25787.
11606 auto DominatedReduxValue = [&](Value *R) {
11607 return isa<Instruction>(R) &&
11608 DT->dominates(P->getParent(), cast<Instruction>(R)->getParent());
11609 };
11610
11611 Value *Rdx = nullptr;
11612
11613 // Return the incoming value if it comes from the same BB as the phi node.
11614 if (P->getIncomingBlock(0) == ParentBB) {
11615 Rdx = P->getIncomingValue(0);
11616 } else if (P->getIncomingBlock(1) == ParentBB) {
11617 Rdx = P->getIncomingValue(1);
11618 }
11619
11620 if (Rdx && DominatedReduxValue(Rdx))
11621 return Rdx;
11622
11623 // Otherwise, check whether we have a loop latch to look at.
11624 Loop *BBL = LI->getLoopFor(ParentBB);
11625 if (!BBL)
11626 return nullptr;
11627 BasicBlock *BBLatch = BBL->getLoopLatch();
11628 if (!BBLatch)
11629 return nullptr;
11630
11631 // There is a loop latch, return the incoming value if it comes from
11632 // that. This reduction pattern occasionally turns up.
11633 if (P->getIncomingBlock(0) == BBLatch) {
11634 Rdx = P->getIncomingValue(0);
11635 } else if (P->getIncomingBlock(1) == BBLatch) {
11636 Rdx = P->getIncomingValue(1);
11637 }
11638
11639 if (Rdx && DominatedReduxValue(Rdx))
11640 return Rdx;
11641
11642 return nullptr;
11643 }
11644
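/// Match a reduction root operation: either a plain binary operator or one
/// of the maxnum/minnum/smax/smin/umax/umin intrinsics, capturing its two
/// operands in \p V0 and \p V1.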
11645 static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) {
11646 if (match(I, m_BinOp(m_Value(V0), m_Value(V1))))
11647 return true;
11648 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1))))
11649 return true;
11650 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1))))
11651 return true;
11652 if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1))))
11653 return true;
11654 if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1))))
11655 return true;
11656 if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1))))
11657 return true;
11658 if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1))))
11659 return true;
11660 return false;
11661 }
11662
11663 /// Attempt to reduce a horizontal reduction.
11664 /// If it is legal to match a horizontal reduction feeding the phi node \a P
11665 /// with reduction operators \a Root (or one of its operands) in a basic block
11666 /// \a BB, then check if it can be done. If a horizontal reduction is not found
11667 /// and the root instruction is a binary operation, vectorization of its
11668 /// operands is attempted.
11669 /// \returns true if a horizontal reduction was matched and reduced or operands
11670 /// of one of the binary instructions were vectorized.
11671 /// \returns false if a horizontal reduction was not matched (or not possible)
11672 /// or no vectorization of any binary operation feeding \a Root instruction was
11673 /// performed.
11674 static bool tryToVectorizeHorReductionOrInstOperands(
11675 PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
11676 TargetTransformInfo *TTI, ScalarEvolution &SE, const DataLayout &DL,
11677 const TargetLibraryInfo &TLI,
11678 const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
11679 if (!ShouldVectorizeHor)
11680 return false;
11681
11682 if (!Root)
11683 return false;
11684
11685 if (Root->getParent() != BB || isa<PHINode>(Root))
11686 return false;
11687 // Start the analysis from the Root instruction. If a horizontal reduction is
11688 // found, try to vectorize it. If it is not a horizontal reduction or
11689 // vectorization is not possible or not effective, and currently analyzed
11690 // instruction is a binary operation, try to vectorize the operands, using
11691 // pre-order DFS traversal order. If the operands were not vectorized, repeat
11692 // the same procedure considering each operand as a possible root of the
11693 // horizontal reduction.
11694 // Interrupt the process if the Root instruction itself was vectorized or all
11695 // sub-trees no deeper than RecursionMaxDepth were analyzed/vectorized.
11696 // Skip the analysis of CmpInsts. The compiler performs a separate
11697 // post-analysis of CmpInsts, so we can skip extra attempts in
11698 // tryToVectorizeHorReductionOrInstOperands and save compile time.
11699 std::queue<std::pair<Instruction *, unsigned>> Stack;
11700 Stack.emplace(Root, 0);
11701 SmallPtrSet<Value *, 8> VisitedInstrs;
11702 SmallVector<WeakTrackingVH> PostponedInsts;
11703 bool Res = false;
11704 auto &&TryToReduce = [TTI, &SE, &DL, &P, &R, &TLI](Instruction *Inst,
11705 Value *&B0,
11706 Value *&B1) -> Value * {
11707 if (R.isAnalyzedReductionRoot(Inst))
11708 return nullptr;
11709 bool IsBinop = matchRdxBop(Inst, B0, B1);
11710 bool IsSelect = match(Inst, m_Select(m_Value(), m_Value(), m_Value()));
11711 if (IsBinop || IsSelect) {
11712 HorizontalReduction HorRdx;
11713 if (HorRdx.matchAssociativeReduction(P, Inst, SE, DL, TLI))
11714 return HorRdx.tryToReduce(R, TTI);
11715 }
11716 return nullptr;
11717 };
11718 while (!Stack.empty()) {
11719 Instruction *Inst;
11720 unsigned Level;
11721 std::tie(Inst, Level) = Stack.front();
11722 Stack.pop();
11723 // Do not try to analyze an instruction that has already been vectorized.
11724 // This may happen when we vectorize instruction operands on a previous
11725 // iteration while the stack was populated before that happened.
11726 if (R.isDeleted(Inst))
11727 continue;
11728 Value *B0 = nullptr, *B1 = nullptr;
11729 if (Value *V = TryToReduce(Inst, B0, B1)) {
11730 Res = true;
11731 // Set P to nullptr to avoid re-analysis of phi node in
11732 // matchAssociativeReduction function unless this is the root node.
11733 P = nullptr;
11734 if (auto *I = dyn_cast<Instruction>(V)) {
11735 // Try to find another reduction.
11736 Stack.emplace(I, Level);
11737 continue;
11738 }
11739 } else {
11740 bool IsBinop = B0 && B1;
11741 if (P && IsBinop) {
11742 Inst = dyn_cast<Instruction>(B0);
11743 if (Inst == P)
11744 Inst = dyn_cast<Instruction>(B1);
11745 if (!Inst) {
11746 // Set P to nullptr to avoid re-analysis of phi node in
11747 // matchAssociativeReduction function unless this is the root node.
11748 P = nullptr;
11749 continue;
11750 }
11751 }
11752 // Set P to nullptr to avoid re-analysis of phi node in
11753 // matchAssociativeReduction function unless this is the root node.
11754 P = nullptr;
11755 // Do not try to vectorize CmpInst operands, this is done separately.
11756 // Final attempt for binop args vectorization should happen after the loop
11757 // to try to find reductions.
11758 if (!isa<CmpInst, InsertElementInst, InsertValueInst>(Inst))
11759 PostponedInsts.push_back(Inst);
11760 }
11761
11762 // Try to vectorize operands.
11763 // Continue analysis for the instruction from the same basic block only to
11764 // save compile time.
11765 if (++Level < RecursionMaxDepth)
11766 for (auto *Op : Inst->operand_values())
11767 if (VisitedInstrs.insert(Op).second)
11768 if (auto *I = dyn_cast<Instruction>(Op))
11769 // Do not try to vectorize CmpInst operands, this is done
11770 // separately.
11771 if (!isa<PHINode, CmpInst, InsertElementInst, InsertValueInst>(I) &&
11772 !R.isDeleted(I) && I->getParent() == BB)
11773 Stack.emplace(I, Level);
11774 }
11775 // Try to vectorize binops where reductions were not found.
11776 for (Value *V : PostponedInsts)
11777 if (auto *Inst = dyn_cast<Instruction>(V))
11778 if (!R.isDeleted(Inst))
11779 Res |= Vectorize(Inst, R);
11780 return Res;
11781 }
11782
11783 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
11784 BasicBlock *BB, BoUpSLP &R,
11785 TargetTransformInfo *TTI) {
11786 auto *I = dyn_cast_or_null<Instruction>(V);
11787 if (!I)
11788 return false;
11789
11790 if (!isa<BinaryOperator>(I))
11791 P = nullptr;
11792 // Try to match and vectorize a horizontal reduction.
11793 auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool {
11794 return tryToVectorize(I, R);
11795 };
11796 return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI, *SE, *DL,
11797 *TLI, ExtraVectorization);
11798 }
11799
11800 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
11801 BasicBlock *BB, BoUpSLP &R) {
11802 const DataLayout &DL = BB->getModule()->getDataLayout();
11803 if (!R.canMapToVector(IVI->getType(), DL))
11804 return false;
11805
11806 SmallVector<Value *, 16> BuildVectorOpds;
11807 SmallVector<Value *, 16> BuildVectorInsts;
11808 if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts))
11809 return false;
11810
11811 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
11812 // Aggregate value is unlikely to be processed in a vector register.
11813 return tryToVectorizeList(BuildVectorOpds, R);
11814 }
11815
11816 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
11817 BasicBlock *BB, BoUpSLP &R) {
11818 SmallVector<Value *, 16> BuildVectorInsts;
11819 SmallVector<Value *, 16> BuildVectorOpds;
11820 SmallVector<int> Mask;
11821 if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) ||
11822 (llvm::all_of(
11823 BuildVectorOpds,
11824 [](Value *V) { return isa<ExtractElementInst, UndefValue>(V); }) &&
11825 isFixedVectorShuffle(BuildVectorOpds, Mask)))
11826 return false;
11827
11828 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
11829 return tryToVectorizeList(BuildVectorInsts, R);
11830 }
11831
11832 template <typename T>
11833 static bool
11834 tryToVectorizeSequence(SmallVectorImpl<T *> &Incoming,
11835 function_ref<unsigned(T *)> Limit,
11836 function_ref<bool(T *, T *)> Comparator,
11837 function_ref<bool(T *, T *)> AreCompatible,
11838 function_ref<bool(ArrayRef<T *>, bool)> TryToVectorizeHelper,
11839 bool LimitForRegisterSize) {
11840 bool Changed = false;
11841 // Sort by type, parent, operands.
11842 stable_sort(Incoming, Comparator);
11843
11844 // Try to vectorize elements based on their type.
11845 SmallVector<T *> Candidates;
11846 for (auto *IncIt = Incoming.begin(), *E = Incoming.end(); IncIt != E;) {
11847 // Look for the next elements with the same type, parent and operand
11848 // kinds.
11849 auto *SameTypeIt = IncIt;
11850 while (SameTypeIt != E && AreCompatible(*SameTypeIt, *IncIt))
11851 ++SameTypeIt;
11852
11853 // Try to vectorize them.
11854 unsigned NumElts = (SameTypeIt - IncIt);
11855 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at nodes ("
11856 << NumElts << ")\n");
11857 // The vectorization is a 3-step attempt:
11858 // 1. Try to vectorize instructions with the same/alternate opcodes at the
11859 // size of the maximal register first.
11860 // 2. Try to vectorize remaining instructions with the same type, if
11861 // possible. This may produce better results than vectorizing only
11862 // instructions with the same/alternate opcodes.
11863 // 3. Make a final attempt to vectorize all instructions with the
11864 // same/alternate ops only; this may yield some extra final
11865 // vectorization.
11866 if (NumElts > 1 &&
11867 TryToVectorizeHelper(makeArrayRef(IncIt, NumElts), LimitForRegisterSize)) {
11868 // Success. Start over because instructions might have been changed.
11869 Changed = true;
11870 } else if (NumElts < Limit(*IncIt) &&
11871 (Candidates.empty() ||
11872 Candidates.front()->getType() == (*IncIt)->getType())) {
11873 Candidates.append(IncIt, std::next(IncIt, NumElts));
11874 }
11875 // Final attempt to vectorize instructions with the same types.
11876 if (Candidates.size() > 1 &&
11877 (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType())) {
11878 if (TryToVectorizeHelper(Candidates, /*LimitForRegisterSize=*/false)) {
11879 // Success. Start over because instructions might have been changed.
11880 Changed = true;
11881 } else if (LimitForRegisterSize) {
11882 // Try to vectorize using small vectors.
11883 for (auto *It = Candidates.begin(), *End = Candidates.end();
11884 It != End;) {
11885 auto *SameTypeIt = It;
11886 while (SameTypeIt != End && AreCompatible(*SameTypeIt, *It))
11887 ++SameTypeIt;
11888 unsigned NumElts = (SameTypeIt - It);
11889 if (NumElts > 1 && TryToVectorizeHelper(makeArrayRef(It, NumElts),
11890 /*LimitForRegisterSize=*/false))
11891 Changed = true;
11892 It = SameTypeIt;
11893 }
11894 }
11895 Candidates.clear();
11896 }
11897
11898 // Start over at the next instruction of a different type (or the end).
11899 IncIt = SameTypeIt;
11900 }
11901 return Changed;
11902 }
11903
11904 /// Compare two cmp instructions. If IsCompatibility is true, the function
11905 /// returns true if the two cmps have same/swapped predicates and compatible
11906 /// corresponding operands. If IsCompatibility is false, it implements a strict weak
11907 /// ordering relation between two cmp instructions, returning true if the first
11908 /// instruction is "less" than the second, i.e. its predicate is less than the
11909 /// predicate of the second or the operands IDs are less than the operands IDs
11910 /// of the second cmp instruction.
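/// Predicates are normalized through their swapped form below, so e.g. an
/// 'sgt' compare and an 'slt' compare with swapped operands are treated as
/// having the same base predicate.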
11911 template <bool IsCompatibility>
11912 static bool compareCmp(Value *V, Value *V2,
11913 function_ref<bool(Instruction *)> IsDeleted) {
11914 auto *CI1 = cast<CmpInst>(V);
11915 auto *CI2 = cast<CmpInst>(V2);
11916 if (IsDeleted(CI2) || !isValidElementType(CI2->getType()))
11917 return false;
11918 if (CI1->getOperand(0)->getType()->getTypeID() <
11919 CI2->getOperand(0)->getType()->getTypeID())
11920 return !IsCompatibility;
11921 if (CI1->getOperand(0)->getType()->getTypeID() >
11922 CI2->getOperand(0)->getType()->getTypeID())
11923 return false;
11924 CmpInst::Predicate Pred1 = CI1->getPredicate();
11925 CmpInst::Predicate Pred2 = CI2->getPredicate();
11926 CmpInst::Predicate SwapPred1 = CmpInst::getSwappedPredicate(Pred1);
11927 CmpInst::Predicate SwapPred2 = CmpInst::getSwappedPredicate(Pred2);
11928 CmpInst::Predicate BasePred1 = std::min(Pred1, SwapPred1);
11929 CmpInst::Predicate BasePred2 = std::min(Pred2, SwapPred2);
11930 if (BasePred1 < BasePred2)
11931 return !IsCompatibility;
11932 if (BasePred1 > BasePred2)
11933 return false;
11934 // Compare operands.
11935 bool LEPreds = Pred1 <= Pred2;
11936 bool GEPreds = Pred1 >= Pred2;
11937 for (int I = 0, E = CI1->getNumOperands(); I < E; ++I) {
11938 auto *Op1 = CI1->getOperand(LEPreds ? I : E - I - 1);
11939 auto *Op2 = CI2->getOperand(GEPreds ? I : E - I - 1);
11940 if (Op1->getValueID() < Op2->getValueID())
11941 return !IsCompatibility;
11942 if (Op1->getValueID() > Op2->getValueID())
11943 return false;
11944 if (auto *I1 = dyn_cast<Instruction>(Op1))
11945 if (auto *I2 = dyn_cast<Instruction>(Op2)) {
11946 if (I1->getParent() != I2->getParent())
11947 return false;
11948 InstructionsState S = getSameOpcode({I1, I2});
11949 if (S.getOpcode())
11950 continue;
11951 return false;
11952 }
11953 }
11954 return IsCompatibility;
11955 }
11956
11957 bool SLPVectorizerPass::vectorizeSimpleInstructions(
11958 SmallVectorImpl<Instruction *> &Instructions, BasicBlock *BB, BoUpSLP &R,
11959 bool AtTerminator) {
11960 bool OpsChanged = false;
11961 SmallVector<Instruction *, 4> PostponedCmps;
11962 for (auto *I : reverse(Instructions)) {
11963 if (R.isDeleted(I))
11964 continue;
11965 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) {
11966 OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
11967 } else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) {
11968 OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
11969 } else if (isa<CmpInst>(I)) {
11970 PostponedCmps.push_back(I);
11971 continue;
11972 }
11973 // Try to find reductions in buildvector sequences.
11974 OpsChanged |= vectorizeRootInstruction(nullptr, I, BB, R, TTI);
11975 }
11976 if (AtTerminator) {
11977 // Try to find reductions first.
11978 for (Instruction *I : PostponedCmps) {
11979 if (R.isDeleted(I))
11980 continue;
11981 for (Value *Op : I->operands())
11982 OpsChanged |= vectorizeRootInstruction(nullptr, Op, BB, R, TTI);
11983 }
11984 // Try to vectorize operands as vector bundles.
11985 for (Instruction *I : PostponedCmps) {
11986 if (R.isDeleted(I))
11987 continue;
11988 OpsChanged |= tryToVectorize(I, R);
11989 }
11990 // Try to vectorize list of compares.
11991 // Sort by type, compare predicate, etc.
11992 auto &&CompareSorter = [&R](Value *V, Value *V2) {
11993 return compareCmp<false>(V, V2,
11994 [&R](Instruction *I) { return R.isDeleted(I); });
11995 };
11996
11997 auto &&AreCompatibleCompares = [&R](Value *V1, Value *V2) {
11998 if (V1 == V2)
11999 return true;
12000 return compareCmp<true>(V1, V2,
12001 [&R](Instruction *I) { return R.isDeleted(I); });
12002 };
12003 auto Limit = [&R](Value *V) {
12004 unsigned EltSize = R.getVectorElementSize(V);
12005 return std::max(2U, R.getMaxVecRegSize() / EltSize);
12006 };
12007
12008 SmallVector<Value *> Vals(PostponedCmps.begin(), PostponedCmps.end());
12009 OpsChanged |= tryToVectorizeSequence<Value>(
12010 Vals, Limit, CompareSorter, AreCompatibleCompares,
12011 [this, &R](ArrayRef<Value *> Candidates, bool LimitForRegisterSize) {
12012 // Exclude possible reductions from other blocks.
12013 bool ArePossiblyReducedInOtherBlock =
12014 any_of(Candidates, [](Value *V) {
12015 return any_of(V->users(), [V](User *U) {
12016 return isa<SelectInst>(U) &&
12017 cast<SelectInst>(U)->getParent() !=
12018 cast<Instruction>(V)->getParent();
12019 });
12020 });
12021 if (ArePossiblyReducedInOtherBlock)
12022 return false;
12023 return tryToVectorizeList(Candidates, R, LimitForRegisterSize);
12024 },
12025 /*LimitForRegisterSize=*/true);
12026 Instructions.clear();
12027 } else {
12028 // Insert in reverse order since the PostponedCmps vector was filled in
12029 // reverse order.
12030 Instructions.assign(PostponedCmps.rbegin(), PostponedCmps.rend());
12031 }
12032 return OpsChanged;
12033 }
12034
12035 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
12036 bool Changed = false;
12037 SmallVector<Value *, 4> Incoming;
12038 SmallPtrSet<Value *, 16> VisitedInstrs;
12039 // Maps phi nodes to the non-phi nodes found in the use tree for each phi
12040 // node. This makes it easier to identify the chains that can be vectorized
12041 // in a better way.
12042 DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes;
12043 auto PHICompare = [this, &PHIToOpcodes](Value *V1, Value *V2) {
12044 assert(isValidElementType(V1->getType()) &&
12045 isValidElementType(V2->getType()) &&
12046 "Expected vectorizable types only.");
12047 // It is fine to compare type IDs here, since we expect only vectorizable
12048 // types, like ints, floats and pointers; we don't care about other types.
12049 if (V1->getType()->getTypeID() < V2->getType()->getTypeID())
12050 return true;
12051 if (V1->getType()->getTypeID() > V2->getType()->getTypeID())
12052 return false;
12053 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
12054 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
12055 if (Opcodes1.size() < Opcodes2.size())
12056 return true;
12057 if (Opcodes1.size() > Opcodes2.size())
12058 return false;
12059 Optional<bool> ConstOrder;
12060 for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
12061 // Undefs are compatible with any other value.
12062 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) {
12063 if (!ConstOrder)
12064 ConstOrder =
12065 !isa<UndefValue>(Opcodes1[I]) && isa<UndefValue>(Opcodes2[I]);
12066 continue;
12067 }
12068 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
12069 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
12070 DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent());
12071 DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent());
12072 if (!NodeI1)
12073 return NodeI2 != nullptr;
12074 if (!NodeI2)
12075 return false;
12076 assert((NodeI1 == NodeI2) ==
12077 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
12078 "Different nodes should have different DFS numbers");
12079 if (NodeI1 != NodeI2)
12080 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
12081 InstructionsState S = getSameOpcode({I1, I2});
12082 if (S.getOpcode())
12083 continue;
12084 return I1->getOpcode() < I2->getOpcode();
12085 }
12086 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) {
12087 if (!ConstOrder)
12088 ConstOrder = Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID();
12089 continue;
12090 }
12091 if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID())
12092 return true;
12093 if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID())
12094 return false;
12095 }
12096 return ConstOrder && *ConstOrder;
12097 };
12098 auto AreCompatiblePHIs = [&PHIToOpcodes](Value *V1, Value *V2) {
12099 if (V1 == V2)
12100 return true;
12101 if (V1->getType() != V2->getType())
12102 return false;
12103 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
12104 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
12105 if (Opcodes1.size() != Opcodes2.size())
12106 return false;
12107 for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
12108 // Undefs are compatible with any other value.
12109 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I]))
12110 continue;
12111 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
12112 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
12113 if (I1->getParent() != I2->getParent())
12114 return false;
12115 InstructionsState S = getSameOpcode({I1, I2});
12116 if (S.getOpcode())
12117 continue;
12118 return false;
12119 }
12120 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I]))
12121 continue;
12122 if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID())
12123 return false;
12124 }
12125 return true;
12126 };
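// Limit returns the number of elements of V's type that fit in the widest
// vector register, but no fewer than 2; e.g., with a 256-bit register and
// 32-bit phi values it returns max(2, 256 / 32) = 8.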
12127 auto Limit = [&R](Value *V) {
12128 unsigned EltSize = R.getVectorElementSize(V);
12129 return std::max(2U, R.getMaxVecRegSize() / EltSize);
12130 };
12131
12132 bool HaveVectorizedPhiNodes = false;
12133 do {
12134 // Collect the incoming values from the PHIs.
12135 Incoming.clear();
12136 for (Instruction &I : *BB) {
12137 PHINode *P = dyn_cast<PHINode>(&I);
12138 if (!P)
12139 break;
12140
12141 // No need to analyze deleted, vectorized and non-vectorizable
12142 // instructions.
12143 if (!VisitedInstrs.count(P) && !R.isDeleted(P) &&
12144 isValidElementType(P->getType()))
12145 Incoming.push_back(P);
12146 }
12147
12148 // Find the corresponding non-phi nodes for better matching when trying to
12149 // build the tree.
12150 for (Value *V : Incoming) {
12151 SmallVectorImpl<Value *> &Opcodes =
12152 PHIToOpcodes.try_emplace(V).first->getSecond();
12153 if (!Opcodes.empty())
12154 continue;
12155 SmallVector<Value *, 4> Nodes(1, V);
12156 SmallPtrSet<Value *, 4> Visited;
12157 while (!Nodes.empty()) {
12158 auto *PHI = cast<PHINode>(Nodes.pop_back_val());
12159 if (!Visited.insert(PHI).second)
12160 continue;
12161 for (Value *V : PHI->incoming_values()) {
12162 if (auto *PHI1 = dyn_cast<PHINode>((V))) {
12163 Nodes.push_back(PHI1);
12164 continue;
12165 }
12166 Opcodes.emplace_back(V);
12167 }
12168 }
12169 }
12170
12171 HaveVectorizedPhiNodes = tryToVectorizeSequence<Value>(
12172 Incoming, Limit, PHICompare, AreCompatiblePHIs,
12173 [this, &R](ArrayRef<Value *> Candidates, bool LimitForRegisterSize) {
12174 return tryToVectorizeList(Candidates, R, LimitForRegisterSize);
12175 },
12176 /*LimitForRegisterSize=*/true);
12177 Changed |= HaveVectorizedPhiNodes;
12178 VisitedInstrs.insert(Incoming.begin(), Incoming.end());
12179 } while (HaveVectorizedPhiNodes);
12180
12181 VisitedInstrs.clear();
12182
12183 SmallVector<Instruction *, 8> PostProcessInstructions;
12184 SmallDenseSet<Instruction *, 4> KeyNodes;
12185 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
12186 // Skip instructions with scalable type. The number of elements is unknown
12187 // at compile time for scalable types.
12188 if (isa<ScalableVectorType>(it->getType()))
12189 continue;
12190
12191 // Skip instructions marked for deletion.
12192 if (R.isDeleted(&*it))
12193 continue;
12194 // We may go through BB multiple times, so skip the ones we have already checked.
12195 if (!VisitedInstrs.insert(&*it).second) {
12196 if (it->use_empty() && KeyNodes.contains(&*it) &&
12197 vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
12198 it->isTerminator())) {
12199 // We would like to start over since some instructions are deleted
12200 // and the iterator may become invalid.
12201 Changed = true;
12202 it = BB->begin();
12203 e = BB->end();
12204 }
12205 continue;
12206 }
12207
12208 if (isa<DbgInfoIntrinsic>(it))
12209 continue;
12210
12211 // Try to vectorize reductions that use PHINodes.
12212 if (PHINode *P = dyn_cast<PHINode>(it)) {
12213 // Check that the PHI is a reduction PHI.
12214 if (P->getNumIncomingValues() == 2) {
12215 // Try to match and vectorize a horizontal reduction.
12216 if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
12217 TTI)) {
12218 Changed = true;
12219 it = BB->begin();
12220 e = BB->end();
12221 continue;
12222 }
12223 }
12224 // Try to vectorize the incoming values of the PHI, to catch reductions
12225 // that feed into PHIs.
12226 for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) {
12227 // Skip if the incoming block is the current BB for now. Also, bypass
12228 // unreachable IR for efficiency and to avoid crashing.
12229 // TODO: Collect the skipped incoming values and try to vectorize them
12230 // after processing BB.
12231 if (BB == P->getIncomingBlock(I) ||
12232 !DT->isReachableFromEntry(P->getIncomingBlock(I)))
12233 continue;
12234
12235 Changed |= vectorizeRootInstruction(nullptr, P->getIncomingValue(I),
12236 P->getIncomingBlock(I), R, TTI);
12237 }
12238 continue;
12239 }
12240
12241 // Ran into an instruction without users, like a terminator, a store, or a
12242 // function call with an ignored return value. Ignore unused instructions
12243 // (based on instruction type, except for CallInst and InvokeInst).
12244 if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
12245 isa<InvokeInst>(it))) {
12246 KeyNodes.insert(&*it);
12247 bool OpsChanged = false;
12248 if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
12249 for (auto *V : it->operand_values()) {
12250 // Try to match and vectorize a horizontal reduction.
12251 OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
12252 }
12253 }
12254 // Start vectorization of the post-process list of instructions from the
12255 // top-tree instructions to try to vectorize as many instructions as
12256 // possible.
12257 OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
12258 it->isTerminator());
12259 if (OpsChanged) {
12260 // We would like to start over since some instructions are deleted
12261 // and the iterator may become invalid.
12262 Changed = true;
12263 it = BB->begin();
12264 e = BB->end();
12265 continue;
12266 }
12267 }
12268
12269 if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
12270 isa<InsertValueInst>(it))
12271 PostProcessInstructions.push_back(&*it);
12272 }
12273
12274 return Changed;
12275 }
12276
12277 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
12278 auto Changed = false;
12279 for (auto &Entry : GEPs) {
12280 // If the getelementptr list has fewer than two elements, there's nothing
12281 // to do.
12282 if (Entry.second.size() < 2)
12283 continue;
12284
12285 LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
12286 << Entry.second.size() << ".\n");
12287
12288 // Process the GEP list in chunks suitable for the target's supported
12289 // vector size. If a vector register can't hold 1 element, we are done. We
12290 // are trying to vectorize the index computations, so the maximum number of
12291 // elements is based on the size of the index expression, rather than the
12292 // size of the GEP itself (the target's pointer size).
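// For example, with a 128-bit maximum vector register and i64 index
// expressions, each chunk holds at most 128 / 64 = 2 getelementptrs.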
12293 unsigned MaxVecRegSize = R.getMaxVecRegSize();
12294 unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin());
12295 if (MaxVecRegSize < EltSize)
12296 continue;
12297
12298 unsigned MaxElts = MaxVecRegSize / EltSize;
12299 for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
12300 auto Len = std::min<unsigned>(BE - BI, MaxElts);
12301 ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);
12302
12303 // Initialize a set of candidate getelementptrs. Note that we use a
12304 // SetVector here to preserve program order. If the index computations
12305 // are vectorizable and begin with loads, we want to minimize the chance
12306 // of having to reorder them later.
12307 SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());
12308
12309 // Some of the candidates may have already been vectorized after we
12310 // initially collected them. If so, they are marked as deleted, so remove
12311 // them from the set of candidates.
12312 Candidates.remove_if(
12313 [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); });
12314
12315 // Remove from the set of candidates all pairs of getelementptrs with
12316 // constant differences. Such getelementptrs are likely not good
12317 // candidates for vectorization in a bottom-up phase since one can be
12318 // computed from the other. We also ensure all candidate getelementptr
12319 // indices are unique.
12320 for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
12321 auto *GEPI = GEPList[I];
12322 if (!Candidates.count(GEPI))
12323 continue;
12324 auto *SCEVI = SE->getSCEV(GEPList[I]);
12325 for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
12326 auto *GEPJ = GEPList[J];
12327 auto *SCEVJ = SE->getSCEV(GEPList[J]);
12328 if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
12329 Candidates.remove(GEPI);
12330 Candidates.remove(GEPJ);
12331 } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
12332 Candidates.remove(GEPJ);
12333 }
12334 }
12335 }
12336
12337 // We break out of the above computation as soon as we know there are
12338 // fewer than two candidates remaining.
12339 if (Candidates.size() < 2)
12340 continue;
12341
12342 // Add the single, non-constant index of each candidate to the bundle. We
12343 // ensured the indices met these constraints when we originally collected
12344 // the getelementptrs.
12345 SmallVector<Value *, 16> Bundle(Candidates.size());
12346 auto BundleIndex = 0u;
12347 for (auto *V : Candidates) {
12348 auto *GEP = cast<GetElementPtrInst>(V);
12349 auto *GEPIdx = GEP->idx_begin()->get();
12350 assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
12351 Bundle[BundleIndex++] = GEPIdx;
12352 }
12353
12354 // Try and vectorize the indices. We are currently only interested in
12355 // gather-like cases of the form:
12356 //
12357 // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
12358 //
12359 // where the loads of "a", the loads of "b", and the subtractions can be
12360 // performed in parallel. It's likely that detecting this pattern in a
12361 // bottom-up phase will be simpler and less costly than building a
12362 // full-blown top-down phase beginning at the consecutive loads.
12363 Changed |= tryToVectorizeList(Bundle, R);
12364 }
12365 }
12366 return Changed;
12367 }
12368
12369 bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
12370 bool Changed = false;
12371 // Sort by type, base pointers and value operands. Value operands must be
12372 // compatible (have the same opcode, same parent), otherwise it is
12373 // definitely not profitable to try to vectorize them.
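// For instruction value operands, the ordering below follows the dominance
// (DFS-in) number of the defining block and then the opcode, so stores whose
// stored values share a block and opcode end up adjacent.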
12374 auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) {
12375 if (V->getPointerOperandType()->getTypeID() <
12376 V2->getPointerOperandType()->getTypeID())
12377 return true;
12378 if (V->getPointerOperandType()->getTypeID() >
12379 V2->getPointerOperandType()->getTypeID())
12380 return false;
12381 // UndefValues are compatible with all other values.
12382 if (isa<UndefValue>(V->getValueOperand()) ||
12383 isa<UndefValue>(V2->getValueOperand()))
12384 return false;
12385 if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand()))
12386 if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
12387 DomTreeNodeBase<llvm::BasicBlock> *NodeI1 =
12388 DT->getNode(I1->getParent());
12389 DomTreeNodeBase<llvm::BasicBlock> *NodeI2 =
12390 DT->getNode(I2->getParent());
12391 assert(NodeI1 && "Should only process reachable instructions");
12392 assert(NodeI2 && "Should only process reachable instructions");
12393 assert((NodeI1 == NodeI2) ==
12394 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
12395 "Different nodes should have different DFS numbers");
12396 if (NodeI1 != NodeI2)
12397 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
12398 InstructionsState S = getSameOpcode({I1, I2});
12399 if (S.getOpcode())
12400 return false;
12401 return I1->getOpcode() < I2->getOpcode();
12402 }
12403 if (isa<Constant>(V->getValueOperand()) &&
12404 isa<Constant>(V2->getValueOperand()))
12405 return false;
12406 return V->getValueOperand()->getValueID() <
12407 V2->getValueOperand()->getValueID();
12408 };
12409
12410 auto &&AreCompatibleStores = [](StoreInst *V1, StoreInst *V2) {
12411 if (V1 == V2)
12412 return true;
12413 if (V1->getPointerOperandType() != V2->getPointerOperandType())
12414 return false;
12415 // Undefs are compatible with any other value.
12416 if (isa<UndefValue>(V1->getValueOperand()) ||
12417 isa<UndefValue>(V2->getValueOperand()))
12418 return true;
12419 if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
12420 if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
12421 if (I1->getParent() != I2->getParent())
12422 return false;
12423 InstructionsState S = getSameOpcode({I1, I2});
12424 return S.getOpcode() > 0;
12425 }
12426 if (isa<Constant>(V1->getValueOperand()) &&
12427 isa<Constant>(V2->getValueOperand()))
12428 return true;
12429 return V1->getValueOperand()->getValueID() ==
12430 V2->getValueOperand()->getValueID();
12431 };
12432 auto Limit = [&R, this](StoreInst *SI) {
12433 unsigned EltSize = DL->getTypeSizeInBits(SI->getValueOperand()->getType());
12434 return R.getMinVF(EltSize);
12435 };
12436
12437 // Attempt to sort and vectorize each of the store-groups.
12438 for (auto &Pair : Stores) {
12439 if (Pair.second.size() < 2)
12440 continue;
12441
12442 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
12443 << Pair.second.size() << ".\n");
12444
12445 if (!isValidElementType(Pair.second.front()->getValueOperand()->getType()))
12446 continue;
12447
12448 Changed |= tryToVectorizeSequence<StoreInst>(
12449 Pair.second, Limit, StoreSorter, AreCompatibleStores,
12450 [this, &R](ArrayRef<StoreInst *> Candidates, bool) {
12451 return vectorizeStores(Candidates, R);
12452 },
12453 /*LimitForRegisterSize=*/false);
12454 }
12455 return Changed;
12456 }
12457
12458 char SLPVectorizer::ID = 0;
12459
12460 static const char lv_name[] = "SLP Vectorizer";
12461
12462 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
12463 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
12464 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
12465 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
12466 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
12467 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
12468 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
12469 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
12470 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
12471 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)
12472
12473 Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); }
12474