//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#ifdef EXPENSIVE_CHECKS
#include "llvm/IR/Verifier.h"
#endif
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
                                  cl::desc("Run the SLP vectorization passes"));

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
    ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                       cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
    MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
                           cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned>
    MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden,
                cl::desc("Maximum SLP vectorization factor (0=unlimited)"));

static cl::opt<int>
    MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden,
                   cl::desc("Maximum depth of the lookup for consecutive stores."));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
    ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
                             cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

// The maximum depth that the look-ahead score heuristic will explore.
// The higher this value, the higher the compilation time overhead.
static cl::opt<int> LookAheadMaxDepth(
    "slp-max-look-ahead-depth", cl::init(2), cl::Hidden,
    cl::desc("The maximum look-ahead depth for operand reordering scores"));

// The maximum depth that the look-ahead score heuristic will explore
// when it is probing among candidates for vectorization tree roots.
// The higher this value, the higher the compilation time overhead, but unlike
// the similar limit for operand reordering this is used less frequently, hence
// the impact of a higher value is less noticeable.
static cl::opt<int> RootLookAheadMaxDepth(
    "slp-max-root-look-ahead-depth", cl::init(2), cl::Hidden,
    cl::desc("The maximum look-ahead depth for searching best rooting option"));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns True if the value is a constant (but not globals/constant
/// expressions).
static bool isConstant(Value *V) {
  return isa<Constant>(V) && !isa<ConstantExpr>(V) && !isa<GlobalValue>(V);
}

/// Checks if \p V is one of the vector-like instructions, i.e. undef,
/// insertelement/extractelement with constant indices for fixed vector type or
/// extractvalue instruction.
static bool isVectorLikeInstWithConstOps(Value *V) {
  if (!isa<InsertElementInst, ExtractElementInst>(V) &&
      !isa<ExtractValueInst, UndefValue>(V))
    return false;
  auto *I = dyn_cast<Instruction>(V);
  if (!I || isa<ExtractValueInst>(I))
    return true;
  if (!isa<FixedVectorType>(I->getOperand(0)->getType()))
    return false;
  if (isa<ExtractElementInst>(I))
    return isConstant(I->getOperand(1));
  assert(isa<InsertElementInst>(V) && "Expected only insertelement.");
  return isConstant(I->getOperand(2));
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  if (all_of(VL, isVectorLikeInstWithConstOps))
    return true;

  BasicBlock *BB = I0->getParent();
  for (int I = 1, E = VL.size(); I < E; I++) {
    auto *II = dyn_cast<Instruction>(VL[I]);
    if (!II)
      return false;

    if (BB != II->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants (but not
/// globals/constant expressions).
static bool allConstant(ArrayRef<Value *> VL) {
  // Constant expressions and globals can't be vectorized like normal integer/FP
  // constants.
  return all_of(VL, isConstant);
}

/// \returns True if all of the values in \p VL are identical or some of them
/// are UndefValue.
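/// For example, {%a, undef, %a, %a} is a splat, while {%a, %b, %a} and
/// {undef, undef} are not.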
static bool isSplat(ArrayRef<Value *> VL) {
  Value *FirstNonUndef = nullptr;
  for (Value *V : VL) {
    if (isa<UndefValue>(V))
      continue;
    if (!FirstNonUndef) {
      FirstNonUndef = V;
      continue;
    }
    if (V != FirstNonUndef)
      return false;
  }
  return FirstNonUndef != nullptr;
}

/// \returns True if \p I is commutative, handles CmpInst and BinaryOperator.
static bool isCommutative(Instruction *I) {
  if (auto *Cmp = dyn_cast<CmpInst>(I))
    return Cmp->isCommutative();
  if (auto *BO = dyn_cast<BinaryOperator>(I))
    return BO->isCommutative();
  // TODO: This should check for generic Instruction::isCommutative(), but
  // we need to confirm that the caller code correctly handles Intrinsics
  // for example (does not have 2 operands).
  return false;
}

/// Checks if the given value is actually an undefined constant vector.
static bool isUndefVector(const Value *V) {
  if (isa<UndefValue>(V))
    return true;
  auto *C = dyn_cast<Constant>(V);
  if (!C)
    return false;
  if (!C->containsUndefOrPoisonElement())
    return false;
  auto *VecTy = dyn_cast<FixedVectorType>(C->getType());
  if (!VecTy)
    return false;
  for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I) {
    if (Constant *Elem = C->getAggregateElement(I))
      if (!isa<UndefValue>(Elem))
        return false;
  }
  return true;
}

/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> poison, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> poison, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul
/// Mask will return the Shuffle Mask equivalent to the extracted elements.
/// TODO: Can we split off and reuse the shuffle mask detection from
/// TargetTransformInfo::getInstructionThroughput?
static Optional<TargetTransformInfo::ShuffleKind>
isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
  const auto *It =
      find_if(VL, [](Value *V) { return isa<ExtractElementInst>(V); });
  if (It == VL.end())
    return None;
  auto *EI0 = cast<ExtractElementInst>(*It);
  if (isa<ScalableVectorType>(EI0->getVectorOperandType()))
    return None;
  unsigned Size =
      cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, Select, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  Mask.assign(VL.size(), UndefMaskElem);
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    // Undef can be represented as an undef element in a vector.
    if (isa<UndefValue>(VL[I]))
      continue;
    auto *EI = cast<ExtractElementInst>(VL[I]);
    if (isa<ScalableVectorType>(EI->getVectorOperandType()))
      return None;
    auto *Vec = EI->getVectorOperand();
    // We can extractelement from undef or poison vector.
    if (isUndefVector(Vec))
      continue;
    // All vector operands must have the same number of vector elements.
    if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
      return None;
    if (isa<UndefValue>(EI->getIndexOperand()))
      continue;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size))
      continue;
    unsigned IntIdx = Idx->getValue().getZExtValue();
    Mask[I] = IntIdx;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
    if (!Vec1 || Vec1 == Vec) {
      Vec1 = Vec;
    } else if (!Vec2 || Vec2 == Vec) {
      Vec2 = Vec;
      Mask[I] += Size;
    } else {
      return None;
    }
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector, otherwise
  // we have a permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
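  /// For example, {add, sub, add, sub} produces MainOp == add and
  /// AltOp == sub, so isAltShuffle() returns true.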
  bool isAltShuffle() const { return AltOp != MainOp; }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace

/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}

/// \returns true if \p Opcode is allowed as part of the main/alternate
/// instruction for SLP vectorization.
///
/// Example of unsupported opcode is SDIV that can potentially cause UB if the
/// "shuffled out" lane would result in division by zero.
static bool isValidForAlternation(unsigned Opcode) {
  if (Instruction::isIntDivRem(Opcode))
    return false;

  return true;
}

static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex = 0);

/// Checks if the provided operands of 2 cmp instructions are compatible, i.e.
/// compatible instructions or constants, or just some other regular values.
static bool areCompatibleCmpOps(Value *BaseOp0, Value *BaseOp1, Value *Op0,
                                Value *Op1) {
  return (isConstant(BaseOp0) && isConstant(Op0)) ||
         (isConstant(BaseOp1) && isConstant(Op1)) ||
         (!isa<Instruction>(BaseOp0) && !isa<Instruction>(Op0) &&
          !isa<Instruction>(BaseOp1) && !isa<Instruction>(Op1)) ||
         getSameOpcode({BaseOp0, Op0}).getOpcode() ||
         getSameOpcode({BaseOp1, Op1}).getOpcode();
}

/// \returns analysis of the Instructions in \p VL described in
/// InstructionsState, the Opcode that we suppose the whole list
/// could be vectorized even if its structure is diverse.
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  bool IsCmpOp = isa<CmpInst>(VL[BaseIndex]);
  CmpInst::Predicate BasePred =
      IsCmpOp ? cast<CmpInst>(VL[BaseIndex])->getPredicate()
              : CmpInst::BAD_ICMP_PREDICATE;
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
  // TODO - generalize to support all operators (types, calls etc.).
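  // For example, for VL = {add, sub, add, sub} Opcode stays Add and
  // AltOpcode becomes Sub, producing an alternate-shuffle state.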
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
    if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
      if (InstOpcode == Opcode || InstOpcode == AltOpcode)
        continue;
      if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
          isValidForAlternation(Opcode)) {
        AltOpcode = InstOpcode;
        AltIndex = Cnt;
        continue;
      }
    } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
      Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
      Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        if (InstOpcode == Opcode || InstOpcode == AltOpcode)
          continue;
        if (Opcode == AltOpcode) {
          assert(isValidForAlternation(Opcode) &&
                 isValidForAlternation(InstOpcode) &&
                 "Cast isn't safe for alternation, logic needs to be updated!");
          AltOpcode = InstOpcode;
          AltIndex = Cnt;
          continue;
        }
      }
    } else if (IsCmpOp && isa<CmpInst>(VL[Cnt])) {
      auto *BaseInst = cast<Instruction>(VL[BaseIndex]);
      auto *Inst = cast<Instruction>(VL[Cnt]);
      Type *Ty0 = BaseInst->getOperand(0)->getType();
      Type *Ty1 = Inst->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        Value *BaseOp0 = BaseInst->getOperand(0);
        Value *BaseOp1 = BaseInst->getOperand(1);
        Value *Op0 = Inst->getOperand(0);
        Value *Op1 = Inst->getOperand(1);
        CmpInst::Predicate CurrentPred =
            cast<CmpInst>(VL[Cnt])->getPredicate();
        CmpInst::Predicate SwappedCurrentPred =
            CmpInst::getSwappedPredicate(CurrentPred);
        // Check for compatible operands. If the corresponding operands are not
        // compatible - need to perform alternate vectorization.
        if (InstOpcode == Opcode) {
          if (BasePred == CurrentPred &&
              areCompatibleCmpOps(BaseOp0, BaseOp1, Op0, Op1))
            continue;
          if (BasePred == SwappedCurrentPred &&
              areCompatibleCmpOps(BaseOp0, BaseOp1, Op1, Op0))
            continue;
          if (E == 2 &&
              (BasePred == CurrentPred || BasePred == SwappedCurrentPred))
            continue;
          auto *AltInst = cast<CmpInst>(VL[AltIndex]);
          CmpInst::Predicate AltPred = AltInst->getPredicate();
          Value *AltOp0 = AltInst->getOperand(0);
          Value *AltOp1 = AltInst->getOperand(1);
          // Check if operands are compatible with alternate operands.
          if (AltPred == CurrentPred &&
              areCompatibleCmpOps(AltOp0, AltOp1, Op0, Op1))
            continue;
          if (AltPred == SwappedCurrentPred &&
              areCompatibleCmpOps(AltOp0, AltOp1, Op1, Op0))
            continue;
        }
        if (BaseIndex == AltIndex && BasePred != CurrentPred) {
          assert(isValidForAlternation(Opcode) &&
                 isValidForAlternation(InstOpcode) &&
                 "Cmp isn't safe for alternation, logic needs to be updated!");
          AltIndex = Cnt;
          continue;
        }
        auto *AltInst = cast<CmpInst>(VL[AltIndex]);
        CmpInst::Predicate AltPred = AltInst->getPredicate();
        if (BasePred == CurrentPred || BasePred == SwappedCurrentPred ||
            AltPred == CurrentPred || AltPred == SwappedCurrentPred)
          continue;
      }
    } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
      continue;
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }

  return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
                           cast<Instruction>(VL[AltIndex]));
}

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static Optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return None;
    return CI->getZExtValue();
  }
  ExtractValueInst *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return None;
  return *EI->idx_begin();
}

/// \returns True if in-tree use also needs extract. This refers to
/// a possible scalar operand in a vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
      if (isVectorIntrinsicWithScalarOpAtArg(ID, i))
        return (CI->getArgOperand(i) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

/// Shuffles \p Mask in accordance with the given \p SubMask.
static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask) {
  if (SubMask.empty())
    return;
  if (Mask.empty()) {
    Mask.append(SubMask.begin(), SubMask.end());
    return;
  }
  SmallVector<int> NewMask(SubMask.size(), UndefMaskElem);
  int TermValue = std::min(Mask.size(), SubMask.size());
  for (int I = 0, E = SubMask.size(); I < E; ++I) {
    if (SubMask[I] >= TermValue || SubMask[I] == UndefMaskElem ||
        Mask[SubMask[I]] >= TermValue)
      continue;
    NewMask[I] = Mask[SubMask[I]];
  }
  Mask.swap(NewMask);
}

/// Order may have elements assigned a special value (size) which is out of
/// bounds. Such indices only appear in places which correspond to undef values
/// (see canReuseExtract for details) and are used to avoid undef values having
/// an effect on operand ordering.
/// The first loop below simply finds all unused indices and then the next loop
/// nest assigns these indices to the positions of undef values.
/// As an example, below Order has two undef positions and they are assigned
/// the values 3 and 7 respectively:
/// before:  6 9 5 4 9 2 1 0
/// after:   6 3 5 4 7 2 1 0
static void fixupOrderingIndices(SmallVectorImpl<unsigned> &Order) {
  const unsigned Sz = Order.size();
  SmallBitVector UnusedIndices(Sz, /*t=*/true);
  SmallBitVector MaskedIndices(Sz);
  for (unsigned I = 0; I < Sz; ++I) {
    if (Order[I] < Sz)
      UnusedIndices.reset(Order[I]);
    else
      MaskedIndices.set(I);
  }
  if (MaskedIndices.none())
    return;
  assert(UnusedIndices.count() == MaskedIndices.count() &&
         "Non-synced masked/available indices.");
  int Idx = UnusedIndices.find_first();
  int MIdx = MaskedIndices.find_first();
  while (MIdx >= 0) {
    assert(Idx >= 0 && "Indices must be synced.");
    Order[MIdx] = Idx;
    Idx = UnusedIndices.find_next(Idx);
    MIdx = MaskedIndices.find_next(MIdx);
  }
}

namespace llvm {

static void inversePermutation(ArrayRef<unsigned> Indices,
                               SmallVectorImpl<int> &Mask) {
  Mask.clear();
  const unsigned E = Indices.size();
  Mask.resize(E, UndefMaskElem);
  for (unsigned I = 0; I < E; ++I)
    Mask[Indices[I]] = I;
}

/// \returns inserting index of InsertElement or InsertValue instruction,
/// using Offset as base offset for index.
static Optional<unsigned> getInsertIndex(const Value *InsertInst,
                                         unsigned Offset = 0) {
  int Index = Offset;
  if (const auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
    if (const auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2))) {
      auto *VT = cast<FixedVectorType>(IE->getType());
      if (CI->getValue().uge(VT->getNumElements()))
        return None;
      Index *= VT->getNumElements();
      Index += CI->getZExtValue();
      return Index;
    }
    return None;
  }

  const auto *IV = cast<InsertValueInst>(InsertInst);
  Type *CurrentType = IV->getType();
  for (unsigned I : IV->indices()) {
    if (const auto *ST = dyn_cast<StructType>(CurrentType)) {
      Index *= ST->getNumElements();
      CurrentType = ST->getElementType(I);
    } else if (const auto *AT = dyn_cast<ArrayType>(CurrentType)) {
      Index *= AT->getNumElements();
      CurrentType = AT->getElementType();
    } else {
      return None;
    }
    Index += I;
  }
  return Index;
}

/// Reorders the list of scalars in accordance with the given \p Mask.
static void reorderScalars(SmallVectorImpl<Value *> &Scalars,
                           ArrayRef<int> Mask) {
  assert(!Mask.empty() && "Expected non-empty mask.");
  SmallVector<Value *> Prev(Scalars.size(),
                            UndefValue::get(Scalars.front()->getType()));
  Prev.swap(Scalars);
  for (unsigned I = 0, E = Prev.size(); I < E; ++I)
    if (Mask[I] != UndefMaskElem)
      Scalars[Mask[I]] = Prev[I];
}

/// Checks if the provided value does not require scheduling. It does not
/// require scheduling if this is not an instruction or it is an instruction
/// that does not read/write memory and all operands are either not instructions
/// or phi nodes or instructions from different blocks.
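/// For example, an add whose operands are all function arguments or PHI nodes
/// satisfies this check.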
static bool areAllOperandsNonInsts(Value *V) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return true;
  return !mayHaveNonDefUseDependency(*I) &&
         all_of(I->operands(), [I](Value *V) {
           auto *IO = dyn_cast<Instruction>(V);
           if (!IO)
             return true;
           return isa<PHINode>(IO) || IO->getParent() != I->getParent();
         });
}

/// Checks if the provided value does not require scheduling. It does not
/// require scheduling if this is not an instruction or it is an instruction
/// that does not read/write memory and all users are phi nodes or instructions
/// from different blocks.
static bool isUsedOutsideBlock(Value *V) {
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return true;
  // Limits the number of uses to save compile time.
  constexpr int UsesLimit = 8;
  return !I->mayReadOrWriteMemory() && !I->hasNUsesOrMore(UsesLimit) &&
         all_of(I->users(), [I](User *U) {
           auto *IU = dyn_cast<Instruction>(U);
           if (!IU)
             return true;
           return IU->getParent() != I->getParent() || isa<PHINode>(IU);
         });
}

/// Checks if the specified value does not require scheduling. It does not
/// require scheduling if all operands and all users do not need to be scheduled
/// in the current basic block.
static bool doesNotNeedToBeScheduled(Value *V) {
  return areAllOperandsNonInsts(V) && isUsedOutsideBlock(V);
}

/// Checks if the specified array of instructions does not require scheduling.
/// It is so if either all instructions have operands that do not require
/// scheduling, or all instructions have users that do not require scheduling
/// since they are phis or in other basic blocks.
static bool doesNotNeedToSchedule(ArrayRef<Value *> VL) {
  return !VL.empty() &&
         (all_of(VL, isUsedOutsideBlock) || all_of(VL, areAllOperandsNonInsts));
}

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  using OrdersType = SmallVector<unsigned, 4>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : BatchAA(*Aa), F(Func), SE(Se), TTI(Tti), TLI(TLi), LI(Li),
        DT(Dt), AC(AC), DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize =
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize();

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractvalue instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  InstructionCost getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 const SmallDenseSet<Value *> &UserIgnoreLst);

  /// Construct a vectorizable tree that starts at \p Roots.
  void buildTree(ArrayRef<Value *> Roots);

  /// Builds external uses of the vectorized scalars, i.e. the list of
  /// vectorized scalars to be extracted, their lanes and their scalar users. \p
  /// ExternallyUsedValues contains an additional list of external uses to handle
  /// vectorization of reductions.
  void
  buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {});

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
    InstrElementSize.clear();
    UserIgnoreList = nullptr;
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// Checks if the specified gather tree entry \p TE can be represented as a
  /// shuffled vector entry + (possibly) permutation with other gathers. It
  /// implements the checks only for possibly ordered scalars (Loads,
  /// ExtractElement, ExtractValue), which can be part of the graph.
  Optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE);

  /// Sort loads into increasing pointers offsets to allow greater clustering.
  Optional<OrdersType> findPartiallyOrderedLoads(const TreeEntry &TE);

  /// Gets reordering data for the given tree entry. If the entry is vectorized
  /// - just return ReorderIndices, otherwise check if the scalars can be
  /// reordered and return the most optimal order.
  /// \param TopToBottom If true, include the order of vectorized stores and
  /// insertelement nodes, otherwise skip them.
  Optional<OrdersType> getReorderingData(const TreeEntry &TE, bool TopToBottom);

  /// Reorders the current graph to the most profitable order starting from the
  /// root node to the leaf nodes. The best order is chosen only from the nodes
  /// of the same size (vectorization factor). Smaller nodes are considered
  /// parts of a subgraph with a smaller VF and they are reordered independently.
  /// We can do this because we still need to extend smaller nodes to the wider
  /// VF and we can merge reordering shuffles with the widening shuffles.
  void reorderTopToBottom();

  /// Reorders the current graph to the most profitable order starting from
  /// leaves to the root. It allows to rotate small subgraphs and reduce the
  /// number of reshuffles if the leaf nodes use the same order. In this case we
  /// can merge the orders and just shuffle the user node instead of shuffling
  /// its operands. Plus, even if the leaf nodes have different orders, it allows
  /// to sink reordering in the graph closer to the root node and merge it later
  /// during analysis.
  void reorderBottomToTop(bool IgnoreReorder = false);

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  unsigned getMinVF(unsigned Sz) const {
    return std::max(2U, getMinVecRegSize() / Sz);
  }

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
    unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
        MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
    return MaxVF ? MaxVF : UINT_MAX;
  }

  /// Check if homogeneous aggregate is isomorphic to some VectorType.
  /// Accepts homogeneous multidimensional aggregate of scalars/vectors like
  /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
  /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable(bool ForReduction = false) const;

  /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  /// may not be necessary.
  bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;

  /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  /// may not be necessary.
  bool isLoadCombineCandidate() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }

  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
  };

  /// A helper class used for scoring candidates for two consecutive lanes.
  class LookAheadHeuristics {
    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;
    int NumLanes; // Total number of lanes (aka vectorization factor).
    int MaxLevel; // The maximum recursion depth for accumulating score.

  public:
    LookAheadHeuristics(const DataLayout &DL, ScalarEvolution &SE,
                        const BoUpSLP &R, int NumLanes, int MaxLevel)
        : DL(DL), SE(SE), R(R), NumLanes(NumLanes), MaxLevel(MaxLevel) {}

    // The hard-coded scores listed here are not very important, though they
    // should be higher for better matches to improve the resulting cost. When
    // computing the scores of matching one sub-tree with another, we are
    // basically counting the number of values that are matching. So even if all
    // scores are set to 1, we would still get a decent matching result.
    // However, sometimes we have to break ties. For example we may have to
    // choose between matching loads vs matching opcodes. This is what these
    // scores are helping us with: they provide the order of preference. Also,
    // this is important if the scalar is externally used or used in another
    // tree entry node in a different lane.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 4;
    /// The same load multiple times. This should have a better score than
    /// `ScoreSplat` because on x86, for a 2-lane vector, we can represent it
    /// with `movddup (%reg), xmm0`, which has a throughput of 0.5, versus 0.5
    /// for a vector load and 1.0 for a broadcast.
    static const int ScoreSplatLoads = 3;
    /// Loads from reversed memory addresses, e.g. load(A[i+1]), load(A[i]).
    static const int ScoreReversedLoads = 3;
    /// ExtractElementInst from same vector and consecutive indexes.
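    /// e.g., extractelement %v, i32 0 followed by extractelement %v, i32 1.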
    static const int ScoreConsecutiveExtracts = 4;
    /// ExtractElementInst from same vector and reversed indices.
    static const int ScoreReversedExtracts = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g, add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// Score if all users are vectorized.
    static const int ScoreAllUserVectorized = 1;

    /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
    /// \p U1 and \p U2 are the users of \p V1 and \p V2.
    /// Also, checks if \p V1 and \p V2 are compatible with instructions in \p
    /// MainAltOps.
    int getShallowScore(Value *V1, Value *V2, Instruction *U1, Instruction *U2,
                        ArrayRef<Value *> MainAltOps) const {
      if (V1 == V2) {
        if (isa<LoadInst>(V1)) {
          // Returns true if the users of V1 and V2 won't need to be extracted.
          auto AllUsersAreInternal = [U1, U2, this](Value *V1, Value *V2) {
            // Bail out if we have too many uses to save compilation time.
            static constexpr unsigned Limit = 8;
            if (V1->hasNUsesOrMore(Limit) || V2->hasNUsesOrMore(Limit))
              return false;

            auto AllUsersVectorized = [U1, U2, this](Value *V) {
              return llvm::all_of(V->users(), [U1, U2, this](Value *U) {
                return U == U1 || U == U2 || R.getTreeEntry(U) != nullptr;
              });
            };
            return AllUsersVectorized(V1) && AllUsersVectorized(V2);
          };
          // A broadcast of a load can be cheaper on some targets.
          if (R.TTI->isLegalBroadcastLoad(V1->getType(),
                                          ElementCount::getFixed(NumLanes)) &&
              ((int)V1->getNumUses() == NumLanes ||
               AllUsersAreInternal(V1, V2)))
            return LookAheadHeuristics::ScoreSplatLoads;
        }
        return LookAheadHeuristics::ScoreSplat;
      }

      auto *LI1 = dyn_cast<LoadInst>(V1);
      auto *LI2 = dyn_cast<LoadInst>(V2);
      if (LI1 && LI2) {
        if (LI1->getParent() != LI2->getParent())
          return LookAheadHeuristics::ScoreFail;

        Optional<int> Dist = getPointersDiff(
            LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
            LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
        if (!Dist || *Dist == 0)
          return LookAheadHeuristics::ScoreFail;
        // The distance is too large - still may be profitable to use masked
        // loads/gathers.
        if (std::abs(*Dist) > NumLanes / 2)
          return LookAheadHeuristics::ScoreAltOpcodes;
        // This still will detect consecutive loads, but we might have "holes"
        // in some cases. It is ok for non-power-2 vectorization and may produce
        // better results. It should not affect current vectorization.
        return (*Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveLoads
                           : LookAheadHeuristics::ScoreReversedLoads;
      }

      auto *C1 = dyn_cast<Constant>(V1);
      auto *C2 = dyn_cast<Constant>(V2);
      if (C1 && C2)
        return LookAheadHeuristics::ScoreConstants;

      // Extracts from consecutive indexes of the same vector score better, as
      // the extracts could be optimized away.
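      // E.g., pairing extractelement %v, 2 with extractelement %v, 3 yields
      // ScoreConsecutiveExtracts, while pairing it with extractelement %v, 1
      // yields ScoreReversedExtracts.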
      Value *EV1;
      ConstantInt *Ex1Idx;
      if (match(V1, m_ExtractElt(m_Value(EV1), m_ConstantInt(Ex1Idx)))) {
        // Undefs are always profitable for extractelements.
        if (isa<UndefValue>(V2))
          return LookAheadHeuristics::ScoreConsecutiveExtracts;
        Value *EV2 = nullptr;
        ConstantInt *Ex2Idx = nullptr;
        if (match(V2,
                  m_ExtractElt(m_Value(EV2), m_CombineOr(m_ConstantInt(Ex2Idx),
                                                         m_Undef())))) {
          // Undefs are always profitable for extractelements.
          if (!Ex2Idx)
            return LookAheadHeuristics::ScoreConsecutiveExtracts;
          if (isUndefVector(EV2) && EV2->getType() == EV1->getType())
            return LookAheadHeuristics::ScoreConsecutiveExtracts;
          if (EV2 == EV1) {
            int Idx1 = Ex1Idx->getZExtValue();
            int Idx2 = Ex2Idx->getZExtValue();
            int Dist = Idx2 - Idx1;
            // The distance is too large - still may be profitable to use
            // shuffles.
            if (std::abs(Dist) == 0)
              return LookAheadHeuristics::ScoreSplat;
            if (std::abs(Dist) > NumLanes / 2)
              return LookAheadHeuristics::ScoreSameOpcode;
            return (Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveExtracts
                              : LookAheadHeuristics::ScoreReversedExtracts;
          }
          return LookAheadHeuristics::ScoreAltOpcodes;
        }
        return LookAheadHeuristics::ScoreFail;
      }

      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (I1 && I2) {
        if (I1->getParent() != I2->getParent())
          return LookAheadHeuristics::ScoreFail;
        SmallVector<Value *, 4> Ops(MainAltOps.begin(), MainAltOps.end());
        Ops.push_back(I1);
        Ops.push_back(I2);
        InstructionsState S = getSameOpcode(Ops);
        // Note: Only consider instructions with <= 2 operands to avoid
        // complexity explosion.
        if (S.getOpcode() &&
            (S.MainOp->getNumOperands() <= 2 || !MainAltOps.empty() ||
             !S.isAltShuffle()) &&
            all_of(Ops, [&S](Value *V) {
              return cast<Instruction>(V)->getNumOperands() ==
                     S.MainOp->getNumOperands();
            }))
          return S.isAltShuffle() ? LookAheadHeuristics::ScoreAltOpcodes
                                  : LookAheadHeuristics::ScoreSameOpcode;
      }

      if (isa<UndefValue>(V2))
        return LookAheadHeuristics::ScoreUndef;

      return LookAheadHeuristics::ScoreFail;
    }

    /// Go through the operands of \p LHS and \p RHS recursively until
    /// MaxLevel, and return the cumulative score. \p U1 and \p U2 are
    /// the users of \p LHS and \p RHS (that is \p LHS and \p RHS are operands
    /// of \p U1 and \p U2), except at the beginning of the recursion where
    /// these are set to nullptr.
    ///
    /// For example:
    /// \verbatim
    ///  A[0]  B[0]  A[1]  B[1]  C[0] D[0]  B[1] A[1]
    ///     \ /         \ /         \ /        \ /
    ///      +           +           +          +
    ///      G1          G2          G3         G4
    /// \endverbatim
    /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
    /// each level recursively, accumulating the score. It starts from matching
    /// the additions at level 0, then moves on to the loads (level 1). The
    /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
    /// {B[0],B[1]} match with LookAheadHeuristics::ScoreConsecutiveLoads, while
    /// {A[0],C[0]} has a score of LookAheadHeuristics::ScoreFail.
    /// Please note that the order of the operands does not matter, as we
    /// evaluate the score of all profitable combinations of operands. In
    /// other words the score of G1 and G4 is the same as G1 and G2.
    /// This heuristic is based on ideas described in:
    ///   Look-ahead SLP: Auto-vectorization in the presence of commutative
    ///   operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
    ///   Luís F. W. Góes
    int getScoreAtLevelRec(Value *LHS, Value *RHS, Instruction *U1,
                           Instruction *U2, int CurrLevel,
                           ArrayRef<Value *> MainAltOps) const {

      // Get the shallow score of V1 and V2.
      int ShallowScoreAtThisLevel =
          getShallowScore(LHS, RHS, U1, U2, MainAltOps);

      // If reached MaxLevel,
      //  or if V1 and V2 are not instructions,
      //  or if they are SPLAT,
      //  or if they are not consecutive,
      //  or if profitable to vectorize loads or extractelements, early return
      //  the current cost.
      auto *I1 = dyn_cast<Instruction>(LHS);
      auto *I2 = dyn_cast<Instruction>(RHS);
      if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
          ShallowScoreAtThisLevel == LookAheadHeuristics::ScoreFail ||
          (((isa<LoadInst>(I1) && isa<LoadInst>(I2)) ||
            (I1->getNumOperands() > 2 && I2->getNumOperands() > 2) ||
            (isa<ExtractElementInst>(I1) && isa<ExtractElementInst>(I2))) &&
           ShallowScoreAtThisLevel))
        return ShallowScoreAtThisLevel;
      assert(I1 && I2 && "Should have early exited.");

      // Contains the I2 operand indexes that got matched with I1 operands.
      SmallSet<unsigned, 4> Op2Used;

      // Recursion towards the operands of I1 and I2. We are trying all possible
      // operand pairs, and keeping track of the best score.
      for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
           OpIdx1 != NumOperands1; ++OpIdx1) {
        // Try to pair op1I with the best operand of I2.
        int MaxTmpScore = 0;
        unsigned MaxOpIdx2 = 0;
        bool FoundBest = false;
        // If I2 is commutative try all combinations.
        unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
        unsigned ToIdx = isCommutative(I2)
                             ? I2->getNumOperands()
                             : std::min(I2->getNumOperands(), OpIdx1 + 1);
        assert(FromIdx <= ToIdx && "Bad index");
        for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
          // Skip operands already paired with OpIdx1.
          if (Op2Used.count(OpIdx2))
            continue;
          // Recursively calculate the cost at each level
          int TmpScore =
              getScoreAtLevelRec(I1->getOperand(OpIdx1), I2->getOperand(OpIdx2),
                                 I1, I2, CurrLevel + 1, None);
          // Look for the best score.
          if (TmpScore > LookAheadHeuristics::ScoreFail &&
              TmpScore > MaxTmpScore) {
            MaxTmpScore = TmpScore;
            MaxOpIdx2 = OpIdx2;
            FoundBest = true;
          }
        }
        if (FoundBest) {
          // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
          Op2Used.insert(MaxOpIdx2);
          ShallowScoreAtThisLevel += MaxTmpScore;
        }
      }
      return ShallowScoreAtThisLevel;
    }
  };
  /// A helper data structure to hold the operands of a vector of instructions.
  /// This supports a fixed vector length for all operand vectors.
  class VLOperands {
    /// For each operand we need (i) the value, and (ii) the opcode that it
    /// would be attached to if the expression was in a left-linearized form.
    /// This is required to avoid illegal operand reordering.
    /// For example:
    /// \verbatim
    ///                         0 Op1
    ///                         |/
    /// Op1 Op2   Linearized    + Op2
    ///   \ /     ---------->   |/
    ///    -                    -
    ///
    /// Op1 - Op2            (0 + Op1) - Op2
    /// \endverbatim
    ///
    /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
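    /// That is, Op1's APO is false (a non-inverse operation), while Op2's APO
    /// is true (an inverse operation).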
    ///
    /// Another way to think of this is to track all the operations across the
    /// path from the operand all the way to the root of the tree and to
    /// calculate the operation that corresponds to this path. For example, the
    /// path from Op2 to the root crosses the RHS of the '-', therefore the
    /// corresponding operation is a '-' (which matches the one in the
    /// linearized tree, as shown above).
    ///
    /// For lack of a better term, we refer to this operation as Accumulated
    /// Path Operation (APO).
    struct OperandData {
      OperandData() = default;
      OperandData(Value *V, bool APO, bool IsUsed)
          : V(V), APO(APO), IsUsed(IsUsed) {}
      /// The operand value.
      Value *V = nullptr;
      /// TreeEntries only allow a single opcode, or an alternate sequence of
      /// them (e.g., +, -). Therefore, we can safely use a boolean value for the
      /// APO. It is set to 'true' if 'V' is attached to an inverse operation
      /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise
      /// (e.g., Add/Mul)
      bool APO = false;
      /// Helper data for the reordering function.
      bool IsUsed = false;
    };

    /// During operand reordering, we are trying to select the operand at each
    /// lane that matches best with the operand at the neighboring lane. Our
    /// selection is based on the type of value we are looking for. For example,
    /// if the neighboring lane has a load, we need to look for a load that is
    /// accessing a consecutive address. These strategies are summarized in the
    /// 'ReorderingMode' enumerator.
    enum class ReorderingMode {
      Load,     ///< Matching loads to consecutive memory addresses
      Opcode,   ///< Matching instructions based on opcode (same or alternate)
      Constant, ///< Matching constants
      Splat,    ///< Matching the same instruction multiple times (broadcast)
      Failed,   ///< We failed to create a vectorizable group
    };

    using OperandDataVec = SmallVector<OperandData, 2>;

    /// A vector of operand vectors.
    SmallVector<OperandDataVec, 4> OpsVec;

    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;

    /// \returns the operand data at \p OpIdx and \p Lane.
    OperandData &getData(unsigned OpIdx, unsigned Lane) {
      return OpsVec[OpIdx][Lane];
    }

    /// \returns the operand data at \p OpIdx and \p Lane. Const version.
    const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
      return OpsVec[OpIdx][Lane];
    }

    /// Clears the used flag for all entries.
    void clearUsed() {
      for (unsigned OpIdx = 0, NumOperands = getNumOperands();
           OpIdx != NumOperands; ++OpIdx)
        for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
             ++Lane)
          OpsVec[OpIdx][Lane].IsUsed = false;
    }

    /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
    void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
      std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
    }

    /// \param Lane lane of the operands under analysis.
    /// \param OpIdx operand index in \p Lane lane we're looking for the best
    /// candidate for.
    /// \param Idx operand index of the current candidate value.
    /// \returns The additional score due to possible broadcasting of the
    /// elements in the lane.
It is more profitable to have power-of-2 unique 1418 /// elements in the lane, it will be vectorized with higher probability 1419 /// after removing duplicates. Currently the SLP vectorizer supports only 1420 /// vectorization of the power-of-2 number of unique scalars. 1421 int getSplatScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const { 1422 Value *IdxLaneV = getData(Idx, Lane).V; 1423 if (!isa<Instruction>(IdxLaneV) || IdxLaneV == getData(OpIdx, Lane).V) 1424 return 0; 1425 SmallPtrSet<Value *, 4> Uniques; 1426 for (unsigned Ln = 0, E = getNumLanes(); Ln < E; ++Ln) { 1427 if (Ln == Lane) 1428 continue; 1429 Value *OpIdxLnV = getData(OpIdx, Ln).V; 1430 if (!isa<Instruction>(OpIdxLnV)) 1431 return 0; 1432 Uniques.insert(OpIdxLnV); 1433 } 1434 int UniquesCount = Uniques.size(); 1435 int UniquesCntWithIdxLaneV = 1436 Uniques.contains(IdxLaneV) ? UniquesCount : UniquesCount + 1; 1437 Value *OpIdxLaneV = getData(OpIdx, Lane).V; 1438 int UniquesCntWithOpIdxLaneV = 1439 Uniques.contains(OpIdxLaneV) ? UniquesCount : UniquesCount + 1; 1440 if (UniquesCntWithIdxLaneV == UniquesCntWithOpIdxLaneV) 1441 return 0; 1442 return (PowerOf2Ceil(UniquesCntWithOpIdxLaneV) - 1443 UniquesCntWithOpIdxLaneV) - 1444 (PowerOf2Ceil(UniquesCntWithIdxLaneV) - UniquesCntWithIdxLaneV); 1445 } 1446 1447 /// \param Lane lane of the operands under analysis. 1448 /// \param OpIdx operand index in \p Lane lane we're looking the best 1449 /// candidate for. 1450 /// \param Idx operand index of the current candidate value. 1451 /// \returns The additional score for the scalar which users are all 1452 /// vectorized. 1453 int getExternalUseScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const { 1454 Value *IdxLaneV = getData(Idx, Lane).V; 1455 Value *OpIdxLaneV = getData(OpIdx, Lane).V; 1456 // Do not care about number of uses for vector-like instructions 1457 // (extractelement/extractvalue with constant indices), they are extracts 1458 // themselves and already externally used. Vectorization of such 1459 // instructions does not add extra extractelement instruction, just may 1460 // remove it. 1461 if (isVectorLikeInstWithConstOps(IdxLaneV) && 1462 isVectorLikeInstWithConstOps(OpIdxLaneV)) 1463 return LookAheadHeuristics::ScoreAllUserVectorized; 1464 auto *IdxLaneI = dyn_cast<Instruction>(IdxLaneV); 1465 if (!IdxLaneI || !isa<Instruction>(OpIdxLaneV)) 1466 return 0; 1467 return R.areAllUsersVectorized(IdxLaneI, None) 1468 ? LookAheadHeuristics::ScoreAllUserVectorized 1469 : 0; 1470 } 1471 1472 /// Score scaling factor for fully compatible instructions but with 1473 /// different number of external uses. Allows better selection of the 1474 /// instructions with less external uses. 1475 static const int ScoreScaleFactor = 10; 1476 1477 /// \Returns the look-ahead score, which tells us how much the sub-trees 1478 /// rooted at \p LHS and \p RHS match, the more they match the higher the 1479 /// score. This helps break ties in an informed way when we cannot decide on 1480 /// the order of the operands by just considering the immediate 1481 /// predecessors. 1482 int getLookAheadScore(Value *LHS, Value *RHS, ArrayRef<Value *> MainAltOps, 1483 int Lane, unsigned OpIdx, unsigned Idx, 1484 bool &IsUsed) { 1485 LookAheadHeuristics LookAhead(DL, SE, R, getNumLanes(), 1486 LookAheadMaxDepth); 1487 // Keep track of the instruction stack as we recurse into the operands 1488 // during the look-ahead score exploration. 
1489 int Score = 1490 LookAhead.getScoreAtLevelRec(LHS, RHS, /*U1=*/nullptr, /*U2=*/nullptr, 1491 /*CurrLevel=*/1, MainAltOps); 1492 if (Score) { 1493 int SplatScore = getSplatScore(Lane, OpIdx, Idx); 1494 if (Score <= -SplatScore) { 1495 // Set the minimum score for splat-like sequence to avoid setting 1496 // failed state. 1497 Score = 1; 1498 } else { 1499 Score += SplatScore; 1500 // Scale score to see the difference between different operands 1501 // and similar operands but all vectorized/not all vectorized 1502 // uses. It does not affect actual selection of the best 1503 // compatible operand in general, just allows to select the 1504 // operand with all vectorized uses. 1505 Score *= ScoreScaleFactor; 1506 Score += getExternalUseScore(Lane, OpIdx, Idx); 1507 IsUsed = true; 1508 } 1509 } 1510 return Score; 1511 } 1512 1513 /// Best defined scores per lanes between the passes. Used to choose the 1514 /// best operand (with the highest score) between the passes. 1515 /// The key - {Operand Index, Lane}. 1516 /// The value - the best score between the passes for the lane and the 1517 /// operand. 1518 SmallDenseMap<std::pair<unsigned, unsigned>, unsigned, 8> 1519 BestScoresPerLanes; 1520 1521 // Search all operands in Ops[*][Lane] for the one that matches best 1522 // Ops[OpIdx][LastLane] and return its opreand index. 1523 // If no good match can be found, return None. 1524 Optional<unsigned> getBestOperand(unsigned OpIdx, int Lane, int LastLane, 1525 ArrayRef<ReorderingMode> ReorderingModes, 1526 ArrayRef<Value *> MainAltOps) { 1527 unsigned NumOperands = getNumOperands(); 1528 1529 // The operand of the previous lane at OpIdx. 1530 Value *OpLastLane = getData(OpIdx, LastLane).V; 1531 1532 // Our strategy mode for OpIdx. 1533 ReorderingMode RMode = ReorderingModes[OpIdx]; 1534 if (RMode == ReorderingMode::Failed) 1535 return None; 1536 1537 // The linearized opcode of the operand at OpIdx, Lane. 1538 bool OpIdxAPO = getData(OpIdx, Lane).APO; 1539 1540 // The best operand index and its score. 1541 // Sometimes we have more than one option (e.g., Opcode and Undefs), so we 1542 // are using the score to differentiate between the two. 1543 struct BestOpData { 1544 Optional<unsigned> Idx = None; 1545 unsigned Score = 0; 1546 } BestOp; 1547 BestOp.Score = 1548 BestScoresPerLanes.try_emplace(std::make_pair(OpIdx, Lane), 0) 1549 .first->second; 1550 1551 // Track if the operand must be marked as used. If the operand is set to 1552 // Score 1 explicitly (because of non power-of-2 unique scalars, we may 1553 // want to reestimate the operands again on the following iterations). 1554 bool IsUsed = 1555 RMode == ReorderingMode::Splat || RMode == ReorderingMode::Constant; 1556 // Iterate through all unused operands and look for the best. 1557 for (unsigned Idx = 0; Idx != NumOperands; ++Idx) { 1558 // Get the operand at Idx and Lane. 1559 OperandData &OpData = getData(Idx, Lane); 1560 Value *Op = OpData.V; 1561 bool OpAPO = OpData.APO; 1562 1563 // Skip already selected operands. 1564 if (OpData.IsUsed) 1565 continue; 1566 1567 // Skip if we are trying to move the operand to a position with a 1568 // different opcode in the linearized tree form. This would break the 1569 // semantics. 1570 if (OpAPO != OpIdxAPO) 1571 continue; 1572 1573 // Look for an operand that matches the current mode. 1574 switch (RMode) { 1575 case ReorderingMode::Load: 1576 case ReorderingMode::Constant: 1577 case ReorderingMode::Opcode: { 1578 bool LeftToRight = Lane > LastLane; 1579 Value *OpLeft = (LeftToRight) ? 
OpLastLane : Op; 1580 Value *OpRight = (LeftToRight) ? Op : OpLastLane; 1581 int Score = getLookAheadScore(OpLeft, OpRight, MainAltOps, Lane, 1582 OpIdx, Idx, IsUsed); 1583 if (Score > static_cast<int>(BestOp.Score)) { 1584 BestOp.Idx = Idx; 1585 BestOp.Score = Score; 1586 BestScoresPerLanes[std::make_pair(OpIdx, Lane)] = Score; 1587 } 1588 break; 1589 } 1590 case ReorderingMode::Splat: 1591 if (Op == OpLastLane) 1592 BestOp.Idx = Idx; 1593 break; 1594 case ReorderingMode::Failed: 1595 llvm_unreachable("Not expected Failed reordering mode."); 1596 } 1597 } 1598 1599 if (BestOp.Idx) { 1600 getData(BestOp.Idx.getValue(), Lane).IsUsed = IsUsed; 1601 return BestOp.Idx; 1602 } 1603 // If we could not find a good match return None. 1604 return None; 1605 } 1606 1607 /// Helper for reorderOperandVecs. 1608 /// \returns the lane that we should start reordering from. This is the one 1609 /// which has the least number of operands that can freely move about or 1610 /// less profitable because it already has the most optimal set of operands. 1611 unsigned getBestLaneToStartReordering() const { 1612 unsigned Min = UINT_MAX; 1613 unsigned SameOpNumber = 0; 1614 // std::pair<unsigned, unsigned> is used to implement a simple voting 1615 // algorithm and choose the lane with the least number of operands that 1616 // can freely move about or less profitable because it already has the 1617 // most optimal set of operands. The first unsigned is a counter for 1618 // voting, the second unsigned is the counter of lanes with instructions 1619 // with same/alternate opcodes and same parent basic block. 1620 MapVector<unsigned, std::pair<unsigned, unsigned>> HashMap; 1621 // Try to be closer to the original results, if we have multiple lanes 1622 // with same cost. If 2 lanes have the same cost, use the one with the 1623 // lowest index. 1624 for (int I = getNumLanes(); I > 0; --I) { 1625 unsigned Lane = I - 1; 1626 OperandsOrderData NumFreeOpsHash = 1627 getMaxNumOperandsThatCanBeReordered(Lane); 1628 // Compare the number of operands that can move and choose the one with 1629 // the least number. 1630 if (NumFreeOpsHash.NumOfAPOs < Min) { 1631 Min = NumFreeOpsHash.NumOfAPOs; 1632 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent; 1633 HashMap.clear(); 1634 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1635 } else if (NumFreeOpsHash.NumOfAPOs == Min && 1636 NumFreeOpsHash.NumOpsWithSameOpcodeParent < SameOpNumber) { 1637 // Select the most optimal lane in terms of number of operands that 1638 // should be moved around. 1639 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent; 1640 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1641 } else if (NumFreeOpsHash.NumOfAPOs == Min && 1642 NumFreeOpsHash.NumOpsWithSameOpcodeParent == SameOpNumber) { 1643 auto It = HashMap.find(NumFreeOpsHash.Hash); 1644 if (It == HashMap.end()) 1645 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1646 else 1647 ++It->second.first; 1648 } 1649 } 1650 // Select the lane with the minimum counter. 1651 unsigned BestLane = 0; 1652 unsigned CntMin = UINT_MAX; 1653 for (const auto &Data : reverse(HashMap)) { 1654 if (Data.second.first < CntMin) { 1655 CntMin = Data.second.first; 1656 BestLane = Data.second.second; 1657 } 1658 } 1659 return BestLane; 1660 } 1661 1662 /// Data structure that helps to reorder operands. 1663 struct OperandsOrderData { 1664 /// The best number of operands with the same APOs, which can be 1665 /// reordered. 
1666 unsigned NumOfAPOs = UINT_MAX; 1667 /// Number of operands with the same/alternate instruction opcode and 1668 /// parent. 1669 unsigned NumOpsWithSameOpcodeParent = 0; 1670 /// Hash for the actual operands ordering. 1671 /// Used to count operands, actually their position id and opcode 1672 /// value. It is used in the voting mechanism to find the lane with the 1673 /// least number of operands that can freely move about or less profitable 1674 /// because it already has the most optimal set of operands. Can be 1675 /// replaced with SmallVector<unsigned> instead but hash code is faster 1676 /// and requires less memory. 1677 unsigned Hash = 0; 1678 }; 1679 /// \returns the maximum number of operands that are allowed to be reordered 1680 /// for \p Lane and the number of compatible instructions(with the same 1681 /// parent/opcode). This is used as a heuristic for selecting the first lane 1682 /// to start operand reordering. 1683 OperandsOrderData getMaxNumOperandsThatCanBeReordered(unsigned Lane) const { 1684 unsigned CntTrue = 0; 1685 unsigned NumOperands = getNumOperands(); 1686 // Operands with the same APO can be reordered. We therefore need to count 1687 // how many of them we have for each APO, like this: Cnt[APO] = x. 1688 // Since we only have two APOs, namely true and false, we can avoid using 1689 // a map. Instead we can simply count the number of operands that 1690 // correspond to one of them (in this case the 'true' APO), and calculate 1691 // the other by subtracting it from the total number of operands. 1692 // Operands with the same instruction opcode and parent are more 1693 // profitable since we don't need to move them in many cases, with a high 1694 // probability such lane already can be vectorized effectively. 1695 bool AllUndefs = true; 1696 unsigned NumOpsWithSameOpcodeParent = 0; 1697 Instruction *OpcodeI = nullptr; 1698 BasicBlock *Parent = nullptr; 1699 unsigned Hash = 0; 1700 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1701 const OperandData &OpData = getData(OpIdx, Lane); 1702 if (OpData.APO) 1703 ++CntTrue; 1704 // Use Boyer-Moore majority voting for finding the majority opcode and 1705 // the number of times it occurs. 1706 if (auto *I = dyn_cast<Instruction>(OpData.V)) { 1707 if (!OpcodeI || !getSameOpcode({OpcodeI, I}).getOpcode() || 1708 I->getParent() != Parent) { 1709 if (NumOpsWithSameOpcodeParent == 0) { 1710 NumOpsWithSameOpcodeParent = 1; 1711 OpcodeI = I; 1712 Parent = I->getParent(); 1713 } else { 1714 --NumOpsWithSameOpcodeParent; 1715 } 1716 } else { 1717 ++NumOpsWithSameOpcodeParent; 1718 } 1719 } 1720 Hash = hash_combine( 1721 Hash, hash_value((OpIdx + 1) * (OpData.V->getValueID() + 1))); 1722 AllUndefs = AllUndefs && isa<UndefValue>(OpData.V); 1723 } 1724 if (AllUndefs) 1725 return {}; 1726 OperandsOrderData Data; 1727 Data.NumOfAPOs = std::max(CntTrue, NumOperands - CntTrue); 1728 Data.NumOpsWithSameOpcodeParent = NumOpsWithSameOpcodeParent; 1729 Data.Hash = Hash; 1730 return Data; 1731 } 1732 1733 /// Go through the instructions in VL and append their operands. 
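/// For illustration (with hypothetical scalars a0, b0, a1, b1), given
/// VL = {a0 + b0, a1 + b1} this appends one operand vector per operand
/// index, so that OpsVec is indexed as OpsVec[OpIdx][Lane]:
/// \verbatim
///   OpsVec[0] = {a0, a1}   // first operand of each lane
///   OpsVec[1] = {b0, b1}   // second operand of each lane
/// \endverbatim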
1734 void appendOperandsOfVL(ArrayRef<Value *> VL) { 1735 assert(!VL.empty() && "Bad VL"); 1736 assert((empty() || VL.size() == getNumLanes()) && 1737 "Expected same number of lanes"); 1738 assert(isa<Instruction>(VL[0]) && "Expected instruction"); 1739 unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands(); 1740 OpsVec.resize(NumOperands); 1741 unsigned NumLanes = VL.size(); 1742 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1743 OpsVec[OpIdx].resize(NumLanes); 1744 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { 1745 assert(isa<Instruction>(VL[Lane]) && "Expected instruction"); 1746 // Our tree has just 3 nodes: the root and two operands. 1747 // It is therefore trivial to get the APO. We only need to check the 1748 // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or 1749 // RHS operand. The LHS operand of both add and sub is never attached 1750 // to an inversese operation in the linearized form, therefore its APO 1751 // is false. The RHS is true only if VL[Lane] is an inverse operation. 1752 1753 // Since operand reordering is performed on groups of commutative 1754 // operations or alternating sequences (e.g., +, -), we can safely 1755 // tell the inverse operations by checking commutativity. 1756 bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane])); 1757 bool APO = (OpIdx == 0) ? false : IsInverseOperation; 1758 OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx), 1759 APO, false}; 1760 } 1761 } 1762 } 1763 1764 /// \returns the number of operands. 1765 unsigned getNumOperands() const { return OpsVec.size(); } 1766 1767 /// \returns the number of lanes. 1768 unsigned getNumLanes() const { return OpsVec[0].size(); } 1769 1770 /// \returns the operand value at \p OpIdx and \p Lane. 1771 Value *getValue(unsigned OpIdx, unsigned Lane) const { 1772 return getData(OpIdx, Lane).V; 1773 } 1774 1775 /// \returns true if the data structure is empty. 1776 bool empty() const { return OpsVec.empty(); } 1777 1778 /// Clears the data. 1779 void clear() { OpsVec.clear(); } 1780 1781 /// \Returns true if there are enough operands identical to \p Op to fill 1782 /// the whole vector. 1783 /// Note: This modifies the 'IsUsed' flag, so a cleanUsed() must follow. 1784 bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) { 1785 bool OpAPO = getData(OpIdx, Lane).APO; 1786 for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) { 1787 if (Ln == Lane) 1788 continue; 1789 // This is set to true if we found a candidate for broadcast at Lane. 1790 bool FoundCandidate = false; 1791 for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) { 1792 OperandData &Data = getData(OpI, Ln); 1793 if (Data.APO != OpAPO || Data.IsUsed) 1794 continue; 1795 if (Data.V == Op) { 1796 FoundCandidate = true; 1797 Data.IsUsed = true; 1798 break; 1799 } 1800 } 1801 if (!FoundCandidate) 1802 return false; 1803 } 1804 return true; 1805 } 1806 1807 public: 1808 /// Initialize with all the operands of the instruction vector \p RootVL. 1809 VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL, 1810 ScalarEvolution &SE, const BoUpSLP &R) 1811 : DL(DL), SE(SE), R(R) { 1812 // Append all the operands of RootVL. 1813 appendOperandsOfVL(RootVL); 1814 } 1815 1816 /// \Returns a value vector with the operands across all lanes for the 1817 /// opearnd at \p OpIdx. 
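/// Continuing the hypothetical example above, with OpsVec[0] = {a0, a1} and
/// OpsVec[1] = {b0, b1}, getVL(1) returns the value list {b0, b1}.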
1818 ValueList getVL(unsigned OpIdx) const { 1819 ValueList OpVL(OpsVec[OpIdx].size()); 1820 assert(OpsVec[OpIdx].size() == getNumLanes() && 1821 "Expected same num of lanes across all operands"); 1822 for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane) 1823 OpVL[Lane] = OpsVec[OpIdx][Lane].V; 1824 return OpVL; 1825 } 1826 1827 // Performs operand reordering for 2 or more operands. 1828 // The original operands are in OrigOps[OpIdx][Lane]. 1829 // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'. 1830 void reorder() { 1831 unsigned NumOperands = getNumOperands(); 1832 unsigned NumLanes = getNumLanes(); 1833 // Each operand has its own mode. We are using this mode to help us select 1834 // the instructions for each lane, so that they match best with the ones 1835 // we have selected so far. 1836 SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands); 1837 1838 // This is a greedy single-pass algorithm. We are going over each lane 1839 // once and deciding on the best order right away with no back-tracking. 1840 // However, in order to increase its effectiveness, we start with the lane 1841 // that has operands that can move the least. For example, given the 1842 // following lanes: 1843 // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd 1844 // Lane 1 : A[1] = C[1] - B[1] // Visited 1st 1845 // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd 1846 // Lane 3 : A[3] = C[3] - B[3] // Visited 4th 1847 // we will start at Lane 1, since the operands of the subtraction cannot 1848 // be reordered. Then we will visit the rest of the lanes in a circular 1849 // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3. 1850 1851 // Find the first lane that we will start our search from. 1852 unsigned FirstLane = getBestLaneToStartReordering(); 1853 1854 // Initialize the modes. 1855 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1856 Value *OpLane0 = getValue(OpIdx, FirstLane); 1857 // Keep track if we have instructions with all the same opcode on one 1858 // side. 1859 if (isa<LoadInst>(OpLane0)) 1860 ReorderingModes[OpIdx] = ReorderingMode::Load; 1861 else if (isa<Instruction>(OpLane0)) { 1862 // Check if OpLane0 should be broadcast. 1863 if (shouldBroadcast(OpLane0, OpIdx, FirstLane)) 1864 ReorderingModes[OpIdx] = ReorderingMode::Splat; 1865 else 1866 ReorderingModes[OpIdx] = ReorderingMode::Opcode; 1867 } 1868 else if (isa<Constant>(OpLane0)) 1869 ReorderingModes[OpIdx] = ReorderingMode::Constant; 1870 else if (isa<Argument>(OpLane0)) 1871 // Our best hope is a Splat. It may save some cost in some cases. 1872 ReorderingModes[OpIdx] = ReorderingMode::Splat; 1873 else 1874 // NOTE: This should be unreachable. 1875 ReorderingModes[OpIdx] = ReorderingMode::Failed; 1876 } 1877 1878 // Check that we don't have same operands. No need to reorder if operands 1879 // are just perfect diamond or shuffled diamond match. Do not do it only 1880 // for possible broadcasts or non-power of 2 number of scalars (just for 1881 // now). 1882 auto &&SkipReordering = [this]() { 1883 SmallPtrSet<Value *, 4> UniqueValues; 1884 ArrayRef<OperandData> Op0 = OpsVec.front(); 1885 for (const OperandData &Data : Op0) 1886 UniqueValues.insert(Data.V); 1887 for (ArrayRef<OperandData> Op : drop_begin(OpsVec, 1)) { 1888 if (any_of(Op, [&UniqueValues](const OperandData &Data) { 1889 return !UniqueValues.contains(Data.V); 1890 })) 1891 return false; 1892 } 1893 // TODO: Check if we can remove a check for non-power-2 number of 1894 // scalars after full support of non-power-2 vectorization. 
1895 return UniqueValues.size() != 2 && isPowerOf2_32(UniqueValues.size()); 1896 }; 1897 1898 // If the initial strategy fails for any of the operand indexes, then we 1899 // perform reordering again in a second pass. This helps avoid assigning 1900 // high priority to the failed strategy, and should improve reordering for 1901 // the non-failed operand indexes. 1902 for (int Pass = 0; Pass != 2; ++Pass) { 1903 // Check if no need to reorder operands since they're are perfect or 1904 // shuffled diamond match. 1905 // Need to to do it to avoid extra external use cost counting for 1906 // shuffled matches, which may cause regressions. 1907 if (SkipReordering()) 1908 break; 1909 // Skip the second pass if the first pass did not fail. 1910 bool StrategyFailed = false; 1911 // Mark all operand data as free to use. 1912 clearUsed(); 1913 // We keep the original operand order for the FirstLane, so reorder the 1914 // rest of the lanes. We are visiting the nodes in a circular fashion, 1915 // using FirstLane as the center point and increasing the radius 1916 // distance. 1917 SmallVector<SmallVector<Value *, 2>> MainAltOps(NumOperands); 1918 for (unsigned I = 0; I < NumOperands; ++I) 1919 MainAltOps[I].push_back(getData(I, FirstLane).V); 1920 1921 for (unsigned Distance = 1; Distance != NumLanes; ++Distance) { 1922 // Visit the lane on the right and then the lane on the left. 1923 for (int Direction : {+1, -1}) { 1924 int Lane = FirstLane + Direction * Distance; 1925 if (Lane < 0 || Lane >= (int)NumLanes) 1926 continue; 1927 int LastLane = Lane - Direction; 1928 assert(LastLane >= 0 && LastLane < (int)NumLanes && 1929 "Out of bounds"); 1930 // Look for a good match for each operand. 1931 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1932 // Search for the operand that matches SortedOps[OpIdx][Lane-1]. 1933 Optional<unsigned> BestIdx = getBestOperand( 1934 OpIdx, Lane, LastLane, ReorderingModes, MainAltOps[OpIdx]); 1935 // By not selecting a value, we allow the operands that follow to 1936 // select a better matching value. We will get a non-null value in 1937 // the next run of getBestOperand(). 1938 if (BestIdx) { 1939 // Swap the current operand with the one returned by 1940 // getBestOperand(). 1941 swap(OpIdx, BestIdx.getValue(), Lane); 1942 } else { 1943 // We failed to find a best operand, set mode to 'Failed'. 1944 ReorderingModes[OpIdx] = ReorderingMode::Failed; 1945 // Enable the second pass. 1946 StrategyFailed = true; 1947 } 1948 // Try to get the alternate opcode and follow it during analysis. 1949 if (MainAltOps[OpIdx].size() != 2) { 1950 OperandData &AltOp = getData(OpIdx, Lane); 1951 InstructionsState OpS = 1952 getSameOpcode({MainAltOps[OpIdx].front(), AltOp.V}); 1953 if (OpS.getOpcode() && OpS.isAltShuffle()) 1954 MainAltOps[OpIdx].push_back(AltOp.V); 1955 } 1956 } 1957 } 1958 } 1959 // Skip second pass if the strategy did not fail. 
1960 if (!StrategyFailed) 1961 break; 1962 } 1963 } 1964 1965 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1966 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) { 1967 switch (RMode) { 1968 case ReorderingMode::Load: 1969 return "Load"; 1970 case ReorderingMode::Opcode: 1971 return "Opcode"; 1972 case ReorderingMode::Constant: 1973 return "Constant"; 1974 case ReorderingMode::Splat: 1975 return "Splat"; 1976 case ReorderingMode::Failed: 1977 return "Failed"; 1978 } 1979 llvm_unreachable("Unimplemented Reordering Type"); 1980 } 1981 1982 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode, 1983 raw_ostream &OS) { 1984 return OS << getModeStr(RMode); 1985 } 1986 1987 /// Debug print. 1988 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) { 1989 printMode(RMode, dbgs()); 1990 } 1991 1992 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) { 1993 return printMode(RMode, OS); 1994 } 1995 1996 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const { 1997 const unsigned Indent = 2; 1998 unsigned Cnt = 0; 1999 for (const OperandDataVec &OpDataVec : OpsVec) { 2000 OS << "Operand " << Cnt++ << "\n"; 2001 for (const OperandData &OpData : OpDataVec) { 2002 OS.indent(Indent) << "{"; 2003 if (Value *V = OpData.V) 2004 OS << *V; 2005 else 2006 OS << "null"; 2007 OS << ", APO:" << OpData.APO << "}\n"; 2008 } 2009 OS << "\n"; 2010 } 2011 return OS; 2012 } 2013 2014 /// Debug print. 2015 LLVM_DUMP_METHOD void dump() const { print(dbgs()); } 2016 #endif 2017 }; 2018 2019 /// Evaluate each pair in \p Candidates and return index into \p Candidates 2020 /// for a pair which have highest score deemed to have best chance to form 2021 /// root of profitable tree to vectorize. Return None if no candidate scored 2022 /// above the LookAheadHeuristics::ScoreFail. 2023 /// \param Limit Lower limit of the cost, considered to be good enough score. 2024 Optional<int> 2025 findBestRootPair(ArrayRef<std::pair<Value *, Value *>> Candidates, 2026 int Limit = LookAheadHeuristics::ScoreFail) { 2027 LookAheadHeuristics LookAhead(*DL, *SE, *this, /*NumLanes=*/2, 2028 RootLookAheadMaxDepth); 2029 int BestScore = Limit; 2030 Optional<int> Index = None; 2031 for (int I : seq<int>(0, Candidates.size())) { 2032 int Score = LookAhead.getScoreAtLevelRec(Candidates[I].first, 2033 Candidates[I].second, 2034 /*U1=*/nullptr, /*U2=*/nullptr, 2035 /*Level=*/1, None); 2036 if (Score > BestScore) { 2037 BestScore = Score; 2038 Index = I; 2039 } 2040 } 2041 return Index; 2042 } 2043 2044 /// Checks if the instruction is marked for deletion. 2045 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); } 2046 2047 /// Removes an instruction from its block and eventually deletes it. 2048 /// It's like Instruction::eraseFromParent() except that the actual deletion 2049 /// is delayed until BoUpSLP is destructed. 2050 void eraseInstruction(Instruction *I) { 2051 DeletedInstructions.insert(I); 2052 } 2053 2054 /// Checks if the instruction was already analyzed for being possible 2055 /// reduction root. 2056 bool isAnalyzedReductionRoot(Instruction *I) const { 2057 return AnalyzedReductionsRoots.count(I); 2058 } 2059 /// Register given instruction as already analyzed for being possible 2060 /// reduction root. 2061 void analyzedReductionRoot(Instruction *I) { 2062 AnalyzedReductionsRoots.insert(I); 2063 } 2064 /// Checks if the provided list of reduced values was checked already for 2065 /// vectorization. 
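/// Note that the lookup is hash-based: the list is identified by
/// hash_value(\p VL) (see analyzedReductionVals() below), so this is a fast
/// membership test rather than a comparison of the individual values.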
2066 bool areAnalyzedReductionVals(ArrayRef<Value *> VL) {
2067 return AnalyzedReductionVals.contains(hash_value(VL));
2068 }
2069 /// Adds the list of reduced values to list of already checked values for the
2070 /// vectorization.
2071 void analyzedReductionVals(ArrayRef<Value *> VL) {
2072 AnalyzedReductionVals.insert(hash_value(VL));
2073 }
2074 /// Clear the list of the analyzed reduction root instructions.
2075 void clearReductionData() {
2076 AnalyzedReductionsRoots.clear();
2077 AnalyzedReductionVals.clear();
2078 }
2079 /// Checks if the given value is gathered in one of the nodes.
2080 bool isAnyGathered(const SmallDenseSet<Value *> &Vals) const {
2081 return any_of(MustGather, [&](Value *V) { return Vals.contains(V); });
2082 }
2083
2084 ~BoUpSLP();
2085
2086 private:
2087 /// Check if the operands on the edges \p Edges of the \p UserTE allow
2088 /// reordering (i.e. the operands can be reordered because they have only one
2089 /// user and are reorderable).
2090 /// \param ReorderableGathers List of all gather nodes that require reordering
2091 /// (e.g., gather of extractelements or partially vectorizable loads).
2092 /// \param GatherOps List of gather operand nodes for \p UserTE that require
2093 /// reordering, subset of \p NonVectorized.
2094 bool
2095 canReorderOperands(TreeEntry *UserTE,
2096 SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
2097 ArrayRef<TreeEntry *> ReorderableGathers,
2098 SmallVectorImpl<TreeEntry *> &GatherOps);
2099
2100 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2101 /// if any. If it is not vectorized (gather node), returns nullptr.
2102 TreeEntry *getVectorizedOperand(TreeEntry *UserTE, unsigned OpIdx) {
2103 ArrayRef<Value *> VL = UserTE->getOperand(OpIdx);
2104 TreeEntry *TE = nullptr;
2105 const auto *It = find_if(VL, [this, &TE](Value *V) {
2106 TE = getTreeEntry(V);
2107 return TE;
2108 });
2109 if (It != VL.end() && TE->isSame(VL))
2110 return TE;
2111 return nullptr;
2112 }
2113
2114 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2115 /// if any. If it is not vectorized (gather node), returns nullptr.
2116 const TreeEntry *getVectorizedOperand(const TreeEntry *UserTE,
2117 unsigned OpIdx) const {
2118 return const_cast<BoUpSLP *>(this)->getVectorizedOperand(
2119 const_cast<TreeEntry *>(UserTE), OpIdx);
2120 }
2121
2122 /// Checks if all users of \p I are part of the vectorization tree.
2123 bool areAllUsersVectorized(Instruction *I,
2124 ArrayRef<Value *> VectorizedVals) const;
2125
2126 /// \returns the cost of the vectorizable entry.
2127 InstructionCost getEntryCost(const TreeEntry *E,
2128 ArrayRef<Value *> VectorizedVals);
2129
2130 /// This is the recursive part of buildTree.
2131 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
2132 const EdgeInfo &EI);
2133
2134 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
2135 /// be vectorized to use the original vector (or aggregate "bitcast" to a
2136 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
2137 /// returns false, setting \p CurrentOrder to either an empty vector or a
2138 /// non-identity permutation that allows reusing the extract instructions.
2139 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
2140 SmallVectorImpl<unsigned> &CurrentOrder) const;
2141
2142 /// Vectorize a single entry in the tree.
2143 Value *vectorizeTree(TreeEntry *E);
2144
2145 /// Vectorize a single entry in the tree, starting in \p VL.
2146 Value *vectorizeTree(ArrayRef<Value *> VL);
2147
2148 /// Create a new vector from a list of scalar values. Produces a sequence
2149 /// which exploits values reused across lanes, and arranges the inserts
2150 /// for ease of later optimization.
2151 Value *createBuildVector(ArrayRef<Value *> VL);
2152
2153 /// \returns the scalarization cost for this type. Scalarization in this
2154 /// context means the creation of vectors from a group of scalars. If \p
2155 /// NeedToShuffle is true, a cost for reshuffling some of the vector
2156 /// elements is added as well.
2157 InstructionCost getGatherCost(FixedVectorType *Ty,
2158 const APInt &ShuffledIndices,
2159 bool NeedToShuffle) const;
2160
2161 /// Checks if the gathered \p VL can be represented as shuffle(s) of previous
2162 /// tree entries.
2163 /// \returns ShuffleKind, if gathered values can be represented as shuffles of
2164 /// previous tree entries. \p Mask is filled with the shuffle mask.
2165 Optional<TargetTransformInfo::ShuffleKind>
2166 isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
2167 SmallVectorImpl<const TreeEntry *> &Entries);
2168
2169 /// \returns the scalarization cost for this list of values. Assuming that
2170 /// this subtree gets vectorized, we may need to extract the values from the
2171 /// roots. This method calculates the cost of extracting the values.
2172 InstructionCost getGatherCost(ArrayRef<Value *> VL) const;
2173
2174 /// Set the Builder insert point to one after the last instruction in
2175 /// the bundle.
2176 void setInsertPointAfterBundle(const TreeEntry *E);
2177
2178 /// \returns a vector from a collection of scalars in \p VL.
2179 Value *gather(ArrayRef<Value *> VL);
2180
2181 /// \returns whether the VectorizableTree is fully vectorizable and will
2182 /// be beneficial even if the tree height is tiny.
2183 bool isFullyVectorizableTinyTree(bool ForReduction) const;
2184
2185 /// Reorder commutative or alt operands to get a better probability of
2186 /// generating vectorized code.
2187 static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
2188 SmallVectorImpl<Value *> &Left,
2189 SmallVectorImpl<Value *> &Right,
2190 const DataLayout &DL,
2191 ScalarEvolution &SE,
2192 const BoUpSLP &R);
2193
2194 /// Helper for `findExternalStoreUsersReorderIndices()`. It iterates over the
2195 /// users of \p TE and collects the stores. It returns the map from the store
2196 /// pointers to the collected stores.
2197 DenseMap<Value *, SmallVector<StoreInst *, 4>>
2198 collectUserStores(const BoUpSLP::TreeEntry *TE) const;
2199
2200 /// Helper for `findExternalStoreUsersReorderIndices()`. It checks if the
2201 /// stores in \p StoresVec can form a vector instruction. If so it returns true
2202 /// and populates \p ReorderIndices with the shuffle indices of the stores
2203 /// when compared to the sorted vector.
2204 bool CanFormVector(const SmallVector<StoreInst *, 4> &StoresVec,
2205 OrdersType &ReorderIndices) const;
2206
2207 /// Iterates through the users of \p TE, looking for scalar stores that can be
2208 /// potentially vectorized in a future SLP-tree. If found, it keeps track of
2209 /// their order and builds an order index vector for each store bundle. It
2210 /// returns all these order vectors found.
2211 /// We run this after the tree has formed, otherwise we may come across user
2212 /// instructions that are not yet in the tree.
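/// For illustration (hypothetical lanes): if the scalars of \p TE are used by
/// stores to consecutive addresses, but the address order corresponds to
/// lanes {2, 0, 3, 1}, one order vector describing that permutation is
/// recorded for this store bundle and returned here.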
2213 SmallVector<OrdersType, 1> 2214 findExternalStoreUsersReorderIndices(TreeEntry *TE) const; 2215 2216 struct TreeEntry { 2217 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>; 2218 TreeEntry(VecTreeTy &Container) : Container(Container) {} 2219 2220 /// \returns true if the scalars in VL are equal to this entry. 2221 bool isSame(ArrayRef<Value *> VL) const { 2222 auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) { 2223 if (Mask.size() != VL.size() && VL.size() == Scalars.size()) 2224 return std::equal(VL.begin(), VL.end(), Scalars.begin()); 2225 return VL.size() == Mask.size() && 2226 std::equal(VL.begin(), VL.end(), Mask.begin(), 2227 [Scalars](Value *V, int Idx) { 2228 return (isa<UndefValue>(V) && 2229 Idx == UndefMaskElem) || 2230 (Idx != UndefMaskElem && V == Scalars[Idx]); 2231 }); 2232 }; 2233 if (!ReorderIndices.empty()) { 2234 // TODO: implement matching if the nodes are just reordered, still can 2235 // treat the vector as the same if the list of scalars matches VL 2236 // directly, without reordering. 2237 SmallVector<int> Mask; 2238 inversePermutation(ReorderIndices, Mask); 2239 if (VL.size() == Scalars.size()) 2240 return IsSame(Scalars, Mask); 2241 if (VL.size() == ReuseShuffleIndices.size()) { 2242 ::addMask(Mask, ReuseShuffleIndices); 2243 return IsSame(Scalars, Mask); 2244 } 2245 return false; 2246 } 2247 return IsSame(Scalars, ReuseShuffleIndices); 2248 } 2249 2250 /// \returns true if current entry has same operands as \p TE. 2251 bool hasEqualOperands(const TreeEntry &TE) const { 2252 if (TE.getNumOperands() != getNumOperands()) 2253 return false; 2254 SmallBitVector Used(getNumOperands()); 2255 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) { 2256 unsigned PrevCount = Used.count(); 2257 for (unsigned K = 0; K < E; ++K) { 2258 if (Used.test(K)) 2259 continue; 2260 if (getOperand(K) == TE.getOperand(I)) { 2261 Used.set(K); 2262 break; 2263 } 2264 } 2265 // Check if we actually found the matching operand. 2266 if (PrevCount == Used.count()) 2267 return false; 2268 } 2269 return true; 2270 } 2271 2272 /// \return Final vectorization factor for the node. Defined by the total 2273 /// number of vectorized scalars, including those, used several times in the 2274 /// entry and counted in the \a ReuseShuffleIndices, if any. 2275 unsigned getVectorFactor() const { 2276 if (!ReuseShuffleIndices.empty()) 2277 return ReuseShuffleIndices.size(); 2278 return Scalars.size(); 2279 }; 2280 2281 /// A vector of scalars. 2282 ValueList Scalars; 2283 2284 /// The Scalars are vectorized into this value. It is initialized to Null. 2285 Value *VectorizedValue = nullptr; 2286 2287 /// Do we need to gather this sequence or vectorize it 2288 /// (either with vector instruction or with scatter/gather 2289 /// intrinsics for store/load)? 2290 enum EntryState { Vectorize, ScatterVectorize, NeedToGather }; 2291 EntryState State; 2292 2293 /// Does this sequence require some shuffling? 2294 SmallVector<int, 4> ReuseShuffleIndices; 2295 2296 /// Does this entry require reordering? 2297 SmallVector<unsigned, 4> ReorderIndices; 2298 2299 /// Points back to the VectorizableTree. 2300 /// 2301 /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has 2302 /// to be a pointer and needs to be able to initialize the child iterator. 2303 /// Thus we need a reference back to the container to translate the indices 2304 /// to entries. 2305 VecTreeTy &Container; 2306 2307 /// The TreeEntry index containing the user of this entry. 
We can actually 2308 /// have multiple users so the data structure is not truly a tree. 2309 SmallVector<EdgeInfo, 1> UserTreeIndices; 2310 2311 /// The index of this treeEntry in VectorizableTree. 2312 int Idx = -1; 2313 2314 private: 2315 /// The operands of each instruction in each lane Operands[op_index][lane]. 2316 /// Note: This helps avoid the replication of the code that performs the 2317 /// reordering of operands during buildTree_rec() and vectorizeTree(). 2318 SmallVector<ValueList, 2> Operands; 2319 2320 /// The main/alternate instruction. 2321 Instruction *MainOp = nullptr; 2322 Instruction *AltOp = nullptr; 2323 2324 public: 2325 /// Set this bundle's \p OpIdx'th operand to \p OpVL. 2326 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) { 2327 if (Operands.size() < OpIdx + 1) 2328 Operands.resize(OpIdx + 1); 2329 assert(Operands[OpIdx].empty() && "Already resized?"); 2330 assert(OpVL.size() <= Scalars.size() && 2331 "Number of operands is greater than the number of scalars."); 2332 Operands[OpIdx].resize(OpVL.size()); 2333 copy(OpVL, Operands[OpIdx].begin()); 2334 } 2335 2336 /// Set the operands of this bundle in their original order. 2337 void setOperandsInOrder() { 2338 assert(Operands.empty() && "Already initialized?"); 2339 auto *I0 = cast<Instruction>(Scalars[0]); 2340 Operands.resize(I0->getNumOperands()); 2341 unsigned NumLanes = Scalars.size(); 2342 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands(); 2343 OpIdx != NumOperands; ++OpIdx) { 2344 Operands[OpIdx].resize(NumLanes); 2345 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { 2346 auto *I = cast<Instruction>(Scalars[Lane]); 2347 assert(I->getNumOperands() == NumOperands && 2348 "Expected same number of operands"); 2349 Operands[OpIdx][Lane] = I->getOperand(OpIdx); 2350 } 2351 } 2352 } 2353 2354 /// Reorders operands of the node to the given mask \p Mask. 2355 void reorderOperands(ArrayRef<int> Mask) { 2356 for (ValueList &Operand : Operands) 2357 reorderScalars(Operand, Mask); 2358 } 2359 2360 /// \returns the \p OpIdx operand of this TreeEntry. 2361 ValueList &getOperand(unsigned OpIdx) { 2362 assert(OpIdx < Operands.size() && "Off bounds"); 2363 return Operands[OpIdx]; 2364 } 2365 2366 /// \returns the \p OpIdx operand of this TreeEntry. 2367 ArrayRef<Value *> getOperand(unsigned OpIdx) const { 2368 assert(OpIdx < Operands.size() && "Off bounds"); 2369 return Operands[OpIdx]; 2370 } 2371 2372 /// \returns the number of operands. 2373 unsigned getNumOperands() const { return Operands.size(); } 2374 2375 /// \return the single \p OpIdx operand. 2376 Value *getSingleOperand(unsigned OpIdx) const { 2377 assert(OpIdx < Operands.size() && "Off bounds"); 2378 assert(!Operands[OpIdx].empty() && "No operand available"); 2379 return Operands[OpIdx][0]; 2380 } 2381 2382 /// Some of the instructions in the list have alternate opcodes. 2383 bool isAltShuffle() const { return MainOp != AltOp; } 2384 2385 bool isOpcodeOrAlt(Instruction *I) const { 2386 unsigned CheckedOpcode = I->getOpcode(); 2387 return (getOpcode() == CheckedOpcode || 2388 getAltOpcode() == CheckedOpcode); 2389 } 2390 2391 /// Chooses the correct key for scheduling data. If \p Op has the same (or 2392 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is 2393 /// \p OpValue. 
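/// For example, in a bundle whose MainOp is an 'add' and whose AltOp is a
/// 'sub', isOneOf(Op) returns \p Op for any add or sub instruction and falls
/// back to MainOp for everything else.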
2394 Value *isOneOf(Value *Op) const { 2395 auto *I = dyn_cast<Instruction>(Op); 2396 if (I && isOpcodeOrAlt(I)) 2397 return Op; 2398 return MainOp; 2399 } 2400 2401 void setOperations(const InstructionsState &S) { 2402 MainOp = S.MainOp; 2403 AltOp = S.AltOp; 2404 } 2405 2406 Instruction *getMainOp() const { 2407 return MainOp; 2408 } 2409 2410 Instruction *getAltOp() const { 2411 return AltOp; 2412 } 2413 2414 /// The main/alternate opcodes for the list of instructions. 2415 unsigned getOpcode() const { 2416 return MainOp ? MainOp->getOpcode() : 0; 2417 } 2418 2419 unsigned getAltOpcode() const { 2420 return AltOp ? AltOp->getOpcode() : 0; 2421 } 2422 2423 /// When ReuseReorderShuffleIndices is empty it just returns position of \p 2424 /// V within vector of Scalars. Otherwise, try to remap on its reuse index. 2425 int findLaneForValue(Value *V) const { 2426 unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V)); 2427 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 2428 if (!ReorderIndices.empty()) 2429 FoundLane = ReorderIndices[FoundLane]; 2430 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 2431 if (!ReuseShuffleIndices.empty()) { 2432 FoundLane = std::distance(ReuseShuffleIndices.begin(), 2433 find(ReuseShuffleIndices, FoundLane)); 2434 } 2435 return FoundLane; 2436 } 2437 2438 #ifndef NDEBUG 2439 /// Debug printer. 2440 LLVM_DUMP_METHOD void dump() const { 2441 dbgs() << Idx << ".\n"; 2442 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) { 2443 dbgs() << "Operand " << OpI << ":\n"; 2444 for (const Value *V : Operands[OpI]) 2445 dbgs().indent(2) << *V << "\n"; 2446 } 2447 dbgs() << "Scalars: \n"; 2448 for (Value *V : Scalars) 2449 dbgs().indent(2) << *V << "\n"; 2450 dbgs() << "State: "; 2451 switch (State) { 2452 case Vectorize: 2453 dbgs() << "Vectorize\n"; 2454 break; 2455 case ScatterVectorize: 2456 dbgs() << "ScatterVectorize\n"; 2457 break; 2458 case NeedToGather: 2459 dbgs() << "NeedToGather\n"; 2460 break; 2461 } 2462 dbgs() << "MainOp: "; 2463 if (MainOp) 2464 dbgs() << *MainOp << "\n"; 2465 else 2466 dbgs() << "NULL\n"; 2467 dbgs() << "AltOp: "; 2468 if (AltOp) 2469 dbgs() << *AltOp << "\n"; 2470 else 2471 dbgs() << "NULL\n"; 2472 dbgs() << "VectorizedValue: "; 2473 if (VectorizedValue) 2474 dbgs() << *VectorizedValue << "\n"; 2475 else 2476 dbgs() << "NULL\n"; 2477 dbgs() << "ReuseShuffleIndices: "; 2478 if (ReuseShuffleIndices.empty()) 2479 dbgs() << "Empty"; 2480 else 2481 for (int ReuseIdx : ReuseShuffleIndices) 2482 dbgs() << ReuseIdx << ", "; 2483 dbgs() << "\n"; 2484 dbgs() << "ReorderIndices: "; 2485 for (unsigned ReorderIdx : ReorderIndices) 2486 dbgs() << ReorderIdx << ", "; 2487 dbgs() << "\n"; 2488 dbgs() << "UserTreeIndices: "; 2489 for (const auto &EInfo : UserTreeIndices) 2490 dbgs() << EInfo << ", "; 2491 dbgs() << "\n"; 2492 } 2493 #endif 2494 }; 2495 2496 #ifndef NDEBUG 2497 void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost, 2498 InstructionCost VecCost, 2499 InstructionCost ScalarCost) const { 2500 dbgs() << "SLP: Calculated costs for Tree:\n"; E->dump(); 2501 dbgs() << "SLP: Costs:\n"; 2502 dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n"; 2503 dbgs() << "SLP: VectorCost = " << VecCost << "\n"; 2504 dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n"; 2505 dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = " << 2506 ReuseShuffleCost + VecCost - ScalarCost << "\n"; 2507 } 2508 #endif 2509 2510 /// Create a new VectorizableTree entry. 
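/// This overload derives the entry state from \p Bundle: a present bundle
/// produces a Vectorize entry, an absent one a NeedToGather entry. Use the
/// overload below to request a specific TreeEntry::EntryState (e.g.,
/// ScatterVectorize) explicitly.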
2511 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle, 2512 const InstructionsState &S, 2513 const EdgeInfo &UserTreeIdx, 2514 ArrayRef<int> ReuseShuffleIndices = None, 2515 ArrayRef<unsigned> ReorderIndices = None) { 2516 TreeEntry::EntryState EntryState = 2517 Bundle ? TreeEntry::Vectorize : TreeEntry::NeedToGather; 2518 return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx, 2519 ReuseShuffleIndices, ReorderIndices); 2520 } 2521 2522 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, 2523 TreeEntry::EntryState EntryState, 2524 Optional<ScheduleData *> Bundle, 2525 const InstructionsState &S, 2526 const EdgeInfo &UserTreeIdx, 2527 ArrayRef<int> ReuseShuffleIndices = None, 2528 ArrayRef<unsigned> ReorderIndices = None) { 2529 assert(((!Bundle && EntryState == TreeEntry::NeedToGather) || 2530 (Bundle && EntryState != TreeEntry::NeedToGather)) && 2531 "Need to vectorize gather entry?"); 2532 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree)); 2533 TreeEntry *Last = VectorizableTree.back().get(); 2534 Last->Idx = VectorizableTree.size() - 1; 2535 Last->State = EntryState; 2536 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(), 2537 ReuseShuffleIndices.end()); 2538 if (ReorderIndices.empty()) { 2539 Last->Scalars.assign(VL.begin(), VL.end()); 2540 Last->setOperations(S); 2541 } else { 2542 // Reorder scalars and build final mask. 2543 Last->Scalars.assign(VL.size(), nullptr); 2544 transform(ReorderIndices, Last->Scalars.begin(), 2545 [VL](unsigned Idx) -> Value * { 2546 if (Idx >= VL.size()) 2547 return UndefValue::get(VL.front()->getType()); 2548 return VL[Idx]; 2549 }); 2550 InstructionsState S = getSameOpcode(Last->Scalars); 2551 Last->setOperations(S); 2552 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end()); 2553 } 2554 if (Last->State != TreeEntry::NeedToGather) { 2555 for (Value *V : VL) { 2556 assert(!getTreeEntry(V) && "Scalar already in tree!"); 2557 ScalarToTreeEntry[V] = Last; 2558 } 2559 // Update the scheduler bundle to point to this TreeEntry. 2560 ScheduleData *BundleMember = Bundle.getValue(); 2561 assert((BundleMember || isa<PHINode>(S.MainOp) || 2562 isVectorLikeInstWithConstOps(S.MainOp) || 2563 doesNotNeedToSchedule(VL)) && 2564 "Bundle and VL out of sync"); 2565 if (BundleMember) { 2566 for (Value *V : VL) { 2567 if (doesNotNeedToBeScheduled(V)) 2568 continue; 2569 assert(BundleMember && "Unexpected end of bundle."); 2570 BundleMember->TE = Last; 2571 BundleMember = BundleMember->NextInBundle; 2572 } 2573 } 2574 assert(!BundleMember && "Bundle and VL out of sync"); 2575 } else { 2576 MustGather.insert(VL.begin(), VL.end()); 2577 } 2578 2579 if (UserTreeIdx.UserTE) 2580 Last->UserTreeIndices.push_back(UserTreeIdx); 2581 2582 return Last; 2583 } 2584 2585 /// -- Vectorization State -- 2586 /// Holds all of the tree entries. 2587 TreeEntry::VecTreeTy VectorizableTree; 2588 2589 #ifndef NDEBUG 2590 /// Debug printer. 2591 LLVM_DUMP_METHOD void dumpVectorizableTree() const { 2592 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) { 2593 VectorizableTree[Id]->dump(); 2594 dbgs() << "\n"; 2595 } 2596 } 2597 #endif 2598 2599 TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); } 2600 2601 const TreeEntry *getTreeEntry(Value *V) const { 2602 return ScalarToTreeEntry.lookup(V); 2603 } 2604 2605 /// Maps a specific scalar to its tree entry. 2606 SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry; 2607 2608 /// Maps a value to the proposed vectorizable size. 
2609 SmallDenseMap<Value *, unsigned> InstrElementSize; 2610 2611 /// A list of scalars that we found that we need to keep as scalars. 2612 ValueSet MustGather; 2613 2614 /// This POD struct describes one external user in the vectorized tree. 2615 struct ExternalUser { 2616 ExternalUser(Value *S, llvm::User *U, int L) 2617 : Scalar(S), User(U), Lane(L) {} 2618 2619 // Which scalar in our function. 2620 Value *Scalar; 2621 2622 // Which user that uses the scalar. 2623 llvm::User *User; 2624 2625 // Which lane does the scalar belong to. 2626 int Lane; 2627 }; 2628 using UserList = SmallVector<ExternalUser, 16>; 2629 2630 /// Checks if two instructions may access the same memory. 2631 /// 2632 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it 2633 /// is invariant in the calling loop. 2634 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1, 2635 Instruction *Inst2) { 2636 // First check if the result is already in the cache. 2637 AliasCacheKey key = std::make_pair(Inst1, Inst2); 2638 Optional<bool> &result = AliasCache[key]; 2639 if (result.hasValue()) { 2640 return result.getValue(); 2641 } 2642 bool aliased = true; 2643 if (Loc1.Ptr && isSimple(Inst1)) 2644 aliased = isModOrRefSet(BatchAA.getModRefInfo(Inst2, Loc1)); 2645 // Store the result in the cache. 2646 result = aliased; 2647 return aliased; 2648 } 2649 2650 using AliasCacheKey = std::pair<Instruction *, Instruction *>; 2651 2652 /// Cache for alias results. 2653 /// TODO: consider moving this to the AliasAnalysis itself. 2654 DenseMap<AliasCacheKey, Optional<bool>> AliasCache; 2655 2656 // Cache for pointerMayBeCaptured calls inside AA. This is preserved 2657 // globally through SLP because we don't perform any action which 2658 // invalidates capture results. 2659 BatchAAResults BatchAA; 2660 2661 /// Temporary store for deleted instructions. Instructions will be deleted 2662 /// eventually when the BoUpSLP is destructed. The deferral is required to 2663 /// ensure that there are no incorrect collisions in the AliasCache, which 2664 /// can happen if a new instruction is allocated at the same address as a 2665 /// previously deleted instruction. 2666 DenseSet<Instruction *> DeletedInstructions; 2667 2668 /// Set of the instruction, being analyzed already for reductions. 2669 SmallPtrSet<Instruction *, 16> AnalyzedReductionsRoots; 2670 2671 /// Set of hashes for the list of reduction values already being analyzed. 2672 DenseSet<size_t> AnalyzedReductionVals; 2673 2674 /// A list of values that need to extracted out of the tree. 2675 /// This list holds pairs of (Internal Scalar : External User). External User 2676 /// can be nullptr, it means that this Internal Scalar will be used later, 2677 /// after vectorization. 2678 UserList ExternalUses; 2679 2680 /// Values used only by @llvm.assume calls. 2681 SmallPtrSet<const Value *, 32> EphValues; 2682 2683 /// Holds all of the instructions that we gathered. 2684 SetVector<Instruction *> GatherShuffleSeq; 2685 2686 /// A list of blocks that we are going to CSE. 2687 SetVector<BasicBlock *> CSEBlocks; 2688 2689 /// Contains all scheduling relevant data for an instruction. 2690 /// A ScheduleData either represents a single instruction or a member of an 2691 /// instruction bundle (= a group of instructions which is combined into a 2692 /// vector instruction). 2693 struct ScheduleData { 2694 // The initial value for the dependency counters. It means that the 2695 // dependencies are not calculated yet. 
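// Both Dependencies and UnscheduledDeps below are initialized to InvalidDeps,
// i.e. the counters are meaningless until the dependencies have been
// calculated (see hasValidDependencies()).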
2696 enum { InvalidDeps = -1 }; 2697 2698 ScheduleData() = default; 2699 2700 void init(int BlockSchedulingRegionID, Value *OpVal) { 2701 FirstInBundle = this; 2702 NextInBundle = nullptr; 2703 NextLoadStore = nullptr; 2704 IsScheduled = false; 2705 SchedulingRegionID = BlockSchedulingRegionID; 2706 clearDependencies(); 2707 OpValue = OpVal; 2708 TE = nullptr; 2709 } 2710 2711 /// Verify basic self consistency properties 2712 void verify() { 2713 if (hasValidDependencies()) { 2714 assert(UnscheduledDeps <= Dependencies && "invariant"); 2715 } else { 2716 assert(UnscheduledDeps == Dependencies && "invariant"); 2717 } 2718 2719 if (IsScheduled) { 2720 assert(isSchedulingEntity() && 2721 "unexpected scheduled state"); 2722 for (const ScheduleData *BundleMember = this; BundleMember; 2723 BundleMember = BundleMember->NextInBundle) { 2724 assert(BundleMember->hasValidDependencies() && 2725 BundleMember->UnscheduledDeps == 0 && 2726 "unexpected scheduled state"); 2727 assert((BundleMember == this || !BundleMember->IsScheduled) && 2728 "only bundle is marked scheduled"); 2729 } 2730 } 2731 2732 assert(Inst->getParent() == FirstInBundle->Inst->getParent() && 2733 "all bundle members must be in same basic block"); 2734 } 2735 2736 /// Returns true if the dependency information has been calculated. 2737 /// Note that depenendency validity can vary between instructions within 2738 /// a single bundle. 2739 bool hasValidDependencies() const { return Dependencies != InvalidDeps; } 2740 2741 /// Returns true for single instructions and for bundle representatives 2742 /// (= the head of a bundle). 2743 bool isSchedulingEntity() const { return FirstInBundle == this; } 2744 2745 /// Returns true if it represents an instruction bundle and not only a 2746 /// single instruction. 2747 bool isPartOfBundle() const { 2748 return NextInBundle != nullptr || FirstInBundle != this || TE; 2749 } 2750 2751 /// Returns true if it is ready for scheduling, i.e. it has no more 2752 /// unscheduled depending instructions/bundles. 2753 bool isReady() const { 2754 assert(isSchedulingEntity() && 2755 "can't consider non-scheduling entity for ready list"); 2756 return unscheduledDepsInBundle() == 0 && !IsScheduled; 2757 } 2758 2759 /// Modifies the number of unscheduled dependencies for this instruction, 2760 /// and returns the number of remaining dependencies for the containing 2761 /// bundle. 2762 int incrementUnscheduledDeps(int Incr) { 2763 assert(hasValidDependencies() && 2764 "increment of unscheduled deps would be meaningless"); 2765 UnscheduledDeps += Incr; 2766 return FirstInBundle->unscheduledDepsInBundle(); 2767 } 2768 2769 /// Sets the number of unscheduled dependencies to the number of 2770 /// dependencies. 2771 void resetUnscheduledDeps() { 2772 UnscheduledDeps = Dependencies; 2773 } 2774 2775 /// Clears all dependency information. 
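/// After this call hasValidDependencies() returns false again, because
/// Dependencies (and, via resetUnscheduledDeps(), UnscheduledDeps) are set
/// back to InvalidDeps and the memory/control dependency lists are emptied.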
2776 void clearDependencies() { 2777 Dependencies = InvalidDeps; 2778 resetUnscheduledDeps(); 2779 MemoryDependencies.clear(); 2780 ControlDependencies.clear(); 2781 } 2782 2783 int unscheduledDepsInBundle() const { 2784 assert(isSchedulingEntity() && "only meaningful on the bundle"); 2785 int Sum = 0; 2786 for (const ScheduleData *BundleMember = this; BundleMember; 2787 BundleMember = BundleMember->NextInBundle) { 2788 if (BundleMember->UnscheduledDeps == InvalidDeps) 2789 return InvalidDeps; 2790 Sum += BundleMember->UnscheduledDeps; 2791 } 2792 return Sum; 2793 } 2794 2795 void dump(raw_ostream &os) const { 2796 if (!isSchedulingEntity()) { 2797 os << "/ " << *Inst; 2798 } else if (NextInBundle) { 2799 os << '[' << *Inst; 2800 ScheduleData *SD = NextInBundle; 2801 while (SD) { 2802 os << ';' << *SD->Inst; 2803 SD = SD->NextInBundle; 2804 } 2805 os << ']'; 2806 } else { 2807 os << *Inst; 2808 } 2809 } 2810 2811 Instruction *Inst = nullptr; 2812 2813 /// Opcode of the current instruction in the schedule data. 2814 Value *OpValue = nullptr; 2815 2816 /// The TreeEntry that this instruction corresponds to. 2817 TreeEntry *TE = nullptr; 2818 2819 /// Points to the head in an instruction bundle (and always to this for 2820 /// single instructions). 2821 ScheduleData *FirstInBundle = nullptr; 2822 2823 /// Single linked list of all instructions in a bundle. Null if it is a 2824 /// single instruction. 2825 ScheduleData *NextInBundle = nullptr; 2826 2827 /// Single linked list of all memory instructions (e.g. load, store, call) 2828 /// in the block - until the end of the scheduling region. 2829 ScheduleData *NextLoadStore = nullptr; 2830 2831 /// The dependent memory instructions. 2832 /// This list is derived on demand in calculateDependencies(). 2833 SmallVector<ScheduleData *, 4> MemoryDependencies; 2834 2835 /// List of instructions which this instruction could be control dependent 2836 /// on. Allowing such nodes to be scheduled below this one could introduce 2837 /// a runtime fault which didn't exist in the original program. 2838 /// ex: this is a load or udiv following a readonly call which inf loops 2839 SmallVector<ScheduleData *, 4> ControlDependencies; 2840 2841 /// This ScheduleData is in the current scheduling region if this matches 2842 /// the current SchedulingRegionID of BlockScheduling. 2843 int SchedulingRegionID = 0; 2844 2845 /// Used for getting a "good" final ordering of instructions. 2846 int SchedulingPriority = 0; 2847 2848 /// The number of dependencies. Constitutes of the number of users of the 2849 /// instruction plus the number of dependent memory instructions (if any). 2850 /// This value is calculated on demand. 2851 /// If InvalidDeps, the number of dependencies is not calculated yet. 2852 int Dependencies = InvalidDeps; 2853 2854 /// The number of dependencies minus the number of dependencies of scheduled 2855 /// instructions. As soon as this is zero, the instruction/bundle gets ready 2856 /// for scheduling. 2857 /// Note that this is negative as long as Dependencies is not calculated. 2858 int UnscheduledDeps = InvalidDeps; 2859 2860 /// True if this instruction is scheduled (or considered as scheduled in the 2861 /// dry-run). 
2862 bool IsScheduled = false;
2863 };
2864
2865 #ifndef NDEBUG
2866 friend inline raw_ostream &operator<<(raw_ostream &os,
2867 const BoUpSLP::ScheduleData &SD) {
2868 SD.dump(os);
2869 return os;
2870 }
2871 #endif
2872
2873 friend struct GraphTraits<BoUpSLP *>;
2874 friend struct DOTGraphTraits<BoUpSLP *>;
2875
2876 /// Contains all scheduling data for a basic block.
2877 /// It does not schedule instructions that are not memory read/write
2878 /// instructions and whose operands are either constants, arguments, phis, or
2879 /// instructions from other blocks, or whose users are phis or live in other
2880 /// blocks. The resulting vector instructions can be placed at the beginning
2881 /// of the basic block without scheduling (if the operands do not need to be
2882 /// scheduled) or at the end of the block (if the users are outside of the
2883 /// block). This saves some compile time and memory in the compiler.
2884 ///
2885 /// ScheduleData is assigned to each instruction between the boundaries of
2886 /// the tree entry, even to those which are not part of the graph. This is
2887 /// required to correctly follow the dependencies between the instructions
2888 /// and to schedule them correctly. ScheduleData is not allocated for
2889 /// instructions which do not require scheduling, such as phis, nodes with
2890 /// only extractelements/insertelements, or nodes whose instructions have
2891 /// uses/operands outside of the block.
2892 struct BlockScheduling {
2893 BlockScheduling(BasicBlock *BB)
2894 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}
2895
2896 void clear() {
2897 ReadyInsts.clear();
2898 ScheduleStart = nullptr;
2899 ScheduleEnd = nullptr;
2900 FirstLoadStoreInRegion = nullptr;
2901 LastLoadStoreInRegion = nullptr;
2902 RegionHasStackSave = false;
2903
2904 // Reduce the maximum schedule region size by the size of the
2905 // previous scheduling run.
2906 ScheduleRegionSizeLimit -= ScheduleRegionSize;
2907 if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
2908 ScheduleRegionSizeLimit = MinScheduleRegionSize;
2909 ScheduleRegionSize = 0;
2910
2911 // Make a new scheduling region, i.e. all existing ScheduleData is not
2912 // in the new region yet.
2913 ++SchedulingRegionID;
2914 }
2915
2916 ScheduleData *getScheduleData(Instruction *I) {
2917 if (BB != I->getParent())
2918 // Avoid lookup if it can't possibly be in the map.
2919 return nullptr;
2920 ScheduleData *SD = ScheduleDataMap.lookup(I);
2921 if (SD && isInSchedulingRegion(SD))
2922 return SD;
2923 return nullptr;
2924 }
2925
2926 ScheduleData *getScheduleData(Value *V) {
2927 if (auto *I = dyn_cast<Instruction>(V))
2928 return getScheduleData(I);
2929 return nullptr;
2930 }
2931
2932 ScheduleData *getScheduleData(Value *V, Value *Key) {
2933 if (V == Key)
2934 return getScheduleData(V);
2935 auto I = ExtraScheduleDataMap.find(V);
2936 if (I != ExtraScheduleDataMap.end()) {
2937 ScheduleData *SD = I->second.lookup(Key);
2938 if (SD && isInSchedulingRegion(SD))
2939 return SD;
2940 }
2941 return nullptr;
2942 }
2943
2944 bool isInSchedulingRegion(ScheduleData *SD) const {
2945 return SD->SchedulingRegionID == SchedulingRegionID;
2946 }
2947
2948 /// Marks an instruction as scheduled and puts all dependent ready
2949 /// instructions into the ready-list.
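/// For example, when a two-element bundle [A; B] is scheduled, the
/// unscheduled-dependency counters of the operand bundles of A and B, and of
/// the bundles listed in their MemoryDependencies/ControlDependencies, are
/// decremented; every bundle whose counter drops to zero is inserted into
/// \p ReadyList.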
2950 template <typename ReadyListType> 2951 void schedule(ScheduleData *SD, ReadyListType &ReadyList) { 2952 SD->IsScheduled = true; 2953 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); 2954 2955 for (ScheduleData *BundleMember = SD; BundleMember; 2956 BundleMember = BundleMember->NextInBundle) { 2957 if (BundleMember->Inst != BundleMember->OpValue) 2958 continue; 2959 2960 // Handle the def-use chain dependencies. 2961 2962 // Decrement the unscheduled counter and insert to ready list if ready. 2963 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) { 2964 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) { 2965 if (OpDef && OpDef->hasValidDependencies() && 2966 OpDef->incrementUnscheduledDeps(-1) == 0) { 2967 // There are no more unscheduled dependencies after 2968 // decrementing, so we can put the dependent instruction 2969 // into the ready list. 2970 ScheduleData *DepBundle = OpDef->FirstInBundle; 2971 assert(!DepBundle->IsScheduled && 2972 "already scheduled bundle gets ready"); 2973 ReadyList.insert(DepBundle); 2974 LLVM_DEBUG(dbgs() 2975 << "SLP: gets ready (def): " << *DepBundle << "\n"); 2976 } 2977 }); 2978 }; 2979 2980 // If BundleMember is a vector bundle, its operands may have been 2981 // reordered during buildTree(). We therefore need to get its operands 2982 // through the TreeEntry. 2983 if (TreeEntry *TE = BundleMember->TE) { 2984 // Need to search for the lane since the tree entry can be reordered. 2985 int Lane = std::distance(TE->Scalars.begin(), 2986 find(TE->Scalars, BundleMember->Inst)); 2987 assert(Lane >= 0 && "Lane not set"); 2988 2989 // Since vectorization tree is being built recursively this assertion 2990 // ensures that the tree entry has all operands set before reaching 2991 // this code. Couple of exceptions known at the moment are extracts 2992 // where their second (immediate) operand is not added. Since 2993 // immediates do not affect scheduler behavior this is considered 2994 // okay. 2995 auto *In = BundleMember->Inst; 2996 assert(In && 2997 (isa<ExtractValueInst>(In) || isa<ExtractElementInst>(In) || 2998 In->getNumOperands() == TE->getNumOperands()) && 2999 "Missed TreeEntry operands?"); 3000 (void)In; // fake use to avoid build failure when assertions disabled 3001 3002 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands(); 3003 OpIdx != NumOperands; ++OpIdx) 3004 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane])) 3005 DecrUnsched(I); 3006 } else { 3007 // If BundleMember is a stand-alone instruction, no operand reordering 3008 // has taken place, so we directly access its operands. 3009 for (Use &U : BundleMember->Inst->operands()) 3010 if (auto *I = dyn_cast<Instruction>(U.get())) 3011 DecrUnsched(I); 3012 } 3013 // Handle the memory dependencies. 3014 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) { 3015 if (MemoryDepSD->hasValidDependencies() && 3016 MemoryDepSD->incrementUnscheduledDeps(-1) == 0) { 3017 // There are no more unscheduled dependencies after decrementing, 3018 // so we can put the dependent instruction into the ready list. 3019 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle; 3020 assert(!DepBundle->IsScheduled && 3021 "already scheduled bundle gets ready"); 3022 ReadyList.insert(DepBundle); 3023 LLVM_DEBUG(dbgs() 3024 << "SLP: gets ready (mem): " << *DepBundle << "\n"); 3025 } 3026 } 3027 // Handle the control dependencies. 
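// Note that, unlike the memory dependencies above, control dependencies are
// decremented without first checking hasValidDependencies().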
3028 for (ScheduleData *DepSD : BundleMember->ControlDependencies) { 3029 if (DepSD->incrementUnscheduledDeps(-1) == 0) { 3030 // There are no more unscheduled dependencies after decrementing, 3031 // so we can put the dependent instruction into the ready list. 3032 ScheduleData *DepBundle = DepSD->FirstInBundle; 3033 assert(!DepBundle->IsScheduled && 3034 "already scheduled bundle gets ready"); 3035 ReadyList.insert(DepBundle); 3036 LLVM_DEBUG(dbgs() 3037 << "SLP: gets ready (ctl): " << *DepBundle << "\n"); 3038 } 3039 } 3040 3041 } 3042 } 3043 3044 /// Verify basic self consistency properties of the data structure. 3045 void verify() { 3046 if (!ScheduleStart) 3047 return; 3048 3049 assert(ScheduleStart->getParent() == ScheduleEnd->getParent() && 3050 ScheduleStart->comesBefore(ScheduleEnd) && 3051 "Not a valid scheduling region?"); 3052 3053 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3054 auto *SD = getScheduleData(I); 3055 if (!SD) 3056 continue; 3057 assert(isInSchedulingRegion(SD) && 3058 "primary schedule data not in window?"); 3059 assert(isInSchedulingRegion(SD->FirstInBundle) && 3060 "entire bundle in window!"); 3061 (void)SD; 3062 doForAllOpcodes(I, [](ScheduleData *SD) { SD->verify(); }); 3063 } 3064 3065 for (auto *SD : ReadyInsts) { 3066 assert(SD->isSchedulingEntity() && SD->isReady() && 3067 "item in ready list not ready?"); 3068 (void)SD; 3069 } 3070 } 3071 3072 void doForAllOpcodes(Value *V, 3073 function_ref<void(ScheduleData *SD)> Action) { 3074 if (ScheduleData *SD = getScheduleData(V)) 3075 Action(SD); 3076 auto I = ExtraScheduleDataMap.find(V); 3077 if (I != ExtraScheduleDataMap.end()) 3078 for (auto &P : I->second) 3079 if (isInSchedulingRegion(P.second)) 3080 Action(P.second); 3081 } 3082 3083 /// Put all instructions into the ReadyList which are ready for scheduling. 3084 template <typename ReadyListType> 3085 void initialFillReadyList(ReadyListType &ReadyList) { 3086 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3087 doForAllOpcodes(I, [&](ScheduleData *SD) { 3088 if (SD->isSchedulingEntity() && SD->hasValidDependencies() && 3089 SD->isReady()) { 3090 ReadyList.insert(SD); 3091 LLVM_DEBUG(dbgs() 3092 << "SLP: initially in ready list: " << *SD << "\n"); 3093 } 3094 }); 3095 } 3096 } 3097 3098 /// Build a bundle from the ScheduleData nodes corresponding to the 3099 /// scalar instruction for each lane. 3100 ScheduleData *buildBundle(ArrayRef<Value *> VL); 3101 3102 /// Checks if a bundle of instructions can be scheduled, i.e. has no 3103 /// cyclic dependencies. This is only a dry-run, no instructions are 3104 /// actually moved at this stage. 3105 /// \returns the scheduling bundle. The returned Optional value is non-None 3106 /// if \p VL is allowed to be scheduled. 3107 Optional<ScheduleData *> 3108 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 3109 const InstructionsState &S); 3110 3111 /// Un-bundles a group of instructions. 3112 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue); 3113 3114 /// Allocates schedule data chunk. 3115 ScheduleData *allocateScheduleDataChunks(); 3116 3117 /// Extends the scheduling region so that V is inside the region. 3118 /// \returns true if the region size is within the limit. 3119 bool extendSchedulingRegion(Value *V, const InstructionsState &S); 3120 3121 /// Initialize the ScheduleData structures for new instructions in the 3122 /// scheduling region. 
3123 void initScheduleData(Instruction *FromI, Instruction *ToI, 3124 ScheduleData *PrevLoadStore, 3125 ScheduleData *NextLoadStore); 3126 3127 /// Updates the dependency information of a bundle and of all instructions/ 3128 /// bundles which depend on the original bundle. 3129 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList, 3130 BoUpSLP *SLP); 3131 3132 /// Sets all instruction in the scheduling region to un-scheduled. 3133 void resetSchedule(); 3134 3135 BasicBlock *BB; 3136 3137 /// Simple memory allocation for ScheduleData. 3138 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks; 3139 3140 /// The size of a ScheduleData array in ScheduleDataChunks. 3141 int ChunkSize; 3142 3143 /// The allocator position in the current chunk, which is the last entry 3144 /// of ScheduleDataChunks. 3145 int ChunkPos; 3146 3147 /// Attaches ScheduleData to Instruction. 3148 /// Note that the mapping survives during all vectorization iterations, i.e. 3149 /// ScheduleData structures are recycled. 3150 DenseMap<Instruction *, ScheduleData *> ScheduleDataMap; 3151 3152 /// Attaches ScheduleData to Instruction with the leading key. 3153 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>> 3154 ExtraScheduleDataMap; 3155 3156 /// The ready-list for scheduling (only used for the dry-run). 3157 SetVector<ScheduleData *> ReadyInsts; 3158 3159 /// The first instruction of the scheduling region. 3160 Instruction *ScheduleStart = nullptr; 3161 3162 /// The first instruction _after_ the scheduling region. 3163 Instruction *ScheduleEnd = nullptr; 3164 3165 /// The first memory accessing instruction in the scheduling region 3166 /// (can be null). 3167 ScheduleData *FirstLoadStoreInRegion = nullptr; 3168 3169 /// The last memory accessing instruction in the scheduling region 3170 /// (can be null). 3171 ScheduleData *LastLoadStoreInRegion = nullptr; 3172 3173 /// Is there an llvm.stacksave or llvm.stackrestore in the scheduling 3174 /// region? Used to optimize the dependence calculation for the 3175 /// common case where there isn't. 3176 bool RegionHasStackSave = false; 3177 3178 /// The current size of the scheduling region. 3179 int ScheduleRegionSize = 0; 3180 3181 /// The maximum size allowed for the scheduling region. 3182 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget; 3183 3184 /// The ID of the scheduling region. For a new vectorization iteration this 3185 /// is incremented which "removes" all ScheduleData from the region. 3186 /// Make sure that the initial SchedulingRegionID is greater than the 3187 /// initial SchedulingRegionID in ScheduleData (which is 0). 3188 int SchedulingRegionID = 1; 3189 }; 3190 3191 /// Attaches the BlockScheduling structures to basic blocks. 3192 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules; 3193 3194 /// Performs the "real" scheduling. Done before vectorization is actually 3195 /// performed in a basic block. 3196 void scheduleBlock(BlockScheduling *BS); 3197 3198 /// List of users to ignore during scheduling and that don't need extracting. 3199 const SmallDenseSet<Value *> *UserIgnoreList = nullptr; 3200 3201 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of 3202 /// sorted SmallVectors of unsigned. 
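/// The sentinel orders {~1U} and {~2U} cannot occur as real reorder indices,
/// which makes them safe empty/tombstone keys. Used below, e.g. as the hash
/// map of the OrdersUses MapVector in reorderTopToBottom() and
/// reorderBottomToTop():
///   DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>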
3203 struct OrdersTypeDenseMapInfo { 3204 static OrdersType getEmptyKey() { 3205 OrdersType V; 3206 V.push_back(~1U); 3207 return V; 3208 } 3209 3210 static OrdersType getTombstoneKey() { 3211 OrdersType V; 3212 V.push_back(~2U); 3213 return V; 3214 } 3215 3216 static unsigned getHashValue(const OrdersType &V) { 3217 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); 3218 } 3219 3220 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) { 3221 return LHS == RHS; 3222 } 3223 }; 3224 3225 // Analysis and block reference. 3226 Function *F; 3227 ScalarEvolution *SE; 3228 TargetTransformInfo *TTI; 3229 TargetLibraryInfo *TLI; 3230 LoopInfo *LI; 3231 DominatorTree *DT; 3232 AssumptionCache *AC; 3233 DemandedBits *DB; 3234 const DataLayout *DL; 3235 OptimizationRemarkEmitter *ORE; 3236 3237 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. 3238 unsigned MinVecRegSize; // Set by cl::opt (default: 128). 3239 3240 /// Instruction builder to construct the vectorized tree. 3241 IRBuilder<> Builder; 3242 3243 /// A map of scalar integer values to the smallest bit width with which they 3244 /// can legally be represented. The values map to (width, signed) pairs, 3245 /// where "width" indicates the minimum bit width and "signed" is True if the 3246 /// value must be signed-extended, rather than zero-extended, back to its 3247 /// original width. 3248 MapVector<Value *, std::pair<uint64_t, bool>> MinBWs; 3249 }; 3250 3251 } // end namespace slpvectorizer 3252 3253 template <> struct GraphTraits<BoUpSLP *> { 3254 using TreeEntry = BoUpSLP::TreeEntry; 3255 3256 /// NodeRef has to be a pointer per the GraphWriter. 3257 using NodeRef = TreeEntry *; 3258 3259 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy; 3260 3261 /// Add the VectorizableTree to the index iterator to be able to return 3262 /// TreeEntry pointers. 3263 struct ChildIteratorType 3264 : public iterator_adaptor_base< 3265 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> { 3266 ContainerTy &VectorizableTree; 3267 3268 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W, 3269 ContainerTy &VT) 3270 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {} 3271 3272 NodeRef operator*() { return I->UserTE; } 3273 }; 3274 3275 static NodeRef getEntryNode(BoUpSLP &R) { 3276 return R.VectorizableTree[0].get(); 3277 } 3278 3279 static ChildIteratorType child_begin(NodeRef N) { 3280 return {N->UserTreeIndices.begin(), N->Container}; 3281 } 3282 3283 static ChildIteratorType child_end(NodeRef N) { 3284 return {N->UserTreeIndices.end(), N->Container}; 3285 } 3286 3287 /// For the node iterator we just need to turn the TreeEntry iterator into a 3288 /// TreeEntry* iterator so that it dereferences to NodeRef. 
3289 class nodes_iterator { 3290 using ItTy = ContainerTy::iterator; 3291 ItTy It; 3292 3293 public: 3294 nodes_iterator(const ItTy &It2) : It(It2) {} 3295 NodeRef operator*() { return It->get(); } 3296 nodes_iterator operator++() { 3297 ++It; 3298 return *this; 3299 } 3300 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; } 3301 }; 3302 3303 static nodes_iterator nodes_begin(BoUpSLP *R) { 3304 return nodes_iterator(R->VectorizableTree.begin()); 3305 } 3306 3307 static nodes_iterator nodes_end(BoUpSLP *R) { 3308 return nodes_iterator(R->VectorizableTree.end()); 3309 } 3310 3311 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); } 3312 }; 3313 3314 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits { 3315 using TreeEntry = BoUpSLP::TreeEntry; 3316 3317 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {} 3318 3319 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) { 3320 std::string Str; 3321 raw_string_ostream OS(Str); 3322 if (isSplat(Entry->Scalars)) 3323 OS << "<splat> "; 3324 for (auto V : Entry->Scalars) { 3325 OS << *V; 3326 if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) { 3327 return EU.Scalar == V; 3328 })) 3329 OS << " <extract>"; 3330 OS << "\n"; 3331 } 3332 return Str; 3333 } 3334 3335 static std::string getNodeAttributes(const TreeEntry *Entry, 3336 const BoUpSLP *) { 3337 if (Entry->State == TreeEntry::NeedToGather) 3338 return "color=red"; 3339 return ""; 3340 } 3341 }; 3342 3343 } // end namespace llvm 3344 3345 BoUpSLP::~BoUpSLP() { 3346 SmallVector<WeakTrackingVH> DeadInsts; 3347 for (auto *I : DeletedInstructions) { 3348 for (Use &U : I->operands()) { 3349 auto *Op = dyn_cast<Instruction>(U.get()); 3350 if (Op && !DeletedInstructions.count(Op) && Op->hasOneUser() && 3351 wouldInstructionBeTriviallyDead(Op, TLI)) 3352 DeadInsts.emplace_back(Op); 3353 } 3354 I->dropAllReferences(); 3355 } 3356 for (auto *I : DeletedInstructions) { 3357 assert(I->use_empty() && 3358 "trying to erase instruction with users."); 3359 I->eraseFromParent(); 3360 } 3361 3362 // Cleanup any dead scalar code feeding the vectorized instructions 3363 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI); 3364 3365 #ifdef EXPENSIVE_CHECKS 3366 // If we could guarantee that this call is not extremely slow, we could 3367 // remove the ifdef limitation (see PR47712). 3368 assert(!verifyFunction(*F, &dbgs())); 3369 #endif 3370 } 3371 3372 /// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses 3373 /// contains original mask for the scalars reused in the node. Procedure 3374 /// transform this mask in accordance with the given \p Mask. 3375 static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) { 3376 assert(!Mask.empty() && Reuses.size() == Mask.size() && 3377 "Expected non-empty mask."); 3378 SmallVector<int> Prev(Reuses.begin(), Reuses.end()); 3379 Prev.swap(Reuses); 3380 for (unsigned I = 0, E = Prev.size(); I < E; ++I) 3381 if (Mask[I] != UndefMaskElem) 3382 Reuses[Mask[I]] = Prev[I]; 3383 } 3384 3385 /// Reorders the given \p Order according to the given \p Mask. \p Order - is 3386 /// the original order of the scalars. Procedure transforms the provided order 3387 /// in accordance with the given \p Mask. If the resulting \p Order is just an 3388 /// identity order, \p Order is cleared. 
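/// For example, with an empty (identity) \p Order and \p Mask = {1, 2, 0, 3},
/// the resulting \p Order is {1, 2, 0, 3}; a mask that is itself the identity
/// leaves \p Order cleared.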
3389 static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) {
3390 assert(!Mask.empty() && "Expected non-empty mask.");
3391 SmallVector<int> MaskOrder;
3392 if (Order.empty()) {
3393 MaskOrder.resize(Mask.size());
3394 std::iota(MaskOrder.begin(), MaskOrder.end(), 0);
3395 } else {
3396 inversePermutation(Order, MaskOrder);
3397 }
3398 reorderReuses(MaskOrder, Mask);
3399 if (ShuffleVectorInst::isIdentityMask(MaskOrder)) {
3400 Order.clear();
3401 return;
3402 }
3403 Order.assign(Mask.size(), Mask.size());
3404 for (unsigned I = 0, E = Mask.size(); I < E; ++I)
3405 if (MaskOrder[I] != UndefMaskElem)
3406 Order[MaskOrder[I]] = I;
3407 fixupOrderingIndices(Order);
3408 }
3409
3410 Optional<BoUpSLP::OrdersType>
3411 BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) {
3412 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only.");
3413 unsigned NumScalars = TE.Scalars.size();
3414 OrdersType CurrentOrder(NumScalars, NumScalars);
3415 SmallVector<int> Positions;
3416 SmallBitVector UsedPositions(NumScalars);
3417 const TreeEntry *STE = nullptr;
3418 // Try to find all gathered scalars that are also vectorized in another
3419 // tree node. Only a single such vectorized node can be used to correctly
3420 // identify the order of the gathered scalars.
3421 for (unsigned I = 0; I < NumScalars; ++I) {
3422 Value *V = TE.Scalars[I];
3423 if (!isa<LoadInst, ExtractElementInst, ExtractValueInst>(V))
3424 continue;
3425 if (const auto *LocalSTE = getTreeEntry(V)) {
3426 if (!STE)
3427 STE = LocalSTE;
3428 else if (STE != LocalSTE)
3429 // Take the order only from the single vector node.
3430 return None;
3431 unsigned Lane =
3432 std::distance(STE->Scalars.begin(), find(STE->Scalars, V));
3433 if (Lane >= NumScalars)
3434 return None;
3435 if (CurrentOrder[Lane] != NumScalars) {
3436 if (Lane != I)
3437 continue;
3438 UsedPositions.reset(CurrentOrder[Lane]);
3439 }
3440 // The partial identity (where only some elements of the gather node are
3441 // in the identity order) is good.
3442 CurrentOrder[Lane] = I;
3443 UsedPositions.set(I);
3444 }
3445 }
3446 // Need to keep the order if we have a vector entry and at least 2 scalars or
3447 // the vectorized entry has just 2 scalars.
3448 if (STE && (UsedPositions.count() > 1 || STE->Scalars.size() == 2)) {
3449 auto &&IsIdentityOrder = [NumScalars](ArrayRef<unsigned> CurrentOrder) {
3450 for (unsigned I = 0; I < NumScalars; ++I)
3451 if (CurrentOrder[I] != I && CurrentOrder[I] != NumScalars)
3452 return false;
3453 return true;
3454 };
3455 if (IsIdentityOrder(CurrentOrder)) {
3456 CurrentOrder.clear();
3457 return CurrentOrder;
3458 }
3459 auto *It = CurrentOrder.begin();
3460 for (unsigned I = 0; I < NumScalars;) {
3461 if (UsedPositions.test(I)) {
3462 ++I;
3463 continue;
3464 }
3465 if (*It == NumScalars) {
3466 *It = I;
3467 ++I;
3468 }
3469 ++It;
3470 }
3471 return CurrentOrder;
3472 }
3473 return None;
3474 }
3475
3476 static bool clusterSortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
3477 const DataLayout &DL, ScalarEvolution &SE,
3478 SmallVectorImpl<unsigned> &SortedIndices) {
3479 assert(llvm::all_of(
3480 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
3481 "Expected list of pointer operands.");
3482 // Map from bases to a vector of (Ptr, Offset, OrigIdx), which we insert each
3483 // Ptr into, sort and return the sorted indices with values next to one
3484 // another.
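// For example, for pointers {P, P+2, Q, P+1} (offsets in units of ElemTy) the
// map becomes {P -> [(P,0,0), (P+2,2,1), (P+1,1,3)], Q -> [(Q,0,2)]}; sorting
// the P group by offset yields the consecutive run P, P+1, P+2, and the
// returned SortedIndices are {0, 3, 1, 2}.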
3485 MapVector<Value *, SmallVector<std::tuple<Value *, int, unsigned>>> Bases; 3486 Bases[VL[0]].push_back(std::make_tuple(VL[0], 0U, 0U)); 3487 3488 unsigned Cnt = 1; 3489 for (Value *Ptr : VL.drop_front()) { 3490 bool Found = any_of(Bases, [&](auto &Base) { 3491 Optional<int> Diff = 3492 getPointersDiff(ElemTy, Base.first, ElemTy, Ptr, DL, SE, 3493 /*StrictCheck=*/true); 3494 if (!Diff) 3495 return false; 3496 3497 Base.second.emplace_back(Ptr, *Diff, Cnt++); 3498 return true; 3499 }); 3500 3501 if (!Found) { 3502 // If we haven't found enough to usefully cluster, return early. 3503 if (Bases.size() > VL.size() / 2 - 1) 3504 return false; 3505 3506 // Not found already - add a new Base 3507 Bases[Ptr].emplace_back(Ptr, 0, Cnt++); 3508 } 3509 } 3510 3511 // For each of the bases sort the pointers by Offset and check if any of the 3512 // base become consecutively allocated. 3513 bool AnyConsecutive = false; 3514 for (auto &Base : Bases) { 3515 auto &Vec = Base.second; 3516 if (Vec.size() > 1) { 3517 llvm::stable_sort(Vec, [](const std::tuple<Value *, int, unsigned> &X, 3518 const std::tuple<Value *, int, unsigned> &Y) { 3519 return std::get<1>(X) < std::get<1>(Y); 3520 }); 3521 int InitialOffset = std::get<1>(Vec[0]); 3522 AnyConsecutive |= all_of(enumerate(Vec), [InitialOffset](auto &P) { 3523 return std::get<1>(P.value()) == int(P.index()) + InitialOffset; 3524 }); 3525 } 3526 } 3527 3528 // Fill SortedIndices array only if it looks worth-while to sort the ptrs. 3529 SortedIndices.clear(); 3530 if (!AnyConsecutive) 3531 return false; 3532 3533 for (auto &Base : Bases) { 3534 for (auto &T : Base.second) 3535 SortedIndices.push_back(std::get<2>(T)); 3536 } 3537 3538 assert(SortedIndices.size() == VL.size() && 3539 "Expected SortedIndices to be the size of VL"); 3540 return true; 3541 } 3542 3543 Optional<BoUpSLP::OrdersType> 3544 BoUpSLP::findPartiallyOrderedLoads(const BoUpSLP::TreeEntry &TE) { 3545 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only."); 3546 Type *ScalarTy = TE.Scalars[0]->getType(); 3547 3548 SmallVector<Value *> Ptrs; 3549 Ptrs.reserve(TE.Scalars.size()); 3550 for (Value *V : TE.Scalars) { 3551 auto *L = dyn_cast<LoadInst>(V); 3552 if (!L || !L->isSimple()) 3553 return None; 3554 Ptrs.push_back(L->getPointerOperand()); 3555 } 3556 3557 BoUpSLP::OrdersType Order; 3558 if (clusterSortPtrAccesses(Ptrs, ScalarTy, *DL, *SE, Order)) 3559 return Order; 3560 return None; 3561 } 3562 3563 Optional<BoUpSLP::OrdersType> BoUpSLP::getReorderingData(const TreeEntry &TE, 3564 bool TopToBottom) { 3565 // No need to reorder if need to shuffle reuses, still need to shuffle the 3566 // node. 3567 if (!TE.ReuseShuffleIndices.empty()) 3568 return None; 3569 if (TE.State == TreeEntry::Vectorize && 3570 (isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE.getMainOp()) || 3571 (TopToBottom && isa<StoreInst, InsertElementInst>(TE.getMainOp()))) && 3572 !TE.isAltShuffle()) 3573 return TE.ReorderIndices; 3574 if (TE.State == TreeEntry::NeedToGather) { 3575 // TODO: add analysis of other gather nodes with extractelement 3576 // instructions and other values/instructions, not only undefs. 
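// The condition below accepts either a plain gather of extractelements or a
// mix of extractelements and undefs, provided that every extract reads from a
// fixed-width vector and all scalars have the same type.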
3577 if (((TE.getOpcode() == Instruction::ExtractElement &&
3578 !TE.isAltShuffle()) ||
3579 (all_of(TE.Scalars,
3580 [](Value *V) {
3581 return isa<UndefValue, ExtractElementInst>(V);
3582 }) &&
3583 any_of(TE.Scalars,
3584 [](Value *V) { return isa<ExtractElementInst>(V); }))) &&
3585 all_of(TE.Scalars,
3586 [](Value *V) {
3587 auto *EE = dyn_cast<ExtractElementInst>(V);
3588 return !EE || isa<FixedVectorType>(EE->getVectorOperandType());
3589 }) &&
3590 allSameType(TE.Scalars)) {
3591 // Check that a gather of extractelements can be represented as
3592 // just a shuffle of a single vector.
3593 OrdersType CurrentOrder;
3594 bool Reuse = canReuseExtract(TE.Scalars, TE.getMainOp(), CurrentOrder);
3595 if (Reuse || !CurrentOrder.empty()) {
3596 if (!CurrentOrder.empty())
3597 fixupOrderingIndices(CurrentOrder);
3598 return CurrentOrder;
3599 }
3600 }
3601 if (Optional<OrdersType> CurrentOrder = findReusedOrderedScalars(TE))
3602 return CurrentOrder;
3603 if (TE.Scalars.size() >= 4)
3604 if (Optional<OrdersType> Order = findPartiallyOrderedLoads(TE))
3605 return Order;
3606 }
3607 return None;
3608 }
3609
3610 void BoUpSLP::reorderTopToBottom() {
3611 // Maps VF to the graph nodes.
3612 DenseMap<unsigned, SetVector<TreeEntry *>> VFToOrderedEntries;
3613 // ExtractElement gather nodes which can be vectorized and need to handle
3614 // their ordering.
3615 DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
3616
3617 // Maps a TreeEntry to the reorder indices of external users.
3618 DenseMap<const TreeEntry *, SmallVector<OrdersType, 1>>
3619 ExternalUserReorderMap;
3620 // Find all reorderable nodes with the given VF.
3621 // Currently these are vectorized stores, loads, extracts + some gathering of
3622 // extracts.
3623 for_each(VectorizableTree, [this, &VFToOrderedEntries, &GathersToOrders,
3624 &ExternalUserReorderMap](
3625 const std::unique_ptr<TreeEntry> &TE) {
3626 // Look for external users that will probably be vectorized.
3627 SmallVector<OrdersType, 1> ExternalUserReorderIndices =
3628 findExternalStoreUsersReorderIndices(TE.get());
3629 if (!ExternalUserReorderIndices.empty()) {
3630 VFToOrderedEntries[TE->Scalars.size()].insert(TE.get());
3631 ExternalUserReorderMap.try_emplace(TE.get(),
3632 std::move(ExternalUserReorderIndices));
3633 }
3634
3635 if (Optional<OrdersType> CurrentOrder =
3636 getReorderingData(*TE, /*TopToBottom=*/true)) {
3637 // Do not include the ordering of nodes that are used in alternate-opcode
3638 // vectorization; it is better to reorder them during the bottom-to-top
3639 // stage. Following the order here would reorder the whole graph, even
3640 // though it is only profitable to reorder the subgraph that starts at the
3641 // alternate-opcode vectorization node. Such nodes already end up with a
3642 // shuffle instruction, and it is enough to change this shuffle rather
3643 // than rotate the scalars of the whole graph.
3644 unsigned Cnt = 0; 3645 const TreeEntry *UserTE = TE.get(); 3646 while (UserTE && Cnt < RecursionMaxDepth) { 3647 if (UserTE->UserTreeIndices.size() != 1) 3648 break; 3649 if (all_of(UserTE->UserTreeIndices, [](const EdgeInfo &EI) { 3650 return EI.UserTE->State == TreeEntry::Vectorize && 3651 EI.UserTE->isAltShuffle() && EI.UserTE->Idx != 0; 3652 })) 3653 return; 3654 if (UserTE->UserTreeIndices.empty()) 3655 UserTE = nullptr; 3656 else 3657 UserTE = UserTE->UserTreeIndices.back().UserTE; 3658 ++Cnt; 3659 } 3660 VFToOrderedEntries[TE->Scalars.size()].insert(TE.get()); 3661 if (TE->State != TreeEntry::Vectorize) 3662 GathersToOrders.try_emplace(TE.get(), *CurrentOrder); 3663 } 3664 }); 3665 3666 // Reorder the graph nodes according to their vectorization factor. 3667 for (unsigned VF = VectorizableTree.front()->Scalars.size(); VF > 1; 3668 VF /= 2) { 3669 auto It = VFToOrderedEntries.find(VF); 3670 if (It == VFToOrderedEntries.end()) 3671 continue; 3672 // Try to find the most profitable order. We just are looking for the most 3673 // used order and reorder scalar elements in the nodes according to this 3674 // mostly used order. 3675 ArrayRef<TreeEntry *> OrderedEntries = It->second.getArrayRef(); 3676 // All operands are reordered and used only in this node - propagate the 3677 // most used order to the user node. 3678 MapVector<OrdersType, unsigned, 3679 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>> 3680 OrdersUses; 3681 SmallPtrSet<const TreeEntry *, 4> VisitedOps; 3682 for (const TreeEntry *OpTE : OrderedEntries) { 3683 // No need to reorder this nodes, still need to extend and to use shuffle, 3684 // just need to merge reordering shuffle and the reuse shuffle. 3685 if (!OpTE->ReuseShuffleIndices.empty()) 3686 continue; 3687 // Count number of orders uses. 3688 const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & { 3689 if (OpTE->State == TreeEntry::NeedToGather) { 3690 auto It = GathersToOrders.find(OpTE); 3691 if (It != GathersToOrders.end()) 3692 return It->second; 3693 } 3694 return OpTE->ReorderIndices; 3695 }(); 3696 // First consider the order of the external scalar users. 3697 auto It = ExternalUserReorderMap.find(OpTE); 3698 if (It != ExternalUserReorderMap.end()) { 3699 const auto &ExternalUserReorderIndices = It->second; 3700 for (const OrdersType &ExtOrder : ExternalUserReorderIndices) 3701 ++OrdersUses.insert(std::make_pair(ExtOrder, 0)).first->second; 3702 // No other useful reorder data in this entry. 3703 if (Order.empty()) 3704 continue; 3705 } 3706 // Stores actually store the mask, not the order, need to invert. 3707 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && 3708 OpTE->getOpcode() == Instruction::Store && !Order.empty()) { 3709 SmallVector<int> Mask; 3710 inversePermutation(Order, Mask); 3711 unsigned E = Order.size(); 3712 OrdersType CurrentOrder(E, E); 3713 transform(Mask, CurrentOrder.begin(), [E](int Idx) { 3714 return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx); 3715 }); 3716 fixupOrderingIndices(CurrentOrder); 3717 ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second; 3718 } else { 3719 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second; 3720 } 3721 } 3722 // Set order of the user node. 3723 if (OrdersUses.empty()) 3724 continue; 3725 // Choose the most used order. 
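// On a tie the empty (identity) order is preferred, since it requires no
// reordering of the scalars.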
3726 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 3727 unsigned Cnt = OrdersUses.front().second; 3728 for (const auto &Pair : drop_begin(OrdersUses)) { 3729 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 3730 BestOrder = Pair.first; 3731 Cnt = Pair.second; 3732 } 3733 } 3734 // Set order of the user node. 3735 if (BestOrder.empty()) 3736 continue; 3737 SmallVector<int> Mask; 3738 inversePermutation(BestOrder, Mask); 3739 SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem); 3740 unsigned E = BestOrder.size(); 3741 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 3742 return I < E ? static_cast<int>(I) : UndefMaskElem; 3743 }); 3744 // Do an actual reordering, if profitable. 3745 for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 3746 // Just do the reordering for the nodes with the given VF. 3747 if (TE->Scalars.size() != VF) { 3748 if (TE->ReuseShuffleIndices.size() == VF) { 3749 // Need to reorder the reuses masks of the operands with smaller VF to 3750 // be able to find the match between the graph nodes and scalar 3751 // operands of the given node during vectorization/cost estimation. 3752 assert(all_of(TE->UserTreeIndices, 3753 [VF, &TE](const EdgeInfo &EI) { 3754 return EI.UserTE->Scalars.size() == VF || 3755 EI.UserTE->Scalars.size() == 3756 TE->Scalars.size(); 3757 }) && 3758 "All users must be of VF size."); 3759 // Update ordering of the operands with the smaller VF than the given 3760 // one. 3761 reorderReuses(TE->ReuseShuffleIndices, Mask); 3762 } 3763 continue; 3764 } 3765 if (TE->State == TreeEntry::Vectorize && 3766 isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst, 3767 InsertElementInst>(TE->getMainOp()) && 3768 !TE->isAltShuffle()) { 3769 // Build correct orders for extract{element,value}, loads and 3770 // stores. 3771 reorderOrder(TE->ReorderIndices, Mask); 3772 if (isa<InsertElementInst, StoreInst>(TE->getMainOp())) 3773 TE->reorderOperands(Mask); 3774 } else { 3775 // Reorder the node and its operands. 3776 TE->reorderOperands(Mask); 3777 assert(TE->ReorderIndices.empty() && 3778 "Expected empty reorder sequence."); 3779 reorderScalars(TE->Scalars, Mask); 3780 } 3781 if (!TE->ReuseShuffleIndices.empty()) { 3782 // Apply reversed order to keep the original ordering of the reused 3783 // elements to avoid extra reorder indices shuffling. 3784 OrdersType CurrentOrder; 3785 reorderOrder(CurrentOrder, MaskOrder); 3786 SmallVector<int> NewReuses; 3787 inversePermutation(CurrentOrder, NewReuses); 3788 addMask(NewReuses, TE->ReuseShuffleIndices); 3789 TE->ReuseShuffleIndices.swap(NewReuses); 3790 } 3791 } 3792 } 3793 } 3794 3795 bool BoUpSLP::canReorderOperands( 3796 TreeEntry *UserTE, SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges, 3797 ArrayRef<TreeEntry *> ReorderableGathers, 3798 SmallVectorImpl<TreeEntry *> &GatherOps) { 3799 for (unsigned I = 0, E = UserTE->getNumOperands(); I < E; ++I) { 3800 if (any_of(Edges, [I](const std::pair<unsigned, TreeEntry *> &OpData) { 3801 return OpData.first == I && 3802 OpData.second->State == TreeEntry::Vectorize; 3803 })) 3804 continue; 3805 if (TreeEntry *TE = getVectorizedOperand(UserTE, I)) { 3806 // Do not reorder if operand node is used by many user nodes. 3807 if (any_of(TE->UserTreeIndices, 3808 [UserTE](const EdgeInfo &EI) { return EI.UserTE != UserTE; })) 3809 return false; 3810 // Add the node to the list of the ordered nodes with the identity 3811 // order. 
3812 Edges.emplace_back(I, TE); 3813 continue; 3814 } 3815 ArrayRef<Value *> VL = UserTE->getOperand(I); 3816 TreeEntry *Gather = nullptr; 3817 if (count_if(ReorderableGathers, [VL, &Gather](TreeEntry *TE) { 3818 assert(TE->State != TreeEntry::Vectorize && 3819 "Only non-vectorized nodes are expected."); 3820 if (TE->isSame(VL)) { 3821 Gather = TE; 3822 return true; 3823 } 3824 return false; 3825 }) > 1) 3826 return false; 3827 if (Gather) 3828 GatherOps.push_back(Gather); 3829 } 3830 return true; 3831 } 3832 3833 void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) { 3834 SetVector<TreeEntry *> OrderedEntries; 3835 DenseMap<const TreeEntry *, OrdersType> GathersToOrders; 3836 // Find all reorderable leaf nodes with the given VF. 3837 // Currently the are vectorized loads,extracts without alternate operands + 3838 // some gathering of extracts. 3839 SmallVector<TreeEntry *> NonVectorized; 3840 for_each(VectorizableTree, [this, &OrderedEntries, &GathersToOrders, 3841 &NonVectorized]( 3842 const std::unique_ptr<TreeEntry> &TE) { 3843 if (TE->State != TreeEntry::Vectorize) 3844 NonVectorized.push_back(TE.get()); 3845 if (Optional<OrdersType> CurrentOrder = 3846 getReorderingData(*TE, /*TopToBottom=*/false)) { 3847 OrderedEntries.insert(TE.get()); 3848 if (TE->State != TreeEntry::Vectorize) 3849 GathersToOrders.try_emplace(TE.get(), *CurrentOrder); 3850 } 3851 }); 3852 3853 // 1. Propagate order to the graph nodes, which use only reordered nodes. 3854 // I.e., if the node has operands, that are reordered, try to make at least 3855 // one operand order in the natural order and reorder others + reorder the 3856 // user node itself. 3857 SmallPtrSet<const TreeEntry *, 4> Visited; 3858 while (!OrderedEntries.empty()) { 3859 // 1. Filter out only reordered nodes. 3860 // 2. If the entry has multiple uses - skip it and jump to the next node. 3861 MapVector<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users; 3862 SmallVector<TreeEntry *> Filtered; 3863 for (TreeEntry *TE : OrderedEntries) { 3864 if (!(TE->State == TreeEntry::Vectorize || 3865 (TE->State == TreeEntry::NeedToGather && 3866 GathersToOrders.count(TE))) || 3867 TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() || 3868 !all_of(drop_begin(TE->UserTreeIndices), 3869 [TE](const EdgeInfo &EI) { 3870 return EI.UserTE == TE->UserTreeIndices.front().UserTE; 3871 }) || 3872 !Visited.insert(TE).second) { 3873 Filtered.push_back(TE); 3874 continue; 3875 } 3876 // Build a map between user nodes and their operands order to speedup 3877 // search. The graph currently does not provide this dependency directly. 3878 for (EdgeInfo &EI : TE->UserTreeIndices) { 3879 TreeEntry *UserTE = EI.UserTE; 3880 auto It = Users.find(UserTE); 3881 if (It == Users.end()) 3882 It = Users.insert({UserTE, {}}).first; 3883 It->second.emplace_back(EI.EdgeIdx, TE); 3884 } 3885 } 3886 // Erase filtered entries. 3887 for_each(Filtered, 3888 [&OrderedEntries](TreeEntry *TE) { OrderedEntries.remove(TE); }); 3889 for (auto &Data : Users) { 3890 // Check that operands are used only in the User node. 3891 SmallVector<TreeEntry *> GatherOps; 3892 if (!canReorderOperands(Data.first, Data.second, NonVectorized, 3893 GatherOps)) { 3894 for_each(Data.second, 3895 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { 3896 OrderedEntries.remove(Op.second); 3897 }); 3898 continue; 3899 } 3900 // All operands are reordered and used only in this node - propagate the 3901 // most used order to the user node. 
3902 MapVector<OrdersType, unsigned, 3903 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>> 3904 OrdersUses; 3905 // Do the analysis for each tree entry only once, otherwise the order of 3906 // the same node my be considered several times, though might be not 3907 // profitable. 3908 SmallPtrSet<const TreeEntry *, 4> VisitedOps; 3909 SmallPtrSet<const TreeEntry *, 4> VisitedUsers; 3910 for (const auto &Op : Data.second) { 3911 TreeEntry *OpTE = Op.second; 3912 if (!VisitedOps.insert(OpTE).second) 3913 continue; 3914 if (!OpTE->ReuseShuffleIndices.empty() || 3915 (IgnoreReorder && OpTE == VectorizableTree.front().get())) 3916 continue; 3917 const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & { 3918 if (OpTE->State == TreeEntry::NeedToGather) 3919 return GathersToOrders.find(OpTE)->second; 3920 return OpTE->ReorderIndices; 3921 }(); 3922 unsigned NumOps = count_if( 3923 Data.second, [OpTE](const std::pair<unsigned, TreeEntry *> &P) { 3924 return P.second == OpTE; 3925 }); 3926 // Stores actually store the mask, not the order, need to invert. 3927 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && 3928 OpTE->getOpcode() == Instruction::Store && !Order.empty()) { 3929 SmallVector<int> Mask; 3930 inversePermutation(Order, Mask); 3931 unsigned E = Order.size(); 3932 OrdersType CurrentOrder(E, E); 3933 transform(Mask, CurrentOrder.begin(), [E](int Idx) { 3934 return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx); 3935 }); 3936 fixupOrderingIndices(CurrentOrder); 3937 OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second += 3938 NumOps; 3939 } else { 3940 OrdersUses.insert(std::make_pair(Order, 0)).first->second += NumOps; 3941 } 3942 auto Res = OrdersUses.insert(std::make_pair(OrdersType(), 0)); 3943 const auto &&AllowsReordering = [IgnoreReorder, &GathersToOrders]( 3944 const TreeEntry *TE) { 3945 if (!TE->ReorderIndices.empty() || !TE->ReuseShuffleIndices.empty() || 3946 (TE->State == TreeEntry::Vectorize && TE->isAltShuffle()) || 3947 (IgnoreReorder && TE->Idx == 0)) 3948 return true; 3949 if (TE->State == TreeEntry::NeedToGather) { 3950 auto It = GathersToOrders.find(TE); 3951 if (It != GathersToOrders.end()) 3952 return !It->second.empty(); 3953 return true; 3954 } 3955 return false; 3956 }; 3957 for (const EdgeInfo &EI : OpTE->UserTreeIndices) { 3958 TreeEntry *UserTE = EI.UserTE; 3959 if (!VisitedUsers.insert(UserTE).second) 3960 continue; 3961 // May reorder user node if it requires reordering, has reused 3962 // scalars, is an alternate op vectorize node or its op nodes require 3963 // reordering. 3964 if (AllowsReordering(UserTE)) 3965 continue; 3966 // Check if users allow reordering. 3967 // Currently look up just 1 level of operands to avoid increase of 3968 // the compile time. 3969 // Profitable to reorder if definitely more operands allow 3970 // reordering rather than those with natural order. 3971 ArrayRef<std::pair<unsigned, TreeEntry *>> Ops = Users[UserTE]; 3972 if (static_cast<unsigned>(count_if( 3973 Ops, [UserTE, &AllowsReordering]( 3974 const std::pair<unsigned, TreeEntry *> &Op) { 3975 return AllowsReordering(Op.second) && 3976 all_of(Op.second->UserTreeIndices, 3977 [UserTE](const EdgeInfo &EI) { 3978 return EI.UserTE == UserTE; 3979 }); 3980 })) <= Ops.size() / 2) 3981 ++Res.first->second; 3982 } 3983 } 3984 // If no orders - skip current nodes and jump to the next one, if any. 
3985 if (OrdersUses.empty()) { 3986 for_each(Data.second, 3987 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { 3988 OrderedEntries.remove(Op.second); 3989 }); 3990 continue; 3991 } 3992 // Choose the best order. 3993 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 3994 unsigned Cnt = OrdersUses.front().second; 3995 for (const auto &Pair : drop_begin(OrdersUses)) { 3996 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 3997 BestOrder = Pair.first; 3998 Cnt = Pair.second; 3999 } 4000 } 4001 // Set order of the user node (reordering of operands and user nodes). 4002 if (BestOrder.empty()) { 4003 for_each(Data.second, 4004 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { 4005 OrderedEntries.remove(Op.second); 4006 }); 4007 continue; 4008 } 4009 // Erase operands from OrderedEntries list and adjust their orders. 4010 VisitedOps.clear(); 4011 SmallVector<int> Mask; 4012 inversePermutation(BestOrder, Mask); 4013 SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem); 4014 unsigned E = BestOrder.size(); 4015 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 4016 return I < E ? static_cast<int>(I) : UndefMaskElem; 4017 }); 4018 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) { 4019 TreeEntry *TE = Op.second; 4020 OrderedEntries.remove(TE); 4021 if (!VisitedOps.insert(TE).second) 4022 continue; 4023 if (TE->ReuseShuffleIndices.size() == BestOrder.size()) { 4024 // Just reorder reuses indices. 4025 reorderReuses(TE->ReuseShuffleIndices, Mask); 4026 continue; 4027 } 4028 // Gathers are processed separately. 4029 if (TE->State != TreeEntry::Vectorize) 4030 continue; 4031 assert((BestOrder.size() == TE->ReorderIndices.size() || 4032 TE->ReorderIndices.empty()) && 4033 "Non-matching sizes of user/operand entries."); 4034 reorderOrder(TE->ReorderIndices, Mask); 4035 } 4036 // For gathers just need to reorder its scalars. 4037 for (TreeEntry *Gather : GatherOps) { 4038 assert(Gather->ReorderIndices.empty() && 4039 "Unexpected reordering of gathers."); 4040 if (!Gather->ReuseShuffleIndices.empty()) { 4041 // Just reorder reuses indices. 4042 reorderReuses(Gather->ReuseShuffleIndices, Mask); 4043 continue; 4044 } 4045 reorderScalars(Gather->Scalars, Mask); 4046 OrderedEntries.remove(Gather); 4047 } 4048 // Reorder operands of the user node and set the ordering for the user 4049 // node itself. 4050 if (Data.first->State != TreeEntry::Vectorize || 4051 !isa<ExtractElementInst, ExtractValueInst, LoadInst>( 4052 Data.first->getMainOp()) || 4053 Data.first->isAltShuffle()) 4054 Data.first->reorderOperands(Mask); 4055 if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) || 4056 Data.first->isAltShuffle()) { 4057 reorderScalars(Data.first->Scalars, Mask); 4058 reorderOrder(Data.first->ReorderIndices, MaskOrder); 4059 if (Data.first->ReuseShuffleIndices.empty() && 4060 !Data.first->ReorderIndices.empty() && 4061 !Data.first->isAltShuffle()) { 4062 // Insert user node to the list to try to sink reordering deeper in 4063 // the graph. 4064 OrderedEntries.insert(Data.first); 4065 } 4066 } else { 4067 reorderOrder(Data.first->ReorderIndices, Mask); 4068 } 4069 } 4070 } 4071 // If the reordering is unnecessary, just remove the reorder. 
4072 if (IgnoreReorder && !VectorizableTree.front()->ReorderIndices.empty() && 4073 VectorizableTree.front()->ReuseShuffleIndices.empty()) 4074 VectorizableTree.front()->ReorderIndices.clear(); 4075 } 4076 4077 void BoUpSLP::buildExternalUses( 4078 const ExtraValueToDebugLocsMap &ExternallyUsedValues) { 4079 // Collect the values that we need to extract from the tree. 4080 for (auto &TEPtr : VectorizableTree) { 4081 TreeEntry *Entry = TEPtr.get(); 4082 4083 // No need to handle users of gathered values. 4084 if (Entry->State == TreeEntry::NeedToGather) 4085 continue; 4086 4087 // For each lane: 4088 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 4089 Value *Scalar = Entry->Scalars[Lane]; 4090 int FoundLane = Entry->findLaneForValue(Scalar); 4091 4092 // Check if the scalar is externally used as an extra arg. 4093 auto ExtI = ExternallyUsedValues.find(Scalar); 4094 if (ExtI != ExternallyUsedValues.end()) { 4095 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " 4096 << Lane << " from " << *Scalar << ".\n"); 4097 ExternalUses.emplace_back(Scalar, nullptr, FoundLane); 4098 } 4099 for (User *U : Scalar->users()) { 4100 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); 4101 4102 Instruction *UserInst = dyn_cast<Instruction>(U); 4103 if (!UserInst) 4104 continue; 4105 4106 if (isDeleted(UserInst)) 4107 continue; 4108 4109 // Skip in-tree scalars that become vectors 4110 if (TreeEntry *UseEntry = getTreeEntry(U)) { 4111 Value *UseScalar = UseEntry->Scalars[0]; 4112 // Some in-tree scalars will remain as scalar in vectorized 4113 // instructions. If that is the case, the one in Lane 0 will 4114 // be used. 4115 if (UseScalar != U || 4116 UseEntry->State == TreeEntry::ScatterVectorize || 4117 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) { 4118 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U 4119 << ".\n"); 4120 assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state"); 4121 continue; 4122 } 4123 } 4124 4125 // Ignore users in the user ignore list. 4126 if (UserIgnoreList && UserIgnoreList->contains(UserInst)) 4127 continue; 4128 4129 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " 4130 << Lane << " from " << *Scalar << ".\n"); 4131 ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane)); 4132 } 4133 } 4134 } 4135 } 4136 4137 DenseMap<Value *, SmallVector<StoreInst *, 4>> 4138 BoUpSLP::collectUserStores(const BoUpSLP::TreeEntry *TE) const { 4139 DenseMap<Value *, SmallVector<StoreInst *, 4>> PtrToStoresMap; 4140 for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size())) { 4141 Value *V = TE->Scalars[Lane]; 4142 // To save compilation time we don't visit if we have too many users. 4143 static constexpr unsigned UsersLimit = 4; 4144 if (V->hasNUsesOrMore(UsersLimit)) 4145 break; 4146 4147 // Collect stores per pointer object. 4148 for (User *U : V->users()) { 4149 auto *SI = dyn_cast<StoreInst>(U); 4150 if (SI == nullptr || !SI->isSimple() || 4151 !isValidElementType(SI->getValueOperand()->getType())) 4152 continue; 4153 // Skip entry if already 4154 if (getTreeEntry(U)) 4155 continue; 4156 4157 Value *Ptr = getUnderlyingObject(SI->getPointerOperand()); 4158 auto &StoresVec = PtrToStoresMap[Ptr]; 4159 // For now just keep one store per pointer object per lane. 4160 // TODO: Extend this to support multiple stores per pointer per lane 4161 if (StoresVec.size() > Lane) 4162 continue; 4163 // Skip if in different BBs. 
4164 if (!StoresVec.empty() && 4165 SI->getParent() != StoresVec.back()->getParent()) 4166 continue; 4167 // Make sure that the stores are of the same type. 4168 if (!StoresVec.empty() && 4169 SI->getValueOperand()->getType() != 4170 StoresVec.back()->getValueOperand()->getType()) 4171 continue; 4172 StoresVec.push_back(SI); 4173 } 4174 } 4175 return PtrToStoresMap; 4176 } 4177 4178 bool BoUpSLP::CanFormVector(const SmallVector<StoreInst *, 4> &StoresVec, 4179 OrdersType &ReorderIndices) const { 4180 // We check whether the stores in StoreVec can form a vector by sorting them 4181 // and checking whether they are consecutive. 4182 4183 // To avoid calling getPointersDiff() while sorting we create a vector of 4184 // pairs {store, offset from first} and sort this instead. 4185 SmallVector<std::pair<StoreInst *, int>, 4> StoreOffsetVec(StoresVec.size()); 4186 StoreInst *S0 = StoresVec[0]; 4187 StoreOffsetVec[0] = {S0, 0}; 4188 Type *S0Ty = S0->getValueOperand()->getType(); 4189 Value *S0Ptr = S0->getPointerOperand(); 4190 for (unsigned Idx : seq<unsigned>(1, StoresVec.size())) { 4191 StoreInst *SI = StoresVec[Idx]; 4192 Optional<int> Diff = 4193 getPointersDiff(S0Ty, S0Ptr, SI->getValueOperand()->getType(), 4194 SI->getPointerOperand(), *DL, *SE, 4195 /*StrictCheck=*/true); 4196 // We failed to compare the pointers so just abandon this StoresVec. 4197 if (!Diff) 4198 return false; 4199 StoreOffsetVec[Idx] = {StoresVec[Idx], *Diff}; 4200 } 4201 4202 // Sort the vector based on the pointers. We create a copy because we may 4203 // need the original later for calculating the reorder (shuffle) indices. 4204 stable_sort(StoreOffsetVec, [](const std::pair<StoreInst *, int> &Pair1, 4205 const std::pair<StoreInst *, int> &Pair2) { 4206 int Offset1 = Pair1.second; 4207 int Offset2 = Pair2.second; 4208 return Offset1 < Offset2; 4209 }); 4210 4211 // Check if the stores are consecutive by checking if their difference is 1. 4212 for (unsigned Idx : seq<unsigned>(1, StoreOffsetVec.size())) 4213 if (StoreOffsetVec[Idx].second != StoreOffsetVec[Idx-1].second + 1) 4214 return false; 4215 4216 // Calculate the shuffle indices according to their offset against the sorted 4217 // StoreOffsetVec. 4218 ReorderIndices.reserve(StoresVec.size()); 4219 for (StoreInst *SI : StoresVec) { 4220 unsigned Idx = find_if(StoreOffsetVec, 4221 [SI](const std::pair<StoreInst *, int> &Pair) { 4222 return Pair.first == SI; 4223 }) - 4224 StoreOffsetVec.begin(); 4225 ReorderIndices.push_back(Idx); 4226 } 4227 // Identity order (e.g., {0,1,2,3}) is modeled as an empty OrdersType in 4228 // reorderTopToBottom() and reorderBottomToTop(), so we are following the 4229 // same convention here. 
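// For example, stores whose offsets relative to the first store are
// {0, 2, 3, 1} are consecutive once sorted and produce ReorderIndices
// {0, 2, 3, 1}, whereas an already-sorted group produces the identity order,
// which is cleared below.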
4230 auto IsIdentityOrder = [](const OrdersType &Order) { 4231 for (unsigned Idx : seq<unsigned>(0, Order.size())) 4232 if (Idx != Order[Idx]) 4233 return false; 4234 return true; 4235 }; 4236 if (IsIdentityOrder(ReorderIndices)) 4237 ReorderIndices.clear(); 4238 4239 return true; 4240 } 4241 4242 #ifndef NDEBUG 4243 LLVM_DUMP_METHOD static void dumpOrder(const BoUpSLP::OrdersType &Order) { 4244 for (unsigned Idx : Order) 4245 dbgs() << Idx << ", "; 4246 dbgs() << "\n"; 4247 } 4248 #endif 4249 4250 SmallVector<BoUpSLP::OrdersType, 1> 4251 BoUpSLP::findExternalStoreUsersReorderIndices(TreeEntry *TE) const { 4252 unsigned NumLanes = TE->Scalars.size(); 4253 4254 DenseMap<Value *, SmallVector<StoreInst *, 4>> PtrToStoresMap = 4255 collectUserStores(TE); 4256 4257 // Holds the reorder indices for each candidate store vector that is a user of 4258 // the current TreeEntry. 4259 SmallVector<OrdersType, 1> ExternalReorderIndices; 4260 4261 // Now inspect the stores collected per pointer and look for vectorization 4262 // candidates. For each candidate calculate the reorder index vector and push 4263 // it into `ExternalReorderIndices` 4264 for (const auto &Pair : PtrToStoresMap) { 4265 auto &StoresVec = Pair.second; 4266 // If we have fewer than NumLanes stores, then we can't form a vector. 4267 if (StoresVec.size() != NumLanes) 4268 continue; 4269 4270 // If the stores are not consecutive then abandon this StoresVec. 4271 OrdersType ReorderIndices; 4272 if (!CanFormVector(StoresVec, ReorderIndices)) 4273 continue; 4274 4275 // We now know that the scalars in StoresVec can form a vector instruction, 4276 // so set the reorder indices. 4277 ExternalReorderIndices.push_back(ReorderIndices); 4278 } 4279 return ExternalReorderIndices; 4280 } 4281 4282 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 4283 const SmallDenseSet<Value *> &UserIgnoreLst) { 4284 deleteTree(); 4285 UserIgnoreList = &UserIgnoreLst; 4286 if (!allSameType(Roots)) 4287 return; 4288 buildTree_rec(Roots, 0, EdgeInfo()); 4289 } 4290 4291 void BoUpSLP::buildTree(ArrayRef<Value *> Roots) { 4292 deleteTree(); 4293 if (!allSameType(Roots)) 4294 return; 4295 buildTree_rec(Roots, 0, EdgeInfo()); 4296 } 4297 4298 namespace { 4299 /// Tracks the state we can represent the loads in the given sequence. 4300 enum class LoadsState { Gather, Vectorize, ScatterVectorize }; 4301 } // anonymous namespace 4302 4303 /// Checks if the given array of loads can be represented as a vectorized, 4304 /// scatter or just simple gather. 4305 static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0, 4306 const TargetTransformInfo &TTI, 4307 const DataLayout &DL, ScalarEvolution &SE, 4308 SmallVectorImpl<unsigned> &Order, 4309 SmallVectorImpl<Value *> &PointerOps) { 4310 // Check that a vectorized load would load the same memory as a scalar 4311 // load. For example, we don't want to vectorize loads that are smaller 4312 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 4313 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 4314 // from such a struct, we read/write packed bits disagreeing with the 4315 // unvectorized version. 4316 Type *ScalarTy = VL0->getType(); 4317 4318 if (DL.getTypeSizeInBits(ScalarTy) != DL.getTypeAllocSizeInBits(ScalarTy)) 4319 return LoadsState::Gather; 4320 4321 // Make sure all loads in the bundle are simple - we can't vectorize 4322 // atomic or volatile loads. 
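// The checks below classify the bundle: pointer operands that sort into a
// consecutive run are tagged LoadsState::Vectorize; otherwise, if the target
// supports a masked gather at the common alignment, LoadsState::ScatterVectorize;
// anything else remains a LoadsState::Gather.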
4323 PointerOps.clear(); 4324 PointerOps.resize(VL.size()); 4325 auto *POIter = PointerOps.begin(); 4326 for (Value *V : VL) { 4327 auto *L = cast<LoadInst>(V); 4328 if (!L->isSimple()) 4329 return LoadsState::Gather; 4330 *POIter = L->getPointerOperand(); 4331 ++POIter; 4332 } 4333 4334 Order.clear(); 4335 // Check the order of pointer operands. 4336 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, DL, SE, Order)) { 4337 Value *Ptr0; 4338 Value *PtrN; 4339 if (Order.empty()) { 4340 Ptr0 = PointerOps.front(); 4341 PtrN = PointerOps.back(); 4342 } else { 4343 Ptr0 = PointerOps[Order.front()]; 4344 PtrN = PointerOps[Order.back()]; 4345 } 4346 Optional<int> Diff = 4347 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE); 4348 // Check that the sorted loads are consecutive. 4349 if (static_cast<unsigned>(*Diff) == VL.size() - 1) 4350 return LoadsState::Vectorize; 4351 Align CommonAlignment = cast<LoadInst>(VL0)->getAlign(); 4352 for (Value *V : VL) 4353 CommonAlignment = 4354 commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign()); 4355 if (TTI.isLegalMaskedGather(FixedVectorType::get(ScalarTy, VL.size()), 4356 CommonAlignment)) 4357 return LoadsState::ScatterVectorize; 4358 } 4359 4360 return LoadsState::Gather; 4361 } 4362 4363 /// \return true if the specified list of values has only one instruction that 4364 /// requires scheduling, false otherwise. 4365 #ifndef NDEBUG 4366 static bool needToScheduleSingleInstruction(ArrayRef<Value *> VL) { 4367 Value *NeedsScheduling = nullptr; 4368 for (Value *V : VL) { 4369 if (doesNotNeedToBeScheduled(V)) 4370 continue; 4371 if (!NeedsScheduling) { 4372 NeedsScheduling = V; 4373 continue; 4374 } 4375 return false; 4376 } 4377 return NeedsScheduling; 4378 } 4379 #endif 4380 4381 /// Generates key/subkey pair for the given value to provide effective sorting 4382 /// of the values and better detection of the vectorizable values sequences. The 4383 /// keys/subkeys can be used for better sorting of the values themselves (keys) 4384 /// and in values subgroups (subkeys). 4385 static std::pair<size_t, size_t> generateKeySubkey( 4386 Value *V, const TargetLibraryInfo *TLI, 4387 function_ref<hash_code(size_t, LoadInst *)> LoadsSubkeyGenerator, 4388 bool AllowAlternate) { 4389 hash_code Key = hash_value(V->getValueID() + 2); 4390 hash_code SubKey = hash_value(0); 4391 // Sort the loads by the distance between the pointers. 4392 if (auto *LI = dyn_cast<LoadInst>(V)) { 4393 Key = hash_combine(hash_value(Instruction::Load), Key); 4394 if (LI->isSimple()) 4395 SubKey = hash_value(LoadsSubkeyGenerator(Key, LI)); 4396 else 4397 SubKey = hash_value(LI); 4398 } else if (isVectorLikeInstWithConstOps(V)) { 4399 // Sort extracts by the vector operands. 4400 if (isa<ExtractElementInst, UndefValue>(V)) 4401 Key = hash_value(Value::UndefValueVal + 1); 4402 if (auto *EI = dyn_cast<ExtractElementInst>(V)) { 4403 if (!isUndefVector(EI->getVectorOperand()) && 4404 !isa<UndefValue>(EI->getIndexOperand())) 4405 SubKey = hash_value(EI->getVectorOperand()); 4406 } 4407 } else if (auto *I = dyn_cast<Instruction>(V)) { 4408 // Sort other instructions just by the opcodes except for CMPInst. 4409 // For CMP also sort by the predicate kind. 4410 if ((isa<BinaryOperator>(I) || isa<CastInst>(I)) && 4411 isValidForAlternation(I->getOpcode())) { 4412 if (AllowAlternate) 4413 Key = hash_value(isa<BinaryOperator>(I) ? 
1 : 0); 4414 else 4415 Key = hash_combine(hash_value(I->getOpcode()), Key); 4416 SubKey = hash_combine( 4417 hash_value(I->getOpcode()), hash_value(I->getType()), 4418 hash_value(isa<BinaryOperator>(I) 4419 ? I->getType() 4420 : cast<CastInst>(I)->getOperand(0)->getType())); 4421 } else if (auto *CI = dyn_cast<CmpInst>(I)) { 4422 CmpInst::Predicate Pred = CI->getPredicate(); 4423 if (CI->isCommutative()) 4424 Pred = std::min(Pred, CmpInst::getInversePredicate(Pred)); 4425 CmpInst::Predicate SwapPred = CmpInst::getSwappedPredicate(Pred); 4426 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Pred), 4427 hash_value(SwapPred), 4428 hash_value(CI->getOperand(0)->getType())); 4429 } else if (auto *Call = dyn_cast<CallInst>(I)) { 4430 Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, TLI); 4431 if (isTriviallyVectorizable(ID)) 4432 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(ID)); 4433 else if (!VFDatabase(*Call).getMappings(*Call).empty()) 4434 SubKey = hash_combine(hash_value(I->getOpcode()), 4435 hash_value(Call->getCalledFunction())); 4436 else 4437 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Call)); 4438 for (const CallBase::BundleOpInfo &Op : Call->bundle_op_infos()) 4439 SubKey = hash_combine(hash_value(Op.Begin), hash_value(Op.End), 4440 hash_value(Op.Tag), SubKey); 4441 } else if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) { 4442 if (Gep->getNumOperands() == 2 && isa<ConstantInt>(Gep->getOperand(1))) 4443 SubKey = hash_value(Gep->getPointerOperand()); 4444 else 4445 SubKey = hash_value(Gep); 4446 } else if (BinaryOperator::isIntDivRem(I->getOpcode()) && 4447 !isa<ConstantInt>(I->getOperand(1))) { 4448 // Do not try to vectorize instructions with potentially high cost. 4449 SubKey = hash_value(I); 4450 } else { 4451 SubKey = hash_value(I->getOpcode()); 4452 } 4453 Key = hash_combine(hash_value(I->getParent()), Key); 4454 } 4455 return std::make_pair(Key, SubKey); 4456 } 4457 4458 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, 4459 const EdgeInfo &UserTreeIdx) { 4460 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!"); 4461 4462 SmallVector<int> ReuseShuffleIndicies; 4463 SmallVector<Value *> UniqueValues; 4464 auto &&TryToFindDuplicates = [&VL, &ReuseShuffleIndicies, &UniqueValues, 4465 &UserTreeIdx, 4466 this](const InstructionsState &S) { 4467 // Check that every instruction appears once in this bundle. 4468 DenseMap<Value *, unsigned> UniquePositions; 4469 for (Value *V : VL) { 4470 if (isConstant(V)) { 4471 ReuseShuffleIndicies.emplace_back( 4472 isa<UndefValue>(V) ? 
UndefMaskElem : UniqueValues.size()); 4473 UniqueValues.emplace_back(V); 4474 continue; 4475 } 4476 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 4477 ReuseShuffleIndicies.emplace_back(Res.first->second); 4478 if (Res.second) 4479 UniqueValues.emplace_back(V); 4480 } 4481 size_t NumUniqueScalarValues = UniqueValues.size(); 4482 if (NumUniqueScalarValues == VL.size()) { 4483 ReuseShuffleIndicies.clear(); 4484 } else { 4485 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); 4486 if (NumUniqueScalarValues <= 1 || 4487 (UniquePositions.size() == 1 && all_of(UniqueValues, 4488 [](Value *V) { 4489 return isa<UndefValue>(V) || 4490 !isConstant(V); 4491 })) || 4492 !llvm::isPowerOf2_32(NumUniqueScalarValues)) { 4493 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 4494 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 4495 return false; 4496 } 4497 VL = UniqueValues; 4498 } 4499 return true; 4500 }; 4501 4502 InstructionsState S = getSameOpcode(VL); 4503 if (Depth == RecursionMaxDepth) { 4504 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); 4505 if (TryToFindDuplicates(S)) 4506 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4507 ReuseShuffleIndicies); 4508 return; 4509 } 4510 4511 // Don't handle scalable vectors 4512 if (S.getOpcode() == Instruction::ExtractElement && 4513 isa<ScalableVectorType>( 4514 cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) { 4515 LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n"); 4516 if (TryToFindDuplicates(S)) 4517 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4518 ReuseShuffleIndicies); 4519 return; 4520 } 4521 4522 // Don't handle vectors. 4523 if (S.OpValue->getType()->isVectorTy() && 4524 !isa<InsertElementInst>(S.OpValue)) { 4525 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); 4526 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 4527 return; 4528 } 4529 4530 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 4531 if (SI->getValueOperand()->getType()->isVectorTy()) { 4532 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); 4533 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 4534 return; 4535 } 4536 4537 // If all of the operands are identical or constant we have a simple solution. 4538 // If we deal with insert/extract instructions, they all must have constant 4539 // indices, otherwise we should gather them, not try to vectorize. 4540 // If alternate op node with 2 elements with gathered operands - do not 4541 // vectorize. 4542 auto &&NotProfitableForVectorization = [&S, this, 4543 Depth](ArrayRef<Value *> VL) { 4544 if (!S.getOpcode() || !S.isAltShuffle() || VL.size() > 2) 4545 return false; 4546 if (VectorizableTree.size() < MinTreeSize) 4547 return false; 4548 if (Depth >= RecursionMaxDepth - 1) 4549 return true; 4550 // Check if all operands are extracts, part of vector node or can build a 4551 // regular vectorize node. 
4552 SmallVector<unsigned, 2> InstsCount(VL.size(), 0); 4553 for (Value *V : VL) { 4554 auto *I = cast<Instruction>(V); 4555 InstsCount.push_back(count_if(I->operand_values(), [](Value *Op) { 4556 return isa<Instruction>(Op) || isVectorLikeInstWithConstOps(Op); 4557 })); 4558 } 4559 bool IsCommutative = isCommutative(S.MainOp) || isCommutative(S.AltOp); 4560 if ((IsCommutative && 4561 std::accumulate(InstsCount.begin(), InstsCount.end(), 0) < 2) || 4562 (!IsCommutative && 4563 all_of(InstsCount, [](unsigned ICnt) { return ICnt < 2; }))) 4564 return true; 4565 assert(VL.size() == 2 && "Expected only 2 alternate op instructions."); 4566 SmallVector<SmallVector<std::pair<Value *, Value *>>> Candidates; 4567 auto *I1 = cast<Instruction>(VL.front()); 4568 auto *I2 = cast<Instruction>(VL.back()); 4569 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op) 4570 Candidates.emplace_back().emplace_back(I1->getOperand(Op), 4571 I2->getOperand(Op)); 4572 if (count_if( 4573 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) { 4574 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat); 4575 }) >= S.MainOp->getNumOperands() / 2) 4576 return false; 4577 if (S.MainOp->getNumOperands() > 2) 4578 return true; 4579 if (IsCommutative) { 4580 // Check permuted operands. 4581 Candidates.clear(); 4582 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op) 4583 Candidates.emplace_back().emplace_back(I1->getOperand(Op), 4584 I2->getOperand((Op + 1) % E)); 4585 if (any_of( 4586 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) { 4587 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat); 4588 })) 4589 return false; 4590 } 4591 return true; 4592 }; 4593 if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode() || 4594 (isa<InsertElementInst, ExtractValueInst, ExtractElementInst>(S.MainOp) && 4595 !all_of(VL, isVectorLikeInstWithConstOps)) || 4596 NotProfitableForVectorization(VL)) { 4597 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O, small shuffle. \n"); 4598 if (TryToFindDuplicates(S)) 4599 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4600 ReuseShuffleIndicies); 4601 return; 4602 } 4603 4604 // We now know that this is a vector of instructions of the same type from 4605 // the same block. 4606 4607 // Don't vectorize ephemeral values. 4608 if (!EphValues.empty()) { 4609 for (Value *V : VL) { 4610 if (EphValues.count(V)) { 4611 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 4612 << ") is ephemeral.\n"); 4613 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 4614 return; 4615 } 4616 } 4617 } 4618 4619 // Check if this is a duplicate of another entry. 4620 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 4621 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); 4622 if (!E->isSame(VL)) { 4623 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); 4624 if (TryToFindDuplicates(S)) 4625 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4626 ReuseShuffleIndicies); 4627 return; 4628 } 4629 // Record the reuse of the tree node. FIXME, currently this is only used to 4630 // properly draw the graph rather than for the actual vectorization. 4631 E->UserTreeIndices.push_back(UserTreeIdx); 4632 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue 4633 << ".\n"); 4634 return; 4635 } 4636 4637 // Check that none of the instructions in the bundle are already in the tree. 
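// A scalar value may be owned by at most one vectorized tree entry; if any
// value in this bundle already belongs to another entry, the bundle is
// gathered instead of being vectorized a second time.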
4638 for (Value *V : VL) { 4639 auto *I = dyn_cast<Instruction>(V); 4640 if (!I) 4641 continue; 4642 if (getTreeEntry(I)) { 4643 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 4644 << ") is already in tree.\n"); 4645 if (TryToFindDuplicates(S)) 4646 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4647 ReuseShuffleIndicies); 4648 return; 4649 } 4650 } 4651 4652 // The reduction nodes (stored in UserIgnoreList) also should stay scalar. 4653 if (UserIgnoreList && !UserIgnoreList->empty()) { 4654 for (Value *V : VL) { 4655 if (UserIgnoreList && UserIgnoreList->contains(V)) { 4656 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); 4657 if (TryToFindDuplicates(S)) 4658 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4659 ReuseShuffleIndicies); 4660 return; 4661 } 4662 } 4663 } 4664 4665 // Check that all of the users of the scalars that we want to vectorize are 4666 // schedulable. 4667 auto *VL0 = cast<Instruction>(S.OpValue); 4668 BasicBlock *BB = VL0->getParent(); 4669 4670 if (!DT->isReachableFromEntry(BB)) { 4671 // Don't go into unreachable blocks. They may contain instructions with 4672 // dependency cycles which confuse the final scheduling. 4673 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); 4674 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 4675 return; 4676 } 4677 4678 // Check that every instruction appears once in this bundle. 4679 if (!TryToFindDuplicates(S)) 4680 return; 4681 4682 auto &BSRef = BlocksSchedules[BB]; 4683 if (!BSRef) 4684 BSRef = std::make_unique<BlockScheduling>(BB); 4685 4686 BlockScheduling &BS = *BSRef; 4687 4688 Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S); 4689 #ifdef EXPENSIVE_CHECKS 4690 // Make sure we didn't break any internal invariants 4691 BS.verify(); 4692 #endif 4693 if (!Bundle) { 4694 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); 4695 assert((!BS.getScheduleData(VL0) || 4696 !BS.getScheduleData(VL0)->isPartOfBundle()) && 4697 "tryScheduleBundle should cancelScheduling on failure"); 4698 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4699 ReuseShuffleIndicies); 4700 return; 4701 } 4702 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 4703 4704 unsigned ShuffleOrOp = S.isAltShuffle() ? 4705 (unsigned) Instruction::ShuffleVector : S.getOpcode(); 4706 switch (ShuffleOrOp) { 4707 case Instruction::PHI: { 4708 auto *PH = cast<PHINode>(VL0); 4709 4710 // Check for terminator values (e.g. invoke). 4711 for (Value *V : VL) 4712 for (Value *Incoming : cast<PHINode>(V)->incoming_values()) { 4713 Instruction *Term = dyn_cast<Instruction>(Incoming); 4714 if (Term && Term->isTerminator()) { 4715 LLVM_DEBUG(dbgs() 4716 << "SLP: Need to swizzle PHINodes (terminator use).\n"); 4717 BS.cancelScheduling(VL, VL0); 4718 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4719 ReuseShuffleIndicies); 4720 return; 4721 } 4722 } 4723 4724 TreeEntry *TE = 4725 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies); 4726 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 4727 4728 // Keeps the reordered operands to avoid code duplication. 
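// Operands are grouped per incoming block (via getIncomingValueForBlock) so
// each lane contributes the value flowing in from the same predecessor;
// predecessors unreachable from the entry are padded with poison.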
4729 SmallVector<ValueList, 2> OperandsVec; 4730 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { 4731 if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) { 4732 ValueList Operands(VL.size(), PoisonValue::get(PH->getType())); 4733 TE->setOperand(I, Operands); 4734 OperandsVec.push_back(Operands); 4735 continue; 4736 } 4737 ValueList Operands; 4738 // Prepare the operand vector. 4739 for (Value *V : VL) 4740 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock( 4741 PH->getIncomingBlock(I))); 4742 TE->setOperand(I, Operands); 4743 OperandsVec.push_back(Operands); 4744 } 4745 for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx) 4746 buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx}); 4747 return; 4748 } 4749 case Instruction::ExtractValue: 4750 case Instruction::ExtractElement: { 4751 OrdersType CurrentOrder; 4752 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder); 4753 if (Reuse) { 4754 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n"); 4755 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 4756 ReuseShuffleIndicies); 4757 // This is a special case, as it does not gather, but at the same time 4758 // we are not extending buildTree_rec() towards the operands. 4759 ValueList Op0; 4760 Op0.assign(VL.size(), VL0->getOperand(0)); 4761 VectorizableTree.back()->setOperand(0, Op0); 4762 return; 4763 } 4764 if (!CurrentOrder.empty()) { 4765 LLVM_DEBUG({ 4766 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence " 4767 "with order"; 4768 for (unsigned Idx : CurrentOrder) 4769 dbgs() << " " << Idx; 4770 dbgs() << "\n"; 4771 }); 4772 fixupOrderingIndices(CurrentOrder); 4773 // Insert new order with initial value 0, if it does not exist, 4774 // otherwise return the iterator to the existing one. 4775 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 4776 ReuseShuffleIndicies, CurrentOrder); 4777 // This is a special case, as it does not gather, but at the same time 4778 // we are not extending buildTree_rec() towards the operands. 4779 ValueList Op0; 4780 Op0.assign(VL.size(), VL0->getOperand(0)); 4781 VectorizableTree.back()->setOperand(0, Op0); 4782 return; 4783 } 4784 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n"); 4785 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4786 ReuseShuffleIndicies); 4787 BS.cancelScheduling(VL, VL0); 4788 return; 4789 } 4790 case Instruction::InsertElement: { 4791 assert(ReuseShuffleIndicies.empty() && "All inserts should be unique"); 4792 4793 // Check that we have a buildvector and not a shuffle of 2 or more 4794 // different vectors. 4795 ValueSet SourceVectors; 4796 for (Value *V : VL) { 4797 SourceVectors.insert(cast<Instruction>(V)->getOperand(0)); 4798 assert(getInsertIndex(V) != None && "Non-constant or undef index?"); 4799 } 4800 4801 if (count_if(VL, [&SourceVectors](Value *V) { 4802 return !SourceVectors.contains(V); 4803 }) >= 2) { 4804 // Found 2nd source vector - cancel. 
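// (A genuine buildvector chain has a single source: every insertelement
// except the last feeds the next one, so at most one element of VL is not
// itself a source vector. Two or more such elements imply distinct source
// vectors, which would need an extra shuffle and is not handled here.)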
4805 LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with " 4806 "different source vectors.\n"); 4807 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 4808 BS.cancelScheduling(VL, VL0); 4809 return; 4810 } 4811 4812 auto OrdCompare = [](const std::pair<int, int> &P1, 4813 const std::pair<int, int> &P2) { 4814 return P1.first > P2.first; 4815 }; 4816 PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>, 4817 decltype(OrdCompare)> 4818 Indices(OrdCompare); 4819 for (int I = 0, E = VL.size(); I < E; ++I) { 4820 unsigned Idx = *getInsertIndex(VL[I]); 4821 Indices.emplace(Idx, I); 4822 } 4823 OrdersType CurrentOrder(VL.size(), VL.size()); 4824 bool IsIdentity = true; 4825 for (int I = 0, E = VL.size(); I < E; ++I) { 4826 CurrentOrder[Indices.top().second] = I; 4827 IsIdentity &= Indices.top().second == I; 4828 Indices.pop(); 4829 } 4830 if (IsIdentity) 4831 CurrentOrder.clear(); 4832 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 4833 None, CurrentOrder); 4834 LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n"); 4835 4836 constexpr int NumOps = 2; 4837 ValueList VectorOperands[NumOps]; 4838 for (int I = 0; I < NumOps; ++I) { 4839 for (Value *V : VL) 4840 VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I)); 4841 4842 TE->setOperand(I, VectorOperands[I]); 4843 } 4844 buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1}); 4845 return; 4846 } 4847 case Instruction::Load: { 4848 // Check that a vectorized load would load the same memory as a scalar 4849 // load. For example, we don't want to vectorize loads that are smaller 4850 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 4851 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 4852 // from such a struct, we read/write packed bits disagreeing with the 4853 // unvectorized version. 4854 SmallVector<Value *> PointerOps; 4855 OrdersType CurrentOrder; 4856 TreeEntry *TE = nullptr; 4857 switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, CurrentOrder, 4858 PointerOps)) { 4859 case LoadsState::Vectorize: 4860 if (CurrentOrder.empty()) { 4861 // Original loads are consecutive and does not require reordering. 4862 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 4863 ReuseShuffleIndicies); 4864 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n"); 4865 } else { 4866 fixupOrderingIndices(CurrentOrder); 4867 // Need to reorder. 4868 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 4869 ReuseShuffleIndicies, CurrentOrder); 4870 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n"); 4871 } 4872 TE->setOperandsInOrder(); 4873 break; 4874 case LoadsState::ScatterVectorize: 4875 // Vectorizing non-consecutive loads with `llvm.masked.gather`. 
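// Illustrative sketch of the form eventually emitted for such a bundle
// (names and types are examples only):
//   %g = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(
//            <4 x i32*> %ptrs, i32 4,
//            <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
// where %ptrs is produced from the PointerOps bundle vectorized by the
// recursive call below.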
4876 TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S, 4877 UserTreeIdx, ReuseShuffleIndicies); 4878 TE->setOperandsInOrder(); 4879 buildTree_rec(PointerOps, Depth + 1, {TE, 0}); 4880 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); 4881 break; 4882 case LoadsState::Gather: 4883 BS.cancelScheduling(VL, VL0); 4884 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4885 ReuseShuffleIndicies); 4886 #ifndef NDEBUG 4887 Type *ScalarTy = VL0->getType(); 4888 if (DL->getTypeSizeInBits(ScalarTy) != 4889 DL->getTypeAllocSizeInBits(ScalarTy)) 4890 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); 4891 else if (any_of(VL, [](Value *V) { 4892 return !cast<LoadInst>(V)->isSimple(); 4893 })) 4894 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); 4895 else 4896 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); 4897 #endif // NDEBUG 4898 break; 4899 } 4900 return; 4901 } 4902 case Instruction::ZExt: 4903 case Instruction::SExt: 4904 case Instruction::FPToUI: 4905 case Instruction::FPToSI: 4906 case Instruction::FPExt: 4907 case Instruction::PtrToInt: 4908 case Instruction::IntToPtr: 4909 case Instruction::SIToFP: 4910 case Instruction::UIToFP: 4911 case Instruction::Trunc: 4912 case Instruction::FPTrunc: 4913 case Instruction::BitCast: { 4914 Type *SrcTy = VL0->getOperand(0)->getType(); 4915 for (Value *V : VL) { 4916 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType(); 4917 if (Ty != SrcTy || !isValidElementType(Ty)) { 4918 BS.cancelScheduling(VL, VL0); 4919 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4920 ReuseShuffleIndicies); 4921 LLVM_DEBUG(dbgs() 4922 << "SLP: Gathering casts with different src types.\n"); 4923 return; 4924 } 4925 } 4926 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 4927 ReuseShuffleIndicies); 4928 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); 4929 4930 TE->setOperandsInOrder(); 4931 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 4932 ValueList Operands; 4933 // Prepare the operand vector. 4934 for (Value *V : VL) 4935 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 4936 4937 buildTree_rec(Operands, Depth + 1, {TE, i}); 4938 } 4939 return; 4940 } 4941 case Instruction::ICmp: 4942 case Instruction::FCmp: { 4943 // Check that all of the compares have the same predicate. 4944 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 4945 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0); 4946 Type *ComparedTy = VL0->getOperand(0)->getType(); 4947 for (Value *V : VL) { 4948 CmpInst *Cmp = cast<CmpInst>(V); 4949 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) || 4950 Cmp->getOperand(0)->getType() != ComparedTy) { 4951 BS.cancelScheduling(VL, VL0); 4952 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4953 ReuseShuffleIndicies); 4954 LLVM_DEBUG(dbgs() 4955 << "SLP: Gathering cmp with different predicate.\n"); 4956 return; 4957 } 4958 } 4959 4960 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 4961 ReuseShuffleIndicies); 4962 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 4963 4964 ValueList Left, Right; 4965 if (cast<CmpInst>(VL0)->isCommutative()) { 4966 // Commutative predicate - collect + sort operands of the instructions 4967 // so that each side is more likely to have the same opcode. 
4968 assert(P0 == SwapP0 && "Commutative Predicate mismatch"); 4969 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 4970 } else { 4971 // Collect operands - commute if it uses the swapped predicate. 4972 for (Value *V : VL) { 4973 auto *Cmp = cast<CmpInst>(V); 4974 Value *LHS = Cmp->getOperand(0); 4975 Value *RHS = Cmp->getOperand(1); 4976 if (Cmp->getPredicate() != P0) 4977 std::swap(LHS, RHS); 4978 Left.push_back(LHS); 4979 Right.push_back(RHS); 4980 } 4981 } 4982 TE->setOperand(0, Left); 4983 TE->setOperand(1, Right); 4984 buildTree_rec(Left, Depth + 1, {TE, 0}); 4985 buildTree_rec(Right, Depth + 1, {TE, 1}); 4986 return; 4987 } 4988 case Instruction::Select: 4989 case Instruction::FNeg: 4990 case Instruction::Add: 4991 case Instruction::FAdd: 4992 case Instruction::Sub: 4993 case Instruction::FSub: 4994 case Instruction::Mul: 4995 case Instruction::FMul: 4996 case Instruction::UDiv: 4997 case Instruction::SDiv: 4998 case Instruction::FDiv: 4999 case Instruction::URem: 5000 case Instruction::SRem: 5001 case Instruction::FRem: 5002 case Instruction::Shl: 5003 case Instruction::LShr: 5004 case Instruction::AShr: 5005 case Instruction::And: 5006 case Instruction::Or: 5007 case Instruction::Xor: { 5008 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 5009 ReuseShuffleIndicies); 5010 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n"); 5011 5012 // Sort operands of the instructions so that each side is more likely to 5013 // have the same opcode. 5014 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 5015 ValueList Left, Right; 5016 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 5017 TE->setOperand(0, Left); 5018 TE->setOperand(1, Right); 5019 buildTree_rec(Left, Depth + 1, {TE, 0}); 5020 buildTree_rec(Right, Depth + 1, {TE, 1}); 5021 return; 5022 } 5023 5024 TE->setOperandsInOrder(); 5025 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 5026 ValueList Operands; 5027 // Prepare the operand vector. 5028 for (Value *V : VL) 5029 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 5030 5031 buildTree_rec(Operands, Depth + 1, {TE, i}); 5032 } 5033 return; 5034 } 5035 case Instruction::GetElementPtr: { 5036 // We don't combine GEPs with complicated (nested) indexing. 5037 for (Value *V : VL) { 5038 if (cast<Instruction>(V)->getNumOperands() != 2) { 5039 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 5040 BS.cancelScheduling(VL, VL0); 5041 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 5042 ReuseShuffleIndicies); 5043 return; 5044 } 5045 } 5046 5047 // We can't combine several GEPs into one vector if they operate on 5048 // different types. 5049 Type *Ty0 = cast<GEPOperator>(VL0)->getSourceElementType(); 5050 for (Value *V : VL) { 5051 Type *CurTy = cast<GEPOperator>(V)->getSourceElementType(); 5052 if (Ty0 != CurTy) { 5053 LLVM_DEBUG(dbgs() 5054 << "SLP: not-vectorizable GEP (different types).\n"); 5055 BS.cancelScheduling(VL, VL0); 5056 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 5057 ReuseShuffleIndicies); 5058 return; 5059 } 5060 } 5061 5062 // We don't combine GEPs with non-constant indexes. 
5063 Type *Ty1 = VL0->getOperand(1)->getType(); 5064 for (Value *V : VL) { 5065 auto Op = cast<Instruction>(V)->getOperand(1); 5066 if (!isa<ConstantInt>(Op) || 5067 (Op->getType() != Ty1 && 5068 Op->getType()->getScalarSizeInBits() > 5069 DL->getIndexSizeInBits( 5070 V->getType()->getPointerAddressSpace()))) { 5071 LLVM_DEBUG(dbgs() 5072 << "SLP: not-vectorizable GEP (non-constant indexes).\n"); 5073 BS.cancelScheduling(VL, VL0); 5074 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 5075 ReuseShuffleIndicies); 5076 return; 5077 } 5078 } 5079 5080 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 5081 ReuseShuffleIndicies); 5082 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n"); 5083 SmallVector<ValueList, 2> Operands(2); 5084 // Prepare the operand vector for pointer operands. 5085 for (Value *V : VL) 5086 Operands.front().push_back( 5087 cast<GetElementPtrInst>(V)->getPointerOperand()); 5088 TE->setOperand(0, Operands.front()); 5089 // Need to cast all indices to the same type before vectorization to 5090 // avoid crash. 5091 // Required to be able to find correct matches between different gather 5092 // nodes and reuse the vectorized values rather than trying to gather them 5093 // again. 5094 int IndexIdx = 1; 5095 Type *VL0Ty = VL0->getOperand(IndexIdx)->getType(); 5096 Type *Ty = all_of(VL, 5097 [VL0Ty, IndexIdx](Value *V) { 5098 return VL0Ty == cast<GetElementPtrInst>(V) 5099 ->getOperand(IndexIdx) 5100 ->getType(); 5101 }) 5102 ? VL0Ty 5103 : DL->getIndexType(cast<GetElementPtrInst>(VL0) 5104 ->getPointerOperandType() 5105 ->getScalarType()); 5106 // Prepare the operand vector. 5107 for (Value *V : VL) { 5108 auto *Op = cast<Instruction>(V)->getOperand(IndexIdx); 5109 auto *CI = cast<ConstantInt>(Op); 5110 Operands.back().push_back(ConstantExpr::getIntegerCast( 5111 CI, Ty, CI->getValue().isSignBitSet())); 5112 } 5113 TE->setOperand(IndexIdx, Operands.back()); 5114 5115 for (unsigned I = 0, Ops = Operands.size(); I < Ops; ++I) 5116 buildTree_rec(Operands[I], Depth + 1, {TE, I}); 5117 return; 5118 } 5119 case Instruction::Store: { 5120 // Check if the stores are consecutive or if we need to swizzle them. 5121 llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType(); 5122 // Avoid types that are padded when being allocated as scalars, while 5123 // being packed together in a vector (such as i1). 5124 if (DL->getTypeSizeInBits(ScalarTy) != 5125 DL->getTypeAllocSizeInBits(ScalarTy)) { 5126 BS.cancelScheduling(VL, VL0); 5127 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 5128 ReuseShuffleIndicies); 5129 LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n"); 5130 return; 5131 } 5132 // Make sure all stores in the bundle are simple - we can't vectorize 5133 // atomic or volatile stores. 5134 SmallVector<Value *, 4> PointerOps(VL.size()); 5135 ValueList Operands(VL.size()); 5136 auto POIter = PointerOps.begin(); 5137 auto OIter = Operands.begin(); 5138 for (Value *V : VL) { 5139 auto *SI = cast<StoreInst>(V); 5140 if (!SI->isSimple()) { 5141 BS.cancelScheduling(VL, VL0); 5142 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 5143 ReuseShuffleIndicies); 5144 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n"); 5145 return; 5146 } 5147 *POIter = SI->getPointerOperand(); 5148 *OIter = SI->getValueOperand(); 5149 ++POIter; 5150 ++OIter; 5151 } 5152 5153 OrdersType CurrentOrder; 5154 // Check the order of pointer operands. 
5155 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) { 5156 Value *Ptr0; 5157 Value *PtrN; 5158 if (CurrentOrder.empty()) { 5159 Ptr0 = PointerOps.front(); 5160 PtrN = PointerOps.back(); 5161 } else { 5162 Ptr0 = PointerOps[CurrentOrder.front()]; 5163 PtrN = PointerOps[CurrentOrder.back()]; 5164 } 5165 Optional<int> Dist = 5166 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE); 5167 // Check that the sorted pointer operands are consecutive. 5168 if (static_cast<unsigned>(*Dist) == VL.size() - 1) { 5169 if (CurrentOrder.empty()) { 5170 // Original stores are consecutive and does not require reordering. 5171 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, 5172 UserTreeIdx, ReuseShuffleIndicies); 5173 TE->setOperandsInOrder(); 5174 buildTree_rec(Operands, Depth + 1, {TE, 0}); 5175 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n"); 5176 } else { 5177 fixupOrderingIndices(CurrentOrder); 5178 TreeEntry *TE = 5179 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 5180 ReuseShuffleIndicies, CurrentOrder); 5181 TE->setOperandsInOrder(); 5182 buildTree_rec(Operands, Depth + 1, {TE, 0}); 5183 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n"); 5184 } 5185 return; 5186 } 5187 } 5188 5189 BS.cancelScheduling(VL, VL0); 5190 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 5191 ReuseShuffleIndicies); 5192 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); 5193 return; 5194 } 5195 case Instruction::Call: { 5196 // Check if the calls are all to the same vectorizable intrinsic or 5197 // library function. 5198 CallInst *CI = cast<CallInst>(VL0); 5199 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 5200 5201 VFShape Shape = VFShape::get( 5202 *CI, ElementCount::getFixed(static_cast<unsigned int>(VL.size())), 5203 false /*HasGlobalPred*/); 5204 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 5205 5206 if (!VecFunc && !isTriviallyVectorizable(ID)) { 5207 BS.cancelScheduling(VL, VL0); 5208 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 5209 ReuseShuffleIndicies); 5210 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n"); 5211 return; 5212 } 5213 Function *F = CI->getCalledFunction(); 5214 unsigned NumArgs = CI->arg_size(); 5215 SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr); 5216 for (unsigned j = 0; j != NumArgs; ++j) 5217 if (isVectorIntrinsicWithScalarOpAtArg(ID, j)) 5218 ScalarArgs[j] = CI->getArgOperand(j); 5219 for (Value *V : VL) { 5220 CallInst *CI2 = dyn_cast<CallInst>(V); 5221 if (!CI2 || CI2->getCalledFunction() != F || 5222 getVectorIntrinsicIDForCall(CI2, TLI) != ID || 5223 (VecFunc && 5224 VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) || 5225 !CI->hasIdenticalOperandBundleSchema(*CI2)) { 5226 BS.cancelScheduling(VL, VL0); 5227 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 5228 ReuseShuffleIndicies); 5229 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V 5230 << "\n"); 5231 return; 5232 } 5233 // Some intrinsics have scalar arguments and should be same in order for 5234 // them to be vectorized. 
5235 for (unsigned j = 0; j != NumArgs; ++j) { 5236 if (isVectorIntrinsicWithScalarOpAtArg(ID, j)) { 5237 Value *A1J = CI2->getArgOperand(j); 5238 if (ScalarArgs[j] != A1J) { 5239 BS.cancelScheduling(VL, VL0); 5240 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 5241 ReuseShuffleIndicies); 5242 LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI 5243 << " argument " << ScalarArgs[j] << "!=" << A1J 5244 << "\n"); 5245 return; 5246 } 5247 } 5248 } 5249 // Verify that the bundle operands are identical between the two calls. 5250 if (CI->hasOperandBundles() && 5251 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(), 5252 CI->op_begin() + CI->getBundleOperandsEndIndex(), 5253 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) { 5254 BS.cancelScheduling(VL, VL0); 5255 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 5256 ReuseShuffleIndicies); 5257 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" 5258 << *CI << "!=" << *V << '\n'); 5259 return; 5260 } 5261 } 5262 5263 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 5264 ReuseShuffleIndicies); 5265 TE->setOperandsInOrder(); 5266 for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) { 5267 // For scalar operands no need to to create an entry since no need to 5268 // vectorize it. 5269 if (isVectorIntrinsicWithScalarOpAtArg(ID, i)) 5270 continue; 5271 ValueList Operands; 5272 // Prepare the operand vector. 5273 for (Value *V : VL) { 5274 auto *CI2 = cast<CallInst>(V); 5275 Operands.push_back(CI2->getArgOperand(i)); 5276 } 5277 buildTree_rec(Operands, Depth + 1, {TE, i}); 5278 } 5279 return; 5280 } 5281 case Instruction::ShuffleVector: { 5282 // If this is not an alternate sequence of opcode like add-sub 5283 // then do not vectorize this instruction. 5284 if (!S.isAltShuffle()) { 5285 BS.cancelScheduling(VL, VL0); 5286 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 5287 ReuseShuffleIndicies); 5288 LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n"); 5289 return; 5290 } 5291 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 5292 ReuseShuffleIndicies); 5293 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n"); 5294 5295 // Reorder operands if reordering would enable vectorization. 5296 auto *CI = dyn_cast<CmpInst>(VL0); 5297 if (isa<BinaryOperator>(VL0) || CI) { 5298 ValueList Left, Right; 5299 if (!CI || all_of(VL, [](Value *V) { 5300 return cast<CmpInst>(V)->isCommutative(); 5301 })) { 5302 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 5303 } else { 5304 CmpInst::Predicate P0 = CI->getPredicate(); 5305 CmpInst::Predicate AltP0 = cast<CmpInst>(S.AltOp)->getPredicate(); 5306 assert(P0 != AltP0 && 5307 "Expected different main/alternate predicates."); 5308 CmpInst::Predicate AltP0Swapped = CmpInst::getSwappedPredicate(AltP0); 5309 Value *BaseOp0 = VL0->getOperand(0); 5310 Value *BaseOp1 = VL0->getOperand(1); 5311 // Collect operands - commute if it uses the swapped predicate or 5312 // alternate operation. 
5313 for (Value *V : VL) { 5314 auto *Cmp = cast<CmpInst>(V); 5315 Value *LHS = Cmp->getOperand(0); 5316 Value *RHS = Cmp->getOperand(1); 5317 CmpInst::Predicate CurrentPred = Cmp->getPredicate(); 5318 if (P0 == AltP0Swapped) { 5319 if (CI != Cmp && S.AltOp != Cmp && 5320 ((P0 == CurrentPred && 5321 !areCompatibleCmpOps(BaseOp0, BaseOp1, LHS, RHS)) || 5322 (AltP0 == CurrentPred && 5323 areCompatibleCmpOps(BaseOp0, BaseOp1, LHS, RHS)))) 5324 std::swap(LHS, RHS); 5325 } else if (P0 != CurrentPred && AltP0 != CurrentPred) { 5326 std::swap(LHS, RHS); 5327 } 5328 Left.push_back(LHS); 5329 Right.push_back(RHS); 5330 } 5331 } 5332 TE->setOperand(0, Left); 5333 TE->setOperand(1, Right); 5334 buildTree_rec(Left, Depth + 1, {TE, 0}); 5335 buildTree_rec(Right, Depth + 1, {TE, 1}); 5336 return; 5337 } 5338 5339 TE->setOperandsInOrder(); 5340 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 5341 ValueList Operands; 5342 // Prepare the operand vector. 5343 for (Value *V : VL) 5344 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 5345 5346 buildTree_rec(Operands, Depth + 1, {TE, i}); 5347 } 5348 return; 5349 } 5350 default: 5351 BS.cancelScheduling(VL, VL0); 5352 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 5353 ReuseShuffleIndicies); 5354 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); 5355 return; 5356 } 5357 } 5358 5359 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const { 5360 unsigned N = 1; 5361 Type *EltTy = T; 5362 5363 while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) || 5364 isa<VectorType>(EltTy)) { 5365 if (auto *ST = dyn_cast<StructType>(EltTy)) { 5366 // Check that struct is homogeneous. 5367 for (const auto *Ty : ST->elements()) 5368 if (Ty != *ST->element_begin()) 5369 return 0; 5370 N *= ST->getNumElements(); 5371 EltTy = *ST->element_begin(); 5372 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) { 5373 N *= AT->getNumElements(); 5374 EltTy = AT->getElementType(); 5375 } else { 5376 auto *VT = cast<FixedVectorType>(EltTy); 5377 N *= VT->getNumElements(); 5378 EltTy = VT->getElementType(); 5379 } 5380 } 5381 5382 if (!isValidElementType(EltTy)) 5383 return 0; 5384 uint64_t VTSize = DL.getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N)); 5385 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T)) 5386 return 0; 5387 return N; 5388 } 5389 5390 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 5391 SmallVectorImpl<unsigned> &CurrentOrder) const { 5392 const auto *It = find_if(VL, [](Value *V) { 5393 return isa<ExtractElementInst, ExtractValueInst>(V); 5394 }); 5395 assert(It != VL.end() && "Expected at least one extract instruction."); 5396 auto *E0 = cast<Instruction>(*It); 5397 assert(all_of(VL, 5398 [](Value *V) { 5399 return isa<UndefValue, ExtractElementInst, ExtractValueInst>( 5400 V); 5401 }) && 5402 "Invalid opcode"); 5403 // Check if all of the extracts come from the same vector and from the 5404 // correct offset. 5405 Value *Vec = E0->getOperand(0); 5406 5407 CurrentOrder.clear(); 5408 5409 // We have to extract from a vector/aggregate with the same number of elements. 5410 unsigned NElts; 5411 if (E0->getOpcode() == Instruction::ExtractValue) { 5412 const DataLayout &DL = E0->getModule()->getDataLayout(); 5413 NElts = canMapToVector(Vec->getType(), DL); 5414 if (!NElts) 5415 return false; 5416 // Check if load can be rewritten as load of vector. 
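// That is, the extracted aggregate must essentially be produced by a single
// simple load with exactly one use per extracted lane, so it could later be
// treated as a vector load instead.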
5417 LoadInst *LI = dyn_cast<LoadInst>(Vec); 5418 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 5419 return false; 5420 } else { 5421 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); 5422 } 5423 5424 if (NElts != VL.size()) 5425 return false; 5426 5427 // Check that all of the indices extract from the correct offset. 5428 bool ShouldKeepOrder = true; 5429 unsigned E = VL.size(); 5430 // Assign to all items the initial value E + 1 so we can check if the extract 5431 // instruction index was used already. 5432 // Also, later we can check that all the indices are used and we have a 5433 // consecutive access in the extract instructions, by checking that no 5434 // element of CurrentOrder still has value E + 1. 5435 CurrentOrder.assign(E, E); 5436 unsigned I = 0; 5437 for (; I < E; ++I) { 5438 auto *Inst = dyn_cast<Instruction>(VL[I]); 5439 if (!Inst) 5440 continue; 5441 if (Inst->getOperand(0) != Vec) 5442 break; 5443 if (auto *EE = dyn_cast<ExtractElementInst>(Inst)) 5444 if (isa<UndefValue>(EE->getIndexOperand())) 5445 continue; 5446 Optional<unsigned> Idx = getExtractIndex(Inst); 5447 if (!Idx) 5448 break; 5449 const unsigned ExtIdx = *Idx; 5450 if (ExtIdx != I) { 5451 if (ExtIdx >= E || CurrentOrder[ExtIdx] != E) 5452 break; 5453 ShouldKeepOrder = false; 5454 CurrentOrder[ExtIdx] = I; 5455 } else { 5456 if (CurrentOrder[I] != E) 5457 break; 5458 CurrentOrder[I] = I; 5459 } 5460 } 5461 if (I < E) { 5462 CurrentOrder.clear(); 5463 return false; 5464 } 5465 if (ShouldKeepOrder) 5466 CurrentOrder.clear(); 5467 5468 return ShouldKeepOrder; 5469 } 5470 5471 bool BoUpSLP::areAllUsersVectorized(Instruction *I, 5472 ArrayRef<Value *> VectorizedVals) const { 5473 return (I->hasOneUse() && is_contained(VectorizedVals, I)) || 5474 all_of(I->users(), [this](User *U) { 5475 return ScalarToTreeEntry.count(U) > 0 || 5476 isVectorLikeInstWithConstOps(U) || 5477 (isa<ExtractElementInst>(U) && MustGather.contains(U)); 5478 }); 5479 } 5480 5481 static std::pair<InstructionCost, InstructionCost> 5482 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy, 5483 TargetTransformInfo *TTI, TargetLibraryInfo *TLI) { 5484 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 5485 5486 // Calculate the cost of the scalar and vector calls. 5487 SmallVector<Type *, 4> VecTys; 5488 for (Use &Arg : CI->args()) 5489 VecTys.push_back( 5490 FixedVectorType::get(Arg->getType(), VecTy->getNumElements())); 5491 FastMathFlags FMF; 5492 if (auto *FPCI = dyn_cast<FPMathOperator>(CI)) 5493 FMF = FPCI->getFastMathFlags(); 5494 SmallVector<const Value *> Arguments(CI->args()); 5495 IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF, 5496 dyn_cast<IntrinsicInst>(CI)); 5497 auto IntrinsicCost = 5498 TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput); 5499 5500 auto Shape = VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>( 5501 VecTy->getNumElements())), 5502 false /*HasGlobalPred*/); 5503 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 5504 auto LibCost = IntrinsicCost; 5505 if (!CI->isNoBuiltin() && VecFunc) { 5506 // Calculate the cost of the vector library call. 5507 // If the corresponding vector call is cheaper, return its cost. 5508 LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys, 5509 TTI::TCK_RecipThroughput); 5510 } 5511 return {IntrinsicCost, LibCost}; 5512 } 5513 5514 /// Compute the cost of creating a vector of type \p VecTy containing the 5515 /// extracted values from \p VL. 
5516 static InstructionCost 5517 computeExtractCost(ArrayRef<Value *> VL, FixedVectorType *VecTy, 5518 TargetTransformInfo::ShuffleKind ShuffleKind, 5519 ArrayRef<int> Mask, TargetTransformInfo &TTI) { 5520 unsigned NumOfParts = TTI.getNumberOfParts(VecTy); 5521 5522 if (ShuffleKind != TargetTransformInfo::SK_PermuteSingleSrc || !NumOfParts || 5523 VecTy->getNumElements() < NumOfParts) 5524 return TTI.getShuffleCost(ShuffleKind, VecTy, Mask); 5525 5526 bool AllConsecutive = true; 5527 unsigned EltsPerVector = VecTy->getNumElements() / NumOfParts; 5528 unsigned Idx = -1; 5529 InstructionCost Cost = 0; 5530 5531 // Process extracts in blocks of EltsPerVector to check if the source vector 5532 // operand can be re-used directly. If not, add the cost of creating a shuffle 5533 // to extract the values into a vector register. 5534 SmallVector<int> RegMask(EltsPerVector, UndefMaskElem); 5535 for (auto *V : VL) { 5536 ++Idx; 5537 5538 // Need to exclude undefs from analysis. 5539 if (isa<UndefValue>(V) || Mask[Idx] == UndefMaskElem) 5540 continue; 5541 5542 // Reached the start of a new vector registers. 5543 if (Idx % EltsPerVector == 0) { 5544 RegMask.assign(EltsPerVector, UndefMaskElem); 5545 AllConsecutive = true; 5546 continue; 5547 } 5548 5549 // Check all extracts for a vector register on the target directly 5550 // extract values in order. 5551 unsigned CurrentIdx = *getExtractIndex(cast<Instruction>(V)); 5552 if (!isa<UndefValue>(VL[Idx - 1]) && Mask[Idx - 1] != UndefMaskElem) { 5553 unsigned PrevIdx = *getExtractIndex(cast<Instruction>(VL[Idx - 1])); 5554 AllConsecutive &= PrevIdx + 1 == CurrentIdx && 5555 CurrentIdx % EltsPerVector == Idx % EltsPerVector; 5556 RegMask[Idx % EltsPerVector] = CurrentIdx % EltsPerVector; 5557 } 5558 5559 if (AllConsecutive) 5560 continue; 5561 5562 // Skip all indices, except for the last index per vector block. 5563 if ((Idx + 1) % EltsPerVector != 0 && Idx + 1 != VL.size()) 5564 continue; 5565 5566 // If we have a series of extracts which are not consecutive and hence 5567 // cannot re-use the source vector register directly, compute the shuffle 5568 // cost to extract the vector with EltsPerVector elements. 5569 Cost += TTI.getShuffleCost( 5570 TargetTransformInfo::SK_PermuteSingleSrc, 5571 FixedVectorType::get(VecTy->getElementType(), EltsPerVector), RegMask); 5572 } 5573 return Cost; 5574 } 5575 5576 /// Build shuffle mask for shuffle graph entries and lists of main and alternate 5577 /// operations operands. 
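/// The mask refers to the concatenation (MainVec, AltVec): lane I selects
/// element I of the main-opcode vector or element Sz + I of the
/// alternate-opcode vector. E.g. an add/sub bundle of width 4 whose odd
/// lanes are subs yields the mask <0, 5, 2, 7> (illustrative, assuming no
/// reordering or reuse remapping).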
5578 static void 5579 buildShuffleEntryMask(ArrayRef<Value *> VL, ArrayRef<unsigned> ReorderIndices, 5580 ArrayRef<int> ReusesIndices, 5581 const function_ref<bool(Instruction *)> IsAltOp, 5582 SmallVectorImpl<int> &Mask, 5583 SmallVectorImpl<Value *> *OpScalars = nullptr, 5584 SmallVectorImpl<Value *> *AltScalars = nullptr) { 5585 unsigned Sz = VL.size(); 5586 Mask.assign(Sz, UndefMaskElem); 5587 SmallVector<int> OrderMask; 5588 if (!ReorderIndices.empty()) 5589 inversePermutation(ReorderIndices, OrderMask); 5590 for (unsigned I = 0; I < Sz; ++I) { 5591 unsigned Idx = I; 5592 if (!ReorderIndices.empty()) 5593 Idx = OrderMask[I]; 5594 auto *OpInst = cast<Instruction>(VL[Idx]); 5595 if (IsAltOp(OpInst)) { 5596 Mask[I] = Sz + Idx; 5597 if (AltScalars) 5598 AltScalars->push_back(OpInst); 5599 } else { 5600 Mask[I] = Idx; 5601 if (OpScalars) 5602 OpScalars->push_back(OpInst); 5603 } 5604 } 5605 if (!ReusesIndices.empty()) { 5606 SmallVector<int> NewMask(ReusesIndices.size(), UndefMaskElem); 5607 transform(ReusesIndices, NewMask.begin(), [&Mask](int Idx) { 5608 return Idx != UndefMaskElem ? Mask[Idx] : UndefMaskElem; 5609 }); 5610 Mask.swap(NewMask); 5611 } 5612 } 5613 5614 /// Checks if the specified instruction \p I is an alternate operation for the 5615 /// given \p MainOp and \p AltOp instructions. 5616 static bool isAlternateInstruction(const Instruction *I, 5617 const Instruction *MainOp, 5618 const Instruction *AltOp) { 5619 if (auto *CI0 = dyn_cast<CmpInst>(MainOp)) { 5620 auto *AltCI0 = cast<CmpInst>(AltOp); 5621 auto *CI = cast<CmpInst>(I); 5622 CmpInst::Predicate P0 = CI0->getPredicate(); 5623 CmpInst::Predicate AltP0 = AltCI0->getPredicate(); 5624 assert(P0 != AltP0 && "Expected different main/alternate predicates."); 5625 CmpInst::Predicate AltP0Swapped = CmpInst::getSwappedPredicate(AltP0); 5626 CmpInst::Predicate CurrentPred = CI->getPredicate(); 5627 if (P0 == AltP0Swapped) 5628 return I == AltCI0 || 5629 (I != MainOp && 5630 !areCompatibleCmpOps(CI0->getOperand(0), CI0->getOperand(1), 5631 CI->getOperand(0), CI->getOperand(1))); 5632 return AltP0 == CurrentPred || AltP0Swapped == CurrentPred; 5633 } 5634 return I->getOpcode() == AltOp->getOpcode(); 5635 } 5636 5637 InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E, 5638 ArrayRef<Value *> VectorizedVals) { 5639 ArrayRef<Value*> VL = E->Scalars; 5640 5641 Type *ScalarTy = VL[0]->getType(); 5642 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 5643 ScalarTy = SI->getValueOperand()->getType(); 5644 else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0])) 5645 ScalarTy = CI->getOperand(0)->getType(); 5646 else if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 5647 ScalarTy = IE->getOperand(1)->getType(); 5648 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 5649 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 5650 5651 // If we have computed a smaller type for the expression, update VecTy so 5652 // that the costs will be accurate. 5653 if (MinBWs.count(VL[0])) 5654 VecTy = FixedVectorType::get( 5655 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size()); 5656 unsigned EntryVF = E->getVectorFactor(); 5657 auto *FinalVecTy = FixedVectorType::get(VecTy->getElementType(), EntryVF); 5658 5659 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 5660 // FIXME: it tries to fix a problem with MSVC buildbots. 
5661 TargetTransformInfo &TTIRef = *TTI; 5662 auto &&AdjustExtractsCost = [this, &TTIRef, CostKind, VL, VecTy, 5663 VectorizedVals, E](InstructionCost &Cost) { 5664 DenseMap<Value *, int> ExtractVectorsTys; 5665 SmallPtrSet<Value *, 4> CheckedExtracts; 5666 for (auto *V : VL) { 5667 if (isa<UndefValue>(V)) 5668 continue; 5669 // If all users of instruction are going to be vectorized and this 5670 // instruction itself is not going to be vectorized, consider this 5671 // instruction as dead and remove its cost from the final cost of the 5672 // vectorized tree. 5673 // Also, avoid adjusting the cost for extractelements with multiple uses 5674 // in different graph entries. 5675 const TreeEntry *VE = getTreeEntry(V); 5676 if (!CheckedExtracts.insert(V).second || 5677 !areAllUsersVectorized(cast<Instruction>(V), VectorizedVals) || 5678 (VE && VE != E)) 5679 continue; 5680 auto *EE = cast<ExtractElementInst>(V); 5681 Optional<unsigned> EEIdx = getExtractIndex(EE); 5682 if (!EEIdx) 5683 continue; 5684 unsigned Idx = *EEIdx; 5685 if (TTIRef.getNumberOfParts(VecTy) != 5686 TTIRef.getNumberOfParts(EE->getVectorOperandType())) { 5687 auto It = 5688 ExtractVectorsTys.try_emplace(EE->getVectorOperand(), Idx).first; 5689 It->getSecond() = std::min<int>(It->second, Idx); 5690 } 5691 // Take credit for instruction that will become dead. 5692 if (EE->hasOneUse()) { 5693 Instruction *Ext = EE->user_back(); 5694 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 5695 all_of(Ext->users(), 5696 [](User *U) { return isa<GetElementPtrInst>(U); })) { 5697 // Use getExtractWithExtendCost() to calculate the cost of 5698 // extractelement/ext pair. 5699 Cost -= 5700 TTIRef.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(), 5701 EE->getVectorOperandType(), Idx); 5702 // Add back the cost of s|zext which is subtracted separately. 5703 Cost += TTIRef.getCastInstrCost( 5704 Ext->getOpcode(), Ext->getType(), EE->getType(), 5705 TTI::getCastContextHint(Ext), CostKind, Ext); 5706 continue; 5707 } 5708 } 5709 Cost -= TTIRef.getVectorInstrCost(Instruction::ExtractElement, 5710 EE->getVectorOperandType(), Idx); 5711 } 5712 // Add a cost for subvector extracts/inserts if required. 5713 for (const auto &Data : ExtractVectorsTys) { 5714 auto *EEVTy = cast<FixedVectorType>(Data.first->getType()); 5715 unsigned NumElts = VecTy->getNumElements(); 5716 if (Data.second % NumElts == 0) 5717 continue; 5718 if (TTIRef.getNumberOfParts(EEVTy) > TTIRef.getNumberOfParts(VecTy)) { 5719 unsigned Idx = (Data.second / NumElts) * NumElts; 5720 unsigned EENumElts = EEVTy->getNumElements(); 5721 if (Idx + NumElts <= EENumElts) { 5722 Cost += 5723 TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 5724 EEVTy, None, Idx, VecTy); 5725 } else { 5726 // Need to round up the subvector type vectorization factor to avoid a 5727 // crash in cost model functions. Make SubVT so that Idx + VF of SubVT 5728 // <= EENumElts. 
5729 auto *SubVT = 5730 FixedVectorType::get(VecTy->getElementType(), EENumElts - Idx); 5731 Cost += 5732 TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 5733 EEVTy, None, Idx, SubVT); 5734 } 5735 } else { 5736 Cost += TTIRef.getShuffleCost(TargetTransformInfo::SK_InsertSubvector, 5737 VecTy, None, 0, EEVTy); 5738 } 5739 } 5740 }; 5741 if (E->State == TreeEntry::NeedToGather) { 5742 if (allConstant(VL)) 5743 return 0; 5744 if (isa<InsertElementInst>(VL[0])) 5745 return InstructionCost::getInvalid(); 5746 SmallVector<int> Mask; 5747 SmallVector<const TreeEntry *> Entries; 5748 Optional<TargetTransformInfo::ShuffleKind> Shuffle = 5749 isGatherShuffledEntry(E, Mask, Entries); 5750 if (Shuffle.hasValue()) { 5751 InstructionCost GatherCost = 0; 5752 if (ShuffleVectorInst::isIdentityMask(Mask)) { 5753 // Perfect match in the graph, will reuse the previously vectorized 5754 // node. Cost is 0. 5755 LLVM_DEBUG( 5756 dbgs() 5757 << "SLP: perfect diamond match for gather bundle that starts with " 5758 << *VL.front() << ".\n"); 5759 if (NeedToShuffleReuses) 5760 GatherCost = 5761 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, 5762 FinalVecTy, E->ReuseShuffleIndices); 5763 } else { 5764 LLVM_DEBUG(dbgs() << "SLP: shuffled " << Entries.size() 5765 << " entries for bundle that starts with " 5766 << *VL.front() << ".\n"); 5767 // Detected that instead of gather we can emit a shuffle of single/two 5768 // previously vectorized nodes. Add the cost of the permutation rather 5769 // than gather. 5770 ::addMask(Mask, E->ReuseShuffleIndices); 5771 GatherCost = TTI->getShuffleCost(*Shuffle, FinalVecTy, Mask); 5772 } 5773 return GatherCost; 5774 } 5775 if ((E->getOpcode() == Instruction::ExtractElement || 5776 all_of(E->Scalars, 5777 [](Value *V) { 5778 return isa<ExtractElementInst, UndefValue>(V); 5779 })) && 5780 allSameType(VL)) { 5781 // Check that gather of extractelements can be represented as just a 5782 // shuffle of a single/two vectors the scalars are extracted from. 5783 SmallVector<int> Mask; 5784 Optional<TargetTransformInfo::ShuffleKind> ShuffleKind = 5785 isFixedVectorShuffle(VL, Mask); 5786 if (ShuffleKind.hasValue()) { 5787 // Found the bunch of extractelement instructions that must be gathered 5788 // into a vector and can be represented as a permutation elements in a 5789 // single input vector or of 2 input vectors. 5790 InstructionCost Cost = 5791 computeExtractCost(VL, VecTy, *ShuffleKind, Mask, *TTI); 5792 AdjustExtractsCost(Cost); 5793 if (NeedToShuffleReuses) 5794 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, 5795 FinalVecTy, E->ReuseShuffleIndices); 5796 return Cost; 5797 } 5798 } 5799 if (isSplat(VL)) { 5800 // Found the broadcasting of the single scalar, calculate the cost as the 5801 // broadcast. 5802 assert(VecTy == FinalVecTy && 5803 "No reused scalars expected for broadcast."); 5804 return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 5805 /*Mask=*/None, /*Index=*/0, 5806 /*SubTp=*/nullptr, /*Args=*/VL[0]); 5807 } 5808 InstructionCost ReuseShuffleCost = 0; 5809 if (NeedToShuffleReuses) 5810 ReuseShuffleCost = TTI->getShuffleCost( 5811 TTI::SK_PermuteSingleSrc, FinalVecTy, E->ReuseShuffleIndices); 5812 // Improve gather cost for gather of loads, if we can group some of the 5813 // loads into vector loads. 
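// The search below starts at VF = VL.size() / 2 and keeps halving down to
// the minimum VF, costing every slice that canVectorizeLoads accepts as a
// real vector load (or masked gather) instead of as scalar gathers.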
5814 if (VL.size() > 2 && E->getOpcode() == Instruction::Load && 5815 !E->isAltShuffle()) { 5816 BoUpSLP::ValueSet VectorizedLoads; 5817 unsigned StartIdx = 0; 5818 unsigned VF = VL.size() / 2; 5819 unsigned VectorizedCnt = 0; 5820 unsigned ScatterVectorizeCnt = 0; 5821 const unsigned Sz = DL->getTypeSizeInBits(E->getMainOp()->getType()); 5822 for (unsigned MinVF = getMinVF(2 * Sz); VF >= MinVF; VF /= 2) { 5823 for (unsigned Cnt = StartIdx, End = VL.size(); Cnt + VF <= End; 5824 Cnt += VF) { 5825 ArrayRef<Value *> Slice = VL.slice(Cnt, VF); 5826 if (!VectorizedLoads.count(Slice.front()) && 5827 !VectorizedLoads.count(Slice.back()) && allSameBlock(Slice)) { 5828 SmallVector<Value *> PointerOps; 5829 OrdersType CurrentOrder; 5830 LoadsState LS = canVectorizeLoads(Slice, Slice.front(), *TTI, *DL, 5831 *SE, CurrentOrder, PointerOps); 5832 switch (LS) { 5833 case LoadsState::Vectorize: 5834 case LoadsState::ScatterVectorize: 5835 // Mark the vectorized loads so that we don't vectorize them 5836 // again. 5837 if (LS == LoadsState::Vectorize) 5838 ++VectorizedCnt; 5839 else 5840 ++ScatterVectorizeCnt; 5841 VectorizedLoads.insert(Slice.begin(), Slice.end()); 5842 // If we vectorized initial block, no need to try to vectorize it 5843 // again. 5844 if (Cnt == StartIdx) 5845 StartIdx += VF; 5846 break; 5847 case LoadsState::Gather: 5848 break; 5849 } 5850 } 5851 } 5852 // Check if the whole array was vectorized already - exit. 5853 if (StartIdx >= VL.size()) 5854 break; 5855 // Found vectorizable parts - exit. 5856 if (!VectorizedLoads.empty()) 5857 break; 5858 } 5859 if (!VectorizedLoads.empty()) { 5860 InstructionCost GatherCost = 0; 5861 unsigned NumParts = TTI->getNumberOfParts(VecTy); 5862 bool NeedInsertSubvectorAnalysis = 5863 !NumParts || (VL.size() / VF) > NumParts; 5864 // Get the cost for gathered loads. 5865 for (unsigned I = 0, End = VL.size(); I < End; I += VF) { 5866 if (VectorizedLoads.contains(VL[I])) 5867 continue; 5868 GatherCost += getGatherCost(VL.slice(I, VF)); 5869 } 5870 // The cost for vectorized loads. 5871 InstructionCost ScalarsCost = 0; 5872 for (Value *V : VectorizedLoads) { 5873 auto *LI = cast<LoadInst>(V); 5874 ScalarsCost += TTI->getMemoryOpCost( 5875 Instruction::Load, LI->getType(), LI->getAlign(), 5876 LI->getPointerAddressSpace(), CostKind, LI); 5877 } 5878 auto *LI = cast<LoadInst>(E->getMainOp()); 5879 auto *LoadTy = FixedVectorType::get(LI->getType(), VF); 5880 Align Alignment = LI->getAlign(); 5881 GatherCost += 5882 VectorizedCnt * 5883 TTI->getMemoryOpCost(Instruction::Load, LoadTy, Alignment, 5884 LI->getPointerAddressSpace(), CostKind, LI); 5885 GatherCost += ScatterVectorizeCnt * 5886 TTI->getGatherScatterOpCost( 5887 Instruction::Load, LoadTy, LI->getPointerOperand(), 5888 /*VariableMask=*/false, Alignment, CostKind, LI); 5889 if (NeedInsertSubvectorAnalysis) { 5890 // Add the cost for the subvectors insert. 5891 for (int I = VF, E = VL.size(); I < E; I += VF) 5892 GatherCost += TTI->getShuffleCost(TTI::SK_InsertSubvector, VecTy, 5893 None, I, LoadTy); 5894 } 5895 return ReuseShuffleCost + GatherCost - ScalarsCost; 5896 } 5897 } 5898 return ReuseShuffleCost + getGatherCost(VL); 5899 } 5900 InstructionCost CommonCost = 0; 5901 SmallVector<int> Mask; 5902 if (!E->ReorderIndices.empty()) { 5903 SmallVector<int> NewMask; 5904 if (E->getOpcode() == Instruction::Store) { 5905 // For stores the order is actually a mask. 
5906 NewMask.resize(E->ReorderIndices.size()); 5907 copy(E->ReorderIndices, NewMask.begin()); 5908 } else { 5909 inversePermutation(E->ReorderIndices, NewMask); 5910 } 5911 ::addMask(Mask, NewMask); 5912 } 5913 if (NeedToShuffleReuses) 5914 ::addMask(Mask, E->ReuseShuffleIndices); 5915 if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask)) 5916 CommonCost = 5917 TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask); 5918 assert((E->State == TreeEntry::Vectorize || 5919 E->State == TreeEntry::ScatterVectorize) && 5920 "Unhandled state"); 5921 assert(E->getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL"); 5922 Instruction *VL0 = E->getMainOp(); 5923 unsigned ShuffleOrOp = 5924 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode(); 5925 switch (ShuffleOrOp) { 5926 case Instruction::PHI: 5927 return 0; 5928 5929 case Instruction::ExtractValue: 5930 case Instruction::ExtractElement: { 5931 // The common cost of removal ExtractElement/ExtractValue instructions + 5932 // the cost of shuffles, if required to resuffle the original vector. 5933 if (NeedToShuffleReuses) { 5934 unsigned Idx = 0; 5935 for (unsigned I : E->ReuseShuffleIndices) { 5936 if (ShuffleOrOp == Instruction::ExtractElement) { 5937 auto *EE = cast<ExtractElementInst>(VL[I]); 5938 CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement, 5939 EE->getVectorOperandType(), 5940 *getExtractIndex(EE)); 5941 } else { 5942 CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement, 5943 VecTy, Idx); 5944 ++Idx; 5945 } 5946 } 5947 Idx = EntryVF; 5948 for (Value *V : VL) { 5949 if (ShuffleOrOp == Instruction::ExtractElement) { 5950 auto *EE = cast<ExtractElementInst>(V); 5951 CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement, 5952 EE->getVectorOperandType(), 5953 *getExtractIndex(EE)); 5954 } else { 5955 --Idx; 5956 CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement, 5957 VecTy, Idx); 5958 } 5959 } 5960 } 5961 if (ShuffleOrOp == Instruction::ExtractValue) { 5962 for (unsigned I = 0, E = VL.size(); I < E; ++I) { 5963 auto *EI = cast<Instruction>(VL[I]); 5964 // Take credit for instruction that will become dead. 5965 if (EI->hasOneUse()) { 5966 Instruction *Ext = EI->user_back(); 5967 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 5968 all_of(Ext->users(), 5969 [](User *U) { return isa<GetElementPtrInst>(U); })) { 5970 // Use getExtractWithExtendCost() to calculate the cost of 5971 // extractelement/ext pair. 5972 CommonCost -= TTI->getExtractWithExtendCost( 5973 Ext->getOpcode(), Ext->getType(), VecTy, I); 5974 // Add back the cost of s|zext which is subtracted separately. 5975 CommonCost += TTI->getCastInstrCost( 5976 Ext->getOpcode(), Ext->getType(), EI->getType(), 5977 TTI::getCastContextHint(Ext), CostKind, Ext); 5978 continue; 5979 } 5980 } 5981 CommonCost -= 5982 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, I); 5983 } 5984 } else { 5985 AdjustExtractsCost(CommonCost); 5986 } 5987 return CommonCost; 5988 } 5989 case Instruction::InsertElement: { 5990 assert(E->ReuseShuffleIndices.empty() && 5991 "Unique insertelements only are expected."); 5992 auto *SrcVecTy = cast<FixedVectorType>(VL0->getType()); 5993 5994 unsigned const NumElts = SrcVecTy->getNumElements(); 5995 unsigned const NumScalars = VL.size(); 5996 APInt DemandedElts = APInt::getZero(NumElts); 5997 // TODO: Add support for Instruction::InsertValue. 
5998 SmallVector<int> Mask; 5999 if (!E->ReorderIndices.empty()) { 6000 inversePermutation(E->ReorderIndices, Mask); 6001 Mask.append(NumElts - NumScalars, UndefMaskElem); 6002 } else { 6003 Mask.assign(NumElts, UndefMaskElem); 6004 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0); 6005 } 6006 unsigned Offset = *getInsertIndex(VL0); 6007 bool IsIdentity = true; 6008 SmallVector<int> PrevMask(NumElts, UndefMaskElem); 6009 Mask.swap(PrevMask); 6010 for (unsigned I = 0; I < NumScalars; ++I) { 6011 unsigned InsertIdx = *getInsertIndex(VL[PrevMask[I]]); 6012 DemandedElts.setBit(InsertIdx); 6013 IsIdentity &= InsertIdx - Offset == I; 6014 Mask[InsertIdx - Offset] = I; 6015 } 6016 assert(Offset < NumElts && "Failed to find vector index offset"); 6017 6018 InstructionCost Cost = 0; 6019 Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts, 6020 /*Insert*/ true, /*Extract*/ false); 6021 6022 if (IsIdentity && NumElts != NumScalars && Offset % NumScalars != 0) { 6023 // FIXME: Replace with SK_InsertSubvector once it is properly supported. 6024 unsigned Sz = PowerOf2Ceil(Offset + NumScalars); 6025 Cost += TTI->getShuffleCost( 6026 TargetTransformInfo::SK_PermuteSingleSrc, 6027 FixedVectorType::get(SrcVecTy->getElementType(), Sz)); 6028 } else if (!IsIdentity) { 6029 auto *FirstInsert = 6030 cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 6031 return !is_contained(E->Scalars, 6032 cast<Instruction>(V)->getOperand(0)); 6033 })); 6034 if (isUndefVector(FirstInsert->getOperand(0))) { 6035 Cost += TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, SrcVecTy, Mask); 6036 } else { 6037 SmallVector<int> InsertMask(NumElts); 6038 std::iota(InsertMask.begin(), InsertMask.end(), 0); 6039 for (unsigned I = 0; I < NumElts; I++) { 6040 if (Mask[I] != UndefMaskElem) 6041 InsertMask[Offset + I] = NumElts + I; 6042 } 6043 Cost += 6044 TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVecTy, InsertMask); 6045 } 6046 } 6047 6048 return Cost; 6049 } 6050 case Instruction::ZExt: 6051 case Instruction::SExt: 6052 case Instruction::FPToUI: 6053 case Instruction::FPToSI: 6054 case Instruction::FPExt: 6055 case Instruction::PtrToInt: 6056 case Instruction::IntToPtr: 6057 case Instruction::SIToFP: 6058 case Instruction::UIToFP: 6059 case Instruction::Trunc: 6060 case Instruction::FPTrunc: 6061 case Instruction::BitCast: { 6062 Type *SrcTy = VL0->getOperand(0)->getType(); 6063 InstructionCost ScalarEltCost = 6064 TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy, 6065 TTI::getCastContextHint(VL0), CostKind, VL0); 6066 if (NeedToShuffleReuses) { 6067 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 6068 } 6069 6070 // Calculate the cost of this instruction. 6071 InstructionCost ScalarCost = VL.size() * ScalarEltCost; 6072 6073 auto *SrcVecTy = FixedVectorType::get(SrcTy, VL.size()); 6074 InstructionCost VecCost = 0; 6075 // Check if the values are candidates to demote. 6076 if (!MinBWs.count(VL0) || VecTy != SrcVecTy) { 6077 VecCost = CommonCost + TTI->getCastInstrCost( 6078 E->getOpcode(), VecTy, SrcVecTy, 6079 TTI::getCastContextHint(VL0), CostKind, VL0); 6080 } 6081 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 6082 return VecCost - ScalarCost; 6083 } 6084 case Instruction::FCmp: 6085 case Instruction::ICmp: 6086 case Instruction::Select: { 6087 // Calculate the cost of this instruction. 
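    // Illustration with hypothetical costs: for a bundle of 4 scalar icmps
    // where each scalar compare costs 1 and a single 4-wide vector compare
    // also costs 1, ScalarCost = 4, VecCost = 1, and this node contributes
    // CommonCost + 1 - 4 to the tree cost.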
6088 InstructionCost ScalarEltCost = 6089 TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, Builder.getInt1Ty(), 6090 CmpInst::BAD_ICMP_PREDICATE, CostKind, VL0); 6091 if (NeedToShuffleReuses) { 6092 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 6093 } 6094 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size()); 6095 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; 6096 6097 // Check if all entries in VL are either compares or selects with compares 6098 // as condition that have the same predicates. 6099 CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE; 6100 bool First = true; 6101 for (auto *V : VL) { 6102 CmpInst::Predicate CurrentPred; 6103 auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value()); 6104 if ((!match(V, m_Select(MatchCmp, m_Value(), m_Value())) && 6105 !match(V, MatchCmp)) || 6106 (!First && VecPred != CurrentPred)) { 6107 VecPred = CmpInst::BAD_ICMP_PREDICATE; 6108 break; 6109 } 6110 First = false; 6111 VecPred = CurrentPred; 6112 } 6113 6114 InstructionCost VecCost = TTI->getCmpSelInstrCost( 6115 E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0); 6116 // Check if it is possible and profitable to use min/max for selects in 6117 // VL. 6118 // 6119 auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL); 6120 if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) { 6121 IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy, 6122 {VecTy, VecTy}); 6123 InstructionCost IntrinsicCost = 6124 TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 6125 // If the selects are the only uses of the compares, they will be dead 6126 // and we can adjust the cost by removing their cost. 6127 if (IntrinsicAndUse.second) 6128 IntrinsicCost -= TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy, 6129 MaskTy, VecPred, CostKind); 6130 VecCost = std::min(VecCost, IntrinsicCost); 6131 } 6132 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 6133 return CommonCost + VecCost - ScalarCost; 6134 } 6135 case Instruction::FNeg: 6136 case Instruction::Add: 6137 case Instruction::FAdd: 6138 case Instruction::Sub: 6139 case Instruction::FSub: 6140 case Instruction::Mul: 6141 case Instruction::FMul: 6142 case Instruction::UDiv: 6143 case Instruction::SDiv: 6144 case Instruction::FDiv: 6145 case Instruction::URem: 6146 case Instruction::SRem: 6147 case Instruction::FRem: 6148 case Instruction::Shl: 6149 case Instruction::LShr: 6150 case Instruction::AShr: 6151 case Instruction::And: 6152 case Instruction::Or: 6153 case Instruction::Xor: { 6154 // Certain instructions can be cheaper to vectorize if they have a 6155 // constant second vector operand. 6156 TargetTransformInfo::OperandValueKind Op1VK = 6157 TargetTransformInfo::OK_AnyValue; 6158 TargetTransformInfo::OperandValueKind Op2VK = 6159 TargetTransformInfo::OK_UniformConstantValue; 6160 TargetTransformInfo::OperandValueProperties Op1VP = 6161 TargetTransformInfo::OP_None; 6162 TargetTransformInfo::OperandValueProperties Op2VP = 6163 TargetTransformInfo::OP_PowerOf2; 6164 6165 // If all operands are exactly the same ConstantInt then set the 6166 // operand kind to OK_UniformConstantValue. 6167 // If instead not all operands are constants, then set the operand kind 6168 // to OK_AnyValue. If all operands are constants but not the same, 6169 // then set the operand kind to OK_NonUniformConstantValue. 
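    // Illustration with hypothetical bundles: {a << 4, b << 4, c << 4} keeps
    // OK_UniformConstantValue / OP_PowerOf2, {a << 2, b << 4} is downgraded to
    // OK_NonUniformConstantValue (still power-of-2 constants), and
    // {a << n, b << 2} falls back to OK_AnyValue / OP_None because one of the
    // second operands is not a constant.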
6170 ConstantInt *CInt0 = nullptr; 6171 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 6172 const Instruction *I = cast<Instruction>(VL[i]); 6173 unsigned OpIdx = isa<BinaryOperator>(I) ? 1 : 0; 6174 ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx)); 6175 if (!CInt) { 6176 Op2VK = TargetTransformInfo::OK_AnyValue; 6177 Op2VP = TargetTransformInfo::OP_None; 6178 break; 6179 } 6180 if (Op2VP == TargetTransformInfo::OP_PowerOf2 && 6181 !CInt->getValue().isPowerOf2()) 6182 Op2VP = TargetTransformInfo::OP_None; 6183 if (i == 0) { 6184 CInt0 = CInt; 6185 continue; 6186 } 6187 if (CInt0 != CInt) 6188 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 6189 } 6190 6191 SmallVector<const Value *, 4> Operands(VL0->operand_values()); 6192 InstructionCost ScalarEltCost = 6193 TTI->getArithmeticInstrCost(E->getOpcode(), ScalarTy, CostKind, Op1VK, 6194 Op2VK, Op1VP, Op2VP, Operands, VL0); 6195 if (NeedToShuffleReuses) { 6196 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 6197 } 6198 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; 6199 InstructionCost VecCost = 6200 TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind, Op1VK, 6201 Op2VK, Op1VP, Op2VP, Operands, VL0); 6202 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 6203 return CommonCost + VecCost - ScalarCost; 6204 } 6205 case Instruction::GetElementPtr: { 6206 TargetTransformInfo::OperandValueKind Op1VK = 6207 TargetTransformInfo::OK_AnyValue; 6208 TargetTransformInfo::OperandValueKind Op2VK = 6209 TargetTransformInfo::OK_UniformConstantValue; 6210 6211 InstructionCost ScalarEltCost = TTI->getArithmeticInstrCost( 6212 Instruction::Add, ScalarTy, CostKind, Op1VK, Op2VK); 6213 if (NeedToShuffleReuses) { 6214 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 6215 } 6216 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; 6217 InstructionCost VecCost = TTI->getArithmeticInstrCost( 6218 Instruction::Add, VecTy, CostKind, Op1VK, Op2VK); 6219 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 6220 return CommonCost + VecCost - ScalarCost; 6221 } 6222 case Instruction::Load: { 6223 // Cost of wide load - cost of scalar loads. 6224 Align Alignment = cast<LoadInst>(VL0)->getAlign(); 6225 InstructionCost ScalarEltCost = TTI->getMemoryOpCost( 6226 Instruction::Load, ScalarTy, Alignment, 0, CostKind, VL0); 6227 if (NeedToShuffleReuses) { 6228 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 6229 } 6230 InstructionCost ScalarLdCost = VecTy->getNumElements() * ScalarEltCost; 6231 InstructionCost VecLdCost; 6232 if (E->State == TreeEntry::Vectorize) { 6233 VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, Alignment, 0, 6234 CostKind, VL0); 6235 } else { 6236 assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState"); 6237 Align CommonAlignment = Alignment; 6238 for (Value *V : VL) 6239 CommonAlignment = 6240 commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign()); 6241 VecLdCost = TTI->getGatherScatterOpCost( 6242 Instruction::Load, VecTy, cast<LoadInst>(VL0)->getPointerOperand(), 6243 /*VariableMask=*/false, CommonAlignment, CostKind, VL0); 6244 } 6245 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecLdCost, ScalarLdCost)); 6246 return CommonCost + VecLdCost - ScalarLdCost; 6247 } 6248 case Instruction::Store: { 6249 // We know that we can merge the stores. Calculate the cost. 6250 bool IsReorder = !E->ReorderIndices.empty(); 6251 auto *SI = 6252 cast<StoreInst>(IsReorder ? 
VL[E->ReorderIndices.front()] : VL0); 6253 Align Alignment = SI->getAlign(); 6254 InstructionCost ScalarEltCost = TTI->getMemoryOpCost( 6255 Instruction::Store, ScalarTy, Alignment, 0, CostKind, VL0); 6256 InstructionCost ScalarStCost = VecTy->getNumElements() * ScalarEltCost; 6257 InstructionCost VecStCost = TTI->getMemoryOpCost( 6258 Instruction::Store, VecTy, Alignment, 0, CostKind, VL0); 6259 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecStCost, ScalarStCost)); 6260 return CommonCost + VecStCost - ScalarStCost; 6261 } 6262 case Instruction::Call: { 6263 CallInst *CI = cast<CallInst>(VL0); 6264 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6265 6266 // Calculate the cost of the scalar and vector calls. 6267 IntrinsicCostAttributes CostAttrs(ID, *CI, 1); 6268 InstructionCost ScalarEltCost = 6269 TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 6270 if (NeedToShuffleReuses) { 6271 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 6272 } 6273 InstructionCost ScalarCallCost = VecTy->getNumElements() * ScalarEltCost; 6274 6275 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 6276 InstructionCost VecCallCost = 6277 std::min(VecCallCosts.first, VecCallCosts.second); 6278 6279 LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost 6280 << " (" << VecCallCost << "-" << ScalarCallCost << ")" 6281 << " for " << *CI << "\n"); 6282 6283 return CommonCost + VecCallCost - ScalarCallCost; 6284 } 6285 case Instruction::ShuffleVector: { 6286 assert(E->isAltShuffle() && 6287 ((Instruction::isBinaryOp(E->getOpcode()) && 6288 Instruction::isBinaryOp(E->getAltOpcode())) || 6289 (Instruction::isCast(E->getOpcode()) && 6290 Instruction::isCast(E->getAltOpcode())) || 6291 (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) && 6292 "Invalid Shuffle Vector Operand"); 6293 InstructionCost ScalarCost = 0; 6294 if (NeedToShuffleReuses) { 6295 for (unsigned Idx : E->ReuseShuffleIndices) { 6296 Instruction *I = cast<Instruction>(VL[Idx]); 6297 CommonCost -= TTI->getInstructionCost(I, CostKind); 6298 } 6299 for (Value *V : VL) { 6300 Instruction *I = cast<Instruction>(V); 6301 CommonCost += TTI->getInstructionCost(I, CostKind); 6302 } 6303 } 6304 for (Value *V : VL) { 6305 Instruction *I = cast<Instruction>(V); 6306 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 6307 ScalarCost += TTI->getInstructionCost(I, CostKind); 6308 } 6309 // VecCost is equal to sum of the cost of creating 2 vectors 6310 // and the cost of creating shuffle. 6311 InstructionCost VecCost = 0; 6312 // Try to find the previous shuffle node with the same operands and same 6313 // main/alternate ops. 6314 auto &&TryFindNodeWithEqualOperands = [this, E]() { 6315 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 6316 if (TE.get() == E) 6317 break; 6318 if (TE->isAltShuffle() && 6319 ((TE->getOpcode() == E->getOpcode() && 6320 TE->getAltOpcode() == E->getAltOpcode()) || 6321 (TE->getOpcode() == E->getAltOpcode() && 6322 TE->getAltOpcode() == E->getOpcode())) && 6323 TE->hasEqualOperands(*E)) 6324 return true; 6325 } 6326 return false; 6327 }; 6328 if (TryFindNodeWithEqualOperands()) { 6329 LLVM_DEBUG({ 6330 dbgs() << "SLP: diamond match for alternate node found.\n"; 6331 E->dump(); 6332 }); 6333 // No need to add new vector costs here since we're going to reuse 6334 // same main/alternate vector ops, just do different shuffling. 
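      // (Illustration with a hypothetical bundle: for {a + b, c - d, e + f,
      // g - h} that matches a previous node, VecCost stays 0 and only the
      // blend of the two vectors is charged through CommonCost; without such
      // a node, the branches below would add one vector add plus one vector
      // sub instead.)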
6335 } else if (Instruction::isBinaryOp(E->getOpcode())) { 6336 VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind); 6337 VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy, 6338 CostKind); 6339 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) { 6340 VecCost = TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, 6341 Builder.getInt1Ty(), 6342 CI0->getPredicate(), CostKind, VL0); 6343 VecCost += TTI->getCmpSelInstrCost( 6344 E->getOpcode(), ScalarTy, Builder.getInt1Ty(), 6345 cast<CmpInst>(E->getAltOp())->getPredicate(), CostKind, 6346 E->getAltOp()); 6347 } else { 6348 Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType(); 6349 Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType(); 6350 auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size()); 6351 auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size()); 6352 VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty, 6353 TTI::CastContextHint::None, CostKind); 6354 VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty, 6355 TTI::CastContextHint::None, CostKind); 6356 } 6357 6358 if (E->ReuseShuffleIndices.empty()) { 6359 CommonCost = 6360 TTI->getShuffleCost(TargetTransformInfo::SK_Select, FinalVecTy); 6361 } else { 6362 SmallVector<int> Mask; 6363 buildShuffleEntryMask( 6364 E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices, 6365 [E](Instruction *I) { 6366 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 6367 return I->getOpcode() == E->getAltOpcode(); 6368 }, 6369 Mask); 6370 CommonCost = TTI->getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc, 6371 FinalVecTy, Mask); 6372 } 6373 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 6374 return CommonCost + VecCost - ScalarCost; 6375 } 6376 default: 6377 llvm_unreachable("Unknown instruction"); 6378 } 6379 } 6380 6381 bool BoUpSLP::isFullyVectorizableTinyTree(bool ForReduction) const { 6382 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height " 6383 << VectorizableTree.size() << " is fully vectorizable .\n"); 6384 6385 auto &&AreVectorizableGathers = [this](const TreeEntry *TE, unsigned Limit) { 6386 SmallVector<int> Mask; 6387 return TE->State == TreeEntry::NeedToGather && 6388 !any_of(TE->Scalars, 6389 [this](Value *V) { return EphValues.contains(V); }) && 6390 (allConstant(TE->Scalars) || isSplat(TE->Scalars) || 6391 TE->Scalars.size() < Limit || 6392 ((TE->getOpcode() == Instruction::ExtractElement || 6393 all_of(TE->Scalars, 6394 [](Value *V) { 6395 return isa<ExtractElementInst, UndefValue>(V); 6396 })) && 6397 isFixedVectorShuffle(TE->Scalars, Mask)) || 6398 (TE->State == TreeEntry::NeedToGather && 6399 TE->getOpcode() == Instruction::Load && !TE->isAltShuffle())); 6400 }; 6401 6402 // We only handle trees of heights 1 and 2. 6403 if (VectorizableTree.size() == 1 && 6404 (VectorizableTree[0]->State == TreeEntry::Vectorize || 6405 (ForReduction && 6406 AreVectorizableGathers(VectorizableTree[0].get(), 6407 VectorizableTree[0]->Scalars.size()) && 6408 VectorizableTree[0]->getVectorFactor() > 2))) 6409 return true; 6410 6411 if (VectorizableTree.size() != 2) 6412 return false; 6413 6414 // Handle splat and all-constants stores. Also try to vectorize tiny trees 6415 // with the second gather nodes if they have less scalar operands rather than 6416 // the initial tree element (may be profitable to shuffle the second gather) 6417 // or they are extractelements, which form shuffle. 
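  // Illustration with a hypothetical two-node tree: a vectorized store bundle
  // {st a[0], st a[1], st a[2], st a[3]} whose value operand is the gather
  // {x, x, x, x} is accepted here, because the splat gather can be emitted as
  // a single broadcast shuffle.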
6418 SmallVector<int> Mask; 6419 if (VectorizableTree[0]->State == TreeEntry::Vectorize && 6420 AreVectorizableGathers(VectorizableTree[1].get(), 6421 VectorizableTree[0]->Scalars.size())) 6422 return true; 6423 6424 // Gathering cost would be too much for tiny trees. 6425 if (VectorizableTree[0]->State == TreeEntry::NeedToGather || 6426 (VectorizableTree[1]->State == TreeEntry::NeedToGather && 6427 VectorizableTree[0]->State != TreeEntry::ScatterVectorize)) 6428 return false; 6429 6430 return true; 6431 } 6432 6433 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts, 6434 TargetTransformInfo *TTI, 6435 bool MustMatchOrInst) { 6436 // Look past the root to find a source value. Arbitrarily follow the 6437 // path through operand 0 of any 'or'. Also, peek through optional 6438 // shift-left-by-multiple-of-8-bits. 6439 Value *ZextLoad = Root; 6440 const APInt *ShAmtC; 6441 bool FoundOr = false; 6442 while (!isa<ConstantExpr>(ZextLoad) && 6443 (match(ZextLoad, m_Or(m_Value(), m_Value())) || 6444 (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) && 6445 ShAmtC->urem(8) == 0))) { 6446 auto *BinOp = cast<BinaryOperator>(ZextLoad); 6447 ZextLoad = BinOp->getOperand(0); 6448 if (BinOp->getOpcode() == Instruction::Or) 6449 FoundOr = true; 6450 } 6451 // Check if the input is an extended load of the required or/shift expression. 6452 Value *Load; 6453 if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root || 6454 !match(ZextLoad, m_ZExt(m_Value(Load))) || !isa<LoadInst>(Load)) 6455 return false; 6456 6457 // Require that the total load bit width is a legal integer type. 6458 // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target. 6459 // But <16 x i8> --> i128 is not, so the backend probably can't reduce it. 6460 Type *SrcTy = Load->getType(); 6461 unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts; 6462 if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth))) 6463 return false; 6464 6465 // Everything matched - assume that we can fold the whole sequence using 6466 // load combining. 6467 LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at " 6468 << *(cast<Instruction>(Root)) << "\n"); 6469 6470 return true; 6471 } 6472 6473 bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const { 6474 if (RdxKind != RecurKind::Or) 6475 return false; 6476 6477 unsigned NumElts = VectorizableTree[0]->Scalars.size(); 6478 Value *FirstReduced = VectorizableTree[0]->Scalars[0]; 6479 return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI, 6480 /* MatchOr */ false); 6481 } 6482 6483 bool BoUpSLP::isLoadCombineCandidate() const { 6484 // Peek through a final sequence of stores and check if all operations are 6485 // likely to be load-combined. 6486 unsigned NumElts = VectorizableTree[0]->Scalars.size(); 6487 for (Value *Scalar : VectorizableTree[0]->Scalars) { 6488 Value *X; 6489 if (!match(Scalar, m_Store(m_Value(X), m_Value())) || 6490 !isLoadCombineCandidateImpl(X, NumElts, TTI, /* MatchOr */ true)) 6491 return false; 6492 } 6493 return true; 6494 } 6495 6496 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const { 6497 // No need to vectorize inserts of gathered values. 6498 if (VectorizableTree.size() == 2 && 6499 isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) && 6500 VectorizableTree[1]->State == TreeEntry::NeedToGather) 6501 return true; 6502 6503 // We can vectorize the tree if its size is greater than or equal to the 6504 // minimum size specified by the MinTreeSize command line option. 
6505 if (VectorizableTree.size() >= MinTreeSize) 6506 return false; 6507 6508 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we 6509 // can vectorize it if we can prove it fully vectorizable. 6510 if (isFullyVectorizableTinyTree(ForReduction)) 6511 return false; 6512 6513 assert(VectorizableTree.empty() 6514 ? ExternalUses.empty() 6515 : true && "We shouldn't have any external users"); 6516 6517 // Otherwise, we can't vectorize the tree. It is both tiny and not fully 6518 // vectorizable. 6519 return true; 6520 } 6521 6522 InstructionCost BoUpSLP::getSpillCost() const { 6523 // Walk from the bottom of the tree to the top, tracking which values are 6524 // live. When we see a call instruction that is not part of our tree, 6525 // query TTI to see if there is a cost to keeping values live over it 6526 // (for example, if spills and fills are required). 6527 unsigned BundleWidth = VectorizableTree.front()->Scalars.size(); 6528 InstructionCost Cost = 0; 6529 6530 SmallPtrSet<Instruction*, 4> LiveValues; 6531 Instruction *PrevInst = nullptr; 6532 6533 // The entries in VectorizableTree are not necessarily ordered by their 6534 // position in basic blocks. Collect them and order them by dominance so later 6535 // instructions are guaranteed to be visited first. For instructions in 6536 // different basic blocks, we only scan to the beginning of the block, so 6537 // their order does not matter, as long as all instructions in a basic block 6538 // are grouped together. Using dominance ensures a deterministic order. 6539 SmallVector<Instruction *, 16> OrderedScalars; 6540 for (const auto &TEPtr : VectorizableTree) { 6541 Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]); 6542 if (!Inst) 6543 continue; 6544 OrderedScalars.push_back(Inst); 6545 } 6546 llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) { 6547 auto *NodeA = DT->getNode(A->getParent()); 6548 auto *NodeB = DT->getNode(B->getParent()); 6549 assert(NodeA && "Should only process reachable instructions"); 6550 assert(NodeB && "Should only process reachable instructions"); 6551 assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 6552 "Different nodes should have different DFS numbers"); 6553 if (NodeA != NodeB) 6554 return NodeA->getDFSNumIn() < NodeB->getDFSNumIn(); 6555 return B->comesBefore(A); 6556 }); 6557 6558 for (Instruction *Inst : OrderedScalars) { 6559 if (!PrevInst) { 6560 PrevInst = Inst; 6561 continue; 6562 } 6563 6564 // Update LiveValues. 6565 LiveValues.erase(PrevInst); 6566 for (auto &J : PrevInst->operands()) { 6567 if (isa<Instruction>(&*J) && getTreeEntry(&*J)) 6568 LiveValues.insert(cast<Instruction>(&*J)); 6569 } 6570 6571 LLVM_DEBUG({ 6572 dbgs() << "SLP: #LV: " << LiveValues.size(); 6573 for (auto *X : LiveValues) 6574 dbgs() << " " << X->getName(); 6575 dbgs() << ", Looking at "; 6576 Inst->dump(); 6577 }); 6578 6579 // Now find the sequence of instructions between PrevInst and Inst. 6580 unsigned NumCalls = 0; 6581 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(), 6582 PrevInstIt = 6583 PrevInst->getIterator().getReverse(); 6584 while (InstIt != PrevInstIt) { 6585 if (PrevInstIt == PrevInst->getParent()->rend()) { 6586 PrevInstIt = Inst->getParent()->rbegin(); 6587 continue; 6588 } 6589 6590 // Debug information does not impact spill cost. 
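      // (For illustration: if two tree scalars are live across one real call
      // in this window, NumCalls becomes 1 and the block further below adds
      // 1 * getCostOfKeepingLiveOverCall() for two BundleWidth-wide vector
      // types.)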
6591 if ((isa<CallInst>(&*PrevInstIt) && 6592 !isa<DbgInfoIntrinsic>(&*PrevInstIt)) && 6593 &*PrevInstIt != PrevInst) 6594 NumCalls++; 6595 6596 ++PrevInstIt; 6597 } 6598 6599 if (NumCalls) { 6600 SmallVector<Type*, 4> V; 6601 for (auto *II : LiveValues) { 6602 auto *ScalarTy = II->getType(); 6603 if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy)) 6604 ScalarTy = VectorTy->getElementType(); 6605 V.push_back(FixedVectorType::get(ScalarTy, BundleWidth)); 6606 } 6607 Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V); 6608 } 6609 6610 PrevInst = Inst; 6611 } 6612 6613 return Cost; 6614 } 6615 6616 /// Check if two insertelement instructions are from the same buildvector. 6617 static bool areTwoInsertFromSameBuildVector(InsertElementInst *VU, 6618 InsertElementInst *V) { 6619 // Instructions must be from the same basic blocks. 6620 if (VU->getParent() != V->getParent()) 6621 return false; 6622 // Checks if 2 insertelements are from the same buildvector. 6623 if (VU->getType() != V->getType()) 6624 return false; 6625 // Multiple used inserts are separate nodes. 6626 if (!VU->hasOneUse() && !V->hasOneUse()) 6627 return false; 6628 auto *IE1 = VU; 6629 auto *IE2 = V; 6630 unsigned Idx1 = *getInsertIndex(IE1); 6631 unsigned Idx2 = *getInsertIndex(IE2); 6632 // Go through the vector operand of insertelement instructions trying to find 6633 // either VU as the original vector for IE2 or V as the original vector for 6634 // IE1. 6635 do { 6636 if (IE2 == VU) 6637 return VU->hasOneUse(); 6638 if (IE1 == V) 6639 return V->hasOneUse(); 6640 if (IE1) { 6641 if ((IE1 != VU && !IE1->hasOneUse()) || 6642 getInsertIndex(IE1).getValueOr(Idx2) == Idx2) 6643 IE1 = nullptr; 6644 else 6645 IE1 = dyn_cast<InsertElementInst>(IE1->getOperand(0)); 6646 } 6647 if (IE2) { 6648 if ((IE2 != V && !IE2->hasOneUse()) || 6649 getInsertIndex(IE2).getValueOr(Idx1) == Idx1) 6650 IE2 = nullptr; 6651 else 6652 IE2 = dyn_cast<InsertElementInst>(IE2->getOperand(0)); 6653 } 6654 } while (IE1 || IE2); 6655 return false; 6656 } 6657 6658 /// Checks if the \p IE1 instructions is followed by \p IE2 instruction in the 6659 /// buildvector sequence. 6660 static bool isFirstInsertElement(const InsertElementInst *IE1, 6661 const InsertElementInst *IE2) { 6662 const auto *I1 = IE1; 6663 const auto *I2 = IE2; 6664 const InsertElementInst *PrevI1; 6665 const InsertElementInst *PrevI2; 6666 unsigned Idx1 = *getInsertIndex(IE1); 6667 unsigned Idx2 = *getInsertIndex(IE2); 6668 do { 6669 if (I2 == IE1) 6670 return true; 6671 if (I1 == IE2) 6672 return false; 6673 PrevI1 = I1; 6674 PrevI2 = I2; 6675 if (I1 && (I1 == IE1 || I1->hasOneUse()) && 6676 getInsertIndex(I1).getValueOr(Idx2) != Idx2) 6677 I1 = dyn_cast<InsertElementInst>(I1->getOperand(0)); 6678 if (I2 && ((I2 == IE2 || I2->hasOneUse())) && 6679 getInsertIndex(I2).getValueOr(Idx1) != Idx1) 6680 I2 = dyn_cast<InsertElementInst>(I2->getOperand(0)); 6681 } while ((I1 && PrevI1 != I1) || (I2 && PrevI2 != I2)); 6682 llvm_unreachable("Two different buildvectors not expected."); 6683 } 6684 6685 namespace { 6686 /// Returns incoming Value *, if the requested type is Value * too, or a default 6687 /// value, otherwise. 
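/// For illustration: ValueSelect::get<Value *>(V) forwards V unchanged, while
/// the cost-estimation instantiation below (T = const TreeEntry) calls
/// ValueSelect::get<const TreeEntry *>(V) and gets a default (null) value, so
/// performExtractsShuffleAction can share one code path between cost
/// estimation and code generation.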
struct ValueSelect {
  template <typename U>
  static typename std::enable_if<std::is_same<Value *, U>::value, Value *>::type
  get(Value *V) {
    return V;
  }
  template <typename U>
  static typename std::enable_if<!std::is_same<Value *, U>::value, U>::type
  get(Value *) {
    return U();
  }
};
} // namespace

/// Does the analysis of the provided shuffle masks and performs the requested
/// actions on the vectors with the given shuffle masks. It tries to do it in
/// several steps.
/// 1. If the Base vector is not an undef vector, resize the very first mask to
/// have a common VF and perform the action for 2 input vectors (including the
/// non-undef Base). Other shuffle masks are combined with the result of the
/// first stage and processed as a shuffle of 2 elements.
/// 2. If the Base is an undef vector and there is only 1 shuffle mask, perform
/// the action only for 1 vector with the given mask, if it is not the identity
/// mask.
/// 3. If > 2 masks are used, perform the remaining shuffle actions for 2
/// vectors, combining the masks properly between the steps.
template <typename T>
static T *performExtractsShuffleAction(
    MutableArrayRef<std::pair<T *, SmallVector<int>>> ShuffleMask, Value *Base,
    function_ref<unsigned(T *)> GetVF,
    function_ref<std::pair<T *, bool>(T *, ArrayRef<int>)> ResizeAction,
    function_ref<T *(ArrayRef<int>, ArrayRef<T *>)> Action) {
  assert(!ShuffleMask.empty() && "Empty list of shuffles for inserts.");
  SmallVector<int> Mask(ShuffleMask.begin()->second);
  auto VMIt = std::next(ShuffleMask.begin());
  T *Prev = nullptr;
  bool IsBaseNotUndef = !isUndefVector(Base);
  if (IsBaseNotUndef) {
    // Base is not undef, need to combine it with the next subvectors.
    std::pair<T *, bool> Res = ResizeAction(ShuffleMask.begin()->first, Mask);
    for (unsigned Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
      if (Mask[Idx] == UndefMaskElem)
        Mask[Idx] = Idx;
      else
        Mask[Idx] = (Res.second ? Idx : Mask[Idx]) + VF;
    }
    auto *V = ValueSelect::get<T *>(Base);
    (void)V;
    assert((!V || GetVF(V) == Mask.size()) &&
           "Expected base vector of VF number of elements.");
    Prev = Action(Mask, {nullptr, Res.first});
  } else if (ShuffleMask.size() == 1) {
    // Base is undef and only 1 vector is shuffled - perform the action only
    // for a single vector, if the mask is not the identity mask.
    std::pair<T *, bool> Res = ResizeAction(ShuffleMask.begin()->first, Mask);
    if (Res.second)
      // Identity mask is found.
      Prev = Res.first;
    else
      Prev = Action(Mask, {ShuffleMask.begin()->first});
  } else {
    // Base is undef and at least 2 input vectors are shuffled - perform
    // 2-vector shuffles step by step, combining the shuffles between the
    // steps.
    unsigned Vec1VF = GetVF(ShuffleMask.begin()->first);
    unsigned Vec2VF = GetVF(VMIt->first);
    if (Vec1VF == Vec2VF) {
      // No need to resize the input vectors since they are of the same size,
      // we can shuffle them directly.
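      // Illustration with hypothetical masks: if Mask = {0, U, 1, U} takes
      // lanes from the first vector and SecMask = {U, 2, U, 3} from the
      // second, and both vectors have VF == 4, the merged two-source mask
      // becomes {0, 6, 1, 7} (second-source lanes are offset by Vec1VF).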
6756 ArrayRef<int> SecMask = VMIt->second; 6757 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) { 6758 if (SecMask[I] != UndefMaskElem) { 6759 assert(Mask[I] == UndefMaskElem && "Multiple uses of scalars."); 6760 Mask[I] = SecMask[I] + Vec1VF; 6761 } 6762 } 6763 Prev = Action(Mask, {ShuffleMask.begin()->first, VMIt->first}); 6764 } else { 6765 // Vectors of different sizes - resize and reshuffle. 6766 std::pair<T *, bool> Res1 = 6767 ResizeAction(ShuffleMask.begin()->first, Mask); 6768 std::pair<T *, bool> Res2 = ResizeAction(VMIt->first, VMIt->second); 6769 ArrayRef<int> SecMask = VMIt->second; 6770 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) { 6771 if (Mask[I] != UndefMaskElem) { 6772 assert(SecMask[I] == UndefMaskElem && "Multiple uses of scalars."); 6773 if (Res1.second) 6774 Mask[I] = I; 6775 } else if (SecMask[I] != UndefMaskElem) { 6776 assert(Mask[I] == UndefMaskElem && "Multiple uses of scalars."); 6777 Mask[I] = (Res2.second ? I : SecMask[I]) + VF; 6778 } 6779 } 6780 Prev = Action(Mask, {Res1.first, Res2.first}); 6781 } 6782 VMIt = std::next(VMIt); 6783 } 6784 // Perform requested actions for the remaining masks/vectors. 6785 for (auto E = ShuffleMask.end(); VMIt != E; ++VMIt) { 6786 // Shuffle other input vectors, if any. 6787 std::pair<T *, bool> Res = ResizeAction(VMIt->first, VMIt->second); 6788 ArrayRef<int> SecMask = VMIt->second; 6789 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) { 6790 if (SecMask[I] != UndefMaskElem) { 6791 assert((Mask[I] == UndefMaskElem || IsBaseNotUndef) && 6792 "Multiple uses of scalars."); 6793 Mask[I] = (Res.second ? I : SecMask[I]) + VF; 6794 } else if (Mask[I] != UndefMaskElem) { 6795 Mask[I] = I; 6796 } 6797 } 6798 Prev = Action(Mask, {Prev, Res.first}); 6799 } 6800 return Prev; 6801 } 6802 6803 InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) { 6804 InstructionCost Cost = 0; 6805 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size " 6806 << VectorizableTree.size() << ".\n"); 6807 6808 unsigned BundleWidth = VectorizableTree[0]->Scalars.size(); 6809 6810 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) { 6811 TreeEntry &TE = *VectorizableTree[I]; 6812 6813 InstructionCost C = getEntryCost(&TE, VectorizedVals); 6814 Cost += C; 6815 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 6816 << " for bundle that starts with " << *TE.Scalars[0] 6817 << ".\n" 6818 << "SLP: Current total cost = " << Cost << "\n"); 6819 } 6820 6821 SmallPtrSet<Value *, 16> ExtractCostCalculated; 6822 InstructionCost ExtractCost = 0; 6823 SmallVector<MapVector<const TreeEntry *, SmallVector<int>>> ShuffleMasks; 6824 SmallVector<std::pair<Value *, const TreeEntry *>> FirstUsers; 6825 SmallVector<APInt> DemandedElts; 6826 for (ExternalUser &EU : ExternalUses) { 6827 // We only add extract cost once for the same scalar. 6828 if (!isa_and_nonnull<InsertElementInst>(EU.User) && 6829 !ExtractCostCalculated.insert(EU.Scalar).second) 6830 continue; 6831 6832 // Uses by ephemeral values are free (because the ephemeral value will be 6833 // removed prior to code generation, and so the extraction will be 6834 // removed as well). 6835 if (EphValues.count(EU.User)) 6836 continue; 6837 6838 // No extract cost for vector "scalar" 6839 if (isa<FixedVectorType>(EU.Scalar->getType())) 6840 continue; 6841 6842 // Already counted the cost for external uses when tried to adjust the cost 6843 // for extractelements, no need to add it again. 
6844 if (isa<ExtractElementInst>(EU.Scalar)) 6845 continue; 6846 6847 // If found user is an insertelement, do not calculate extract cost but try 6848 // to detect it as a final shuffled/identity match. 6849 if (auto *VU = dyn_cast_or_null<InsertElementInst>(EU.User)) { 6850 if (auto *FTy = dyn_cast<FixedVectorType>(VU->getType())) { 6851 Optional<unsigned> InsertIdx = getInsertIndex(VU); 6852 if (InsertIdx) { 6853 const TreeEntry *ScalarTE = getTreeEntry(EU.Scalar); 6854 auto *It = 6855 find_if(FirstUsers, 6856 [VU](const std::pair<Value *, const TreeEntry *> &Pair) { 6857 return areTwoInsertFromSameBuildVector( 6858 VU, cast<InsertElementInst>(Pair.first)); 6859 }); 6860 int VecId = -1; 6861 if (It == FirstUsers.end()) { 6862 (void)ShuffleMasks.emplace_back(); 6863 SmallVectorImpl<int> &Mask = ShuffleMasks.back()[ScalarTE]; 6864 if (Mask.empty()) 6865 Mask.assign(FTy->getNumElements(), UndefMaskElem); 6866 // Find the insertvector, vectorized in tree, if any. 6867 Value *Base = VU; 6868 while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) { 6869 if (IEBase != EU.User && 6870 (!IEBase->hasOneUse() || 6871 getInsertIndex(IEBase).getValueOr(*InsertIdx) == *InsertIdx)) 6872 break; 6873 // Build the mask for the vectorized insertelement instructions. 6874 if (const TreeEntry *E = getTreeEntry(IEBase)) { 6875 VU = IEBase; 6876 do { 6877 IEBase = cast<InsertElementInst>(Base); 6878 int Idx = *getInsertIndex(IEBase); 6879 assert(Mask[Idx] == UndefMaskElem && 6880 "InsertElementInstruction used already."); 6881 Mask[Idx] = Idx; 6882 Base = IEBase->getOperand(0); 6883 } while (E == getTreeEntry(Base)); 6884 break; 6885 } 6886 Base = cast<InsertElementInst>(Base)->getOperand(0); 6887 } 6888 FirstUsers.emplace_back(VU, ScalarTE); 6889 DemandedElts.push_back(APInt::getZero(FTy->getNumElements())); 6890 VecId = FirstUsers.size() - 1; 6891 } else { 6892 if (isFirstInsertElement(VU, cast<InsertElementInst>(It->first))) 6893 It->first = VU; 6894 VecId = std::distance(FirstUsers.begin(), It); 6895 } 6896 int InIdx = *InsertIdx; 6897 SmallVectorImpl<int> &Mask = ShuffleMasks[VecId][ScalarTE]; 6898 if (Mask.empty()) 6899 Mask.assign(FTy->getNumElements(), UndefMaskElem); 6900 Mask[InIdx] = EU.Lane; 6901 DemandedElts[VecId].setBit(InIdx); 6902 continue; 6903 } 6904 } 6905 } 6906 6907 // If we plan to rewrite the tree in a smaller type, we will need to sign 6908 // extend the extracted value back to the original type. Here, we account 6909 // for the extract and the added cost of the sign extend if needed. 6910 auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth); 6911 auto *ScalarRoot = VectorizableTree[0]->Scalars[0]; 6912 if (MinBWs.count(ScalarRoot)) { 6913 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 6914 auto Extend = 6915 MinBWs[ScalarRoot].second ? 
Instruction::SExt : Instruction::ZExt; 6916 VecTy = FixedVectorType::get(MinTy, BundleWidth); 6917 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), 6918 VecTy, EU.Lane); 6919 } else { 6920 ExtractCost += 6921 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane); 6922 } 6923 } 6924 6925 InstructionCost SpillCost = getSpillCost(); 6926 Cost += SpillCost + ExtractCost; 6927 auto &&ResizeToVF = [this, &Cost](const TreeEntry *TE, ArrayRef<int> Mask) { 6928 InstructionCost C = 0; 6929 unsigned VF = Mask.size(); 6930 unsigned VecVF = TE->getVectorFactor(); 6931 if (VF != VecVF && 6932 (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); }) || 6933 (all_of(Mask, 6934 [VF](int Idx) { return Idx < 2 * static_cast<int>(VF); }) && 6935 !ShuffleVectorInst::isIdentityMask(Mask)))) { 6936 SmallVector<int> OrigMask(VecVF, UndefMaskElem); 6937 std::copy(Mask.begin(), std::next(Mask.begin(), std::min(VF, VecVF)), 6938 OrigMask.begin()); 6939 C = TTI->getShuffleCost( 6940 TTI::SK_PermuteSingleSrc, 6941 FixedVectorType::get(TE->getMainOp()->getType(), VecVF), OrigMask); 6942 LLVM_DEBUG( 6943 dbgs() << "SLP: Adding cost " << C 6944 << " for final shuffle of insertelement external users.\n"; 6945 TE->dump(); dbgs() << "SLP: Current total cost = " << Cost << "\n"); 6946 Cost += C; 6947 return std::make_pair(TE, true); 6948 } 6949 return std::make_pair(TE, false); 6950 }; 6951 // Calculate the cost of the reshuffled vectors, if any. 6952 for (int I = 0, E = FirstUsers.size(); I < E; ++I) { 6953 Value *Base = cast<Instruction>(FirstUsers[I].first)->getOperand(0); 6954 unsigned VF = ShuffleMasks[I].begin()->second.size(); 6955 auto *FTy = FixedVectorType::get( 6956 cast<VectorType>(FirstUsers[I].first->getType())->getElementType(), VF); 6957 auto Vector = ShuffleMasks[I].takeVector(); 6958 auto &&EstimateShufflesCost = [this, FTy, 6959 &Cost](ArrayRef<int> Mask, 6960 ArrayRef<const TreeEntry *> TEs) { 6961 assert((TEs.size() == 1 || TEs.size() == 2) && 6962 "Expected exactly 1 or 2 tree entries."); 6963 if (TEs.size() == 1) { 6964 int Limit = 2 * Mask.size(); 6965 if (!all_of(Mask, [Limit](int Idx) { return Idx < Limit; }) || 6966 !ShuffleVectorInst::isIdentityMask(Mask)) { 6967 InstructionCost C = 6968 TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FTy, Mask); 6969 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 6970 << " for final shuffle of insertelement " 6971 "external users.\n"; 6972 TEs.front()->dump(); 6973 dbgs() << "SLP: Current total cost = " << Cost << "\n"); 6974 Cost += C; 6975 } 6976 } else { 6977 InstructionCost C = 6978 TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, FTy, Mask); 6979 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 6980 << " for final shuffle of vector node and external " 6981 "insertelement users.\n"; 6982 if (TEs.front()) { TEs.front()->dump(); } TEs.back()->dump(); 6983 dbgs() << "SLP: Current total cost = " << Cost << "\n"); 6984 Cost += C; 6985 } 6986 return TEs.back(); 6987 }; 6988 (void)performExtractsShuffleAction<const TreeEntry>( 6989 makeMutableArrayRef(Vector.data(), Vector.size()), Base, 6990 [](const TreeEntry *E) { return E->getVectorFactor(); }, ResizeToVF, 6991 EstimateShufflesCost); 6992 InstructionCost InsertCost = TTI->getScalarizationOverhead( 6993 cast<FixedVectorType>(FirstUsers[I].first->getType()), DemandedElts[I], 6994 /*Insert*/ true, /*Extract*/ false); 6995 Cost -= InsertCost; 6996 } 6997 6998 #ifndef NDEBUG 6999 SmallString<256> Str; 7000 { 7001 raw_svector_ostream OS(Str); 7002 OS << "SLP: Spill Cost = " << 
SpillCost << ".\n"
       << "SLP: Extract Cost = " << ExtractCost << ".\n"
       << "SLP: Total Cost = " << Cost << ".\n";
  }
  LLVM_DEBUG(dbgs() << Str);
  if (ViewSLPTree)
    ViewGraph(this, "SLP" + F->getName(), false, Str);
#endif

  return Cost;
}

Optional<TargetTransformInfo::ShuffleKind>
BoUpSLP::isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
                               SmallVectorImpl<const TreeEntry *> &Entries) {
  // TODO: currently checking only for Scalars in the tree entry, need to count
  // reused elements too for better cost estimation.
  Mask.assign(TE->Scalars.size(), UndefMaskElem);
  Entries.clear();
  // Build a list of values to tree entries.
  DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>> ValueToTEs;
  for (const std::unique_ptr<TreeEntry> &EntryPtr : VectorizableTree) {
    if (EntryPtr.get() == TE)
      break;
    if (EntryPtr->State != TreeEntry::NeedToGather)
      continue;
    for (Value *V : EntryPtr->Scalars)
      ValueToTEs.try_emplace(V).first->getSecond().insert(EntryPtr.get());
  }
  // Find all tree entries used by the gathered values. If no common entries
  // are found - not a shuffle.
  // Here we build a set of tree nodes for each gathered value and try to find
  // the intersection between these sets. If we have at least one common tree
  // node for each gathered value - we have just a permutation of a single
  // vector. If we have 2 different sets, we're in a situation where we have a
  // permutation of 2 input vectors.
  SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs;
  DenseMap<Value *, int> UsedValuesEntry;
  for (Value *V : TE->Scalars) {
    if (isa<UndefValue>(V))
      continue;
    // Build a list of tree entries where V is used.
    SmallPtrSet<const TreeEntry *, 4> VToTEs;
    auto It = ValueToTEs.find(V);
    if (It != ValueToTEs.end())
      VToTEs = It->second;
    if (const TreeEntry *VTE = getTreeEntry(V))
      VToTEs.insert(VTE);
    if (VToTEs.empty())
      return None;
    if (UsedTEs.empty()) {
      // The first iteration, just insert the list of nodes into the vector.
      UsedTEs.push_back(VToTEs);
    } else {
      // Need to check if there are any previously used tree nodes which use V.
      // If there are no such nodes, consider that we have another input
      // vector.
      SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs);
      unsigned Idx = 0;
      for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) {
        // Do we have a non-empty intersection of previously listed tree
        // entries and tree entries using current V?
        set_intersect(VToTEs, Set);
        if (!VToTEs.empty()) {
          // Yes, write the new subset and continue analysis for the next
          // scalar.
          Set.swap(VToTEs);
          break;
        }
        VToTEs = SavedVToTEs;
        ++Idx;
      }
      // No non-empty intersection found - need to add a second set of possible
      // source vectors.
      if (Idx == UsedTEs.size()) {
        // If the number of input vectors is greater than 2 - not a
        // permutation, fall back to the regular gather.
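        // (Illustration with a hypothetical gather {a, b, c, d}: if {a, c}
        // are only available from vectorized entry TE1 and {b, d} only from
        // TE2, UsedTEs ends up with two sets and the gather is later treated
        // as a two-source permutation; a third independent source would hit
        // the check below and force a regular gather.)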
7079 if (UsedTEs.size() == 2) 7080 return None; 7081 UsedTEs.push_back(SavedVToTEs); 7082 Idx = UsedTEs.size() - 1; 7083 } 7084 UsedValuesEntry.try_emplace(V, Idx); 7085 } 7086 } 7087 7088 if (UsedTEs.empty()) { 7089 assert(all_of(TE->Scalars, UndefValue::classof) && 7090 "Expected vector of undefs only."); 7091 return None; 7092 } 7093 7094 unsigned VF = 0; 7095 if (UsedTEs.size() == 1) { 7096 // Try to find the perfect match in another gather node at first. 7097 auto It = find_if(UsedTEs.front(), [TE](const TreeEntry *EntryPtr) { 7098 return EntryPtr->isSame(TE->Scalars); 7099 }); 7100 if (It != UsedTEs.front().end()) { 7101 Entries.push_back(*It); 7102 std::iota(Mask.begin(), Mask.end(), 0); 7103 return TargetTransformInfo::SK_PermuteSingleSrc; 7104 } 7105 // No perfect match, just shuffle, so choose the first tree node. 7106 Entries.push_back(*UsedTEs.front().begin()); 7107 } else { 7108 // Try to find nodes with the same vector factor. 7109 assert(UsedTEs.size() == 2 && "Expected at max 2 permuted entries."); 7110 DenseMap<int, const TreeEntry *> VFToTE; 7111 for (const TreeEntry *TE : UsedTEs.front()) 7112 VFToTE.try_emplace(TE->getVectorFactor(), TE); 7113 for (const TreeEntry *TE : UsedTEs.back()) { 7114 auto It = VFToTE.find(TE->getVectorFactor()); 7115 if (It != VFToTE.end()) { 7116 VF = It->first; 7117 Entries.push_back(It->second); 7118 Entries.push_back(TE); 7119 break; 7120 } 7121 } 7122 // No 2 source vectors with the same vector factor - give up and do regular 7123 // gather. 7124 if (Entries.empty()) 7125 return None; 7126 } 7127 7128 // Build a shuffle mask for better cost estimation and vector emission. 7129 for (int I = 0, E = TE->Scalars.size(); I < E; ++I) { 7130 Value *V = TE->Scalars[I]; 7131 if (isa<UndefValue>(V)) 7132 continue; 7133 unsigned Idx = UsedValuesEntry.lookup(V); 7134 const TreeEntry *VTE = Entries[Idx]; 7135 int FoundLane = VTE->findLaneForValue(V); 7136 Mask[I] = Idx * VF + FoundLane; 7137 // Extra check required by isSingleSourceMaskImpl function (called by 7138 // ShuffleVectorInst::isSingleSourceMask). 7139 if (Mask[I] >= 2 * E) 7140 return None; 7141 } 7142 switch (Entries.size()) { 7143 case 1: 7144 return TargetTransformInfo::SK_PermuteSingleSrc; 7145 case 2: 7146 return TargetTransformInfo::SK_PermuteTwoSrc; 7147 default: 7148 break; 7149 } 7150 return None; 7151 } 7152 7153 InstructionCost BoUpSLP::getGatherCost(FixedVectorType *Ty, 7154 const APInt &ShuffledIndices, 7155 bool NeedToShuffle) const { 7156 InstructionCost Cost = 7157 TTI->getScalarizationOverhead(Ty, ~ShuffledIndices, /*Insert*/ true, 7158 /*Extract*/ false); 7159 if (NeedToShuffle) 7160 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty); 7161 return Cost; 7162 } 7163 7164 InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const { 7165 // Find the type of the operands in VL. 7166 Type *ScalarTy = VL[0]->getType(); 7167 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 7168 ScalarTy = SI->getValueOperand()->getType(); 7169 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 7170 bool DuplicateNonConst = false; 7171 // Find the cost of inserting/extracting values from the vector. 7172 // Check if the same elements are inserted several times and count them as 7173 // shuffle candidates. 7174 APInt ShuffledElements = APInt::getZero(VL.size()); 7175 DenseSet<Value *> UniqueElements; 7176 // Iterate in reverse order to consider insert elements with the high cost. 
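  // Illustration with a hypothetical gather {x, y, x, 7}: the constant in
  // lane 3 and the duplicated x in lane 0 get their bits set in
  // ShuffledElements, so insertion overhead is charged only for lanes 1 and 2
  // and, because DuplicateNonConst is true, one SK_PermuteSingleSrc shuffle is
  // added on top.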
7177 for (unsigned I = VL.size(); I > 0; --I) { 7178 unsigned Idx = I - 1; 7179 // No need to shuffle duplicates for constants. 7180 if (isConstant(VL[Idx])) { 7181 ShuffledElements.setBit(Idx); 7182 continue; 7183 } 7184 if (!UniqueElements.insert(VL[Idx]).second) { 7185 DuplicateNonConst = true; 7186 ShuffledElements.setBit(Idx); 7187 } 7188 } 7189 return getGatherCost(VecTy, ShuffledElements, DuplicateNonConst); 7190 } 7191 7192 // Perform operand reordering on the instructions in VL and return the reordered 7193 // operands in Left and Right. 7194 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, 7195 SmallVectorImpl<Value *> &Left, 7196 SmallVectorImpl<Value *> &Right, 7197 const DataLayout &DL, 7198 ScalarEvolution &SE, 7199 const BoUpSLP &R) { 7200 if (VL.empty()) 7201 return; 7202 VLOperands Ops(VL, DL, SE, R); 7203 // Reorder the operands in place. 7204 Ops.reorder(); 7205 Left = Ops.getVL(0); 7206 Right = Ops.getVL(1); 7207 } 7208 7209 void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) { 7210 // Get the basic block this bundle is in. All instructions in the bundle 7211 // should be in this block. 7212 auto *Front = E->getMainOp(); 7213 auto *BB = Front->getParent(); 7214 assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool { 7215 auto *I = cast<Instruction>(V); 7216 return !E->isOpcodeOrAlt(I) || I->getParent() == BB; 7217 })); 7218 7219 auto &&FindLastInst = [E, Front]() { 7220 Instruction *LastInst = Front; 7221 for (Value *V : E->Scalars) { 7222 auto *I = dyn_cast<Instruction>(V); 7223 if (!I) 7224 continue; 7225 if (LastInst->comesBefore(I)) 7226 LastInst = I; 7227 } 7228 return LastInst; 7229 }; 7230 7231 auto &&FindFirstInst = [E, Front]() { 7232 Instruction *FirstInst = Front; 7233 for (Value *V : E->Scalars) { 7234 auto *I = dyn_cast<Instruction>(V); 7235 if (!I) 7236 continue; 7237 if (I->comesBefore(FirstInst)) 7238 FirstInst = I; 7239 } 7240 return FirstInst; 7241 }; 7242 7243 // Set the insert point to the beginning of the basic block if the entry 7244 // should not be scheduled. 7245 if (E->State != TreeEntry::NeedToGather && 7246 doesNotNeedToSchedule(E->Scalars)) { 7247 Instruction *InsertInst; 7248 if (all_of(E->Scalars, isUsedOutsideBlock)) 7249 InsertInst = FindLastInst(); 7250 else 7251 InsertInst = FindFirstInst(); 7252 // If the instruction is PHI, set the insert point after all the PHIs. 7253 if (isa<PHINode>(InsertInst)) 7254 InsertInst = BB->getFirstNonPHI(); 7255 BasicBlock::iterator InsertPt = InsertInst->getIterator(); 7256 Builder.SetInsertPoint(BB, InsertPt); 7257 Builder.SetCurrentDebugLocation(Front->getDebugLoc()); 7258 return; 7259 } 7260 7261 // The last instruction in the bundle in program order. 7262 Instruction *LastInst = nullptr; 7263 7264 // Find the last instruction. The common case should be that BB has been 7265 // scheduled, and the last instruction is VL.back(). So we start with 7266 // VL.back() and iterate over schedule data until we reach the end of the 7267 // bundle. The end of the bundle is marked by null ScheduleData. 
  if (BlocksSchedules.count(BB)) {
    Value *V = E->isOneOf(E->Scalars.back());
    if (doesNotNeedToBeScheduled(V))
      V = *find_if_not(E->Scalars, doesNotNeedToBeScheduled);
    auto *Bundle = BlocksSchedules[BB]->getScheduleData(V);
    if (Bundle && Bundle->isPartOfBundle())
      for (; Bundle; Bundle = Bundle->NextInBundle)
        if (Bundle->OpValue == Bundle->Inst)
          LastInst = Bundle->Inst;
  }

  // LastInst can still be null at this point if there's either not an entry
  // for BB in BlocksSchedules or there's no ScheduleData available for
  // VL.back(). This can be the case if buildTree_rec aborts for various
  // reasons (e.g., the maximum recursion depth is reached, the maximum region
  // size is reached, etc.). ScheduleData is initialized in the scheduling
  // "dry-run".
  //
  // If this happens, we can still find the last instruction by brute force. We
  // iterate forwards from Front (inclusive) until we either see all
  // instructions in the bundle or reach the end of the block. If Front is the
  // last instruction in program order, LastInst will be set to Front, and we
  // will visit all the remaining instructions in the block.
  //
  // One of the reasons we exit early from buildTree_rec is to place an upper
  // bound on compile-time. Thus, taking an additional compile-time hit here is
  // not ideal. However, this should be exceedingly rare since it requires that
  // we both exit early from buildTree_rec and that the bundle be out-of-order
  // (causing us to iterate all the way to the end of the block).
  if (!LastInst) {
    LastInst = FindLastInst();
    // If the instruction is a PHI, set the insert point after all the PHIs.
    if (isa<PHINode>(LastInst))
      LastInst = BB->getFirstNonPHI()->getPrevNode();
  }
  assert(LastInst && "Failed to find last instruction in bundle");

  // Set the insertion point after the last instruction in the bundle. Set the
  // debug location to Front.
  Builder.SetInsertPoint(BB, std::next(LastInst->getIterator()));
  Builder.SetCurrentDebugLocation(Front->getDebugLoc());
}

Value *BoUpSLP::gather(ArrayRef<Value *> VL) {
  // List of instructions/lanes from the current block and/or the blocks which
  // are part of the current loop. These instructions will be inserted at the
  // end to make it possible to optimize loops and hoist invariant instructions
  // out of the loop's body with better chances for success.
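  // Illustration with a hypothetical gather {1.0, %loop_val, 2.0} built inside
  // a loop: the constants are inserted first and the insertelement for
  // %loop_val is postponed to the end, so the partially built vector stays
  // loop-invariant for as long as possible and has a better chance of being
  // hoisted.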
7316 SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts; 7317 SmallSet<int, 4> PostponedIndices; 7318 Loop *L = LI->getLoopFor(Builder.GetInsertBlock()); 7319 auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) { 7320 SmallPtrSet<BasicBlock *, 4> Visited; 7321 while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second) 7322 InsertBB = InsertBB->getSinglePredecessor(); 7323 return InsertBB && InsertBB == InstBB; 7324 }; 7325 for (int I = 0, E = VL.size(); I < E; ++I) { 7326 if (auto *Inst = dyn_cast<Instruction>(VL[I])) 7327 if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) || 7328 getTreeEntry(Inst) || (L && (L->contains(Inst)))) && 7329 PostponedIndices.insert(I).second) 7330 PostponedInsts.emplace_back(Inst, I); 7331 } 7332 7333 auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) { 7334 Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos)); 7335 auto *InsElt = dyn_cast<InsertElementInst>(Vec); 7336 if (!InsElt) 7337 return Vec; 7338 GatherShuffleSeq.insert(InsElt); 7339 CSEBlocks.insert(InsElt->getParent()); 7340 // Add to our 'need-to-extract' list. 7341 if (TreeEntry *Entry = getTreeEntry(V)) { 7342 // Find which lane we need to extract. 7343 unsigned FoundLane = Entry->findLaneForValue(V); 7344 ExternalUses.emplace_back(V, InsElt, FoundLane); 7345 } 7346 return Vec; 7347 }; 7348 Value *Val0 = 7349 isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0]; 7350 FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size()); 7351 Value *Vec = PoisonValue::get(VecTy); 7352 SmallVector<int> NonConsts; 7353 // Insert constant values at first. 7354 for (int I = 0, E = VL.size(); I < E; ++I) { 7355 if (PostponedIndices.contains(I)) 7356 continue; 7357 if (!isConstant(VL[I])) { 7358 NonConsts.push_back(I); 7359 continue; 7360 } 7361 Vec = CreateInsertElement(Vec, VL[I], I); 7362 } 7363 // Insert non-constant values. 7364 for (int I : NonConsts) 7365 Vec = CreateInsertElement(Vec, VL[I], I); 7366 // Append instructions, which are/may be part of the loop, in the end to make 7367 // it possible to hoist non-loop-based instructions. 7368 for (const std::pair<Value *, unsigned> &Pair : PostponedInsts) 7369 Vec = CreateInsertElement(Vec, Pair.first, Pair.second); 7370 7371 return Vec; 7372 } 7373 7374 namespace { 7375 /// Merges shuffle masks and emits final shuffle instruction, if required. 7376 class ShuffleInstructionBuilder { 7377 IRBuilderBase &Builder; 7378 const unsigned VF = 0; 7379 bool IsFinalized = false; 7380 SmallVector<int, 4> Mask; 7381 /// Holds all of the instructions that we gathered. 7382 SetVector<Instruction *> &GatherShuffleSeq; 7383 /// A list of blocks that we are going to CSE. 7384 SetVector<BasicBlock *> &CSEBlocks; 7385 7386 public: 7387 ShuffleInstructionBuilder(IRBuilderBase &Builder, unsigned VF, 7388 SetVector<Instruction *> &GatherShuffleSeq, 7389 SetVector<BasicBlock *> &CSEBlocks) 7390 : Builder(Builder), VF(VF), GatherShuffleSeq(GatherShuffleSeq), 7391 CSEBlocks(CSEBlocks) {} 7392 7393 /// Adds a mask, inverting it before applying. 7394 void addInversedMask(ArrayRef<unsigned> SubMask) { 7395 if (SubMask.empty()) 7396 return; 7397 SmallVector<int, 4> NewMask; 7398 inversePermutation(SubMask, NewMask); 7399 addMask(NewMask); 7400 } 7401 7402 /// Functions adds masks, merging them into single one. 
7403 void addMask(ArrayRef<unsigned> SubMask) { 7404 SmallVector<int, 4> NewMask(SubMask.begin(), SubMask.end()); 7405 addMask(NewMask); 7406 } 7407 7408 void addMask(ArrayRef<int> SubMask) { ::addMask(Mask, SubMask); } 7409 7410 Value *finalize(Value *V) { 7411 IsFinalized = true; 7412 unsigned ValueVF = cast<FixedVectorType>(V->getType())->getNumElements(); 7413 if (VF == ValueVF && Mask.empty()) 7414 return V; 7415 SmallVector<int, 4> NormalizedMask(VF, UndefMaskElem); 7416 std::iota(NormalizedMask.begin(), NormalizedMask.end(), 0); 7417 addMask(NormalizedMask); 7418 7419 if (VF == ValueVF && ShuffleVectorInst::isIdentityMask(Mask)) 7420 return V; 7421 Value *Vec = Builder.CreateShuffleVector(V, Mask, "shuffle"); 7422 if (auto *I = dyn_cast<Instruction>(Vec)) { 7423 GatherShuffleSeq.insert(I); 7424 CSEBlocks.insert(I->getParent()); 7425 } 7426 return Vec; 7427 } 7428 7429 ~ShuffleInstructionBuilder() { 7430 assert((IsFinalized || Mask.empty()) && 7431 "Shuffle construction must be finalized."); 7432 } 7433 }; 7434 } // namespace 7435 7436 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) { 7437 const unsigned VF = VL.size(); 7438 InstructionsState S = getSameOpcode(VL); 7439 if (S.getOpcode()) { 7440 if (TreeEntry *E = getTreeEntry(S.OpValue)) 7441 if (E->isSame(VL)) { 7442 Value *V = vectorizeTree(E); 7443 if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) { 7444 if (!E->ReuseShuffleIndices.empty()) { 7445 // Reshuffle to get only unique values. 7446 // If some of the scalars are duplicated in the vectorization tree 7447 // entry, we do not vectorize them but instead generate a mask for 7448 // the reuses. But if there are several users of the same entry, 7449 // they may have different vectorization factors. This is especially 7450 // important for PHI nodes. In this case, we need to adapt the 7451 // resulting instruction for the user vectorization factor and have 7452 // to reshuffle it again to take only unique elements of the vector. 7453 // Without this code the function incorrectly returns reduced vector 7454 // instruction with the same elements, not with the unique ones. 7455 7456 // block: 7457 // %phi = phi <2 x > { .., %entry} {%shuffle, %block} 7458 // %2 = shuffle <2 x > %phi, poison, <4 x > <1, 1, 0, 0> 7459 // ... (use %2) 7460 // %shuffle = shuffle <2 x> %2, poison, <2 x> {2, 0} 7461 // br %block 7462 SmallVector<int> UniqueIdxs(VF, UndefMaskElem); 7463 SmallSet<int, 4> UsedIdxs; 7464 int Pos = 0; 7465 int Sz = VL.size(); 7466 for (int Idx : E->ReuseShuffleIndices) { 7467 if (Idx != Sz && Idx != UndefMaskElem && 7468 UsedIdxs.insert(Idx).second) 7469 UniqueIdxs[Idx] = Pos; 7470 ++Pos; 7471 } 7472 assert(VF >= UsedIdxs.size() && "Expected vectorization factor " 7473 "less than original vector size."); 7474 UniqueIdxs.append(VF - UsedIdxs.size(), UndefMaskElem); 7475 V = Builder.CreateShuffleVector(V, UniqueIdxs, "shrink.shuffle"); 7476 } else { 7477 assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() && 7478 "Expected vectorization factor less " 7479 "than original vector size."); 7480 SmallVector<int> UniformMask(VF, 0); 7481 std::iota(UniformMask.begin(), UniformMask.end(), 0); 7482 V = Builder.CreateShuffleVector(V, UniformMask, "shrink.shuffle"); 7483 } 7484 if (auto *I = dyn_cast<Instruction>(V)) { 7485 GatherShuffleSeq.insert(I); 7486 CSEBlocks.insert(I->getParent()); 7487 } 7488 } 7489 return V; 7490 } 7491 } 7492 7493 // Can't vectorize this, so simply build a new vector with each lane 7494 // corresponding to the requested value. 
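  // Illustration with hypothetical lanes: for VL = {%a, %b, %a, undef},
  // createBuildVector gathers only the unique values {%a, %b} (padded with
  // poison) and then applies the reuse mask {0, 1, 0, undef} to produce the
  // requested 4 lanes.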
7495 return createBuildVector(VL); 7496 } 7497 Value *BoUpSLP::createBuildVector(ArrayRef<Value *> VL) { 7498 unsigned VF = VL.size(); 7499 // Exploit possible reuse of values across lanes. 7500 SmallVector<int> ReuseShuffleIndicies; 7501 SmallVector<Value *> UniqueValues; 7502 if (VL.size() > 2) { 7503 DenseMap<Value *, unsigned> UniquePositions; 7504 unsigned NumValues = 7505 std::distance(VL.begin(), find_if(reverse(VL), [](Value *V) { 7506 return !isa<UndefValue>(V); 7507 }).base()); 7508 VF = std::max<unsigned>(VF, PowerOf2Ceil(NumValues)); 7509 int UniqueVals = 0; 7510 for (Value *V : VL.drop_back(VL.size() - VF)) { 7511 if (isa<UndefValue>(V)) { 7512 ReuseShuffleIndicies.emplace_back(UndefMaskElem); 7513 continue; 7514 } 7515 if (isConstant(V)) { 7516 ReuseShuffleIndicies.emplace_back(UniqueValues.size()); 7517 UniqueValues.emplace_back(V); 7518 continue; 7519 } 7520 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 7521 ReuseShuffleIndicies.emplace_back(Res.first->second); 7522 if (Res.second) { 7523 UniqueValues.emplace_back(V); 7524 ++UniqueVals; 7525 } 7526 } 7527 if (UniqueVals == 1 && UniqueValues.size() == 1) { 7528 // Emit pure splat vector. 7529 ReuseShuffleIndicies.append(VF - ReuseShuffleIndicies.size(), 7530 UndefMaskElem); 7531 } else if (UniqueValues.size() >= VF - 1 || UniqueValues.size() <= 1) { 7532 ReuseShuffleIndicies.clear(); 7533 UniqueValues.clear(); 7534 UniqueValues.append(VL.begin(), std::next(VL.begin(), NumValues)); 7535 } 7536 UniqueValues.append(VF - UniqueValues.size(), 7537 PoisonValue::get(VL[0]->getType())); 7538 VL = UniqueValues; 7539 } 7540 7541 ShuffleInstructionBuilder ShuffleBuilder(Builder, VF, GatherShuffleSeq, 7542 CSEBlocks); 7543 Value *Vec = gather(VL); 7544 if (!ReuseShuffleIndicies.empty()) { 7545 ShuffleBuilder.addMask(ReuseShuffleIndicies); 7546 Vec = ShuffleBuilder.finalize(Vec); 7547 } 7548 return Vec; 7549 } 7550 7551 Value *BoUpSLP::vectorizeTree(TreeEntry *E) { 7552 IRBuilder<>::InsertPointGuard Guard(Builder); 7553 7554 if (E->VectorizedValue) { 7555 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); 7556 return E->VectorizedValue; 7557 } 7558 7559 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 7560 unsigned VF = E->getVectorFactor(); 7561 ShuffleInstructionBuilder ShuffleBuilder(Builder, VF, GatherShuffleSeq, 7562 CSEBlocks); 7563 if (E->State == TreeEntry::NeedToGather) { 7564 if (E->getMainOp()) 7565 setInsertPointAfterBundle(E); 7566 Value *Vec; 7567 SmallVector<int> Mask; 7568 SmallVector<const TreeEntry *> Entries; 7569 Optional<TargetTransformInfo::ShuffleKind> Shuffle = 7570 isGatherShuffledEntry(E, Mask, Entries); 7571 if (Shuffle.hasValue()) { 7572 assert((Entries.size() == 1 || Entries.size() == 2) && 7573 "Expected shuffle of 1 or 2 entries."); 7574 Vec = Builder.CreateShuffleVector(Entries.front()->VectorizedValue, 7575 Entries.back()->VectorizedValue, Mask); 7576 if (auto *I = dyn_cast<Instruction>(Vec)) { 7577 GatherShuffleSeq.insert(I); 7578 CSEBlocks.insert(I->getParent()); 7579 } 7580 } else { 7581 Vec = gather(E->Scalars); 7582 } 7583 if (NeedToShuffleReuses) { 7584 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 7585 Vec = ShuffleBuilder.finalize(Vec); 7586 } 7587 E->VectorizedValue = Vec; 7588 return Vec; 7589 } 7590 7591 assert((E->State == TreeEntry::Vectorize || 7592 E->State == TreeEntry::ScatterVectorize) && 7593 "Unhandled state"); 7594 unsigned ShuffleOrOp = 7595 E->isAltShuffle() ? 
(unsigned)Instruction::ShuffleVector : E->getOpcode(); 7596 Instruction *VL0 = E->getMainOp(); 7597 Type *ScalarTy = VL0->getType(); 7598 if (auto *Store = dyn_cast<StoreInst>(VL0)) 7599 ScalarTy = Store->getValueOperand()->getType(); 7600 else if (auto *IE = dyn_cast<InsertElementInst>(VL0)) 7601 ScalarTy = IE->getOperand(1)->getType(); 7602 auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size()); 7603 switch (ShuffleOrOp) { 7604 case Instruction::PHI: { 7605 assert( 7606 (E->ReorderIndices.empty() || E != VectorizableTree.front().get()) && 7607 "PHI reordering is free."); 7608 auto *PH = cast<PHINode>(VL0); 7609 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI()); 7610 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 7611 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 7612 Value *V = NewPhi; 7613 7614 // Adjust insertion point once all PHI's have been generated. 7615 Builder.SetInsertPoint(&*PH->getParent()->getFirstInsertionPt()); 7616 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 7617 7618 ShuffleBuilder.addInversedMask(E->ReorderIndices); 7619 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 7620 V = ShuffleBuilder.finalize(V); 7621 7622 E->VectorizedValue = V; 7623 7624 // PHINodes may have multiple entries from the same block. We want to 7625 // visit every block once. 7626 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 7627 7628 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 7629 ValueList Operands; 7630 BasicBlock *IBB = PH->getIncomingBlock(i); 7631 7632 if (!VisitedBBs.insert(IBB).second) { 7633 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 7634 continue; 7635 } 7636 7637 Builder.SetInsertPoint(IBB->getTerminator()); 7638 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 7639 Value *Vec = vectorizeTree(E->getOperand(i)); 7640 NewPhi->addIncoming(Vec, IBB); 7641 } 7642 7643 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 7644 "Invalid number of incoming values"); 7645 return V; 7646 } 7647 7648 case Instruction::ExtractElement: { 7649 Value *V = E->getSingleOperand(0); 7650 Builder.SetInsertPoint(VL0); 7651 ShuffleBuilder.addInversedMask(E->ReorderIndices); 7652 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 7653 V = ShuffleBuilder.finalize(V); 7654 E->VectorizedValue = V; 7655 return V; 7656 } 7657 case Instruction::ExtractValue: { 7658 auto *LI = cast<LoadInst>(E->getSingleOperand(0)); 7659 Builder.SetInsertPoint(LI); 7660 auto *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace()); 7661 Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy); 7662 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign()); 7663 Value *NewV = propagateMetadata(V, E->Scalars); 7664 ShuffleBuilder.addInversedMask(E->ReorderIndices); 7665 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 7666 NewV = ShuffleBuilder.finalize(NewV); 7667 E->VectorizedValue = NewV; 7668 return NewV; 7669 } 7670 case Instruction::InsertElement: { 7671 assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique"); 7672 Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back())); 7673 Value *V = vectorizeTree(E->getOperand(1)); 7674 7675 // Create InsertVector shuffle if necessary 7676 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 7677 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0)); 7678 })); 7679 const unsigned NumElts = 7680 cast<FixedVectorType>(FirstInsert->getType())->getNumElements(); 7681 const unsigned NumScalars = 
E->Scalars.size(); 7682 7683 unsigned Offset = *getInsertIndex(VL0); 7684 assert(Offset < NumElts && "Failed to find vector index offset"); 7685 7686 // Create shuffle to resize vector 7687 SmallVector<int> Mask; 7688 if (!E->ReorderIndices.empty()) { 7689 inversePermutation(E->ReorderIndices, Mask); 7690 Mask.append(NumElts - NumScalars, UndefMaskElem); 7691 } else { 7692 Mask.assign(NumElts, UndefMaskElem); 7693 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0); 7694 } 7695 // Create InsertVector shuffle if necessary 7696 bool IsIdentity = true; 7697 SmallVector<int> PrevMask(NumElts, UndefMaskElem); 7698 Mask.swap(PrevMask); 7699 for (unsigned I = 0; I < NumScalars; ++I) { 7700 Value *Scalar = E->Scalars[PrevMask[I]]; 7701 unsigned InsertIdx = *getInsertIndex(Scalar); 7702 IsIdentity &= InsertIdx - Offset == I; 7703 Mask[InsertIdx - Offset] = I; 7704 } 7705 if (!IsIdentity || NumElts != NumScalars) { 7706 V = Builder.CreateShuffleVector(V, Mask); 7707 if (auto *I = dyn_cast<Instruction>(V)) { 7708 GatherShuffleSeq.insert(I); 7709 CSEBlocks.insert(I->getParent()); 7710 } 7711 } 7712 7713 if ((!IsIdentity || Offset != 0 || 7714 !isUndefVector(FirstInsert->getOperand(0))) && 7715 NumElts != NumScalars) { 7716 SmallVector<int> InsertMask(NumElts); 7717 std::iota(InsertMask.begin(), InsertMask.end(), 0); 7718 for (unsigned I = 0; I < NumElts; I++) { 7719 if (Mask[I] != UndefMaskElem) 7720 InsertMask[Offset + I] = NumElts + I; 7721 } 7722 7723 V = Builder.CreateShuffleVector( 7724 FirstInsert->getOperand(0), V, InsertMask, 7725 cast<Instruction>(E->Scalars.back())->getName()); 7726 if (auto *I = dyn_cast<Instruction>(V)) { 7727 GatherShuffleSeq.insert(I); 7728 CSEBlocks.insert(I->getParent()); 7729 } 7730 } 7731 7732 ++NumVectorInstructions; 7733 E->VectorizedValue = V; 7734 return V; 7735 } 7736 case Instruction::ZExt: 7737 case Instruction::SExt: 7738 case Instruction::FPToUI: 7739 case Instruction::FPToSI: 7740 case Instruction::FPExt: 7741 case Instruction::PtrToInt: 7742 case Instruction::IntToPtr: 7743 case Instruction::SIToFP: 7744 case Instruction::UIToFP: 7745 case Instruction::Trunc: 7746 case Instruction::FPTrunc: 7747 case Instruction::BitCast: { 7748 setInsertPointAfterBundle(E); 7749 7750 Value *InVec = vectorizeTree(E->getOperand(0)); 7751 7752 if (E->VectorizedValue) { 7753 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 7754 return E->VectorizedValue; 7755 } 7756 7757 auto *CI = cast<CastInst>(VL0); 7758 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy); 7759 ShuffleBuilder.addInversedMask(E->ReorderIndices); 7760 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 7761 V = ShuffleBuilder.finalize(V); 7762 7763 E->VectorizedValue = V; 7764 ++NumVectorInstructions; 7765 return V; 7766 } 7767 case Instruction::FCmp: 7768 case Instruction::ICmp: { 7769 setInsertPointAfterBundle(E); 7770 7771 Value *L = vectorizeTree(E->getOperand(0)); 7772 Value *R = vectorizeTree(E->getOperand(1)); 7773 7774 if (E->VectorizedValue) { 7775 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 7776 return E->VectorizedValue; 7777 } 7778 7779 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 7780 Value *V = Builder.CreateCmp(P0, L, R); 7781 propagateIRFlags(V, E->Scalars, VL0); 7782 ShuffleBuilder.addInversedMask(E->ReorderIndices); 7783 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 7784 V = ShuffleBuilder.finalize(V); 7785 7786 E->VectorizedValue = V; 7787 ++NumVectorInstructions; 7788 return V; 7789 } 7790 case Instruction::Select: 
{ 7791 setInsertPointAfterBundle(E); 7792 7793 Value *Cond = vectorizeTree(E->getOperand(0)); 7794 Value *True = vectorizeTree(E->getOperand(1)); 7795 Value *False = vectorizeTree(E->getOperand(2)); 7796 7797 if (E->VectorizedValue) { 7798 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 7799 return E->VectorizedValue; 7800 } 7801 7802 Value *V = Builder.CreateSelect(Cond, True, False); 7803 ShuffleBuilder.addInversedMask(E->ReorderIndices); 7804 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 7805 V = ShuffleBuilder.finalize(V); 7806 7807 E->VectorizedValue = V; 7808 ++NumVectorInstructions; 7809 return V; 7810 } 7811 case Instruction::FNeg: { 7812 setInsertPointAfterBundle(E); 7813 7814 Value *Op = vectorizeTree(E->getOperand(0)); 7815 7816 if (E->VectorizedValue) { 7817 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 7818 return E->VectorizedValue; 7819 } 7820 7821 Value *V = Builder.CreateUnOp( 7822 static_cast<Instruction::UnaryOps>(E->getOpcode()), Op); 7823 propagateIRFlags(V, E->Scalars, VL0); 7824 if (auto *I = dyn_cast<Instruction>(V)) 7825 V = propagateMetadata(I, E->Scalars); 7826 7827 ShuffleBuilder.addInversedMask(E->ReorderIndices); 7828 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 7829 V = ShuffleBuilder.finalize(V); 7830 7831 E->VectorizedValue = V; 7832 ++NumVectorInstructions; 7833 7834 return V; 7835 } 7836 case Instruction::Add: 7837 case Instruction::FAdd: 7838 case Instruction::Sub: 7839 case Instruction::FSub: 7840 case Instruction::Mul: 7841 case Instruction::FMul: 7842 case Instruction::UDiv: 7843 case Instruction::SDiv: 7844 case Instruction::FDiv: 7845 case Instruction::URem: 7846 case Instruction::SRem: 7847 case Instruction::FRem: 7848 case Instruction::Shl: 7849 case Instruction::LShr: 7850 case Instruction::AShr: 7851 case Instruction::And: 7852 case Instruction::Or: 7853 case Instruction::Xor: { 7854 setInsertPointAfterBundle(E); 7855 7856 Value *LHS = vectorizeTree(E->getOperand(0)); 7857 Value *RHS = vectorizeTree(E->getOperand(1)); 7858 7859 if (E->VectorizedValue) { 7860 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 7861 return E->VectorizedValue; 7862 } 7863 7864 Value *V = Builder.CreateBinOp( 7865 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, 7866 RHS); 7867 propagateIRFlags(V, E->Scalars, VL0); 7868 if (auto *I = dyn_cast<Instruction>(V)) 7869 V = propagateMetadata(I, E->Scalars); 7870 7871 ShuffleBuilder.addInversedMask(E->ReorderIndices); 7872 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 7873 V = ShuffleBuilder.finalize(V); 7874 7875 E->VectorizedValue = V; 7876 ++NumVectorInstructions; 7877 7878 return V; 7879 } 7880 case Instruction::Load: { 7881 // Loads are inserted at the head of the tree because we don't want to 7882 // sink them all the way down past store instructions. 7883 setInsertPointAfterBundle(E); 7884 7885 LoadInst *LI = cast<LoadInst>(VL0); 7886 Instruction *NewLI; 7887 unsigned AS = LI->getPointerAddressSpace(); 7888 Value *PO = LI->getPointerOperand(); 7889 if (E->State == TreeEntry::Vectorize) { 7890 Value *VecPtr = Builder.CreateBitCast(PO, VecTy->getPointerTo(AS)); 7891 NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign()); 7892 7893 // The pointer operand uses an in-tree scalar so we add the new BitCast 7894 // or LoadInst to ExternalUses list to make sure that an extract will 7895 // be generated in the future. 7896 if (TreeEntry *Entry = getTreeEntry(PO)) { 7897 // Find which lane we need to extract. 
7898 unsigned FoundLane = Entry->findLaneForValue(PO); 7899 ExternalUses.emplace_back( 7900 PO, PO != VecPtr ? cast<User>(VecPtr) : NewLI, FoundLane); 7901 } 7902 } else { 7903 assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state"); 7904 Value *VecPtr = vectorizeTree(E->getOperand(0)); 7905 // Use the minimum alignment of the gathered loads. 7906 Align CommonAlignment = LI->getAlign(); 7907 for (Value *V : E->Scalars) 7908 CommonAlignment = 7909 commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign()); 7910 NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment); 7911 } 7912 Value *V = propagateMetadata(NewLI, E->Scalars); 7913 7914 ShuffleBuilder.addInversedMask(E->ReorderIndices); 7915 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 7916 V = ShuffleBuilder.finalize(V); 7917 E->VectorizedValue = V; 7918 ++NumVectorInstructions; 7919 return V; 7920 } 7921 case Instruction::Store: { 7922 auto *SI = cast<StoreInst>(VL0); 7923 unsigned AS = SI->getPointerAddressSpace(); 7924 7925 setInsertPointAfterBundle(E); 7926 7927 Value *VecValue = vectorizeTree(E->getOperand(0)); 7928 ShuffleBuilder.addMask(E->ReorderIndices); 7929 VecValue = ShuffleBuilder.finalize(VecValue); 7930 7931 Value *ScalarPtr = SI->getPointerOperand(); 7932 Value *VecPtr = Builder.CreateBitCast( 7933 ScalarPtr, VecValue->getType()->getPointerTo(AS)); 7934 StoreInst *ST = 7935 Builder.CreateAlignedStore(VecValue, VecPtr, SI->getAlign()); 7936 7937 // The pointer operand uses an in-tree scalar, so add the new BitCast or 7938 // StoreInst to ExternalUses to make sure that an extract will be 7939 // generated in the future. 7940 if (TreeEntry *Entry = getTreeEntry(ScalarPtr)) { 7941 // Find which lane we need to extract. 7942 unsigned FoundLane = Entry->findLaneForValue(ScalarPtr); 7943 ExternalUses.push_back(ExternalUser( 7944 ScalarPtr, ScalarPtr != VecPtr ? 
cast<User>(VecPtr) : ST, 7945 FoundLane)); 7946 } 7947 7948 Value *V = propagateMetadata(ST, E->Scalars); 7949 7950 E->VectorizedValue = V; 7951 ++NumVectorInstructions; 7952 return V; 7953 } 7954 case Instruction::GetElementPtr: { 7955 auto *GEP0 = cast<GetElementPtrInst>(VL0); 7956 setInsertPointAfterBundle(E); 7957 7958 Value *Op0 = vectorizeTree(E->getOperand(0)); 7959 7960 SmallVector<Value *> OpVecs; 7961 for (int J = 1, N = GEP0->getNumOperands(); J < N; ++J) { 7962 Value *OpVec = vectorizeTree(E->getOperand(J)); 7963 OpVecs.push_back(OpVec); 7964 } 7965 7966 Value *V = Builder.CreateGEP(GEP0->getSourceElementType(), Op0, OpVecs); 7967 if (Instruction *I = dyn_cast<Instruction>(V)) 7968 V = propagateMetadata(I, E->Scalars); 7969 7970 ShuffleBuilder.addInversedMask(E->ReorderIndices); 7971 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 7972 V = ShuffleBuilder.finalize(V); 7973 7974 E->VectorizedValue = V; 7975 ++NumVectorInstructions; 7976 7977 return V; 7978 } 7979 case Instruction::Call: { 7980 CallInst *CI = cast<CallInst>(VL0); 7981 setInsertPointAfterBundle(E); 7982 7983 Intrinsic::ID IID = Intrinsic::not_intrinsic; 7984 if (Function *FI = CI->getCalledFunction()) 7985 IID = FI->getIntrinsicID(); 7986 7987 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 7988 7989 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 7990 bool UseIntrinsic = ID != Intrinsic::not_intrinsic && 7991 VecCallCosts.first <= VecCallCosts.second; 7992 7993 Value *ScalarArg = nullptr; 7994 std::vector<Value *> OpVecs; 7995 SmallVector<Type *, 2> TysForDecl = 7996 {FixedVectorType::get(CI->getType(), E->Scalars.size())}; 7997 for (int j = 0, e = CI->arg_size(); j < e; ++j) { 7998 ValueList OpVL; 7999 // Some intrinsics have scalar arguments. This argument should not be 8000 // vectorized. 8001 if (UseIntrinsic && isVectorIntrinsicWithScalarOpAtArg(IID, j)) { 8002 CallInst *CEI = cast<CallInst>(VL0); 8003 ScalarArg = CEI->getArgOperand(j); 8004 OpVecs.push_back(CEI->getArgOperand(j)); 8005 if (isVectorIntrinsicWithOverloadTypeAtArg(IID, j)) 8006 TysForDecl.push_back(ScalarArg->getType()); 8007 continue; 8008 } 8009 8010 Value *OpVec = vectorizeTree(E->getOperand(j)); 8011 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); 8012 OpVecs.push_back(OpVec); 8013 if (isVectorIntrinsicWithOverloadTypeAtArg(IID, j)) 8014 TysForDecl.push_back(OpVec->getType()); 8015 } 8016 8017 Function *CF; 8018 if (!UseIntrinsic) { 8019 VFShape Shape = 8020 VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>( 8021 VecTy->getNumElements())), 8022 false /*HasGlobalPred*/); 8023 CF = VFDatabase(*CI).getVectorizedFunction(Shape); 8024 } else { 8025 CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl); 8026 } 8027 8028 SmallVector<OperandBundleDef, 1> OpBundles; 8029 CI->getOperandBundlesAsDefs(OpBundles); 8030 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); 8031 8032 // The scalar argument uses an in-tree scalar so we add the new vectorized 8033 // call to ExternalUses list to make sure that an extract will be 8034 // generated in the future. 8035 if (ScalarArg) { 8036 if (TreeEntry *Entry = getTreeEntry(ScalarArg)) { 8037 // Find which lane we need to extract. 
8038 unsigned FoundLane = Entry->findLaneForValue(ScalarArg); 8039 ExternalUses.push_back( 8040 ExternalUser(ScalarArg, cast<User>(V), FoundLane)); 8041 } 8042 } 8043 8044 propagateIRFlags(V, E->Scalars, VL0); 8045 ShuffleBuilder.addInversedMask(E->ReorderIndices); 8046 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 8047 V = ShuffleBuilder.finalize(V); 8048 8049 E->VectorizedValue = V; 8050 ++NumVectorInstructions; 8051 return V; 8052 } 8053 case Instruction::ShuffleVector: { 8054 assert(E->isAltShuffle() && 8055 ((Instruction::isBinaryOp(E->getOpcode()) && 8056 Instruction::isBinaryOp(E->getAltOpcode())) || 8057 (Instruction::isCast(E->getOpcode()) && 8058 Instruction::isCast(E->getAltOpcode())) || 8059 (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) && 8060 "Invalid Shuffle Vector Operand"); 8061 8062 Value *LHS = nullptr, *RHS = nullptr; 8063 if (Instruction::isBinaryOp(E->getOpcode()) || isa<CmpInst>(VL0)) { 8064 setInsertPointAfterBundle(E); 8065 LHS = vectorizeTree(E->getOperand(0)); 8066 RHS = vectorizeTree(E->getOperand(1)); 8067 } else { 8068 setInsertPointAfterBundle(E); 8069 LHS = vectorizeTree(E->getOperand(0)); 8070 } 8071 8072 if (E->VectorizedValue) { 8073 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 8074 return E->VectorizedValue; 8075 } 8076 8077 Value *V0, *V1; 8078 if (Instruction::isBinaryOp(E->getOpcode())) { 8079 V0 = Builder.CreateBinOp( 8080 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS); 8081 V1 = Builder.CreateBinOp( 8082 static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS); 8083 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) { 8084 V0 = Builder.CreateCmp(CI0->getPredicate(), LHS, RHS); 8085 auto *AltCI = cast<CmpInst>(E->getAltOp()); 8086 CmpInst::Predicate AltPred = AltCI->getPredicate(); 8087 V1 = Builder.CreateCmp(AltPred, LHS, RHS); 8088 } else { 8089 V0 = Builder.CreateCast( 8090 static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy); 8091 V1 = Builder.CreateCast( 8092 static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy); 8093 } 8094 // Add V0 and V1 to later analysis to try to find and remove matching 8095 // instruction, if any. 8096 for (Value *V : {V0, V1}) { 8097 if (auto *I = dyn_cast<Instruction>(V)) { 8098 GatherShuffleSeq.insert(I); 8099 CSEBlocks.insert(I->getParent()); 8100 } 8101 } 8102 8103 // Create shuffle to take alternate operations from the vector. 8104 // Also, gather up main and alt scalar ops to propagate IR flags to 8105 // each vector operation. 
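// Illustrative sketch (operand names are made up): for the scalars
// {a0+b0, a1-b1, a2+b2, a3-b3} the code below produces roughly
//   %v.main = add <4 x i32> %va, %vb
//   %v.alt  = sub <4 x i32> %va, %vb
//   %v      = shufflevector <4 x i32> %v.main, <4 x i32> %v.alt,
//                           <4 x i32> <i32 0, i32 5, i32 2, i32 7>
// so even lanes take the main opcode and odd lanes the alternate one.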
8106 ValueList OpScalars, AltScalars; 8107 SmallVector<int> Mask; 8108 buildShuffleEntryMask( 8109 E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices, 8110 [E](Instruction *I) { 8111 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 8112 return isAlternateInstruction(I, E->getMainOp(), E->getAltOp()); 8113 }, 8114 Mask, &OpScalars, &AltScalars); 8115 8116 propagateIRFlags(V0, OpScalars); 8117 propagateIRFlags(V1, AltScalars); 8118 8119 Value *V = Builder.CreateShuffleVector(V0, V1, Mask); 8120 if (auto *I = dyn_cast<Instruction>(V)) { 8121 V = propagateMetadata(I, E->Scalars); 8122 GatherShuffleSeq.insert(I); 8123 CSEBlocks.insert(I->getParent()); 8124 } 8125 V = ShuffleBuilder.finalize(V); 8126 8127 E->VectorizedValue = V; 8128 ++NumVectorInstructions; 8129 8130 return V; 8131 } 8132 default: 8133 llvm_unreachable("unknown inst"); 8134 } 8135 return nullptr; 8136 } 8137 8138 Value *BoUpSLP::vectorizeTree() { 8139 ExtraValueToDebugLocsMap ExternallyUsedValues; 8140 return vectorizeTree(ExternallyUsedValues); 8141 } 8142 8143 namespace { 8144 /// Data type for handling buildvector sequences with the reused scalars from 8145 /// other tree entries. 8146 struct ShuffledInsertData { 8147 /// List of insertelements to be replaced by shuffles. 8148 SmallVector<InsertElementInst *> InsertElements; 8149 /// The parent vectors and shuffle mask for the given list of inserts. 8150 MapVector<Value *, SmallVector<int>> ValueMasks; 8151 }; 8152 } // namespace 8153 8154 Value * 8155 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { 8156 // All blocks must be scheduled before any instructions are inserted. 8157 for (auto &BSIter : BlocksSchedules) { 8158 scheduleBlock(BSIter.second.get()); 8159 } 8160 8161 Builder.SetInsertPoint(&F->getEntryBlock().front()); 8162 auto *VectorRoot = vectorizeTree(VectorizableTree[0].get()); 8163 8164 // If the vectorized tree can be rewritten in a smaller type, we truncate the 8165 // vectorized root. InstCombine will then rewrite the entire expression. We 8166 // sign extend the extracted values below. 8167 auto *ScalarRoot = VectorizableTree[0]->Scalars[0]; 8168 if (MinBWs.count(ScalarRoot)) { 8169 if (auto *I = dyn_cast<Instruction>(VectorRoot)) { 8170 // If current instr is a phi and not the last phi, insert it after the 8171 // last phi node. 8172 if (isa<PHINode>(I)) 8173 Builder.SetInsertPoint(&*I->getParent()->getFirstInsertionPt()); 8174 else 8175 Builder.SetInsertPoint(&*++BasicBlock::iterator(I)); 8176 } 8177 auto BundleWidth = VectorizableTree[0]->Scalars.size(); 8178 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 8179 auto *VecTy = FixedVectorType::get(MinTy, BundleWidth); 8180 auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy); 8181 VectorizableTree[0]->VectorizedValue = Trunc; 8182 } 8183 8184 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() 8185 << " values .\n"); 8186 8187 SmallVector<ShuffledInsertData> ShuffledInserts; 8188 // Maps vector instruction to original insertelement instruction 8189 DenseMap<Value *, InsertElementInst *> VectorToInsertElement; 8190 // Extract all of the elements with the external uses. 8191 for (const auto &ExternalUse : ExternalUses) { 8192 Value *Scalar = ExternalUse.Scalar; 8193 llvm::User *User = ExternalUse.User; 8194 8195 // Skip users that we already RAUW. This happens when one instruction 8196 // has multiple uses of the same value. 
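// (Illustrative: if %u = add i32 %s, %s, processing the first use of %s
//  replaces both operands of %u with the new extract, so the second
//  ExternalUse entry for %u no longer finds %u among the users of %s and
//  is skipped here.)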
8197 if (User && !is_contained(Scalar->users(), User)) 8198 continue; 8199 TreeEntry *E = getTreeEntry(Scalar); 8200 assert(E && "Invalid scalar"); 8201 assert(E->State != TreeEntry::NeedToGather && 8202 "Extracting from a gather list"); 8203 8204 Value *Vec = E->VectorizedValue; 8205 assert(Vec && "Can't find vectorizable value"); 8206 8207 Value *Lane = Builder.getInt32(ExternalUse.Lane); 8208 auto ExtractAndExtendIfNeeded = [&](Value *Vec) { 8209 if (Scalar->getType() != Vec->getType()) { 8210 Value *Ex; 8211 // "Reuse" the existing extract to improve final codegen. 8212 if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) { 8213 Ex = Builder.CreateExtractElement(ES->getOperand(0), 8214 ES->getOperand(1)); 8215 } else { 8216 Ex = Builder.CreateExtractElement(Vec, Lane); 8217 } 8218 // If necessary, sign-extend or zero-extend ScalarRoot 8219 // to the larger type. 8220 if (!MinBWs.count(ScalarRoot)) 8221 return Ex; 8222 if (MinBWs[ScalarRoot].second) 8223 return Builder.CreateSExt(Ex, Scalar->getType()); 8224 return Builder.CreateZExt(Ex, Scalar->getType()); 8225 } 8226 assert(isa<FixedVectorType>(Scalar->getType()) && 8227 isa<InsertElementInst>(Scalar) && 8228 "In-tree scalar of vector type is not insertelement?"); 8229 auto *IE = cast<InsertElementInst>(Scalar); 8230 VectorToInsertElement.try_emplace(Vec, IE); 8231 return Vec; 8232 }; 8233 // If User == nullptr, the Scalar is used as extra arg. Generate 8234 // ExtractElement instruction and update the record for this scalar in 8235 // ExternallyUsedValues. 8236 if (!User) { 8237 assert(ExternallyUsedValues.count(Scalar) && 8238 "Scalar with nullptr as an external user must be registered in " 8239 "ExternallyUsedValues map"); 8240 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 8241 Builder.SetInsertPoint(VecI->getParent(), 8242 std::next(VecI->getIterator())); 8243 } else { 8244 Builder.SetInsertPoint(&F->getEntryBlock().front()); 8245 } 8246 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 8247 CSEBlocks.insert(cast<Instruction>(Scalar)->getParent()); 8248 auto &NewInstLocs = ExternallyUsedValues[NewInst]; 8249 auto It = ExternallyUsedValues.find(Scalar); 8250 assert(It != ExternallyUsedValues.end() && 8251 "Externally used scalar is not found in ExternallyUsedValues"); 8252 NewInstLocs.append(It->second); 8253 ExternallyUsedValues.erase(Scalar); 8254 // Required to update internally referenced instructions. 8255 Scalar->replaceAllUsesWith(NewInst); 8256 continue; 8257 } 8258 8259 if (auto *VU = dyn_cast<InsertElementInst>(User)) { 8260 // Skip if the scalar is another vector op or Vec is not an instruction. 8261 if (!Scalar->getType()->isVectorTy() && isa<Instruction>(Vec)) { 8262 if (auto *FTy = dyn_cast<FixedVectorType>(User->getType())) { 8263 Optional<unsigned> InsertIdx = getInsertIndex(VU); 8264 if (InsertIdx) { 8265 auto *It = 8266 find_if(ShuffledInserts, [VU](const ShuffledInsertData &Data) { 8267 // Checks if 2 insertelements are from the same buildvector. 8268 InsertElementInst *VecInsert = Data.InsertElements.front(); 8269 return areTwoInsertFromSameBuildVector(VU, VecInsert); 8270 }); 8271 unsigned Idx = *InsertIdx; 8272 if (It == ShuffledInserts.end()) { 8273 (void)ShuffledInserts.emplace_back(); 8274 It = std::next(ShuffledInserts.begin(), 8275 ShuffledInserts.size() - 1); 8276 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec]; 8277 if (Mask.empty()) 8278 Mask.assign(FTy->getNumElements(), UndefMaskElem); 8279 // Find the insertvector, vectorized in tree, if any. 
8280 Value *Base = VU; 8281 while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) { 8282 if (IEBase != User && 8283 (!IEBase->hasOneUse() || 8284 getInsertIndex(IEBase).getValueOr(Idx) == Idx)) 8285 break; 8286 // Build the mask for the vectorized insertelement instructions. 8287 if (const TreeEntry *E = getTreeEntry(IEBase)) { 8288 do { 8289 IEBase = cast<InsertElementInst>(Base); 8290 int IEIdx = *getInsertIndex(IEBase); 8291 assert(Mask[Idx] == UndefMaskElem && 8292 "InsertElementInstruction used already."); 8293 Mask[IEIdx] = IEIdx; 8294 Base = IEBase->getOperand(0); 8295 } while (E == getTreeEntry(Base)); 8296 break; 8297 } 8298 Base = cast<InsertElementInst>(Base)->getOperand(0); 8299 // After the vectorization the def-use chain has changed, need 8300 // to look through original insertelement instructions, if they 8301 // get replaced by vector instructions. 8302 auto It = VectorToInsertElement.find(Base); 8303 if (It != VectorToInsertElement.end()) 8304 Base = It->second; 8305 } 8306 } 8307 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec]; 8308 if (Mask.empty()) 8309 Mask.assign(FTy->getNumElements(), UndefMaskElem); 8310 Mask[Idx] = ExternalUse.Lane; 8311 It->InsertElements.push_back(cast<InsertElementInst>(User)); 8312 continue; 8313 } 8314 } 8315 } 8316 } 8317 8318 // Generate extracts for out-of-tree users. 8319 // Find the insertion point for the extractelement lane. 8320 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 8321 if (PHINode *PH = dyn_cast<PHINode>(User)) { 8322 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { 8323 if (PH->getIncomingValue(i) == Scalar) { 8324 Instruction *IncomingTerminator = 8325 PH->getIncomingBlock(i)->getTerminator(); 8326 if (isa<CatchSwitchInst>(IncomingTerminator)) { 8327 Builder.SetInsertPoint(VecI->getParent(), 8328 std::next(VecI->getIterator())); 8329 } else { 8330 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); 8331 } 8332 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 8333 CSEBlocks.insert(PH->getIncomingBlock(i)); 8334 PH->setOperand(i, NewInst); 8335 } 8336 } 8337 } else { 8338 Builder.SetInsertPoint(cast<Instruction>(User)); 8339 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 8340 CSEBlocks.insert(cast<Instruction>(User)->getParent()); 8341 User->replaceUsesOfWith(Scalar, NewInst); 8342 } 8343 } else { 8344 Builder.SetInsertPoint(&F->getEntryBlock().front()); 8345 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 8346 CSEBlocks.insert(&F->getEntryBlock()); 8347 User->replaceUsesOfWith(Scalar, NewInst); 8348 } 8349 8350 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 8351 } 8352 8353 // Checks if the mask is an identity mask. 8354 auto &&IsIdentityMask = [](ArrayRef<int> Mask, FixedVectorType *VecTy) { 8355 int Limit = Mask.size(); 8356 return VecTy->getNumElements() == Mask.size() && 8357 all_of(Mask, [Limit](int Idx) { return Idx < Limit; }) && 8358 ShuffleVectorInst::isIdentityMask(Mask); 8359 }; 8360 // Tries to combine 2 different masks into single one. 8361 auto &&CombineMasks = [](SmallVectorImpl<int> &Mask, ArrayRef<int> ExtMask) { 8362 SmallVector<int> NewMask(ExtMask.size(), UndefMaskElem); 8363 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) { 8364 if (ExtMask[I] == UndefMaskElem) 8365 continue; 8366 NewMask[I] = Mask[ExtMask[I]]; 8367 } 8368 Mask.swap(NewMask); 8369 }; 8370 // Peek through shuffles, trying to simplify the final shuffle code. 
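// Illustrative sketch of the composition performed below: if
//   V = shufflevector %src, poison, <1, 0, 3, 2>
// and the requested Mask is <2, 3, undef, 0>, the two masks combine to
//   <3, 2, undef, 1>
// and V is replaced by %src, so a single shuffle of %src suffices.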
8371 auto &&PeekThroughShuffles = 8372 [&IsIdentityMask, &CombineMasks](Value *&V, SmallVectorImpl<int> &Mask, 8373 bool CheckForLengthChange = false) { 8374 while (auto *SV = dyn_cast<ShuffleVectorInst>(V)) { 8375 // Exit if not a fixed vector type or changing size shuffle. 8376 if (!isa<FixedVectorType>(SV->getType()) || 8377 (CheckForLengthChange && SV->changesLength())) 8378 break; 8379 // Exit if the identity or broadcast mask is found. 8380 if (IsIdentityMask(Mask, cast<FixedVectorType>(SV->getType())) || 8381 SV->isZeroEltSplat()) 8382 break; 8383 bool IsOp1Undef = isUndefVector(SV->getOperand(0)); 8384 bool IsOp2Undef = isUndefVector(SV->getOperand(1)); 8385 if (!IsOp1Undef && !IsOp2Undef) 8386 break; 8387 SmallVector<int> ShuffleMask(SV->getShuffleMask().begin(), 8388 SV->getShuffleMask().end()); 8389 CombineMasks(ShuffleMask, Mask); 8390 Mask.swap(ShuffleMask); 8391 if (IsOp2Undef) 8392 V = SV->getOperand(0); 8393 else 8394 V = SV->getOperand(1); 8395 } 8396 }; 8397 // Smart shuffle instruction emission, walks through shuffles trees and 8398 // tries to find the best matching vector for the actual shuffle 8399 // instruction. 8400 auto &&CreateShuffle = [this, &IsIdentityMask, &PeekThroughShuffles, 8401 &CombineMasks](Value *V1, Value *V2, 8402 ArrayRef<int> Mask) -> Value * { 8403 assert(V1 && "Expected at least one vector value."); 8404 if (V2 && !isUndefVector(V2)) { 8405 // Peek through shuffles. 8406 Value *Op1 = V1; 8407 Value *Op2 = V2; 8408 int VF = 8409 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue(); 8410 SmallVector<int> CombinedMask1(Mask.size(), UndefMaskElem); 8411 SmallVector<int> CombinedMask2(Mask.size(), UndefMaskElem); 8412 for (int I = 0, E = Mask.size(); I < E; ++I) { 8413 if (Mask[I] < VF) 8414 CombinedMask1[I] = Mask[I]; 8415 else 8416 CombinedMask2[I] = Mask[I] - VF; 8417 } 8418 Value *PrevOp1; 8419 Value *PrevOp2; 8420 do { 8421 PrevOp1 = Op1; 8422 PrevOp2 = Op2; 8423 PeekThroughShuffles(Op1, CombinedMask1, /*CheckForLengthChange=*/true); 8424 PeekThroughShuffles(Op2, CombinedMask2, /*CheckForLengthChange=*/true); 8425 // Check if we have 2 resizing shuffles - need to peek through operands 8426 // again. 8427 if (auto *SV1 = dyn_cast<ShuffleVectorInst>(Op1)) 8428 if (auto *SV2 = dyn_cast<ShuffleVectorInst>(Op2)) 8429 if (SV1->getOperand(0)->getType() == 8430 SV2->getOperand(0)->getType() && 8431 SV1->getOperand(0)->getType() != SV1->getType() && 8432 isUndefVector(SV1->getOperand(1)) && 8433 isUndefVector(SV2->getOperand(1))) { 8434 Op1 = SV1->getOperand(0); 8435 Op2 = SV2->getOperand(0); 8436 SmallVector<int> ShuffleMask1(SV1->getShuffleMask().begin(), 8437 SV1->getShuffleMask().end()); 8438 CombineMasks(ShuffleMask1, CombinedMask1); 8439 CombinedMask1.swap(ShuffleMask1); 8440 SmallVector<int> ShuffleMask2(SV2->getShuffleMask().begin(), 8441 SV2->getShuffleMask().end()); 8442 CombineMasks(ShuffleMask2, CombinedMask2); 8443 CombinedMask2.swap(ShuffleMask2); 8444 } 8445 } while (PrevOp1 != Op1 || PrevOp2 != Op2); 8446 VF = cast<VectorType>(Op1->getType()) 8447 ->getElementCount() 8448 .getKnownMinValue(); 8449 for (int I = 0, E = Mask.size(); I < E; ++I) { 8450 if (CombinedMask2[I] != UndefMaskElem) { 8451 assert(CombinedMask1[I] == UndefMaskElem && 8452 "Expected undefined mask element"); 8453 CombinedMask1[I] = CombinedMask2[I] + (Op1 == Op2 ? 0 : VF); 8454 } 8455 } 8456 Value *Vec = Builder.CreateShuffleVector( 8457 Op1, Op1 == Op2 ? 
PoisonValue::get(Op1->getType()) : Op2, 8458 CombinedMask1); 8459 if (auto *I = dyn_cast<Instruction>(Vec)) { 8460 GatherShuffleSeq.insert(I); 8461 CSEBlocks.insert(I->getParent()); 8462 } 8463 return Vec; 8464 } 8465 if (isa<PoisonValue>(V1)) 8466 return PoisonValue::get(FixedVectorType::get( 8467 cast<VectorType>(V1->getType())->getElementType(), Mask.size())); 8468 Value *Op = V1; 8469 SmallVector<int> CombinedMask(Mask.begin(), Mask.end()); 8470 PeekThroughShuffles(Op, CombinedMask); 8471 if (!isa<FixedVectorType>(Op->getType()) || 8472 !IsIdentityMask(CombinedMask, cast<FixedVectorType>(Op->getType()))) { 8473 Value *Vec = Builder.CreateShuffleVector(Op, CombinedMask); 8474 if (auto *I = dyn_cast<Instruction>(Vec)) { 8475 GatherShuffleSeq.insert(I); 8476 CSEBlocks.insert(I->getParent()); 8477 } 8478 return Vec; 8479 } 8480 return Op; 8481 }; 8482 8483 auto &&ResizeToVF = [&CreateShuffle](Value *Vec, ArrayRef<int> Mask) { 8484 unsigned VF = Mask.size(); 8485 unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements(); 8486 if (VF != VecVF) { 8487 if (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); })) { 8488 Vec = CreateShuffle(Vec, nullptr, Mask); 8489 return std::make_pair(Vec, true); 8490 } 8491 SmallVector<int> ResizeMask(VF, UndefMaskElem); 8492 for (unsigned I = 0; I < VF; ++I) { 8493 if (Mask[I] != UndefMaskElem) 8494 ResizeMask[Mask[I]] = Mask[I]; 8495 } 8496 Vec = CreateShuffle(Vec, nullptr, ResizeMask); 8497 } 8498 8499 return std::make_pair(Vec, false); 8500 }; 8501 // Perform shuffling of the vectorize tree entries for better handling of 8502 // external extracts. 8503 for (int I = 0, E = ShuffledInserts.size(); I < E; ++I) { 8504 // Find the first and the last instruction in the list of insertelements. 8505 sort(ShuffledInserts[I].InsertElements, isFirstInsertElement); 8506 InsertElementInst *FirstInsert = ShuffledInserts[I].InsertElements.front(); 8507 InsertElementInst *LastInsert = ShuffledInserts[I].InsertElements.back(); 8508 Builder.SetInsertPoint(LastInsert); 8509 auto Vector = ShuffledInserts[I].ValueMasks.takeVector(); 8510 Value *NewInst = performExtractsShuffleAction<Value>( 8511 makeMutableArrayRef(Vector.data(), Vector.size()), 8512 FirstInsert->getOperand(0), 8513 [](Value *Vec) { 8514 return cast<VectorType>(Vec->getType()) 8515 ->getElementCount() 8516 .getKnownMinValue(); 8517 }, 8518 ResizeToVF, 8519 [FirstInsert, &CreateShuffle](ArrayRef<int> Mask, 8520 ArrayRef<Value *> Vals) { 8521 assert((Vals.size() == 1 || Vals.size() == 2) && 8522 "Expected exactly 1 or 2 input values."); 8523 if (Vals.size() == 1) { 8524 // Do not create shuffle if the mask is a simple identity 8525 // non-resizing mask. 8526 if (Mask.size() != cast<FixedVectorType>(Vals.front()->getType()) 8527 ->getNumElements() || 8528 !ShuffleVectorInst::isIdentityMask(Mask)) 8529 return CreateShuffle(Vals.front(), nullptr, Mask); 8530 return Vals.front(); 8531 } 8532 return CreateShuffle(Vals.front() ? Vals.front() 8533 : FirstInsert->getOperand(0), 8534 Vals.back(), Mask); 8535 }); 8536 auto It = ShuffledInserts[I].InsertElements.rbegin(); 8537 // Rebuild buildvector chain. 
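// Illustrative sketch (names are made up): for a chain such as
//   %i0 = insertelement <4 x float> poison, float %s0, i32 0
//   %i1 = insertelement <4 x float> %i0, float %s1, i32 1
// the users of the last insert are redirected to the shuffle built above
// (NewInst), and the now-dead inserts are erased once their scalar
// operands have been replaced with poison.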
8538 InsertElementInst *II = nullptr; 8539 if (It != ShuffledInserts[I].InsertElements.rend()) 8540 II = *It; 8541 SmallVector<Instruction *> Inserts; 8542 while (It != ShuffledInserts[I].InsertElements.rend()) { 8543 assert(II && "Must be an insertelement instruction."); 8544 if (*It == II) 8545 ++It; 8546 else 8547 Inserts.push_back(cast<Instruction>(II)); 8548 II = dyn_cast<InsertElementInst>(II->getOperand(0)); 8549 } 8550 for (Instruction *II : reverse(Inserts)) { 8551 II->replaceUsesOfWith(II->getOperand(0), NewInst); 8552 if (auto *NewI = dyn_cast<Instruction>(NewInst)) 8553 if (II->getParent() == NewI->getParent() && II->comesBefore(NewI)) 8554 II->moveAfter(NewI); 8555 NewInst = II; 8556 } 8557 LastInsert->replaceAllUsesWith(NewInst); 8558 for (InsertElementInst *IE : reverse(ShuffledInserts[I].InsertElements)) { 8559 IE->replaceUsesOfWith(IE->getOperand(1), 8560 PoisonValue::get(IE->getOperand(1)->getType())); 8561 eraseInstruction(IE); 8562 } 8563 CSEBlocks.insert(LastInsert->getParent()); 8564 } 8565 8566 // For each vectorized value: 8567 for (auto &TEPtr : VectorizableTree) { 8568 TreeEntry *Entry = TEPtr.get(); 8569 8570 // No need to handle users of gathered values. 8571 if (Entry->State == TreeEntry::NeedToGather) 8572 continue; 8573 8574 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 8575 8576 // For each lane: 8577 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 8578 Value *Scalar = Entry->Scalars[Lane]; 8579 8580 #ifndef NDEBUG 8581 Type *Ty = Scalar->getType(); 8582 if (!Ty->isVoidTy()) { 8583 for (User *U : Scalar->users()) { 8584 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 8585 8586 // It is legal to delete users in the ignorelist. 8587 assert((getTreeEntry(U) || 8588 (UserIgnoreList && UserIgnoreList->contains(U)) || 8589 (isa_and_nonnull<Instruction>(U) && 8590 isDeleted(cast<Instruction>(U)))) && 8591 "Deleting out-of-tree value"); 8592 } 8593 } 8594 #endif 8595 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 8596 eraseInstruction(cast<Instruction>(Scalar)); 8597 } 8598 } 8599 8600 Builder.ClearInsertionPoint(); 8601 InstrElementSize.clear(); 8602 8603 return VectorizableTree[0]->VectorizedValue; 8604 } 8605 8606 void BoUpSLP::optimizeGatherSequence() { 8607 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherShuffleSeq.size() 8608 << " gather sequences instructions.\n"); 8609 // LICM InsertElementInst sequences. 8610 for (Instruction *I : GatherShuffleSeq) { 8611 if (isDeleted(I)) 8612 continue; 8613 8614 // Check if this block is inside a loop. 8615 Loop *L = LI->getLoopFor(I->getParent()); 8616 if (!L) 8617 continue; 8618 8619 // Check if it has a preheader. 8620 BasicBlock *PreHeader = L->getLoopPreheader(); 8621 if (!PreHeader) 8622 continue; 8623 8624 // If the vector or the element that we insert into it are 8625 // instructions that are defined in this basic block then we can't 8626 // hoist this instruction. 8627 if (any_of(I->operands(), [L](Value *V) { 8628 auto *OpI = dyn_cast<Instruction>(V); 8629 return OpI && L->contains(OpI); 8630 })) 8631 continue; 8632 8633 // We can hoist this instruction. Move it to the pre-header. 8634 I->moveBefore(PreHeader->getTerminator()); 8635 } 8636 8637 // Make a list of all reachable blocks in our CSE queue. 
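// Visiting blocks in dominator order lets a shuffle in a dominated block be
// replaced by an identical (or more defined) one seen earlier; e.g. two
// equal "shufflevector %v, poison, <0, 1, 2, 3>" instructions in blocks A
// and B, where A dominates B, are merged into the one in A.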
8638 SmallVector<const DomTreeNode *, 8> CSEWorkList; 8639 CSEWorkList.reserve(CSEBlocks.size()); 8640 for (BasicBlock *BB : CSEBlocks) 8641 if (DomTreeNode *N = DT->getNode(BB)) { 8642 assert(DT->isReachableFromEntry(N)); 8643 CSEWorkList.push_back(N); 8644 } 8645 8646 // Sort blocks by domination. This ensures we visit a block after all blocks 8647 // dominating it are visited. 8648 llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) { 8649 assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) && 8650 "Different nodes should have different DFS numbers"); 8651 return A->getDFSNumIn() < B->getDFSNumIn(); 8652 }); 8653 8654 // Less defined shuffles can be replaced by the more defined copies. 8655 // Between two shuffles one is less defined if it has the same vector operands 8656 // and its mask indeces are the same as in the first one or undefs. E.g. 8657 // shuffle %0, poison, <0, 0, 0, undef> is less defined than shuffle %0, 8658 // poison, <0, 0, 0, 0>. 8659 auto &&IsIdenticalOrLessDefined = [this](Instruction *I1, Instruction *I2, 8660 SmallVectorImpl<int> &NewMask) { 8661 if (I1->getType() != I2->getType()) 8662 return false; 8663 auto *SI1 = dyn_cast<ShuffleVectorInst>(I1); 8664 auto *SI2 = dyn_cast<ShuffleVectorInst>(I2); 8665 if (!SI1 || !SI2) 8666 return I1->isIdenticalTo(I2); 8667 if (SI1->isIdenticalTo(SI2)) 8668 return true; 8669 for (int I = 0, E = SI1->getNumOperands(); I < E; ++I) 8670 if (SI1->getOperand(I) != SI2->getOperand(I)) 8671 return false; 8672 // Check if the second instruction is more defined than the first one. 8673 NewMask.assign(SI2->getShuffleMask().begin(), SI2->getShuffleMask().end()); 8674 ArrayRef<int> SM1 = SI1->getShuffleMask(); 8675 // Count trailing undefs in the mask to check the final number of used 8676 // registers. 8677 unsigned LastUndefsCnt = 0; 8678 for (int I = 0, E = NewMask.size(); I < E; ++I) { 8679 if (SM1[I] == UndefMaskElem) 8680 ++LastUndefsCnt; 8681 else 8682 LastUndefsCnt = 0; 8683 if (NewMask[I] != UndefMaskElem && SM1[I] != UndefMaskElem && 8684 NewMask[I] != SM1[I]) 8685 return false; 8686 if (NewMask[I] == UndefMaskElem) 8687 NewMask[I] = SM1[I]; 8688 } 8689 // Check if the last undefs actually change the final number of used vector 8690 // registers. 8691 return SM1.size() - LastUndefsCnt > 1 && 8692 TTI->getNumberOfParts(SI1->getType()) == 8693 TTI->getNumberOfParts( 8694 FixedVectorType::get(SI1->getType()->getElementType(), 8695 SM1.size() - LastUndefsCnt)); 8696 }; 8697 // Perform O(N^2) search over the gather/shuffle sequences and merge identical 8698 // instructions. TODO: We can further optimize this scan if we split the 8699 // instructions into different buckets based on the insert lane. 8700 SmallVector<Instruction *, 16> Visited; 8701 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 8702 assert(*I && 8703 (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 8704 "Worklist not sorted properly!"); 8705 BasicBlock *BB = (*I)->getBlock(); 8706 // For all instructions in blocks containing gather sequences: 8707 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 8708 if (isDeleted(&In)) 8709 continue; 8710 if (!isa<InsertElementInst>(&In) && !isa<ExtractElementInst>(&In) && 8711 !isa<ShuffleVectorInst>(&In) && !GatherShuffleSeq.contains(&In)) 8712 continue; 8713 8714 // Check if we can replace this instruction with any of the 8715 // visited instructions. 
8716 bool Replaced = false; 8717 for (Instruction *&V : Visited) { 8718 SmallVector<int> NewMask; 8719 if (IsIdenticalOrLessDefined(&In, V, NewMask) && 8720 DT->dominates(V->getParent(), In.getParent())) { 8721 In.replaceAllUsesWith(V); 8722 eraseInstruction(&In); 8723 if (auto *SI = dyn_cast<ShuffleVectorInst>(V)) 8724 if (!NewMask.empty()) 8725 SI->setShuffleMask(NewMask); 8726 Replaced = true; 8727 break; 8728 } 8729 if (isa<ShuffleVectorInst>(In) && isa<ShuffleVectorInst>(V) && 8730 GatherShuffleSeq.contains(V) && 8731 IsIdenticalOrLessDefined(V, &In, NewMask) && 8732 DT->dominates(In.getParent(), V->getParent())) { 8733 In.moveAfter(V); 8734 V->replaceAllUsesWith(&In); 8735 eraseInstruction(V); 8736 if (auto *SI = dyn_cast<ShuffleVectorInst>(&In)) 8737 if (!NewMask.empty()) 8738 SI->setShuffleMask(NewMask); 8739 V = &In; 8740 Replaced = true; 8741 break; 8742 } 8743 } 8744 if (!Replaced) { 8745 assert(!is_contained(Visited, &In)); 8746 Visited.push_back(&In); 8747 } 8748 } 8749 } 8750 CSEBlocks.clear(); 8751 GatherShuffleSeq.clear(); 8752 } 8753 8754 BoUpSLP::ScheduleData * 8755 BoUpSLP::BlockScheduling::buildBundle(ArrayRef<Value *> VL) { 8756 ScheduleData *Bundle = nullptr; 8757 ScheduleData *PrevInBundle = nullptr; 8758 for (Value *V : VL) { 8759 if (doesNotNeedToBeScheduled(V)) 8760 continue; 8761 ScheduleData *BundleMember = getScheduleData(V); 8762 assert(BundleMember && 8763 "no ScheduleData for bundle member " 8764 "(maybe not in same basic block)"); 8765 assert(BundleMember->isSchedulingEntity() && 8766 "bundle member already part of other bundle"); 8767 if (PrevInBundle) { 8768 PrevInBundle->NextInBundle = BundleMember; 8769 } else { 8770 Bundle = BundleMember; 8771 } 8772 8773 // Group the instructions to a bundle. 8774 BundleMember->FirstInBundle = Bundle; 8775 PrevInBundle = BundleMember; 8776 } 8777 assert(Bundle && "Failed to find schedule bundle"); 8778 return Bundle; 8779 } 8780 8781 // Groups the instructions to a bundle (which is then a single scheduling entity) 8782 // and schedules instructions until the bundle gets ready. 8783 Optional<BoUpSLP::ScheduleData *> 8784 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 8785 const InstructionsState &S) { 8786 // No need to schedule PHIs, insertelement, extractelement and extractvalue 8787 // instructions. 8788 if (isa<PHINode>(S.OpValue) || isVectorLikeInstWithConstOps(S.OpValue) || 8789 doesNotNeedToSchedule(VL)) 8790 return nullptr; 8791 8792 // Initialize the instruction bundle. 8793 Instruction *OldScheduleEnd = ScheduleEnd; 8794 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n"); 8795 8796 auto TryScheduleBundleImpl = [this, OldScheduleEnd, SLP](bool ReSchedule, 8797 ScheduleData *Bundle) { 8798 // The scheduling region got new instructions at the lower end (or it is a 8799 // new region for the first bundle). This makes it necessary to 8800 // recalculate all dependencies. 8801 // It is seldom that this needs to be done a second time after adding the 8802 // initial bundle to the region. 
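// Illustrative scenario (block contents made up): if the region used to end
// before a later store %st and the new bundle extended ScheduleEnd past %st,
// the previously cached dependencies never saw %st, so all dependencies in
// the region are invalidated below before rescheduling.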
8803 if (ScheduleEnd != OldScheduleEnd) {
8804 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode())
8805 doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); });
8806 ReSchedule = true;
8807 }
8808 if (Bundle) {
8809 LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle
8810 << " in block " << BB->getName() << "\n");
8811 calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP);
8812 }
8813 
8814 if (ReSchedule) {
8815 resetSchedule();
8816 initialFillReadyList(ReadyInsts);
8817 }
8818 
8819 // Now try to schedule the new bundle or (if no bundle) just calculate
8820 // dependencies. As soon as the bundle is "ready" it means that there are no
8821 // cyclic dependencies and we can schedule it. Note that it's important that
8822 // we don't "schedule" the bundle yet (see cancelScheduling).
8823 while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) &&
8824 !ReadyInsts.empty()) {
8825 ScheduleData *Picked = ReadyInsts.pop_back_val();
8826 assert(Picked->isSchedulingEntity() && Picked->isReady() &&
8827 "must be ready to schedule");
8828 schedule(Picked, ReadyInsts);
8829 }
8830 };
8831 
8832 // Make sure that the scheduling region contains all
8833 // instructions of the bundle.
8834 for (Value *V : VL) {
8835 if (doesNotNeedToBeScheduled(V))
8836 continue;
8837 if (!extendSchedulingRegion(V, S)) {
8838 // If the scheduling region got new instructions at the lower end (or it
8839 // is a new region for the first bundle), it is necessary to recalculate
8840 // all dependencies.
8841 // Otherwise the compiler may crash trying to calculate dependencies
8842 // incorrectly and emit instructions in the wrong order during the
8843 // actual scheduling.
8844 TryScheduleBundleImpl(/*ReSchedule=*/false, nullptr);
8845 return None;
8846 }
8847 }
8848 
8849 bool ReSchedule = false;
8850 for (Value *V : VL) {
8851 if (doesNotNeedToBeScheduled(V))
8852 continue;
8853 ScheduleData *BundleMember = getScheduleData(V);
8854 assert(BundleMember &&
8855 "no ScheduleData for bundle member (maybe not in same basic block)");
8856 
8857 // Make sure we don't leave the pieces of the bundle in the ready list when
8858 // the whole bundle might not be ready.
8859 ReadyInsts.remove(BundleMember);
8860 
8861 if (!BundleMember->IsScheduled)
8862 continue;
8863 // A bundle member was scheduled as a single instruction before and now
8864 // needs to be scheduled as part of the bundle. We just get rid of the
8865 // existing schedule.
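// (For instance, a scalar may have been scheduled stand-alone while an
//  earlier bundle in this region was processed; once it becomes a lane of
//  the current bundle, the old per-instruction schedule is discarded and
//  the whole region is rescheduled.)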
8866 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember 8867 << " was already scheduled\n"); 8868 ReSchedule = true; 8869 } 8870 8871 auto *Bundle = buildBundle(VL); 8872 TryScheduleBundleImpl(ReSchedule, Bundle); 8873 if (!Bundle->isReady()) { 8874 cancelScheduling(VL, S.OpValue); 8875 return None; 8876 } 8877 return Bundle; 8878 } 8879 8880 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL, 8881 Value *OpValue) { 8882 if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue) || 8883 doesNotNeedToSchedule(VL)) 8884 return; 8885 8886 if (doesNotNeedToBeScheduled(OpValue)) 8887 OpValue = *find_if_not(VL, doesNotNeedToBeScheduled); 8888 ScheduleData *Bundle = getScheduleData(OpValue); 8889 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); 8890 assert(!Bundle->IsScheduled && 8891 "Can't cancel bundle which is already scheduled"); 8892 assert(Bundle->isSchedulingEntity() && 8893 (Bundle->isPartOfBundle() || needToScheduleSingleInstruction(VL)) && 8894 "tried to unbundle something which is not a bundle"); 8895 8896 // Remove the bundle from the ready list. 8897 if (Bundle->isReady()) 8898 ReadyInsts.remove(Bundle); 8899 8900 // Un-bundle: make single instructions out of the bundle. 8901 ScheduleData *BundleMember = Bundle; 8902 while (BundleMember) { 8903 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); 8904 BundleMember->FirstInBundle = BundleMember; 8905 ScheduleData *Next = BundleMember->NextInBundle; 8906 BundleMember->NextInBundle = nullptr; 8907 BundleMember->TE = nullptr; 8908 if (BundleMember->unscheduledDepsInBundle() == 0) { 8909 ReadyInsts.insert(BundleMember); 8910 } 8911 BundleMember = Next; 8912 } 8913 } 8914 8915 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() { 8916 // Allocate a new ScheduleData for the instruction. 8917 if (ChunkPos >= ChunkSize) { 8918 ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize)); 8919 ChunkPos = 0; 8920 } 8921 return &(ScheduleDataChunks.back()[ChunkPos++]); 8922 } 8923 8924 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, 8925 const InstructionsState &S) { 8926 if (getScheduleData(V, isOneOf(S, V))) 8927 return true; 8928 Instruction *I = dyn_cast<Instruction>(V); 8929 assert(I && "bundle member must be an instruction"); 8930 assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) && 8931 !doesNotNeedToBeScheduled(I) && 8932 "phi nodes/insertelements/extractelements/extractvalues don't need to " 8933 "be scheduled"); 8934 auto &&CheckScheduleForI = [this, &S](Instruction *I) -> bool { 8935 ScheduleData *ISD = getScheduleData(I); 8936 if (!ISD) 8937 return false; 8938 assert(isInSchedulingRegion(ISD) && 8939 "ScheduleData not in scheduling region"); 8940 ScheduleData *SD = allocateScheduleDataChunks(); 8941 SD->Inst = I; 8942 SD->init(SchedulingRegionID, S.OpValue); 8943 ExtraScheduleDataMap[I][S.OpValue] = SD; 8944 return true; 8945 }; 8946 if (CheckScheduleForI(I)) 8947 return true; 8948 if (!ScheduleStart) { 8949 // It's the first instruction in the new region. 
8950 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 8951 ScheduleStart = I; 8952 ScheduleEnd = I->getNextNode(); 8953 if (isOneOf(S, I) != I) 8954 CheckScheduleForI(I); 8955 assert(ScheduleEnd && "tried to vectorize a terminator?"); 8956 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 8957 return true; 8958 } 8959 // Search up and down at the same time, because we don't know if the new 8960 // instruction is above or below the existing scheduling region. 8961 BasicBlock::reverse_iterator UpIter = 8962 ++ScheduleStart->getIterator().getReverse(); 8963 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 8964 BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); 8965 BasicBlock::iterator LowerEnd = BB->end(); 8966 while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I && 8967 &*DownIter != I) { 8968 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 8969 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 8970 return false; 8971 } 8972 8973 ++UpIter; 8974 ++DownIter; 8975 } 8976 if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) { 8977 assert(I->getParent() == ScheduleStart->getParent() && 8978 "Instruction is in wrong basic block."); 8979 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 8980 ScheduleStart = I; 8981 if (isOneOf(S, I) != I) 8982 CheckScheduleForI(I); 8983 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I 8984 << "\n"); 8985 return true; 8986 } 8987 assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) && 8988 "Expected to reach top of the basic block or instruction down the " 8989 "lower end."); 8990 assert(I->getParent() == ScheduleEnd->getParent() && 8991 "Instruction is in wrong basic block."); 8992 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 8993 nullptr); 8994 ScheduleEnd = I->getNextNode(); 8995 if (isOneOf(S, I) != I) 8996 CheckScheduleForI(I); 8997 assert(ScheduleEnd && "tried to vectorize a terminator?"); 8998 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); 8999 return true; 9000 } 9001 9002 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 9003 Instruction *ToI, 9004 ScheduleData *PrevLoadStore, 9005 ScheduleData *NextLoadStore) { 9006 ScheduleData *CurrentLoadStore = PrevLoadStore; 9007 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 9008 // No need to allocate data for non-schedulable instructions. 9009 if (doesNotNeedToBeScheduled(I)) 9010 continue; 9011 ScheduleData *SD = ScheduleDataMap.lookup(I); 9012 if (!SD) { 9013 SD = allocateScheduleDataChunks(); 9014 ScheduleDataMap[I] = SD; 9015 SD->Inst = I; 9016 } 9017 assert(!isInSchedulingRegion(SD) && 9018 "new ScheduleData already in scheduling region"); 9019 SD->init(SchedulingRegionID, I); 9020 9021 if (I->mayReadOrWriteMemory() && 9022 (!isa<IntrinsicInst>(I) || 9023 (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect && 9024 cast<IntrinsicInst>(I)->getIntrinsicID() != 9025 Intrinsic::pseudoprobe))) { 9026 // Update the linked list of memory accessing instructions. 
9027 if (CurrentLoadStore) {
9028 CurrentLoadStore->NextLoadStore = SD;
9029 } else {
9030 FirstLoadStoreInRegion = SD;
9031 }
9032 CurrentLoadStore = SD;
9033 }
9034
9035 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
9036 match(I, m_Intrinsic<Intrinsic::stackrestore>()))
9037 RegionHasStackSave = true;
9038 }
9039 if (NextLoadStore) {
9040 if (CurrentLoadStore)
9041 CurrentLoadStore->NextLoadStore = NextLoadStore;
9042 } else {
9043 LastLoadStoreInRegion = CurrentLoadStore;
9044 }
9045 }
9046
9047 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
9048 bool InsertInReadyList,
9049 BoUpSLP *SLP) {
9050 assert(SD->isSchedulingEntity());
9051
9052 SmallVector<ScheduleData *, 10> WorkList;
9053 WorkList.push_back(SD);
9054
9055 while (!WorkList.empty()) {
9056 ScheduleData *SD = WorkList.pop_back_val();
9057 for (ScheduleData *BundleMember = SD; BundleMember;
9058 BundleMember = BundleMember->NextInBundle) {
9059 assert(isInSchedulingRegion(BundleMember));
9060 if (BundleMember->hasValidDependencies())
9061 continue;
9062
9063 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember
9064 << "\n");
9065 BundleMember->Dependencies = 0;
9066 BundleMember->resetUnscheduledDeps();
9067
9068 // Handle def-use chain dependencies.
9069 if (BundleMember->OpValue != BundleMember->Inst) {
9070 if (ScheduleData *UseSD = getScheduleData(BundleMember->Inst)) {
9071 BundleMember->Dependencies++;
9072 ScheduleData *DestBundle = UseSD->FirstInBundle;
9073 if (!DestBundle->IsScheduled)
9074 BundleMember->incrementUnscheduledDeps(1);
9075 if (!DestBundle->hasValidDependencies())
9076 WorkList.push_back(DestBundle);
9077 }
9078 } else {
9079 for (User *U : BundleMember->Inst->users()) {
9080 if (ScheduleData *UseSD = getScheduleData(cast<Instruction>(U))) {
9081 BundleMember->Dependencies++;
9082 ScheduleData *DestBundle = UseSD->FirstInBundle;
9083 if (!DestBundle->IsScheduled)
9084 BundleMember->incrementUnscheduledDeps(1);
9085 if (!DestBundle->hasValidDependencies())
9086 WorkList.push_back(DestBundle);
9087 }
9088 }
9089 }
9090
9091 auto makeControlDependent = [&](Instruction *I) {
9092 auto *DepDest = getScheduleData(I);
9093 assert(DepDest && "must be in schedule window");
9094 DepDest->ControlDependencies.push_back(BundleMember);
9095 BundleMember->Dependencies++;
9096 ScheduleData *DestBundle = DepDest->FirstInBundle;
9097 if (!DestBundle->IsScheduled)
9098 BundleMember->incrementUnscheduledDeps(1);
9099 if (!DestBundle->hasValidDependencies())
9100 WorkList.push_back(DestBundle);
9101 };
9102
9103 // Any instruction which isn't safe to speculate at the beginning of the
9104 // block is control dependent on any early exit or non-willreturn call
9105 // which precedes it.
9106 if (!isGuaranteedToTransferExecutionToSuccessor(BundleMember->Inst)) {
9107 for (Instruction *I = BundleMember->Inst->getNextNode();
9108 I != ScheduleEnd; I = I->getNextNode()) {
9109 if (isSafeToSpeculativelyExecute(I, &*BB->begin()))
9110 continue;
9111
9112 // Add the dependency
9113 makeControlDependent(I);
9114
9115 if (!isGuaranteedToTransferExecutionToSuccessor(I))
9116 // Everything past here must be control dependent on I.
9117 break;
9118 }
9119 }
9120
9121 if (RegionHasStackSave) {
9122 // If we have an inalloca alloca instruction, it needs to be scheduled
9123 // after any preceding stacksave. We also need to prevent any alloca
9124 // from reordering above a preceding stackrestore.
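// Illustrative IR (assumed shape, not from a specific test):
//   %save = call ptr @llvm.stacksave()
//   %arg  = alloca inalloca i32
//   ...
//   call void @llvm.stackrestore(ptr %save)
// The alloca must not be reordered above the stacksave or below the
// stackrestore, so explicit scheduling dependencies are added below.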
9125 if (match(BundleMember->Inst, m_Intrinsic<Intrinsic::stacksave>()) ||
9126 match(BundleMember->Inst, m_Intrinsic<Intrinsic::stackrestore>())) {
9127 for (Instruction *I = BundleMember->Inst->getNextNode();
9128 I != ScheduleEnd; I = I->getNextNode()) {
9129 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
9130 match(I, m_Intrinsic<Intrinsic::stackrestore>()))
9131 // Any allocas past here must be control dependent on I, and I
9132 // must be memory dependent on BundleMember->Inst.
9133 break;
9134
9135 if (!isa<AllocaInst>(I))
9136 continue;
9137
9138 // Add the dependency
9139 makeControlDependent(I);
9140 }
9141 }
9142
9143 // In addition to the cases handled just above, we need to prevent
9144 // allocas from moving below a stacksave. The stackrestore case
9145 // is currently thought to be conservative.
9146 if (isa<AllocaInst>(BundleMember->Inst)) {
9147 for (Instruction *I = BundleMember->Inst->getNextNode();
9148 I != ScheduleEnd; I = I->getNextNode()) {
9149 if (!match(I, m_Intrinsic<Intrinsic::stacksave>()) &&
9150 !match(I, m_Intrinsic<Intrinsic::stackrestore>()))
9151 continue;
9152
9153 // Add the dependency
9154 makeControlDependent(I);
9155 break;
9156 }
9157 }
9158 }
9159
9160 // Handle the memory dependencies (if any).
9161 ScheduleData *DepDest = BundleMember->NextLoadStore;
9162 if (!DepDest)
9163 continue;
9164 Instruction *SrcInst = BundleMember->Inst;
9165 assert(SrcInst->mayReadOrWriteMemory() &&
9166 "NextLoadStore list for non-memory-affecting bundle?");
9167 MemoryLocation SrcLoc = getLocation(SrcInst);
9168 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
9169 unsigned numAliased = 0;
9170 unsigned DistToSrc = 1;
9171
9172 for ( ; DepDest; DepDest = DepDest->NextLoadStore) {
9173 assert(isInSchedulingRegion(DepDest));
9174
9175 // We have two limits to reduce the complexity:
9176 // 1) AliasedCheckLimit: It's a small limit to reduce calls to
9177 // SLP->isAliased (which is the expensive part in this loop).
9178 // 2) MaxMemDepDistance: It's for very large blocks and it aborts
9179 // the whole loop (even if the loop is fast, it's quadratic).
9180 // It's important for the loop break condition (see below) to
9181 // check this limit even between two read-only instructions.
9182 if (DistToSrc >= MaxMemDepDistance ||
9183 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
9184 (numAliased >= AliasedCheckLimit ||
9185 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
9186
9187 // We increment the counter only if the locations are aliased
9188 // (instead of counting all alias checks). This gives a better
9189 // balance between reduced runtime and accurate dependencies.
9190 numAliased++;
9191
9192 DepDest->MemoryDependencies.push_back(BundleMember);
9193 BundleMember->Dependencies++;
9194 ScheduleData *DestBundle = DepDest->FirstInBundle;
9195 if (!DestBundle->IsScheduled) {
9196 BundleMember->incrementUnscheduledDeps(1);
9197 }
9198 if (!DestBundle->hasValidDependencies()) {
9199 WorkList.push_back(DestBundle);
9200 }
9201 }
9202
9203 // Example, explaining the loop break condition: Let's assume our
9204 // starting instruction is i0 and MaxMemDepDistance = 3.
9205 //
9206 // +--------v--v--v
9207 // i0,i1,i2,i3,i4,i5,i6,i7,i8
9208 // +--------^--^--^
9209 //
9210 // MaxMemDepDistance lets us stop alias-checking at i3 and we add
9211 // dependencies from i0 to i3,i4,.. (even if they are not aliased).
9212 // Previously we already added dependencies from i3 to i6,i7,i8
9213 // (because of MaxMemDepDistance).
As we added a dependency from 9214 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 9215 // and we can abort this loop at i6. 9216 if (DistToSrc >= 2 * MaxMemDepDistance) 9217 break; 9218 DistToSrc++; 9219 } 9220 } 9221 if (InsertInReadyList && SD->isReady()) { 9222 ReadyInsts.insert(SD); 9223 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst 9224 << "\n"); 9225 } 9226 } 9227 } 9228 9229 void BoUpSLP::BlockScheduling::resetSchedule() { 9230 assert(ScheduleStart && 9231 "tried to reset schedule on block which has not been scheduled"); 9232 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 9233 doForAllOpcodes(I, [&](ScheduleData *SD) { 9234 assert(isInSchedulingRegion(SD) && 9235 "ScheduleData not in scheduling region"); 9236 SD->IsScheduled = false; 9237 SD->resetUnscheduledDeps(); 9238 }); 9239 } 9240 ReadyInsts.clear(); 9241 } 9242 9243 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 9244 if (!BS->ScheduleStart) 9245 return; 9246 9247 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 9248 9249 // A key point - if we got here, pre-scheduling was able to find a valid 9250 // scheduling of the sub-graph of the scheduling window which consists 9251 // of all vector bundles and their transitive users. As such, we do not 9252 // need to reschedule anything *outside of* that subgraph. 9253 9254 BS->resetSchedule(); 9255 9256 // For the real scheduling we use a more sophisticated ready-list: it is 9257 // sorted by the original instruction location. This lets the final schedule 9258 // be as close as possible to the original instruction order. 9259 // WARNING: If changing this order causes a correctness issue, that means 9260 // there is some missing dependence edge in the schedule data graph. 9261 struct ScheduleDataCompare { 9262 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const { 9263 return SD2->SchedulingPriority < SD1->SchedulingPriority; 9264 } 9265 }; 9266 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 9267 9268 // Ensure that all dependency data is updated (for nodes in the sub-graph) 9269 // and fill the ready-list with initial instructions. 9270 int Idx = 0; 9271 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 9272 I = I->getNextNode()) { 9273 BS->doForAllOpcodes(I, [this, &Idx, BS](ScheduleData *SD) { 9274 TreeEntry *SDTE = getTreeEntry(SD->Inst); 9275 (void)SDTE; 9276 assert((isVectorLikeInstWithConstOps(SD->Inst) || 9277 SD->isPartOfBundle() == 9278 (SDTE && !doesNotNeedToSchedule(SDTE->Scalars))) && 9279 "scheduler and vectorizer bundle mismatch"); 9280 SD->FirstInBundle->SchedulingPriority = Idx++; 9281 9282 if (SD->isSchedulingEntity() && SD->isPartOfBundle()) 9283 BS->calculateDependencies(SD, false, this); 9284 }); 9285 } 9286 BS->initialFillReadyList(ReadyInsts); 9287 9288 Instruction *LastScheduledInst = BS->ScheduleEnd; 9289 9290 // Do the "real" scheduling. 9291 while (!ReadyInsts.empty()) { 9292 ScheduleData *picked = *ReadyInsts.begin(); 9293 ReadyInsts.erase(ReadyInsts.begin()); 9294 9295 // Move the scheduled instruction(s) to their dedicated places, if not 9296 // there yet. 
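// Sketch of the effect (illustrative): if the picked entity is a bundle
// {%a, %b}, the loop below moves each member so that the bundle ends up
// contiguous in the block, immediately above the instructions scheduled so
// far, while the SchedulingPriority-based ready list above keeps the final
// order as close as possible to the original instruction order.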
9297 for (ScheduleData *BundleMember = picked; BundleMember; 9298 BundleMember = BundleMember->NextInBundle) { 9299 Instruction *pickedInst = BundleMember->Inst; 9300 if (pickedInst->getNextNode() != LastScheduledInst) 9301 pickedInst->moveBefore(LastScheduledInst); 9302 LastScheduledInst = pickedInst; 9303 } 9304 9305 BS->schedule(picked, ReadyInsts); 9306 } 9307 9308 // Check that we didn't break any of our invariants. 9309 #ifdef EXPENSIVE_CHECKS 9310 BS->verify(); 9311 #endif 9312 9313 #if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS) 9314 // Check that all schedulable entities got scheduled 9315 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; I = I->getNextNode()) { 9316 BS->doForAllOpcodes(I, [&](ScheduleData *SD) { 9317 if (SD->isSchedulingEntity() && SD->hasValidDependencies()) { 9318 assert(SD->IsScheduled && "must be scheduled at this point"); 9319 } 9320 }); 9321 } 9322 #endif 9323 9324 // Avoid duplicate scheduling of the block. 9325 BS->ScheduleStart = nullptr; 9326 } 9327 9328 unsigned BoUpSLP::getVectorElementSize(Value *V) { 9329 // If V is a store, just return the width of the stored value (or value 9330 // truncated just before storing) without traversing the expression tree. 9331 // This is the common case. 9332 if (auto *Store = dyn_cast<StoreInst>(V)) 9333 return DL->getTypeSizeInBits(Store->getValueOperand()->getType()); 9334 9335 if (auto *IEI = dyn_cast<InsertElementInst>(V)) 9336 return getVectorElementSize(IEI->getOperand(1)); 9337 9338 auto E = InstrElementSize.find(V); 9339 if (E != InstrElementSize.end()) 9340 return E->second; 9341 9342 // If V is not a store, we can traverse the expression tree to find loads 9343 // that feed it. The type of the loaded value may indicate a more suitable 9344 // width than V's type. We want to base the vector element size on the width 9345 // of memory operations where possible. 9346 SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist; 9347 SmallPtrSet<Instruction *, 16> Visited; 9348 if (auto *I = dyn_cast<Instruction>(V)) { 9349 Worklist.emplace_back(I, I->getParent()); 9350 Visited.insert(I); 9351 } 9352 9353 // Traverse the expression tree in bottom-up order looking for loads. If we 9354 // encounter an instruction we don't yet handle, we give up. 9355 auto Width = 0u; 9356 while (!Worklist.empty()) { 9357 Instruction *I; 9358 BasicBlock *Parent; 9359 std::tie(I, Parent) = Worklist.pop_back_val(); 9360 9361 // We should only be looking at scalar instructions here. If the current 9362 // instruction has a vector type, skip. 9363 auto *Ty = I->getType(); 9364 if (isa<VectorType>(Ty)) 9365 continue; 9366 9367 // If the current instruction is a load, update MaxWidth to reflect the 9368 // width of the loaded value. 9369 if (isa<LoadInst>(I) || isa<ExtractElementInst>(I) || 9370 isa<ExtractValueInst>(I)) 9371 Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty)); 9372 9373 // Otherwise, we need to visit the operands of the instruction. We only 9374 // handle the interesting cases from buildTree here. If an operand is an 9375 // instruction we haven't yet visited and from the same basic block as the 9376 // user or the use is a PHI node, we add it to the worklist. 
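// Illustrative example of the overall walk (hypothetical IR, same block):
//   %l = load i16, ptr %p
//   %e = sext i16 %l to i64
// Starting from %e, the traversal reaches the i16 load, so the element
// size is based on 16 bits rather than on the 64-bit type of %e.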
9377 else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 9378 isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I) || 9379 isa<UnaryOperator>(I)) { 9380 for (Use &U : I->operands()) 9381 if (auto *J = dyn_cast<Instruction>(U.get())) 9382 if (Visited.insert(J).second && 9383 (isa<PHINode>(I) || J->getParent() == Parent)) 9384 Worklist.emplace_back(J, J->getParent()); 9385 } else { 9386 break; 9387 } 9388 } 9389 9390 // If we didn't encounter a memory access in the expression tree, or if we 9391 // gave up for some reason, just return the width of V. Otherwise, return the 9392 // maximum width we found. 9393 if (!Width) { 9394 if (auto *CI = dyn_cast<CmpInst>(V)) 9395 V = CI->getOperand(0); 9396 Width = DL->getTypeSizeInBits(V->getType()); 9397 } 9398 9399 for (Instruction *I : Visited) 9400 InstrElementSize[I] = Width; 9401 9402 return Width; 9403 } 9404 9405 // Determine if a value V in a vectorizable expression Expr can be demoted to a 9406 // smaller type with a truncation. We collect the values that will be demoted 9407 // in ToDemote and additional roots that require investigating in Roots. 9408 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr, 9409 SmallVectorImpl<Value *> &ToDemote, 9410 SmallVectorImpl<Value *> &Roots) { 9411 // We can always demote constants. 9412 if (isa<Constant>(V)) { 9413 ToDemote.push_back(V); 9414 return true; 9415 } 9416 9417 // If the value is not an instruction in the expression with only one use, it 9418 // cannot be demoted. 9419 auto *I = dyn_cast<Instruction>(V); 9420 if (!I || !I->hasOneUse() || !Expr.count(I)) 9421 return false; 9422 9423 switch (I->getOpcode()) { 9424 9425 // We can always demote truncations and extensions. Since truncations can 9426 // seed additional demotion, we save the truncated value. 9427 case Instruction::Trunc: 9428 Roots.push_back(I->getOperand(0)); 9429 break; 9430 case Instruction::ZExt: 9431 case Instruction::SExt: 9432 if (isa<ExtractElementInst>(I->getOperand(0)) || 9433 isa<InsertElementInst>(I->getOperand(0))) 9434 return false; 9435 break; 9436 9437 // We can demote certain binary operations if we can demote both of their 9438 // operands. 9439 case Instruction::Add: 9440 case Instruction::Sub: 9441 case Instruction::Mul: 9442 case Instruction::And: 9443 case Instruction::Or: 9444 case Instruction::Xor: 9445 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) || 9446 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots)) 9447 return false; 9448 break; 9449 9450 // We can demote selects if we can demote their true and false values. 9451 case Instruction::Select: { 9452 SelectInst *SI = cast<SelectInst>(I); 9453 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) || 9454 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots)) 9455 return false; 9456 break; 9457 } 9458 9459 // We can demote phis if we can demote all their incoming operands. Note that 9460 // we don't need to worry about cycles since we ensure single use above. 9461 case Instruction::PHI: { 9462 PHINode *PN = cast<PHINode>(I); 9463 for (Value *IncValue : PN->incoming_values()) 9464 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots)) 9465 return false; 9466 break; 9467 } 9468 9469 // Otherwise, conservatively give up. 9470 default: 9471 return false; 9472 } 9473 9474 // Record the value that we can demote. 
9475 ToDemote.push_back(V); 9476 return true; 9477 } 9478 9479 void BoUpSLP::computeMinimumValueSizes() { 9480 // If there are no external uses, the expression tree must be rooted by a 9481 // store. We can't demote in-memory values, so there is nothing to do here. 9482 if (ExternalUses.empty()) 9483 return; 9484 9485 // We only attempt to truncate integer expressions. 9486 auto &TreeRoot = VectorizableTree[0]->Scalars; 9487 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 9488 if (!TreeRootIT) 9489 return; 9490 9491 // If the expression is not rooted by a store, these roots should have 9492 // external uses. We will rely on InstCombine to rewrite the expression in 9493 // the narrower type. However, InstCombine only rewrites single-use values. 9494 // This means that if a tree entry other than a root is used externally, it 9495 // must have multiple uses and InstCombine will not rewrite it. The code 9496 // below ensures that only the roots are used externally. 9497 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end()); 9498 for (auto &EU : ExternalUses) 9499 if (!Expr.erase(EU.Scalar)) 9500 return; 9501 if (!Expr.empty()) 9502 return; 9503 9504 // Collect the scalar values of the vectorizable expression. We will use this 9505 // context to determine which values can be demoted. If we see a truncation, 9506 // we mark it as seeding another demotion. 9507 for (auto &EntryPtr : VectorizableTree) 9508 Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end()); 9509 9510 // Ensure the roots of the vectorizable tree don't form a cycle. They must 9511 // have a single external user that is not in the vectorizable tree. 9512 for (auto *Root : TreeRoot) 9513 if (!Root->hasOneUse() || Expr.count(*Root->user_begin())) 9514 return; 9515 9516 // Conservatively determine if we can actually truncate the roots of the 9517 // expression. Collect the values that can be demoted in ToDemote and 9518 // additional roots that require investigating in Roots. 9519 SmallVector<Value *, 32> ToDemote; 9520 SmallVector<Value *, 4> Roots; 9521 for (auto *Root : TreeRoot) 9522 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots)) 9523 return; 9524 9525 // The maximum bit width required to represent all the values that can be 9526 // demoted without loss of precision. It would be safe to truncate the roots 9527 // of the expression to this width. 9528 auto MaxBitWidth = 8u; 9529 9530 // We first check if all the bits of the roots are demanded. If they're not, 9531 // we can truncate the roots to this narrower type. 9532 for (auto *Root : TreeRoot) { 9533 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 9534 MaxBitWidth = std::max<unsigned>( 9535 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth); 9536 } 9537 9538 // True if the roots can be zero-extended back to their original type, rather 9539 // than sign-extended. We know that if the leading bits are not demanded, we 9540 // can safely zero-extend. So we initialize IsKnownPositive to True. 9541 bool IsKnownPositive = true; 9542 9543 // If all the bits of the roots are demanded, we can try a little harder to 9544 // compute a narrower type. This can happen, for example, if the roots are 9545 // getelementptr indices. InstCombine promotes these indices to the pointer 9546 // width. Thus, all their bits are technically demanded even though the 9547 // address computation might be vectorized in a smaller type. 9548 // 9549 // We start by looking at each entry that can be demoted. 
// We compute the
9550 // maximum bit width required to store the scalar by using ValueTracking to
9551 // compute the number of high-order bits we can truncate.
9552 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
9553 llvm::all_of(TreeRoot, [](Value *R) {
9554 assert(R->hasOneUse() && "Root should have only one use!");
9555 return isa<GetElementPtrInst>(R->user_back());
9556 })) {
9557 MaxBitWidth = 8u;
9558
9559 // Determine if the sign bit of all the roots is known to be zero. If not,
9560 // IsKnownPositive is set to False.
9561 IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
9562 KnownBits Known = computeKnownBits(R, *DL);
9563 return Known.isNonNegative();
9564 });
9565
9566 // Determine the maximum number of bits required to store the scalar
9567 // values.
9568 for (auto *Scalar : ToDemote) {
9569 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
9570 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
9571 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
9572 }
9573
9574 // If we can't prove that the sign bit is zero, we must add one to the
9575 // maximum bit width to account for the unknown sign bit. This preserves
9576 // the existing sign bit so we can safely sign-extend the root back to the
9577 // original type. Otherwise, if we know the sign bit is zero, we will
9578 // zero-extend the root instead.
9579 //
9580 // FIXME: This is somewhat suboptimal, as there will be cases where adding
9581 // one to the maximum bit width will yield a larger-than-necessary
9582 // type. In general, we need to add an extra bit only if we can't
9583 // prove that the upper bit of the original type is equal to the
9584 // upper bit of the proposed smaller type. If these two bits are the
9585 // same (either zero or one) we know that sign-extending from the
9586 // smaller type will result in the same value. Here, since we can't
9587 // yet prove this, we are just making the proposed smaller type
9588 // larger to ensure correctness.
9589 if (!IsKnownPositive)
9590 ++MaxBitWidth;
9591 }
9592
9593 // Round MaxBitWidth up to the next power-of-two.
9594 if (!isPowerOf2_64(MaxBitWidth))
9595 MaxBitWidth = NextPowerOf2(MaxBitWidth);
9596
9597 // If the maximum bit width we compute is less than the width of the roots'
9598 // type, we can proceed with the narrowing. Otherwise, do nothing.
9599 if (MaxBitWidth >= TreeRootIT->getBitWidth())
9600 return;
9601
9602 // If we can truncate the root, we must collect additional values that might
9603 // be demoted as a result. That is, those seeded by truncations we will
9604 // modify.
9605 while (!Roots.empty())
9606 collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
9607
9608 // Finally, map the values we can demote to the maximum bit width we computed.
9609 for (auto *Scalar : ToDemote)
9610 MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
9611 }
9612
9613 namespace {
9614
9615 /// The SLPVectorizer Pass.
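/// This is the legacy pass-manager wrapper around SLPVectorizerPass; with
/// the new pass manager the same transform runs under the name
/// "slp-vectorizer", e.g. (illustrative invocation):
///   opt -passes=slp-vectorizer -S input.ll
/// Both paths end up in SLPVectorizerPass::runImpl below.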
9616 struct SLPVectorizer : public FunctionPass { 9617 SLPVectorizerPass Impl; 9618 9619 /// Pass identification, replacement for typeid 9620 static char ID; 9621 9622 explicit SLPVectorizer() : FunctionPass(ID) { 9623 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry()); 9624 } 9625 9626 bool doInitialization(Module &M) override { return false; } 9627 9628 bool runOnFunction(Function &F) override { 9629 if (skipFunction(F)) 9630 return false; 9631 9632 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 9633 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 9634 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 9635 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 9636 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 9637 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 9638 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 9639 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 9640 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 9641 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 9642 9643 return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 9644 } 9645 9646 void getAnalysisUsage(AnalysisUsage &AU) const override { 9647 FunctionPass::getAnalysisUsage(AU); 9648 AU.addRequired<AssumptionCacheTracker>(); 9649 AU.addRequired<ScalarEvolutionWrapperPass>(); 9650 AU.addRequired<AAResultsWrapperPass>(); 9651 AU.addRequired<TargetTransformInfoWrapperPass>(); 9652 AU.addRequired<LoopInfoWrapperPass>(); 9653 AU.addRequired<DominatorTreeWrapperPass>(); 9654 AU.addRequired<DemandedBitsWrapperPass>(); 9655 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 9656 AU.addRequired<InjectTLIMappingsLegacy>(); 9657 AU.addPreserved<LoopInfoWrapperPass>(); 9658 AU.addPreserved<DominatorTreeWrapperPass>(); 9659 AU.addPreserved<AAResultsWrapperPass>(); 9660 AU.addPreserved<GlobalsAAWrapperPass>(); 9661 AU.setPreservesCFG(); 9662 } 9663 }; 9664 9665 } // end anonymous namespace 9666 9667 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) { 9668 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); 9669 auto *TTI = &AM.getResult<TargetIRAnalysis>(F); 9670 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); 9671 auto *AA = &AM.getResult<AAManager>(F); 9672 auto *LI = &AM.getResult<LoopAnalysis>(F); 9673 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 9674 auto *AC = &AM.getResult<AssumptionAnalysis>(F); 9675 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F); 9676 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 9677 9678 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 9679 if (!Changed) 9680 return PreservedAnalyses::all(); 9681 9682 PreservedAnalyses PA; 9683 PA.preserveSet<CFGAnalyses>(); 9684 return PA; 9685 } 9686 9687 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, 9688 TargetTransformInfo *TTI_, 9689 TargetLibraryInfo *TLI_, AAResults *AA_, 9690 LoopInfo *LI_, DominatorTree *DT_, 9691 AssumptionCache *AC_, DemandedBits *DB_, 9692 OptimizationRemarkEmitter *ORE_) { 9693 if (!RunSLPVectorization) 9694 return false; 9695 SE = SE_; 9696 TTI = TTI_; 9697 TLI = TLI_; 9698 AA = AA_; 9699 LI = LI_; 9700 DT = DT_; 9701 AC = AC_; 9702 DB = DB_; 9703 DL = &F.getParent()->getDataLayout(); 9704 9705 Stores.clear(); 9706 GEPs.clear(); 9707 bool Changed = false; 9708 9709 // If the target claims to have no vector registers don't attempt 9710 // 
vectorization. 9711 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) { 9712 LLVM_DEBUG( 9713 dbgs() << "SLP: Didn't find any vector registers for target, abort.\n"); 9714 return false; 9715 } 9716 9717 // Don't vectorize when the attribute NoImplicitFloat is used. 9718 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 9719 return false; 9720 9721 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 9722 9723 // Use the bottom up slp vectorizer to construct chains that start with 9724 // store instructions. 9725 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_); 9726 9727 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 9728 // delete instructions. 9729 9730 // Update DFS numbers now so that we can use them for ordering. 9731 DT->updateDFSNumbers(); 9732 9733 // Scan the blocks in the function in post order. 9734 for (auto BB : post_order(&F.getEntryBlock())) { 9735 // Start new block - clear the list of reduction roots. 9736 R.clearReductionData(); 9737 collectSeedInstructions(BB); 9738 9739 // Vectorize trees that end at stores. 9740 if (!Stores.empty()) { 9741 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() 9742 << " underlying objects.\n"); 9743 Changed |= vectorizeStoreChains(R); 9744 } 9745 9746 // Vectorize trees that end at reductions. 9747 Changed |= vectorizeChainsInBlock(BB, R); 9748 9749 // Vectorize the index computations of getelementptr instructions. This 9750 // is primarily intended to catch gather-like idioms ending at 9751 // non-consecutive loads. 9752 if (!GEPs.empty()) { 9753 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() 9754 << " underlying objects.\n"); 9755 Changed |= vectorizeGEPIndices(BB, R); 9756 } 9757 } 9758 9759 if (Changed) { 9760 R.optimizeGatherSequence(); 9761 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 9762 } 9763 return Changed; 9764 } 9765 9766 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, 9767 unsigned Idx, unsigned MinVF) { 9768 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size() 9769 << "\n"); 9770 const unsigned Sz = R.getVectorElementSize(Chain[0]); 9771 unsigned VF = Chain.size(); 9772 9773 if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF) 9774 return false; 9775 9776 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx 9777 << "\n"); 9778 9779 R.buildTree(Chain); 9780 if (R.isTreeTinyAndNotFullyVectorizable()) 9781 return false; 9782 if (R.isLoadCombineCandidate()) 9783 return false; 9784 R.reorderTopToBottom(); 9785 R.reorderBottomToTop(); 9786 R.buildExternalUses(); 9787 9788 R.computeMinimumValueSizes(); 9789 9790 InstructionCost Cost = R.getTreeCost(); 9791 9792 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF =" << VF << "\n"); 9793 if (Cost < -SLPCostThreshold) { 9794 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n"); 9795 9796 using namespace ore; 9797 9798 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized", 9799 cast<StoreInst>(Chain[0])) 9800 << "Stores SLP vectorized with cost " << NV("Cost", Cost) 9801 << " and with tree size " 9802 << NV("TreeSize", R.getTreeSize())); 9803 9804 R.vectorizeTree(); 9805 return true; 9806 } 9807 9808 return false; 9809 } 9810 9811 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores, 9812 BoUpSLP &R) { 9813 // We may run into multiple chains that merge into a single chain. 
We mark the 9814 // stores that we vectorized so that we don't visit the same store twice. 9815 BoUpSLP::ValueSet VectorizedStores; 9816 bool Changed = false; 9817 9818 int E = Stores.size(); 9819 SmallBitVector Tails(E, false); 9820 int MaxIter = MaxStoreLookup.getValue(); 9821 SmallVector<std::pair<int, int>, 16> ConsecutiveChain( 9822 E, std::make_pair(E, INT_MAX)); 9823 SmallVector<SmallBitVector, 4> CheckedPairs(E, SmallBitVector(E, false)); 9824 int IterCnt; 9825 auto &&FindConsecutiveAccess = [this, &Stores, &Tails, &IterCnt, MaxIter, 9826 &CheckedPairs, 9827 &ConsecutiveChain](int K, int Idx) { 9828 if (IterCnt >= MaxIter) 9829 return true; 9830 if (CheckedPairs[Idx].test(K)) 9831 return ConsecutiveChain[K].second == 1 && 9832 ConsecutiveChain[K].first == Idx; 9833 ++IterCnt; 9834 CheckedPairs[Idx].set(K); 9835 CheckedPairs[K].set(Idx); 9836 Optional<int> Diff = getPointersDiff( 9837 Stores[K]->getValueOperand()->getType(), Stores[K]->getPointerOperand(), 9838 Stores[Idx]->getValueOperand()->getType(), 9839 Stores[Idx]->getPointerOperand(), *DL, *SE, /*StrictCheck=*/true); 9840 if (!Diff || *Diff == 0) 9841 return false; 9842 int Val = *Diff; 9843 if (Val < 0) { 9844 if (ConsecutiveChain[Idx].second > -Val) { 9845 Tails.set(K); 9846 ConsecutiveChain[Idx] = std::make_pair(K, -Val); 9847 } 9848 return false; 9849 } 9850 if (ConsecutiveChain[K].second <= Val) 9851 return false; 9852 9853 Tails.set(Idx); 9854 ConsecutiveChain[K] = std::make_pair(Idx, Val); 9855 return Val == 1; 9856 }; 9857 // Do a quadratic search on all of the given stores in reverse order and find 9858 // all of the pairs of stores that follow each other. 9859 for (int Idx = E - 1; Idx >= 0; --Idx) { 9860 // If a store has multiple consecutive store candidates, search according 9861 // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ... 9862 // This is because usually pairing with immediate succeeding or preceding 9863 // candidate create the best chance to find slp vectorization opportunity. 9864 const int MaxLookDepth = std::max(E - Idx, Idx + 1); 9865 IterCnt = 0; 9866 for (int Offset = 1, F = MaxLookDepth; Offset < F; ++Offset) 9867 if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) || 9868 (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx))) 9869 break; 9870 } 9871 9872 // Tracks if we tried to vectorize stores starting from the given tail 9873 // already. 9874 SmallBitVector TriedTails(E, false); 9875 // For stores that start but don't end a link in the chain: 9876 for (int Cnt = E; Cnt > 0; --Cnt) { 9877 int I = Cnt - 1; 9878 if (ConsecutiveChain[I].first == E || Tails.test(I)) 9879 continue; 9880 // We found a store instr that starts a chain. Now follow the chain and try 9881 // to vectorize it. 9882 BoUpSLP::ValueList Operands; 9883 // Collect the chain into a list. 9884 while (I != E && !VectorizedStores.count(Stores[I])) { 9885 Operands.push_back(Stores[I]); 9886 Tails.set(I); 9887 if (ConsecutiveChain[I].second != 1) { 9888 // Mark the new end in the chain and go back, if required. It might be 9889 // required if the original stores come in reversed order, for example. 9890 if (ConsecutiveChain[I].first != E && 9891 Tails.test(ConsecutiveChain[I].first) && !TriedTails.test(I) && 9892 !VectorizedStores.count(Stores[ConsecutiveChain[I].first])) { 9893 TriedTails.set(I); 9894 Tails.reset(ConsecutiveChain[I].first); 9895 if (Cnt < ConsecutiveChain[I].first + 2) 9896 Cnt = ConsecutiveChain[I].first + 2; 9897 } 9898 break; 9899 } 9900 // Move to the next value in the chain. 
9901 I = ConsecutiveChain[I].first; 9902 } 9903 assert(!Operands.empty() && "Expected non-empty list of stores."); 9904 9905 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 9906 unsigned EltSize = R.getVectorElementSize(Operands[0]); 9907 unsigned MaxElts = llvm::PowerOf2Floor(MaxVecRegSize / EltSize); 9908 9909 unsigned MaxVF = std::min(R.getMaximumVF(EltSize, Instruction::Store), 9910 MaxElts); 9911 auto *Store = cast<StoreInst>(Operands[0]); 9912 Type *StoreTy = Store->getValueOperand()->getType(); 9913 Type *ValueTy = StoreTy; 9914 if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand())) 9915 ValueTy = Trunc->getSrcTy(); 9916 unsigned MinVF = TTI->getStoreMinimumVF( 9917 R.getMinVF(DL->getTypeSizeInBits(ValueTy)), StoreTy, ValueTy); 9918 9919 // FIXME: Is division-by-2 the correct step? Should we assert that the 9920 // register size is a power-of-2? 9921 unsigned StartIdx = 0; 9922 for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) { 9923 for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) { 9924 ArrayRef<Value *> Slice = makeArrayRef(Operands).slice(Cnt, Size); 9925 if (!VectorizedStores.count(Slice.front()) && 9926 !VectorizedStores.count(Slice.back()) && 9927 vectorizeStoreChain(Slice, R, Cnt, MinVF)) { 9928 // Mark the vectorized stores so that we don't vectorize them again. 9929 VectorizedStores.insert(Slice.begin(), Slice.end()); 9930 Changed = true; 9931 // If we vectorized initial block, no need to try to vectorize it 9932 // again. 9933 if (Cnt == StartIdx) 9934 StartIdx += Size; 9935 Cnt += Size; 9936 continue; 9937 } 9938 ++Cnt; 9939 } 9940 // Check if the whole array was vectorized already - exit. 9941 if (StartIdx >= Operands.size()) 9942 break; 9943 } 9944 } 9945 9946 return Changed; 9947 } 9948 9949 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) { 9950 // Initialize the collections. We will make a single pass over the block. 9951 Stores.clear(); 9952 GEPs.clear(); 9953 9954 // Visit the store and getelementptr instructions in BB and organize them in 9955 // Stores and GEPs according to the underlying objects of their pointer 9956 // operands. 9957 for (Instruction &I : *BB) { 9958 // Ignore store instructions that are volatile or have a pointer operand 9959 // that doesn't point to a scalar type. 9960 if (auto *SI = dyn_cast<StoreInst>(&I)) { 9961 if (!SI->isSimple()) 9962 continue; 9963 if (!isValidElementType(SI->getValueOperand()->getType())) 9964 continue; 9965 Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI); 9966 } 9967 9968 // Ignore getelementptr instructions that have more than one index, a 9969 // constant index, or a pointer operand that doesn't point to a scalar 9970 // type. 
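// For example (illustrative), a pair of GEPs such as
//   %g0 = getelementptr inbounds i32, ptr %base, i64 %i
//   %g1 = getelementptr inbounds i32, ptr %base, i64 %j
// is collected under the key %base, so that the index computations %i and
// %j can later be considered together by vectorizeGEPIndices().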
9971 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 9972 auto Idx = GEP->idx_begin()->get(); 9973 if (GEP->getNumIndices() > 1 || isa<Constant>(Idx)) 9974 continue; 9975 if (!isValidElementType(Idx->getType())) 9976 continue; 9977 if (GEP->getType()->isVectorTy()) 9978 continue; 9979 GEPs[GEP->getPointerOperand()].push_back(GEP); 9980 } 9981 } 9982 } 9983 9984 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) { 9985 if (!A || !B) 9986 return false; 9987 if (isa<InsertElementInst>(A) || isa<InsertElementInst>(B)) 9988 return false; 9989 Value *VL[] = {A, B}; 9990 return tryToVectorizeList(VL, R); 9991 } 9992 9993 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 9994 bool LimitForRegisterSize) { 9995 if (VL.size() < 2) 9996 return false; 9997 9998 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " 9999 << VL.size() << ".\n"); 10000 10001 // Check that all of the parts are instructions of the same type, 10002 // we permit an alternate opcode via InstructionsState. 10003 InstructionsState S = getSameOpcode(VL); 10004 if (!S.getOpcode()) 10005 return false; 10006 10007 Instruction *I0 = cast<Instruction>(S.OpValue); 10008 // Make sure invalid types (including vector type) are rejected before 10009 // determining vectorization factor for scalar instructions. 10010 for (Value *V : VL) { 10011 Type *Ty = V->getType(); 10012 if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) { 10013 // NOTE: the following will give user internal llvm type name, which may 10014 // not be useful. 10015 R.getORE()->emit([&]() { 10016 std::string type_str; 10017 llvm::raw_string_ostream rso(type_str); 10018 Ty->print(rso); 10019 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0) 10020 << "Cannot SLP vectorize list: type " 10021 << rso.str() + " is unsupported by vectorizer"; 10022 }); 10023 return false; 10024 } 10025 } 10026 10027 unsigned Sz = R.getVectorElementSize(I0); 10028 unsigned MinVF = R.getMinVF(Sz); 10029 unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF); 10030 MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF); 10031 if (MaxVF < 2) { 10032 R.getORE()->emit([&]() { 10033 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0) 10034 << "Cannot SLP vectorize list: vectorization factor " 10035 << "less than 2 is not supported"; 10036 }); 10037 return false; 10038 } 10039 10040 bool Changed = false; 10041 bool CandidateFound = false; 10042 InstructionCost MinCost = SLPCostThreshold.getValue(); 10043 Type *ScalarTy = VL[0]->getType(); 10044 if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 10045 ScalarTy = IE->getOperand(1)->getType(); 10046 10047 unsigned NextInst = 0, MaxInst = VL.size(); 10048 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) { 10049 // No actual vectorization should happen, if number of parts is the same as 10050 // provided vectorization factor (i.e. the scalar type is used for vector 10051 // code during codegen). 
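// For instance (hypothetical target), if <4 x i32> would be legalized into
// 4 scalar pieces, TTI->getNumberOfParts() returns 4 == VF and this VF is
// skipped, because the "vector" code would really just be scalar code.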
10052 auto *VecTy = FixedVectorType::get(ScalarTy, VF); 10053 if (TTI->getNumberOfParts(VecTy) == VF) 10054 continue; 10055 for (unsigned I = NextInst; I < MaxInst; ++I) { 10056 unsigned OpsWidth = 0; 10057 10058 if (I + VF > MaxInst) 10059 OpsWidth = MaxInst - I; 10060 else 10061 OpsWidth = VF; 10062 10063 if (!isPowerOf2_32(OpsWidth)) 10064 continue; 10065 10066 if ((LimitForRegisterSize && OpsWidth < MaxVF) || 10067 (VF > MinVF && OpsWidth <= VF / 2) || (VF == MinVF && OpsWidth < 2)) 10068 break; 10069 10070 ArrayRef<Value *> Ops = VL.slice(I, OpsWidth); 10071 // Check that a previous iteration of this loop did not delete the Value. 10072 if (llvm::any_of(Ops, [&R](Value *V) { 10073 auto *I = dyn_cast<Instruction>(V); 10074 return I && R.isDeleted(I); 10075 })) 10076 continue; 10077 10078 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations " 10079 << "\n"); 10080 10081 R.buildTree(Ops); 10082 if (R.isTreeTinyAndNotFullyVectorizable()) 10083 continue; 10084 R.reorderTopToBottom(); 10085 R.reorderBottomToTop(!isa<InsertElementInst>(Ops.front())); 10086 R.buildExternalUses(); 10087 10088 R.computeMinimumValueSizes(); 10089 InstructionCost Cost = R.getTreeCost(); 10090 CandidateFound = true; 10091 MinCost = std::min(MinCost, Cost); 10092 10093 if (Cost < -SLPCostThreshold) { 10094 LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); 10095 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList", 10096 cast<Instruction>(Ops[0])) 10097 << "SLP vectorized with cost " << ore::NV("Cost", Cost) 10098 << " and with tree size " 10099 << ore::NV("TreeSize", R.getTreeSize())); 10100 10101 R.vectorizeTree(); 10102 // Move to the next bundle. 10103 I += VF - 1; 10104 NextInst = I + 1; 10105 Changed = true; 10106 } 10107 } 10108 } 10109 10110 if (!Changed && CandidateFound) { 10111 R.getORE()->emit([&]() { 10112 return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0) 10113 << "List vectorization was possible but not beneficial with cost " 10114 << ore::NV("Cost", MinCost) << " >= " 10115 << ore::NV("Treshold", -SLPCostThreshold); 10116 }); 10117 } else if (!Changed) { 10118 R.getORE()->emit([&]() { 10119 return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0) 10120 << "Cannot SLP vectorize list: vectorization was impossible" 10121 << " with available vectorization factors"; 10122 }); 10123 } 10124 return Changed; 10125 } 10126 10127 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) { 10128 if (!I) 10129 return false; 10130 10131 if ((!isa<BinaryOperator>(I) && !isa<CmpInst>(I)) || 10132 isa<VectorType>(I->getType())) 10133 return false; 10134 10135 Value *P = I->getParent(); 10136 10137 // Vectorize in current basic block only. 10138 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); 10139 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); 10140 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P) 10141 return false; 10142 10143 // First collect all possible candidates 10144 SmallVector<std::pair<Value *, Value *>, 4> Candidates; 10145 Candidates.emplace_back(Op0, Op1); 10146 10147 auto *A = dyn_cast<BinaryOperator>(Op0); 10148 auto *B = dyn_cast<BinaryOperator>(Op1); 10149 // Try to skip B. 
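// Illustration: for I = X + Y where X = p * q and Y = r * s, besides the
// pair {X, Y} we also try {X, r} and {X, s} here (when r and s are binary
// operators in the same block), and symmetrically {p, Y} and {q, Y} in the
// "skip A" block below; findBestRootPair then picks the most promising
// candidate.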
10150 if (A && B && B->hasOneUse()) { 10151 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); 10152 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); 10153 if (B0 && B0->getParent() == P) 10154 Candidates.emplace_back(A, B0); 10155 if (B1 && B1->getParent() == P) 10156 Candidates.emplace_back(A, B1); 10157 } 10158 // Try to skip A. 10159 if (B && A && A->hasOneUse()) { 10160 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 10161 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 10162 if (A0 && A0->getParent() == P) 10163 Candidates.emplace_back(A0, B); 10164 if (A1 && A1->getParent() == P) 10165 Candidates.emplace_back(A1, B); 10166 } 10167 10168 if (Candidates.size() == 1) 10169 return tryToVectorizePair(Op0, Op1, R); 10170 10171 // We have multiple options. Try to pick the single best. 10172 Optional<int> BestCandidate = R.findBestRootPair(Candidates); 10173 if (!BestCandidate) 10174 return false; 10175 return tryToVectorizePair(Candidates[*BestCandidate].first, 10176 Candidates[*BestCandidate].second, R); 10177 } 10178 10179 namespace { 10180 10181 /// Model horizontal reductions. 10182 /// 10183 /// A horizontal reduction is a tree of reduction instructions that has values 10184 /// that can be put into a vector as its leaves. For example: 10185 /// 10186 /// mul mul mul mul 10187 /// \ / \ / 10188 /// + + 10189 /// \ / 10190 /// + 10191 /// This tree has "mul" as its leaf values and "+" as its reduction 10192 /// instructions. A reduction can feed into a store or a binary operation 10193 /// feeding a phi. 10194 /// ... 10195 /// \ / 10196 /// + 10197 /// | 10198 /// phi += 10199 /// 10200 /// Or: 10201 /// ... 10202 /// \ / 10203 /// + 10204 /// | 10205 /// *p = 10206 /// 10207 class HorizontalReduction { 10208 using ReductionOpsType = SmallVector<Value *, 16>; 10209 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>; 10210 ReductionOpsListType ReductionOps; 10211 /// List of possibly reduced values. 10212 SmallVector<SmallVector<Value *>> ReducedVals; 10213 /// Maps reduced value to the corresponding reduction operation. 10214 DenseMap<Value *, SmallVector<Instruction *>> ReducedValsToOps; 10215 // Use map vector to make stable output. 10216 MapVector<Instruction *, Value *> ExtraArgs; 10217 WeakTrackingVH ReductionRoot; 10218 /// The type of reduction operation. 10219 RecurKind RdxKind; 10220 10221 static bool isCmpSelMinMax(Instruction *I) { 10222 return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) && 10223 RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I)); 10224 } 10225 10226 // And/or are potentially poison-safe logical patterns like: 10227 // select x, y, false 10228 // select x, true, y 10229 static bool isBoolLogicOp(Instruction *I) { 10230 return match(I, m_LogicalAnd(m_Value(), m_Value())) || 10231 match(I, m_LogicalOr(m_Value(), m_Value())); 10232 } 10233 10234 /// Checks if instruction is associative and can be vectorized. 10235 static bool isVectorizable(RecurKind Kind, Instruction *I) { 10236 if (Kind == RecurKind::None) 10237 return false; 10238 10239 // Integer ops that map to select instructions or intrinsics are fine. 10240 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) || 10241 isBoolLogicOp(I)) 10242 return true; 10243 10244 if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) { 10245 // FP min/max are associative except for NaN and -0.0. We do not 10246 // have to rule out -0.0 here because the intrinsic semantics do not 10247 // specify a fixed result for it. 
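// E.g. a chain of 'fcmp ogt' + 'select' (or llvm.maxnum) operations is only
// treated as an FMax reduction when the instructions carry the 'nnan'
// fast-math flag (illustrative; see the check below).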
10248 return I->getFastMathFlags().noNaNs(); 10249 } 10250 10251 return I->isAssociative(); 10252 } 10253 10254 static Value *getRdxOperand(Instruction *I, unsigned Index) { 10255 // Poison-safe 'or' takes the form: select X, true, Y 10256 // To make that work with the normal operand processing, we skip the 10257 // true value operand. 10258 // TODO: Change the code and data structures to handle this without a hack. 10259 if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1) 10260 return I->getOperand(2); 10261 return I->getOperand(Index); 10262 } 10263 10264 /// Creates reduction operation with the current opcode. 10265 static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS, 10266 Value *RHS, const Twine &Name, bool UseSelect) { 10267 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind); 10268 switch (Kind) { 10269 case RecurKind::Or: 10270 if (UseSelect && 10271 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 10272 return Builder.CreateSelect(LHS, Builder.getTrue(), RHS, Name); 10273 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 10274 Name); 10275 case RecurKind::And: 10276 if (UseSelect && 10277 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 10278 return Builder.CreateSelect(LHS, RHS, Builder.getFalse(), Name); 10279 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 10280 Name); 10281 case RecurKind::Add: 10282 case RecurKind::Mul: 10283 case RecurKind::Xor: 10284 case RecurKind::FAdd: 10285 case RecurKind::FMul: 10286 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 10287 Name); 10288 case RecurKind::FMax: 10289 return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS); 10290 case RecurKind::FMin: 10291 return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS); 10292 case RecurKind::SMax: 10293 if (UseSelect) { 10294 Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name); 10295 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 10296 } 10297 return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS); 10298 case RecurKind::SMin: 10299 if (UseSelect) { 10300 Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name); 10301 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 10302 } 10303 return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS); 10304 case RecurKind::UMax: 10305 if (UseSelect) { 10306 Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name); 10307 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 10308 } 10309 return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS); 10310 case RecurKind::UMin: 10311 if (UseSelect) { 10312 Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name); 10313 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 10314 } 10315 return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS); 10316 default: 10317 llvm_unreachable("Unknown reduction operation."); 10318 } 10319 } 10320 10321 /// Creates reduction operation with the current opcode with the IR flags 10322 /// from \p ReductionOps. 10323 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 10324 Value *RHS, const Twine &Name, 10325 const ReductionOpsListType &ReductionOps) { 10326 bool UseSelect = ReductionOps.size() == 2 || 10327 // Logical or/and. 
10328 (ReductionOps.size() == 1 && 10329 isa<SelectInst>(ReductionOps.front().front())); 10330 assert((!UseSelect || ReductionOps.size() != 2 || 10331 isa<SelectInst>(ReductionOps[1][0])) && 10332 "Expected cmp + select pairs for reduction"); 10333 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect); 10334 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 10335 if (auto *Sel = dyn_cast<SelectInst>(Op)) { 10336 propagateIRFlags(Sel->getCondition(), ReductionOps[0]); 10337 propagateIRFlags(Op, ReductionOps[1]); 10338 return Op; 10339 } 10340 } 10341 propagateIRFlags(Op, ReductionOps[0]); 10342 return Op; 10343 } 10344 10345 /// Creates reduction operation with the current opcode with the IR flags 10346 /// from \p I. 10347 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 10348 Value *RHS, const Twine &Name, Value *I) { 10349 auto *SelI = dyn_cast<SelectInst>(I); 10350 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, SelI != nullptr); 10351 if (SelI && RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 10352 if (auto *Sel = dyn_cast<SelectInst>(Op)) 10353 propagateIRFlags(Sel->getCondition(), SelI->getCondition()); 10354 } 10355 propagateIRFlags(Op, I); 10356 return Op; 10357 } 10358 10359 static RecurKind getRdxKind(Value *V) { 10360 auto *I = dyn_cast<Instruction>(V); 10361 if (!I) 10362 return RecurKind::None; 10363 if (match(I, m_Add(m_Value(), m_Value()))) 10364 return RecurKind::Add; 10365 if (match(I, m_Mul(m_Value(), m_Value()))) 10366 return RecurKind::Mul; 10367 if (match(I, m_And(m_Value(), m_Value())) || 10368 match(I, m_LogicalAnd(m_Value(), m_Value()))) 10369 return RecurKind::And; 10370 if (match(I, m_Or(m_Value(), m_Value())) || 10371 match(I, m_LogicalOr(m_Value(), m_Value()))) 10372 return RecurKind::Or; 10373 if (match(I, m_Xor(m_Value(), m_Value()))) 10374 return RecurKind::Xor; 10375 if (match(I, m_FAdd(m_Value(), m_Value()))) 10376 return RecurKind::FAdd; 10377 if (match(I, m_FMul(m_Value(), m_Value()))) 10378 return RecurKind::FMul; 10379 10380 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value()))) 10381 return RecurKind::FMax; 10382 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value()))) 10383 return RecurKind::FMin; 10384 10385 // This matches either cmp+select or intrinsics. SLP is expected to handle 10386 // either form. 10387 // TODO: If we are canonicalizing to intrinsics, we can remove several 10388 // special-case paths that deal with selects. 10389 if (match(I, m_SMax(m_Value(), m_Value()))) 10390 return RecurKind::SMax; 10391 if (match(I, m_SMin(m_Value(), m_Value()))) 10392 return RecurKind::SMin; 10393 if (match(I, m_UMax(m_Value(), m_Value()))) 10394 return RecurKind::UMax; 10395 if (match(I, m_UMin(m_Value(), m_Value()))) 10396 return RecurKind::UMin; 10397 10398 if (auto *Select = dyn_cast<SelectInst>(I)) { 10399 // Try harder: look for min/max pattern based on instructions producing 10400 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2). 
10401 // During the intermediate stages of SLP, it's very common to have 10402 // pattern like this (since optimizeGatherSequence is run only once 10403 // at the end): 10404 // %1 = extractelement <2 x i32> %a, i32 0 10405 // %2 = extractelement <2 x i32> %a, i32 1 10406 // %cond = icmp sgt i32 %1, %2 10407 // %3 = extractelement <2 x i32> %a, i32 0 10408 // %4 = extractelement <2 x i32> %a, i32 1 10409 // %select = select i1 %cond, i32 %3, i32 %4 10410 CmpInst::Predicate Pred; 10411 Instruction *L1; 10412 Instruction *L2; 10413 10414 Value *LHS = Select->getTrueValue(); 10415 Value *RHS = Select->getFalseValue(); 10416 Value *Cond = Select->getCondition(); 10417 10418 // TODO: Support inverse predicates. 10419 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) { 10420 if (!isa<ExtractElementInst>(RHS) || 10421 !L2->isIdenticalTo(cast<Instruction>(RHS))) 10422 return RecurKind::None; 10423 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) { 10424 if (!isa<ExtractElementInst>(LHS) || 10425 !L1->isIdenticalTo(cast<Instruction>(LHS))) 10426 return RecurKind::None; 10427 } else { 10428 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS)) 10429 return RecurKind::None; 10430 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) || 10431 !L1->isIdenticalTo(cast<Instruction>(LHS)) || 10432 !L2->isIdenticalTo(cast<Instruction>(RHS))) 10433 return RecurKind::None; 10434 } 10435 10436 switch (Pred) { 10437 default: 10438 return RecurKind::None; 10439 case CmpInst::ICMP_SGT: 10440 case CmpInst::ICMP_SGE: 10441 return RecurKind::SMax; 10442 case CmpInst::ICMP_SLT: 10443 case CmpInst::ICMP_SLE: 10444 return RecurKind::SMin; 10445 case CmpInst::ICMP_UGT: 10446 case CmpInst::ICMP_UGE: 10447 return RecurKind::UMax; 10448 case CmpInst::ICMP_ULT: 10449 case CmpInst::ICMP_ULE: 10450 return RecurKind::UMin; 10451 } 10452 } 10453 return RecurKind::None; 10454 } 10455 10456 /// Get the index of the first operand. 10457 static unsigned getFirstOperandIndex(Instruction *I) { 10458 return isCmpSelMinMax(I) ? 1 : 0; 10459 } 10460 10461 /// Total number of operands in the reduction operation. 10462 static unsigned getNumberOfOperands(Instruction *I) { 10463 return isCmpSelMinMax(I) ? 3 : 2; 10464 } 10465 10466 /// Checks if the instruction is in basic block \p BB. 10467 /// For a cmp+sel min/max reduction check that both ops are in \p BB. 10468 static bool hasSameParent(Instruction *I, BasicBlock *BB) { 10469 if (isCmpSelMinMax(I) || (isBoolLogicOp(I) && isa<SelectInst>(I))) { 10470 auto *Sel = cast<SelectInst>(I); 10471 auto *Cmp = dyn_cast<Instruction>(Sel->getCondition()); 10472 return Sel->getParent() == BB && Cmp && Cmp->getParent() == BB; 10473 } 10474 return I->getParent() == BB; 10475 } 10476 10477 /// Expected number of uses for reduction operations/reduced values. 10478 static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) { 10479 if (IsCmpSelMinMax) { 10480 // SelectInst must be used twice while the condition op must have single 10481 // use only. 10482 if (auto *Sel = dyn_cast<SelectInst>(I)) 10483 return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse(); 10484 return I->hasNUses(2); 10485 } 10486 10487 // Arithmetic reduction operation must be used once only. 10488 return I->hasOneUse(); 10489 } 10490 10491 /// Initializes the list of reduction operations. 
10492 void initReductionOps(Instruction *I) {
10493 if (isCmpSelMinMax(I))
10494 ReductionOps.assign(2, ReductionOpsType());
10495 else
10496 ReductionOps.assign(1, ReductionOpsType());
10497 }
10498
10499 /// Add all reduction operations for the reduction instruction \p I.
10500 void addReductionOps(Instruction *I) {
10501 if (isCmpSelMinMax(I)) {
10502 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition());
10503 ReductionOps[1].emplace_back(I);
10504 } else {
10505 ReductionOps[0].emplace_back(I);
10506 }
10507 }
10508
10509 static Value *getLHS(RecurKind Kind, Instruction *I) {
10510 if (Kind == RecurKind::None)
10511 return nullptr;
10512 return I->getOperand(getFirstOperandIndex(I));
10513 }
10514 static Value *getRHS(RecurKind Kind, Instruction *I) {
10515 if (Kind == RecurKind::None)
10516 return nullptr;
10517 return I->getOperand(getFirstOperandIndex(I) + 1);
10518 }
10519
10520 public:
10521 HorizontalReduction() = default;
10522
10523 /// Try to find a reduction tree.
10524 bool matchAssociativeReduction(PHINode *Phi, Instruction *Inst,
10525 ScalarEvolution &SE, const DataLayout &DL,
10526 const TargetLibraryInfo &TLI) {
10527 assert((!Phi || is_contained(Phi->operands(), Inst)) &&
10528 "Phi needs to use the binary operator");
10529 assert((isa<BinaryOperator>(Inst) || isa<SelectInst>(Inst) ||
10530 isa<IntrinsicInst>(Inst)) &&
10531 "Expected binop, select, or intrinsic for reduction matching");
10532 RdxKind = getRdxKind(Inst);
10533
10534 // We could have an initial reduction that is not an add.
10535 // r *= v1 + v2 + v3 + v4
10536 // In such a case start looking for a tree rooted in the first '+'.
10537 if (Phi) {
10538 if (getLHS(RdxKind, Inst) == Phi) {
10539 Phi = nullptr;
10540 Inst = dyn_cast<Instruction>(getRHS(RdxKind, Inst));
10541 if (!Inst)
10542 return false;
10543 RdxKind = getRdxKind(Inst);
10544 } else if (getRHS(RdxKind, Inst) == Phi) {
10545 Phi = nullptr;
10546 Inst = dyn_cast<Instruction>(getLHS(RdxKind, Inst));
10547 if (!Inst)
10548 return false;
10549 RdxKind = getRdxKind(Inst);
10550 }
10551 }
10552
10553 if (!isVectorizable(RdxKind, Inst))
10554 return false;
10555
10556 // Analyze "regular" integer/FP types for reductions - no target-specific
10557 // types or pointers.
10558 Type *Ty = Inst->getType();
10559 if (!isValidElementType(Ty) || Ty->isPointerTy())
10560 return false;
10561
10562 // Though the ultimate reduction may have multiple uses, its condition must
10563 // have only a single use.
10564 if (auto *Sel = dyn_cast<SelectInst>(Inst))
10565 if (!Sel->getCondition()->hasOneUse())
10566 return false;
10567
10568 ReductionRoot = Inst;
10569
10570 // Iterate through all the operands of the possible reduction tree and
10571 // gather all the reduced values, sorting them by their value id.
10572 BasicBlock *BB = Inst->getParent();
10573 bool IsCmpSelMinMax = isCmpSelMinMax(Inst);
10574 SmallVector<Instruction *> Worklist(1, Inst);
10575 // Checks if the operands of the \p TreeN instruction are also reduction
10576 // operations or should be treated as reduced values or an extra argument,
10577 // which is not part of the reduction.
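// Illustrative example (hypothetical IR, all in one block):
//   %r1 = add i32 %a, %b
//   %r2 = add i32 %r1, %c
//   %r3 = add i32 %r2, %d    ; Inst / ReductionRoot
// The walk below classifies %r2 and %r1 as further reduction operations and
// %a, %b, %c, %d as possible reduced values; an operand defined in a
// different basic block would be recorded as an extra argument instead.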
10578 auto &&CheckOperands = [this, IsCmpSelMinMax, 10579 BB](Instruction *TreeN, 10580 SmallVectorImpl<Value *> &ExtraArgs, 10581 SmallVectorImpl<Value *> &PossibleReducedVals, 10582 SmallVectorImpl<Instruction *> &ReductionOps) { 10583 for (int I = getFirstOperandIndex(TreeN), 10584 End = getNumberOfOperands(TreeN); 10585 I < End; ++I) { 10586 Value *EdgeVal = getRdxOperand(TreeN, I); 10587 ReducedValsToOps[EdgeVal].push_back(TreeN); 10588 auto *EdgeInst = dyn_cast<Instruction>(EdgeVal); 10589 // Edge has wrong parent - mark as an extra argument. 10590 if (EdgeInst && !isVectorLikeInstWithConstOps(EdgeInst) && 10591 !hasSameParent(EdgeInst, BB)) { 10592 ExtraArgs.push_back(EdgeVal); 10593 continue; 10594 } 10595 // If the edge is not an instruction, or it is different from the main 10596 // reduction opcode or has too many uses - possible reduced value. 10597 if (!EdgeInst || getRdxKind(EdgeInst) != RdxKind || 10598 IsCmpSelMinMax != isCmpSelMinMax(EdgeInst) || 10599 !hasRequiredNumberOfUses(IsCmpSelMinMax, EdgeInst) || 10600 !isVectorizable(getRdxKind(EdgeInst), EdgeInst)) { 10601 PossibleReducedVals.push_back(EdgeVal); 10602 continue; 10603 } 10604 ReductionOps.push_back(EdgeInst); 10605 } 10606 }; 10607 // Try to regroup reduced values so that it gets more profitable to try to 10608 // reduce them. Values are grouped by their value ids, instructions - by 10609 // instruction op id and/or alternate op id, plus do extra analysis for 10610 // loads (grouping them by the distance between pointers) and cmp 10611 // instructions (grouping them by the predicate). 10612 MapVector<size_t, MapVector<size_t, MapVector<Value *, unsigned>>> 10613 PossibleReducedVals; 10614 initReductionOps(Inst); 10615 while (!Worklist.empty()) { 10616 Instruction *TreeN = Worklist.pop_back_val(); 10617 SmallVector<Value *> Args; 10618 SmallVector<Value *> PossibleRedVals; 10619 SmallVector<Instruction *> PossibleReductionOps; 10620 CheckOperands(TreeN, Args, PossibleRedVals, PossibleReductionOps); 10621 // If too many extra args - mark the instruction itself as a reduction 10622 // value, not a reduction operation. 10623 if (Args.size() < 2) { 10624 addReductionOps(TreeN); 10625 // Add extra args. 10626 if (!Args.empty()) { 10627 assert(Args.size() == 1 && "Expected only single argument."); 10628 ExtraArgs[TreeN] = Args.front(); 10629 } 10630 // Add reduction values. The values are sorted for better vectorization 10631 // results.
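// Loads are hashed via the callback below: if getPointersDiff shows that a
// load is at a constant distance from an already-seen load, it reuses that
// load's pointer hash, so such loads fall into the same group and may later
// form a consecutive-load bundle.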
10632 for (Value *V : PossibleRedVals) { 10633 size_t Key, Idx; 10634 std::tie(Key, Idx) = generateKeySubkey( 10635 V, &TLI, 10636 [&PossibleReducedVals, &DL, &SE](size_t Key, LoadInst *LI) { 10637 for (const auto &LoadData : PossibleReducedVals[Key]) { 10638 auto *RLI = cast<LoadInst>(LoadData.second.front().first); 10639 if (getPointersDiff(RLI->getType(), RLI->getPointerOperand(), 10640 LI->getType(), LI->getPointerOperand(), 10641 DL, SE, /*StrictCheck=*/true)) 10642 return hash_value(RLI->getPointerOperand()); 10643 } 10644 return hash_value(LI->getPointerOperand()); 10645 }, 10646 /*AllowAlternate=*/false); 10647 ++PossibleReducedVals[Key][Idx] 10648 .insert(std::make_pair(V, 0)) 10649 .first->second; 10650 } 10651 Worklist.append(PossibleReductionOps.rbegin(), 10652 PossibleReductionOps.rend()); 10653 } else { 10654 size_t Key, Idx; 10655 std::tie(Key, Idx) = generateKeySubkey( 10656 TreeN, &TLI, 10657 [&PossibleReducedVals, &DL, &SE](size_t Key, LoadInst *LI) { 10658 for (const auto &LoadData : PossibleReducedVals[Key]) { 10659 auto *RLI = cast<LoadInst>(LoadData.second.front().first); 10660 if (getPointersDiff(RLI->getType(), RLI->getPointerOperand(), 10661 LI->getType(), LI->getPointerOperand(), DL, 10662 SE, /*StrictCheck=*/true)) 10663 return hash_value(RLI->getPointerOperand()); 10664 } 10665 return hash_value(LI->getPointerOperand()); 10666 }, 10667 /*AllowAlternate=*/false); 10668 ++PossibleReducedVals[Key][Idx] 10669 .insert(std::make_pair(TreeN, 0)) 10670 .first->second; 10671 } 10672 } 10673 auto PossibleReducedValsVect = PossibleReducedVals.takeVector(); 10674 // Sort values by the total number of values kinds to start the reduction 10675 // from the longest possible reduced values sequences. 10676 for (auto &PossibleReducedVals : PossibleReducedValsVect) { 10677 auto PossibleRedVals = PossibleReducedVals.second.takeVector(); 10678 SmallVector<SmallVector<Value *>> PossibleRedValsVect; 10679 for (auto It = PossibleRedVals.begin(), E = PossibleRedVals.end(); 10680 It != E; ++It) { 10681 PossibleRedValsVect.emplace_back(); 10682 auto RedValsVect = It->second.takeVector(); 10683 stable_sort(RedValsVect, [](const auto &P1, const auto &P2) { 10684 return P1.second < P2.second; 10685 }); 10686 for (const std::pair<Value *, unsigned> &Data : RedValsVect) 10687 PossibleRedValsVect.back().append(Data.second, Data.first); 10688 } 10689 stable_sort(PossibleRedValsVect, [](const auto &P1, const auto &P2) { 10690 return P1.size() > P2.size(); 10691 }); 10692 ReducedVals.emplace_back(); 10693 for (ArrayRef<Value *> Data : PossibleRedValsVect) 10694 ReducedVals.back().append(Data.rbegin(), Data.rend()); 10695 } 10696 // Sort the reduced values by number of same/alternate opcode and/or pointer 10697 // operand. 10698 stable_sort(ReducedVals, [](ArrayRef<Value *> P1, ArrayRef<Value *> P2) { 10699 return P1.size() > P2.size(); 10700 }); 10701 return true; 10702 } 10703 10704 /// Attempt to vectorize the tree found by matchAssociativeReduction. 10705 Value *tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) { 10706 constexpr int ReductionLimit = 4; 10707 constexpr unsigned RegMaxNumber = 4; 10708 constexpr unsigned RedValsMaxNumber = 128; 10709 // If there are a sufficient number of reduction values, reduce 10710 // to a nearby power-of-2. We can safely generate oversized 10711 // vectors and rely on the backend to split them to legal sizes. 
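// E.g. 7 reduced values would typically be vectorized as one group of 4
// (the width is clamped to a power of 2 below), with the remaining values
// folded in by the scalar remainder handling further down.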
10712 unsigned NumReducedVals = std::accumulate( 10713 ReducedVals.begin(), ReducedVals.end(), 0, 10714 [](int Num, ArrayRef<Value *> Vals) { return Num + Vals.size(); }); 10715 if (NumReducedVals < ReductionLimit) 10716 return nullptr; 10717 10718 IRBuilder<> Builder(cast<Instruction>(ReductionRoot)); 10719 10720 // Track the reduced values in case if they are replaced by extractelement 10721 // because of the vectorization. 10722 DenseMap<Value *, WeakTrackingVH> TrackedVals; 10723 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues; 10724 // The same extra argument may be used several times, so log each attempt 10725 // to use it. 10726 for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) { 10727 assert(Pair.first && "DebugLoc must be set."); 10728 ExternallyUsedValues[Pair.second].push_back(Pair.first); 10729 TrackedVals.try_emplace(Pair.second, Pair.second); 10730 } 10731 10732 // The compare instruction of a min/max is the insertion point for new 10733 // instructions and may be replaced with a new compare instruction. 10734 auto &&GetCmpForMinMaxReduction = [](Instruction *RdxRootInst) { 10735 assert(isa<SelectInst>(RdxRootInst) && 10736 "Expected min/max reduction to have select root instruction"); 10737 Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition(); 10738 assert(isa<Instruction>(ScalarCond) && 10739 "Expected min/max reduction to have compare condition"); 10740 return cast<Instruction>(ScalarCond); 10741 }; 10742 10743 // The reduction root is used as the insertion point for new instructions, 10744 // so set it as externally used to prevent it from being deleted. 10745 ExternallyUsedValues[ReductionRoot]; 10746 SmallDenseSet<Value *> IgnoreList; 10747 for (ReductionOpsType &RdxOps : ReductionOps) 10748 for (Value *RdxOp : RdxOps) { 10749 if (!RdxOp) 10750 continue; 10751 IgnoreList.insert(RdxOp); 10752 } 10753 bool IsCmpSelMinMax = isCmpSelMinMax(cast<Instruction>(ReductionRoot)); 10754 10755 // Need to track reduced vals, they may be changed during vectorization of 10756 // subvectors. 10757 for (ArrayRef<Value *> Candidates : ReducedVals) 10758 for (Value *V : Candidates) 10759 TrackedVals.try_emplace(V, V); 10760 10761 DenseMap<Value *, unsigned> VectorizedVals; 10762 Value *VectorizedTree = nullptr; 10763 bool CheckForReusedReductionOps = false; 10764 // Try to vectorize elements based on their type. 10765 for (unsigned I = 0, E = ReducedVals.size(); I < E; ++I) { 10766 ArrayRef<Value *> OrigReducedVals = ReducedVals[I]; 10767 InstructionsState S = getSameOpcode(OrigReducedVals); 10768 SmallVector<Value *> Candidates; 10769 DenseMap<Value *, Value *> TrackedToOrig; 10770 for (unsigned Cnt = 0, Sz = OrigReducedVals.size(); Cnt < Sz; ++Cnt) { 10771 Value *RdxVal = TrackedVals.find(OrigReducedVals[Cnt])->second; 10772 // Check if the reduction value was not overriden by the extractelement 10773 // instruction because of the vectorization and exclude it, if it is not 10774 // compatible with other values. 10775 if (auto *Inst = dyn_cast<Instruction>(RdxVal)) 10776 if (isVectorLikeInstWithConstOps(Inst) && 10777 (!S.getOpcode() || !S.isOpcodeOrAlt(Inst))) 10778 continue; 10779 Candidates.push_back(RdxVal); 10780 TrackedToOrig.try_emplace(RdxVal, OrigReducedVals[Cnt]); 10781 } 10782 bool ShuffledExtracts = false; 10783 // Try to handle shuffled extractelements. 
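// E.g. if this group and the next one both consist of extractelements from
// the same source vectors, the two groups can be merged and treated as a
// single (shuffled) vector; this is what isFixedVectorShuffle checks below.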
10784 if (S.getOpcode() == Instruction::ExtractElement && !S.isAltShuffle() && 10785 I + 1 < E) { 10786 InstructionsState NextS = getSameOpcode(ReducedVals[I + 1]); 10787 if (NextS.getOpcode() == Instruction::ExtractElement && 10788 !NextS.isAltShuffle()) { 10789 SmallVector<Value *> CommonCandidates(Candidates); 10790 for (Value *RV : ReducedVals[I + 1]) { 10791 Value *RdxVal = TrackedVals.find(RV)->second; 10792 // Check if the reduction value was not overriden by the 10793 // extractelement instruction because of the vectorization and 10794 // exclude it, if it is not compatible with other values. 10795 if (auto *Inst = dyn_cast<Instruction>(RdxVal)) 10796 if (!NextS.getOpcode() || !NextS.isOpcodeOrAlt(Inst)) 10797 continue; 10798 CommonCandidates.push_back(RdxVal); 10799 TrackedToOrig.try_emplace(RdxVal, RV); 10800 } 10801 SmallVector<int> Mask; 10802 if (isFixedVectorShuffle(CommonCandidates, Mask)) { 10803 ++I; 10804 Candidates.swap(CommonCandidates); 10805 ShuffledExtracts = true; 10806 } 10807 } 10808 } 10809 unsigned NumReducedVals = Candidates.size(); 10810 if (NumReducedVals < ReductionLimit) 10811 continue; 10812 10813 unsigned MaxVecRegSize = V.getMaxVecRegSize(); 10814 unsigned EltSize = V.getVectorElementSize(Candidates[0]); 10815 unsigned MaxElts = RegMaxNumber * PowerOf2Floor(MaxVecRegSize / EltSize); 10816 10817 unsigned ReduxWidth = std::min<unsigned>( 10818 PowerOf2Floor(NumReducedVals), std::max(RedValsMaxNumber, MaxElts)); 10819 unsigned Start = 0; 10820 unsigned Pos = Start; 10821 // Restarts vectorization attempt with lower vector factor. 10822 unsigned PrevReduxWidth = ReduxWidth; 10823 bool CheckForReusedReductionOpsLocal = false; 10824 auto &&AdjustReducedVals = [&Pos, &Start, &ReduxWidth, NumReducedVals, 10825 &CheckForReusedReductionOpsLocal, 10826 &PrevReduxWidth, &V, 10827 &IgnoreList](bool IgnoreVL = false) { 10828 bool IsAnyRedOpGathered = !IgnoreVL && V.isAnyGathered(IgnoreList); 10829 if (!CheckForReusedReductionOpsLocal && PrevReduxWidth == ReduxWidth) { 10830 // Check if any of the reduction ops are gathered. If so, worth 10831 // trying again with less number of reduction ops. 10832 CheckForReusedReductionOpsLocal |= IsAnyRedOpGathered; 10833 } 10834 ++Pos; 10835 if (Pos < NumReducedVals - ReduxWidth + 1) 10836 return IsAnyRedOpGathered; 10837 Pos = Start; 10838 ReduxWidth /= 2; 10839 return IsAnyRedOpGathered; 10840 }; 10841 while (Pos < NumReducedVals - ReduxWidth + 1 && 10842 ReduxWidth >= ReductionLimit) { 10843 // Dependency in tree of the reduction ops - drop this attempt, try 10844 // later. 10845 if (CheckForReusedReductionOpsLocal && PrevReduxWidth != ReduxWidth && 10846 Start == 0) { 10847 CheckForReusedReductionOps = true; 10848 break; 10849 } 10850 PrevReduxWidth = ReduxWidth; 10851 ArrayRef<Value *> VL(std::next(Candidates.begin(), Pos), ReduxWidth); 10852 // Beeing analyzed already - skip. 10853 if (V.areAnalyzedReductionVals(VL)) { 10854 (void)AdjustReducedVals(/*IgnoreVL=*/true); 10855 continue; 10856 } 10857 // Early exit if any of the reduction values were deleted during 10858 // previous vectorization attempts. 
10859 if (any_of(VL, [&V](Value *RedVal) { 10860 auto *RedValI = dyn_cast<Instruction>(RedVal); 10861 if (!RedValI) 10862 return false; 10863 return V.isDeleted(RedValI); 10864 })) 10865 break; 10866 V.buildTree(VL, IgnoreList); 10867 if (V.isTreeTinyAndNotFullyVectorizable(/*ForReduction=*/true)) { 10868 if (!AdjustReducedVals()) 10869 V.analyzedReductionVals(VL); 10870 continue; 10871 } 10872 if (V.isLoadCombineReductionCandidate(RdxKind)) { 10873 if (!AdjustReducedVals()) 10874 V.analyzedReductionVals(VL); 10875 continue; 10876 } 10877 V.reorderTopToBottom(); 10878 // No need to reorder the root node at all. 10879 V.reorderBottomToTop(/*IgnoreReorder=*/true); 10880 // Keep extracted other reduction values, if they are used in the 10881 // vectorization trees. 10882 BoUpSLP::ExtraValueToDebugLocsMap LocalExternallyUsedValues( 10883 ExternallyUsedValues); 10884 for (unsigned Cnt = 0, Sz = ReducedVals.size(); Cnt < Sz; ++Cnt) { 10885 if (Cnt == I || (ShuffledExtracts && Cnt == I - 1)) 10886 continue; 10887 for_each(ReducedVals[Cnt], 10888 [&LocalExternallyUsedValues, &TrackedVals](Value *V) { 10889 if (isa<Instruction>(V)) 10890 LocalExternallyUsedValues[TrackedVals[V]]; 10891 }); 10892 } 10893 // Number of uses of the candidates in the vector of values. 10894 SmallDenseMap<Value *, unsigned> NumUses; 10895 for (unsigned Cnt = 0; Cnt < Pos; ++Cnt) { 10896 Value *V = Candidates[Cnt]; 10897 if (NumUses.count(V) > 0) 10898 continue; 10899 NumUses[V] = std::count(VL.begin(), VL.end(), V); 10900 } 10901 for (unsigned Cnt = Pos + ReduxWidth; Cnt < NumReducedVals; ++Cnt) { 10902 Value *V = Candidates[Cnt]; 10903 if (NumUses.count(V) > 0) 10904 continue; 10905 NumUses[V] = std::count(VL.begin(), VL.end(), V); 10906 } 10907 // Gather externally used values. 10908 SmallPtrSet<Value *, 4> Visited; 10909 for (unsigned Cnt = 0; Cnt < Pos; ++Cnt) { 10910 Value *V = Candidates[Cnt]; 10911 if (!Visited.insert(V).second) 10912 continue; 10913 unsigned NumOps = VectorizedVals.lookup(V) + NumUses[V]; 10914 if (NumOps != ReducedValsToOps.find(V)->second.size()) 10915 LocalExternallyUsedValues[V]; 10916 } 10917 for (unsigned Cnt = Pos + ReduxWidth; Cnt < NumReducedVals; ++Cnt) { 10918 Value *V = Candidates[Cnt]; 10919 if (!Visited.insert(V).second) 10920 continue; 10921 unsigned NumOps = VectorizedVals.lookup(V) + NumUses[V]; 10922 if (NumOps != ReducedValsToOps.find(V)->second.size()) 10923 LocalExternallyUsedValues[V]; 10924 } 10925 V.buildExternalUses(LocalExternallyUsedValues); 10926 10927 V.computeMinimumValueSizes(); 10928 10929 // Intersect the fast-math-flags from all reduction operations. 10930 FastMathFlags RdxFMF; 10931 RdxFMF.set(); 10932 for (Value *U : IgnoreList) 10933 if (auto *FPMO = dyn_cast<FPMathOperator>(U)) 10934 RdxFMF &= FPMO->getFastMathFlags(); 10935 // Estimate cost. 
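// Note that getReductionCost returns the difference between the vector and
// the scalar reduction cost, so the sum below is compared directly against
// the SLP cost threshold.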
10936 InstructionCost TreeCost = V.getTreeCost(VL); 10937 InstructionCost ReductionCost = 10938 getReductionCost(TTI, VL, ReduxWidth, RdxFMF); 10939 InstructionCost Cost = TreeCost + ReductionCost; 10940 if (!Cost.isValid()) { 10941 LLVM_DEBUG(dbgs() << "Encountered invalid baseline cost.\n"); 10942 return nullptr; 10943 } 10944 if (Cost >= -SLPCostThreshold) { 10945 V.getORE()->emit([&]() { 10946 return OptimizationRemarkMissed( 10947 SV_NAME, "HorSLPNotBeneficial", 10948 ReducedValsToOps.find(VL[0])->second.front()) 10949 << "Vectorizing horizontal reduction is possible" 10950 << "but not beneficial with cost " << ore::NV("Cost", Cost) 10951 << " and threshold " 10952 << ore::NV("Threshold", -SLPCostThreshold); 10953 }); 10954 if (!AdjustReducedVals()) 10955 V.analyzedReductionVals(VL); 10956 continue; 10957 } 10958 10959 LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" 10960 << Cost << ". (HorRdx)\n"); 10961 V.getORE()->emit([&]() { 10962 return OptimizationRemark( 10963 SV_NAME, "VectorizedHorizontalReduction", 10964 ReducedValsToOps.find(VL[0])->second.front()) 10965 << "Vectorized horizontal reduction with cost " 10966 << ore::NV("Cost", Cost) << " and with tree size " 10967 << ore::NV("TreeSize", V.getTreeSize()); 10968 }); 10969 10970 Builder.setFastMathFlags(RdxFMF); 10971 10972 // Vectorize a tree. 10973 Value *VectorizedRoot = V.vectorizeTree(LocalExternallyUsedValues); 10974 10975 // Emit a reduction. If the root is a select (min/max idiom), the insert 10976 // point is the compare condition of that select. 10977 Instruction *RdxRootInst = cast<Instruction>(ReductionRoot); 10978 if (IsCmpSelMinMax) 10979 Builder.SetInsertPoint(GetCmpForMinMaxReduction(RdxRootInst)); 10980 else 10981 Builder.SetInsertPoint(RdxRootInst); 10982 10983 // To prevent poison from leaking across what used to be sequential, 10984 // safe, scalar boolean logic operations, the reduction operand must be 10985 // frozen. 10986 if (isa<SelectInst>(RdxRootInst) && isBoolLogicOp(RdxRootInst)) 10987 VectorizedRoot = Builder.CreateFreeze(VectorizedRoot); 10988 10989 Value *ReducedSubTree = 10990 emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI); 10991 10992 if (!VectorizedTree) { 10993 // Initialize the final value in the reduction. 10994 VectorizedTree = ReducedSubTree; 10995 } else { 10996 // Update the final value in the reduction. 10997 Builder.SetCurrentDebugLocation( 10998 cast<Instruction>(ReductionOps.front().front())->getDebugLoc()); 10999 VectorizedTree = createOp(Builder, RdxKind, VectorizedTree, 11000 ReducedSubTree, "op.rdx", ReductionOps); 11001 } 11002 // Count vectorized reduced values to exclude them from final reduction. 11003 for (Value *V : VL) 11004 ++VectorizedVals.try_emplace(TrackedToOrig.find(V)->second, 0) 11005 .first->getSecond(); 11006 Pos += ReduxWidth; 11007 Start = Pos; 11008 ReduxWidth = PowerOf2Floor(NumReducedVals - Pos); 11009 } 11010 } 11011 if (VectorizedTree) { 11012 // Finish the reduction. 11013 // Need to add extra arguments and not vectorized possible reduction 11014 // values. 11015 // Try to avoid dependencies between the scalar remainders after 11016 // reductions. 
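// FinalGen below combines the remaining scalar values pairwise, e.g.
//   [a, b, c, d, e] -> [a+b, c+d, e] -> [(a+b)+(c+d), e] -> ...
// which yields a log-depth tree of scalar ops instead of one long chain.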
11017 auto &&FinalGen = 11018 [this, &Builder, 11019 &TrackedVals](ArrayRef<std::pair<Instruction *, Value *>> InstVals) { 11020 unsigned Sz = InstVals.size(); 11021 SmallVector<std::pair<Instruction *, Value *>> ExtraReds(Sz / 2 + 11022 Sz % 2); 11023 for (unsigned I = 0, E = (Sz / 2) * 2; I < E; I += 2) { 11024 Instruction *RedOp = InstVals[I + 1].first; 11025 Builder.SetCurrentDebugLocation(RedOp->getDebugLoc()); 11026 ReductionOpsListType Ops; 11027 if (auto *Sel = dyn_cast<SelectInst>(RedOp)) 11028 Ops.emplace_back().push_back(Sel->getCondition()); 11029 Ops.emplace_back().push_back(RedOp); 11030 Value *RdxVal1 = InstVals[I].second; 11031 Value *StableRdxVal1 = RdxVal1; 11032 auto It1 = TrackedVals.find(RdxVal1); 11033 if (It1 != TrackedVals.end()) 11034 StableRdxVal1 = It1->second; 11035 Value *RdxVal2 = InstVals[I + 1].second; 11036 Value *StableRdxVal2 = RdxVal2; 11037 auto It2 = TrackedVals.find(RdxVal2); 11038 if (It2 != TrackedVals.end()) 11039 StableRdxVal2 = It2->second; 11040 Value *ExtraRed = createOp(Builder, RdxKind, StableRdxVal1, 11041 StableRdxVal2, "op.rdx", Ops); 11042 ExtraReds[I / 2] = std::make_pair(InstVals[I].first, ExtraRed); 11043 } 11044 if (Sz % 2 == 1) 11045 ExtraReds[Sz / 2] = InstVals.back(); 11046 return ExtraReds; 11047 }; 11048 SmallVector<std::pair<Instruction *, Value *>> ExtraReductions; 11049 SmallPtrSet<Value *, 8> Visited; 11050 for (ArrayRef<Value *> Candidates : ReducedVals) { 11051 for (Value *RdxVal : Candidates) { 11052 if (!Visited.insert(RdxVal).second) 11053 continue; 11054 unsigned NumOps = VectorizedVals.lookup(RdxVal); 11055 for (Instruction *RedOp : 11056 makeArrayRef(ReducedValsToOps.find(RdxVal)->second) 11057 .drop_back(NumOps)) 11058 ExtraReductions.emplace_back(RedOp, RdxVal); 11059 } 11060 } 11061 for (auto &Pair : ExternallyUsedValues) { 11062 // Add each externally used value to the final reduction. 11063 for (auto *I : Pair.second) 11064 ExtraReductions.emplace_back(I, Pair.first); 11065 } 11066 // Iterate through all not-vectorized reduction values/extra arguments. 11067 while (ExtraReductions.size() > 1) { 11068 SmallVector<std::pair<Instruction *, Value *>> NewReds = 11069 FinalGen(ExtraReductions); 11070 ExtraReductions.swap(NewReds); 11071 } 11072 // Final reduction. 11073 if (ExtraReductions.size() == 1) { 11074 Instruction *RedOp = ExtraReductions.back().first; 11075 Builder.SetCurrentDebugLocation(RedOp->getDebugLoc()); 11076 ReductionOpsListType Ops; 11077 if (auto *Sel = dyn_cast<SelectInst>(RedOp)) 11078 Ops.emplace_back().push_back(Sel->getCondition()); 11079 Ops.emplace_back().push_back(RedOp); 11080 Value *RdxVal = ExtraReductions.back().second; 11081 Value *StableRdxVal = RdxVal; 11082 auto It = TrackedVals.find(RdxVal); 11083 if (It != TrackedVals.end()) 11084 StableRdxVal = It->second; 11085 VectorizedTree = createOp(Builder, RdxKind, VectorizedTree, 11086 StableRdxVal, "op.rdx", Ops); 11087 } 11088 11089 ReductionRoot->replaceAllUsesWith(VectorizedTree); 11090 11091 // The original scalar reduction is expected to have no remaining 11092 // uses outside the reduction tree itself. Assert that we got this 11093 // correct, replace internal uses with undef, and mark for eventual 11094 // deletion. 
11095 #ifndef NDEBUG 11096 SmallSet<Value *, 4> IgnoreSet; 11097 for (ArrayRef<Value *> RdxOps : ReductionOps) 11098 IgnoreSet.insert(RdxOps.begin(), RdxOps.end()); 11099 #endif 11100 for (ArrayRef<Value *> RdxOps : ReductionOps) { 11101 for (Value *Ignore : RdxOps) { 11102 if (!Ignore) 11103 continue; 11104 #ifndef NDEBUG 11105 for (auto *U : Ignore->users()) { 11106 assert(IgnoreSet.count(U) && 11107 "All users must be either in the reduction ops list."); 11108 } 11109 #endif 11110 if (!Ignore->use_empty()) { 11111 Value *Undef = UndefValue::get(Ignore->getType()); 11112 Ignore->replaceAllUsesWith(Undef); 11113 } 11114 V.eraseInstruction(cast<Instruction>(Ignore)); 11115 } 11116 } 11117 } else if (!CheckForReusedReductionOps) { 11118 for (ReductionOpsType &RdxOps : ReductionOps) 11119 for (Value *RdxOp : RdxOps) 11120 V.analyzedReductionRoot(cast<Instruction>(RdxOp)); 11121 } 11122 return VectorizedTree; 11123 } 11124 11125 private: 11126 /// Calculate the cost of a reduction. 11127 InstructionCost getReductionCost(TargetTransformInfo *TTI, 11128 ArrayRef<Value *> ReducedVals, 11129 unsigned ReduxWidth, FastMathFlags FMF) { 11130 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 11131 Value *FirstReducedVal = ReducedVals.front(); 11132 Type *ScalarTy = FirstReducedVal->getType(); 11133 FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth); 11134 InstructionCost VectorCost = 0, ScalarCost; 11135 // If all of the reduced values are constant, the vector cost is 0, since 11136 // the reduction value can be calculated at the compile time. 11137 bool AllConsts = all_of(ReducedVals, isConstant); 11138 switch (RdxKind) { 11139 case RecurKind::Add: 11140 case RecurKind::Mul: 11141 case RecurKind::Or: 11142 case RecurKind::And: 11143 case RecurKind::Xor: 11144 case RecurKind::FAdd: 11145 case RecurKind::FMul: { 11146 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind); 11147 if (!AllConsts) 11148 VectorCost = 11149 TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF, CostKind); 11150 ScalarCost = TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy, CostKind); 11151 break; 11152 } 11153 case RecurKind::FMax: 11154 case RecurKind::FMin: { 11155 auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy); 11156 if (!AllConsts) { 11157 auto *VecCondTy = 11158 cast<VectorType>(CmpInst::makeCmpResultType(VectorTy)); 11159 VectorCost = 11160 TTI->getMinMaxReductionCost(VectorTy, VecCondTy, 11161 /*IsUnsigned=*/false, CostKind); 11162 } 11163 CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind); 11164 ScalarCost = TTI->getCmpSelInstrCost(Instruction::FCmp, ScalarTy, 11165 SclCondTy, RdxPred, CostKind) + 11166 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 11167 SclCondTy, RdxPred, CostKind); 11168 break; 11169 } 11170 case RecurKind::SMax: 11171 case RecurKind::SMin: 11172 case RecurKind::UMax: 11173 case RecurKind::UMin: { 11174 auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy); 11175 if (!AllConsts) { 11176 auto *VecCondTy = 11177 cast<VectorType>(CmpInst::makeCmpResultType(VectorTy)); 11178 bool IsUnsigned = 11179 RdxKind == RecurKind::UMax || RdxKind == RecurKind::UMin; 11180 VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy, 11181 IsUnsigned, CostKind); 11182 } 11183 CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind); 11184 ScalarCost = TTI->getCmpSelInstrCost(Instruction::ICmp, ScalarTy, 11185 SclCondTy, RdxPred, CostKind) + 11186 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 11187 SclCondTy, RdxPred, 
CostKind); 11188 break; 11189 } 11190 default: 11191 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 11192 } 11193 11194 // Scalar cost is repeated for N-1 elements. 11195 ScalarCost *= (ReduxWidth - 1); 11196 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost 11197 << " for reduction that starts with " << *FirstReducedVal 11198 << " (It is a splitting reduction)\n"); 11199 return VectorCost - ScalarCost; 11200 } 11201 11202 /// Emit a horizontal reduction of the vectorized value. 11203 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 11204 unsigned ReduxWidth, const TargetTransformInfo *TTI) { 11205 assert(VectorizedValue && "Need to have a vectorized tree node"); 11206 assert(isPowerOf2_32(ReduxWidth) && 11207 "We only handle power-of-two reductions for now"); 11208 assert(RdxKind != RecurKind::FMulAdd && 11209 "A call to the llvm.fmuladd intrinsic is not handled yet"); 11210 11211 ++NumVectorInstructions; 11212 return createSimpleTargetReduction(Builder, TTI, VectorizedValue, RdxKind); 11213 } 11214 }; 11215 11216 } // end anonymous namespace 11217 11218 static Optional<unsigned> getAggregateSize(Instruction *InsertInst) { 11219 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) 11220 return cast<FixedVectorType>(IE->getType())->getNumElements(); 11221 11222 unsigned AggregateSize = 1; 11223 auto *IV = cast<InsertValueInst>(InsertInst); 11224 Type *CurrentType = IV->getType(); 11225 do { 11226 if (auto *ST = dyn_cast<StructType>(CurrentType)) { 11227 for (auto *Elt : ST->elements()) 11228 if (Elt != ST->getElementType(0)) // check homogeneity 11229 return None; 11230 AggregateSize *= ST->getNumElements(); 11231 CurrentType = ST->getElementType(0); 11232 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) { 11233 AggregateSize *= AT->getNumElements(); 11234 CurrentType = AT->getElementType(); 11235 } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) { 11236 AggregateSize *= VT->getNumElements(); 11237 return AggregateSize; 11238 } else if (CurrentType->isSingleValueType()) { 11239 return AggregateSize; 11240 } else { 11241 return None; 11242 } 11243 } while (true); 11244 } 11245 11246 static void findBuildAggregate_rec(Instruction *LastInsertInst, 11247 TargetTransformInfo *TTI, 11248 SmallVectorImpl<Value *> &BuildVectorOpds, 11249 SmallVectorImpl<Value *> &InsertElts, 11250 unsigned OperandOffset) { 11251 do { 11252 Value *InsertedOperand = LastInsertInst->getOperand(1); 11253 Optional<unsigned> OperandIndex = 11254 getInsertIndex(LastInsertInst, OperandOffset); 11255 if (!OperandIndex) 11256 return; 11257 if (isa<InsertElementInst>(InsertedOperand) || 11258 isa<InsertValueInst>(InsertedOperand)) { 11259 findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI, 11260 BuildVectorOpds, InsertElts, *OperandIndex); 11261 11262 } else { 11263 BuildVectorOpds[*OperandIndex] = InsertedOperand; 11264 InsertElts[*OperandIndex] = LastInsertInst; 11265 } 11266 LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0)); 11267 } while (LastInsertInst != nullptr && 11268 (isa<InsertValueInst>(LastInsertInst) || 11269 isa<InsertElementInst>(LastInsertInst)) && 11270 LastInsertInst->hasOneUse()); 11271 } 11272 11273 /// Recognize construction of vectors like 11274 /// %ra = insertelement <4 x float> poison, float %s0, i32 0 11275 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 11276 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 11277 /// %rd = insertelement <4 x float> %rc, float %s3, 
i32 3 11278 /// starting from the last insertelement or insertvalue instruction. 11279 /// 11280 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>}, 11281 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on. 11282 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples. 11283 /// 11284 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type. 11285 /// 11286 /// \return true if it matches. 11287 static bool findBuildAggregate(Instruction *LastInsertInst, 11288 TargetTransformInfo *TTI, 11289 SmallVectorImpl<Value *> &BuildVectorOpds, 11290 SmallVectorImpl<Value *> &InsertElts) { 11291 11292 assert((isa<InsertElementInst>(LastInsertInst) || 11293 isa<InsertValueInst>(LastInsertInst)) && 11294 "Expected insertelement or insertvalue instruction!"); 11295 11296 assert((BuildVectorOpds.empty() && InsertElts.empty()) && 11297 "Expected empty result vectors!"); 11298 11299 Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst); 11300 if (!AggregateSize) 11301 return false; 11302 BuildVectorOpds.resize(*AggregateSize); 11303 InsertElts.resize(*AggregateSize); 11304 11305 findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 0); 11306 llvm::erase_value(BuildVectorOpds, nullptr); 11307 llvm::erase_value(InsertElts, nullptr); 11308 if (BuildVectorOpds.size() >= 2) 11309 return true; 11310 11311 return false; 11312 } 11313 11314 /// Try and get a reduction value from a phi node. 11315 /// 11316 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 11317 /// if they come from either \p ParentBB or a containing loop latch. 11318 /// 11319 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 11320 /// if not possible. 11321 static Value *getReductionValue(const DominatorTree *DT, PHINode *P, 11322 BasicBlock *ParentBB, LoopInfo *LI) { 11323 // There are situations where the reduction value is not dominated by the 11324 // reduction phi. Vectorizing such cases has been reported to cause 11325 // miscompiles. See PR25787. 11326 auto DominatedReduxValue = [&](Value *R) { 11327 return isa<Instruction>(R) && 11328 DT->dominates(P->getParent(), cast<Instruction>(R)->getParent()); 11329 }; 11330 11331 Value *Rdx = nullptr; 11332 11333 // Return the incoming value if it comes from the same BB as the phi node. 11334 if (P->getIncomingBlock(0) == ParentBB) { 11335 Rdx = P->getIncomingValue(0); 11336 } else if (P->getIncomingBlock(1) == ParentBB) { 11337 Rdx = P->getIncomingValue(1); 11338 } 11339 11340 if (Rdx && DominatedReduxValue(Rdx)) 11341 return Rdx; 11342 11343 // Otherwise, check whether we have a loop latch to look at. 11344 Loop *BBL = LI->getLoopFor(ParentBB); 11345 if (!BBL) 11346 return nullptr; 11347 BasicBlock *BBLatch = BBL->getLoopLatch(); 11348 if (!BBLatch) 11349 return nullptr; 11350 11351 // There is a loop latch, return the incoming value if it comes from 11352 // that. This reduction pattern occasionally turns up. 
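// A typical shape of such a phi (illustrative IR only):
//   loop:
//     %sum = phi i32 [ 0, %entry ], [ %sum.next, %loop ]
//     ...
//     %sum.next = add i32 %sum, %x
// where %sum.next is the incoming value from the latch block.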
11353 if (P->getIncomingBlock(0) == BBLatch) { 11354 Rdx = P->getIncomingValue(0); 11355 } else if (P->getIncomingBlock(1) == BBLatch) { 11356 Rdx = P->getIncomingValue(1); 11357 } 11358 11359 if (Rdx && DominatedReduxValue(Rdx)) 11360 return Rdx; 11361 11362 return nullptr; 11363 } 11364 11365 static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) { 11366 if (match(I, m_BinOp(m_Value(V0), m_Value(V1)))) 11367 return true; 11368 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1)))) 11369 return true; 11370 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1)))) 11371 return true; 11372 if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1)))) 11373 return true; 11374 if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1)))) 11375 return true; 11376 if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1)))) 11377 return true; 11378 if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1)))) 11379 return true; 11380 return false; 11381 } 11382 11383 /// Attempt to reduce a horizontal reduction. 11384 /// If it is legal to match a horizontal reduction feeding the phi node \a P 11385 /// with reduction operators \a Root (or one of its operands) in a basic block 11386 /// \a BB, then check if it can be done. If a horizontal reduction is not found 11387 /// and the root instruction is a binary operation, vectorization of its operands 11388 /// is attempted. 11389 /// \returns true if a horizontal reduction was matched and reduced or the operands 11390 /// of one of the binary instructions were vectorized. 11391 /// \returns false if a horizontal reduction was not matched (or not possible) 11392 /// or no vectorization of any binary operation feeding the \a Root instruction was 11393 /// performed. 11394 static bool tryToVectorizeHorReductionOrInstOperands( 11395 PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R, 11396 TargetTransformInfo *TTI, ScalarEvolution &SE, const DataLayout &DL, 11397 const TargetLibraryInfo &TLI, 11398 const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) { 11399 if (!ShouldVectorizeHor) 11400 return false; 11401 11402 if (!Root) 11403 return false; 11404 11405 if (Root->getParent() != BB || isa<PHINode>(Root)) 11406 return false; 11407 // Start the analysis from the Root instruction. If a horizontal reduction is 11408 // found, try to vectorize it. If it is not a horizontal reduction or 11409 // vectorization is not possible or not effective, and currently analyzed 11410 // instruction is a binary operation, try to vectorize the operands, using 11411 // pre-order DFS traversal order. If the operands were not vectorized, repeat 11412 // the same procedure considering each operand as a possible root of the 11413 // horizontal reduction. 11414 // Interrupt the process if the Root instruction itself was vectorized or all 11415 // sub-trees no deeper than RecursionMaxDepth were analyzed/vectorized. 11416 // Skip the analysis of CmpInsts. The compiler implements a post-analysis of the 11417 // CmpInsts so we can skip extra attempts in 11418 // tryToVectorizeHorReductionOrInstOperands and save compile time.
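// Note: although the worklist below is named Stack, it is a FIFO queue, so
// candidate roots are visited in breadth-first order.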
11419 std::queue<std::pair<Instruction *, unsigned>> Stack; 11420 Stack.emplace(Root, 0); 11421 SmallPtrSet<Value *, 8> VisitedInstrs; 11422 SmallVector<WeakTrackingVH> PostponedInsts; 11423 bool Res = false; 11424 auto &&TryToReduce = [TTI, &SE, &DL, &P, &R, &TLI](Instruction *Inst, 11425 Value *&B0, 11426 Value *&B1) -> Value * { 11427 if (R.isAnalyzedReductionRoot(Inst)) 11428 return nullptr; 11429 bool IsBinop = matchRdxBop(Inst, B0, B1); 11430 bool IsSelect = match(Inst, m_Select(m_Value(), m_Value(), m_Value())); 11431 if (IsBinop || IsSelect) { 11432 HorizontalReduction HorRdx; 11433 if (HorRdx.matchAssociativeReduction(P, Inst, SE, DL, TLI)) 11434 return HorRdx.tryToReduce(R, TTI); 11435 } 11436 return nullptr; 11437 }; 11438 while (!Stack.empty()) { 11439 Instruction *Inst; 11440 unsigned Level; 11441 std::tie(Inst, Level) = Stack.front(); 11442 Stack.pop(); 11443 // Do not try to analyze instruction that has already been vectorized. 11444 // This may happen when we vectorize instruction operands on a previous 11445 // iteration while stack was populated before that happened. 11446 if (R.isDeleted(Inst)) 11447 continue; 11448 Value *B0 = nullptr, *B1 = nullptr; 11449 if (Value *V = TryToReduce(Inst, B0, B1)) { 11450 Res = true; 11451 // Set P to nullptr to avoid re-analysis of phi node in 11452 // matchAssociativeReduction function unless this is the root node. 11453 P = nullptr; 11454 if (auto *I = dyn_cast<Instruction>(V)) { 11455 // Try to find another reduction. 11456 Stack.emplace(I, Level); 11457 continue; 11458 } 11459 } else { 11460 bool IsBinop = B0 && B1; 11461 if (P && IsBinop) { 11462 Inst = dyn_cast<Instruction>(B0); 11463 if (Inst == P) 11464 Inst = dyn_cast<Instruction>(B1); 11465 if (!Inst) { 11466 // Set P to nullptr to avoid re-analysis of phi node in 11467 // matchAssociativeReduction function unless this is the root node. 11468 P = nullptr; 11469 continue; 11470 } 11471 } 11472 // Set P to nullptr to avoid re-analysis of phi node in 11473 // matchAssociativeReduction function unless this is the root node. 11474 P = nullptr; 11475 // Do not try to vectorize CmpInst operands, this is done separately. 11476 // Final attempt for binop args vectorization should happen after the loop 11477 // to try to find reductions. 11478 if (!isa<CmpInst, InsertElementInst, InsertValueInst>(Inst)) 11479 PostponedInsts.push_back(Inst); 11480 } 11481 11482 // Try to vectorize operands. 11483 // Continue analysis for the instruction from the same basic block only to 11484 // save compile time. 11485 if (++Level < RecursionMaxDepth) 11486 for (auto *Op : Inst->operand_values()) 11487 if (VisitedInstrs.insert(Op).second) 11488 if (auto *I = dyn_cast<Instruction>(Op)) 11489 // Do not try to vectorize CmpInst operands, this is done 11490 // separately. 11491 if (!isa<PHINode, CmpInst, InsertElementInst, InsertValueInst>(I) && 11492 !R.isDeleted(I) && I->getParent() == BB) 11493 Stack.emplace(I, Level); 11494 } 11495 // Try to vectorized binops where reductions were not found. 
11496 for (Value *V : PostponedInsts) 11497 if (auto *Inst = dyn_cast<Instruction>(V)) 11498 if (!R.isDeleted(Inst)) 11499 Res |= Vectorize(Inst, R); 11500 return Res; 11501 } 11502 11503 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V, 11504 BasicBlock *BB, BoUpSLP &R, 11505 TargetTransformInfo *TTI) { 11506 auto *I = dyn_cast_or_null<Instruction>(V); 11507 if (!I) 11508 return false; 11509 11510 if (!isa<BinaryOperator>(I)) 11511 P = nullptr; 11512 // Try to match and vectorize a horizontal reduction. 11513 auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool { 11514 return tryToVectorize(I, R); 11515 }; 11516 return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI, *SE, *DL, 11517 *TLI, ExtraVectorization); 11518 } 11519 11520 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI, 11521 BasicBlock *BB, BoUpSLP &R) { 11522 const DataLayout &DL = BB->getModule()->getDataLayout(); 11523 if (!R.canMapToVector(IVI->getType(), DL)) 11524 return false; 11525 11526 SmallVector<Value *, 16> BuildVectorOpds; 11527 SmallVector<Value *, 16> BuildVectorInsts; 11528 if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts)) 11529 return false; 11530 11531 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n"); 11532 // Aggregate value is unlikely to be processed in vector register. 11533 return tryToVectorizeList(BuildVectorOpds, R); 11534 } 11535 11536 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI, 11537 BasicBlock *BB, BoUpSLP &R) { 11538 SmallVector<Value *, 16> BuildVectorInsts; 11539 SmallVector<Value *, 16> BuildVectorOpds; 11540 SmallVector<int> Mask; 11541 if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) || 11542 (llvm::all_of( 11543 BuildVectorOpds, 11544 [](Value *V) { return isa<ExtractElementInst, UndefValue>(V); }) && 11545 isFixedVectorShuffle(BuildVectorOpds, Mask))) 11546 return false; 11547 11548 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n"); 11549 return tryToVectorizeList(BuildVectorInsts, R); 11550 } 11551 11552 template <typename T> 11553 static bool 11554 tryToVectorizeSequence(SmallVectorImpl<T *> &Incoming, 11555 function_ref<unsigned(T *)> Limit, 11556 function_ref<bool(T *, T *)> Comparator, 11557 function_ref<bool(T *, T *)> AreCompatible, 11558 function_ref<bool(ArrayRef<T *>, bool)> TryToVectorizeHelper, 11559 bool LimitForRegisterSize) { 11560 bool Changed = false; 11561 // Sort by type, parent, operands. 11562 stable_sort(Incoming, Comparator); 11563 11564 // Try to vectorize elements base on their type. 11565 SmallVector<T *> Candidates; 11566 for (auto *IncIt = Incoming.begin(), *E = Incoming.end(); IncIt != E;) { 11567 // Look for the next elements with the same type, parent and operand 11568 // kinds. 11569 auto *SameTypeIt = IncIt; 11570 while (SameTypeIt != E && AreCompatible(*SameTypeIt, *IncIt)) 11571 ++SameTypeIt; 11572 11573 // Try to vectorize them. 11574 unsigned NumElts = (SameTypeIt - IncIt); 11575 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at nodes (" 11576 << NumElts << ")\n"); 11577 // The vectorization is a 3-state attempt: 11578 // 1. Try to vectorize instructions with the same/alternate opcodes with the 11579 // size of maximal register at first. 11580 // 2. Try to vectorize remaining instructions with the same type, if 11581 // possible. 
This may result in the better vectorization results rather than 11582 // if we try just to vectorize instructions with the same/alternate opcodes. 11583 // 3. Final attempt to try to vectorize all instructions with the 11584 // same/alternate ops only, this may result in some extra final 11585 // vectorization. 11586 if (NumElts > 1 && 11587 TryToVectorizeHelper(makeArrayRef(IncIt, NumElts), LimitForRegisterSize)) { 11588 // Success start over because instructions might have been changed. 11589 Changed = true; 11590 } else if (NumElts < Limit(*IncIt) && 11591 (Candidates.empty() || 11592 Candidates.front()->getType() == (*IncIt)->getType())) { 11593 Candidates.append(IncIt, std::next(IncIt, NumElts)); 11594 } 11595 // Final attempt to vectorize instructions with the same types. 11596 if (Candidates.size() > 1 && 11597 (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType())) { 11598 if (TryToVectorizeHelper(Candidates, /*LimitForRegisterSize=*/false)) { 11599 // Success start over because instructions might have been changed. 11600 Changed = true; 11601 } else if (LimitForRegisterSize) { 11602 // Try to vectorize using small vectors. 11603 for (auto *It = Candidates.begin(), *End = Candidates.end(); 11604 It != End;) { 11605 auto *SameTypeIt = It; 11606 while (SameTypeIt != End && AreCompatible(*SameTypeIt, *It)) 11607 ++SameTypeIt; 11608 unsigned NumElts = (SameTypeIt - It); 11609 if (NumElts > 1 && TryToVectorizeHelper(makeArrayRef(It, NumElts), 11610 /*LimitForRegisterSize=*/false)) 11611 Changed = true; 11612 It = SameTypeIt; 11613 } 11614 } 11615 Candidates.clear(); 11616 } 11617 11618 // Start over at the next instruction of a different type (or the end). 11619 IncIt = SameTypeIt; 11620 } 11621 return Changed; 11622 } 11623 11624 /// Compare two cmp instructions. If IsCompatibility is true, function returns 11625 /// true if 2 cmps have same/swapped predicates and mos compatible corresponding 11626 /// operands. If IsCompatibility is false, function implements strict weak 11627 /// ordering relation between two cmp instructions, returning true if the first 11628 /// instruction is "less" than the second, i.e. its predicate is less than the 11629 /// predicate of the second or the operands IDs are less than the operands IDs 11630 /// of the second cmp instruction. 11631 template <bool IsCompatibility> 11632 static bool compareCmp(Value *V, Value *V2, 11633 function_ref<bool(Instruction *)> IsDeleted) { 11634 auto *CI1 = cast<CmpInst>(V); 11635 auto *CI2 = cast<CmpInst>(V2); 11636 if (IsDeleted(CI2) || !isValidElementType(CI2->getType())) 11637 return false; 11638 if (CI1->getOperand(0)->getType()->getTypeID() < 11639 CI2->getOperand(0)->getType()->getTypeID()) 11640 return !IsCompatibility; 11641 if (CI1->getOperand(0)->getType()->getTypeID() > 11642 CI2->getOperand(0)->getType()->getTypeID()) 11643 return false; 11644 CmpInst::Predicate Pred1 = CI1->getPredicate(); 11645 CmpInst::Predicate Pred2 = CI2->getPredicate(); 11646 CmpInst::Predicate SwapPred1 = CmpInst::getSwappedPredicate(Pred1); 11647 CmpInst::Predicate SwapPred2 = CmpInst::getSwappedPredicate(Pred2); 11648 CmpInst::Predicate BasePred1 = std::min(Pred1, SwapPred1); 11649 CmpInst::Predicate BasePred2 = std::min(Pred2, SwapPred2); 11650 if (BasePred1 < BasePred2) 11651 return !IsCompatibility; 11652 if (BasePred1 > BasePred2) 11653 return false; 11654 // Compare operands. 
11655 bool LEPreds = Pred1 <= Pred2; 11656 bool GEPreds = Pred1 >= Pred2; 11657 for (int I = 0, E = CI1->getNumOperands(); I < E; ++I) { 11658 auto *Op1 = CI1->getOperand(LEPreds ? I : E - I - 1); 11659 auto *Op2 = CI2->getOperand(GEPreds ? I : E - I - 1); 11660 if (Op1->getValueID() < Op2->getValueID()) 11661 return !IsCompatibility; 11662 if (Op1->getValueID() > Op2->getValueID()) 11663 return false; 11664 if (auto *I1 = dyn_cast<Instruction>(Op1)) 11665 if (auto *I2 = dyn_cast<Instruction>(Op2)) { 11666 if (I1->getParent() != I2->getParent()) 11667 return false; 11668 InstructionsState S = getSameOpcode({I1, I2}); 11669 if (S.getOpcode()) 11670 continue; 11671 return false; 11672 } 11673 } 11674 return IsCompatibility; 11675 } 11676 11677 bool SLPVectorizerPass::vectorizeSimpleInstructions( 11678 SmallVectorImpl<Instruction *> &Instructions, BasicBlock *BB, BoUpSLP &R, 11679 bool AtTerminator) { 11680 bool OpsChanged = false; 11681 SmallVector<Instruction *, 4> PostponedCmps; 11682 for (auto *I : reverse(Instructions)) { 11683 if (R.isDeleted(I)) 11684 continue; 11685 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) { 11686 OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R); 11687 } else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) { 11688 OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R); 11689 } else if (isa<CmpInst>(I)) { 11690 PostponedCmps.push_back(I); 11691 continue; 11692 } 11693 // Try to find reductions in buildvector sequnces. 11694 OpsChanged |= vectorizeRootInstruction(nullptr, I, BB, R, TTI); 11695 } 11696 if (AtTerminator) { 11697 // Try to find reductions first. 11698 for (Instruction *I : PostponedCmps) { 11699 if (R.isDeleted(I)) 11700 continue; 11701 for (Value *Op : I->operands()) 11702 OpsChanged |= vectorizeRootInstruction(nullptr, Op, BB, R, TTI); 11703 } 11704 // Try to vectorize operands as vector bundles. 11705 for (Instruction *I : PostponedCmps) { 11706 if (R.isDeleted(I)) 11707 continue; 11708 OpsChanged |= tryToVectorize(I, R); 11709 } 11710 // Try to vectorize list of compares. 11711 // Sort by type, compare predicate, etc. 11712 auto &&CompareSorter = [&R](Value *V, Value *V2) { 11713 return compareCmp<false>(V, V2, 11714 [&R](Instruction *I) { return R.isDeleted(I); }); 11715 }; 11716 11717 auto &&AreCompatibleCompares = [&R](Value *V1, Value *V2) { 11718 if (V1 == V2) 11719 return true; 11720 return compareCmp<true>(V1, V2, 11721 [&R](Instruction *I) { return R.isDeleted(I); }); 11722 }; 11723 auto Limit = [&R](Value *V) { 11724 unsigned EltSize = R.getVectorElementSize(V); 11725 return std::max(2U, R.getMaxVecRegSize() / EltSize); 11726 }; 11727 11728 SmallVector<Value *> Vals(PostponedCmps.begin(), PostponedCmps.end()); 11729 OpsChanged |= tryToVectorizeSequence<Value>( 11730 Vals, Limit, CompareSorter, AreCompatibleCompares, 11731 [this, &R](ArrayRef<Value *> Candidates, bool LimitForRegisterSize) { 11732 // Exclude possible reductions from other blocks. 
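// (A compare whose user is a select in a different block is likely the
// start of a min/max reduction in that block, so it is left for that block
// rather than vectorized as part of this list.)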
11733 bool ArePossiblyReducedInOtherBlock = 11734 any_of(Candidates, [](Value *V) { 11735 return any_of(V->users(), [V](User *U) { 11736 return isa<SelectInst>(U) && 11737 cast<SelectInst>(U)->getParent() != 11738 cast<Instruction>(V)->getParent(); 11739 }); 11740 }); 11741 if (ArePossiblyReducedInOtherBlock) 11742 return false; 11743 return tryToVectorizeList(Candidates, R, LimitForRegisterSize); 11744 }, 11745 /*LimitForRegisterSize=*/true); 11746 Instructions.clear(); 11747 } else { 11748 // Insert in reverse order since the PostponedCmps vector was filled in 11749 // reverse order. 11750 Instructions.assign(PostponedCmps.rbegin(), PostponedCmps.rend()); 11751 } 11752 return OpsChanged; 11753 } 11754 11755 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { 11756 bool Changed = false; 11757 SmallVector<Value *, 4> Incoming; 11758 SmallPtrSet<Value *, 16> VisitedInstrs; 11759 // Maps phi nodes to the non-phi nodes found in the use tree for each phi 11760 // node. This helps to better identify the chains that can be 11761 // vectorized. 11762 DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes; 11763 auto PHICompare = [this, &PHIToOpcodes](Value *V1, Value *V2) { 11764 assert(isValidElementType(V1->getType()) && 11765 isValidElementType(V2->getType()) && 11766 "Expected vectorizable types only."); 11767 // It is fine to compare type IDs here, since we expect only vectorizable 11768 // types, like ints, floats and pointers; we don't care about other types. 11769 if (V1->getType()->getTypeID() < V2->getType()->getTypeID()) 11770 return true; 11771 if (V1->getType()->getTypeID() > V2->getType()->getTypeID()) 11772 return false; 11773 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; 11774 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; 11775 if (Opcodes1.size() < Opcodes2.size()) 11776 return true; 11777 if (Opcodes1.size() > Opcodes2.size()) 11778 return false; 11779 Optional<bool> ConstOrder; 11780 for (int I = 0, E = Opcodes1.size(); I < E; ++I) { 11781 // Undefs are compatible with any other value.
11782 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) { 11783 if (!ConstOrder) 11784 ConstOrder = 11785 !isa<UndefValue>(Opcodes1[I]) && isa<UndefValue>(Opcodes2[I]); 11786 continue; 11787 } 11788 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I])) 11789 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) { 11790 DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent()); 11791 DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent()); 11792 if (!NodeI1) 11793 return NodeI2 != nullptr; 11794 if (!NodeI2) 11795 return false; 11796 assert((NodeI1 == NodeI2) == 11797 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) && 11798 "Different nodes should have different DFS numbers"); 11799 if (NodeI1 != NodeI2) 11800 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn(); 11801 InstructionsState S = getSameOpcode({I1, I2}); 11802 if (S.getOpcode()) 11803 continue; 11804 return I1->getOpcode() < I2->getOpcode(); 11805 } 11806 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) { 11807 if (!ConstOrder) 11808 ConstOrder = Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID(); 11809 continue; 11810 } 11811 if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID()) 11812 return true; 11813 if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID()) 11814 return false; 11815 } 11816 return ConstOrder && *ConstOrder; 11817 }; 11818 auto AreCompatiblePHIs = [&PHIToOpcodes](Value *V1, Value *V2) { 11819 if (V1 == V2) 11820 return true; 11821 if (V1->getType() != V2->getType()) 11822 return false; 11823 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; 11824 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; 11825 if (Opcodes1.size() != Opcodes2.size()) 11826 return false; 11827 for (int I = 0, E = Opcodes1.size(); I < E; ++I) { 11828 // Undefs are compatible with any other value. 11829 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) 11830 continue; 11831 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I])) 11832 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) { 11833 if (I1->getParent() != I2->getParent()) 11834 return false; 11835 InstructionsState S = getSameOpcode({I1, I2}); 11836 if (S.getOpcode()) 11837 continue; 11838 return false; 11839 } 11840 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) 11841 continue; 11842 if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID()) 11843 return false; 11844 } 11845 return true; 11846 }; 11847 auto Limit = [&R](Value *V) { 11848 unsigned EltSize = R.getVectorElementSize(V); 11849 return std::max(2U, R.getMaxVecRegSize() / EltSize); 11850 }; 11851 11852 bool HaveVectorizedPhiNodes = false; 11853 do { 11854 // Collect the incoming values from the PHIs. 11855 Incoming.clear(); 11856 for (Instruction &I : *BB) { 11857 PHINode *P = dyn_cast<PHINode>(&I); 11858 if (!P) 11859 break; 11860 11861 // No need to analyze deleted, vectorized and non-vectorizable 11862 // instructions. 11863 if (!VisitedInstrs.count(P) && !R.isDeleted(P) && 11864 isValidElementType(P->getType())) 11865 Incoming.push_back(P); 11866 } 11867 11868 // Find the corresponding non-phi nodes for better matching when trying to 11869 // build the tree. 
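// E.g. for %p = phi [ %a, %bb1 ], [ %q, %bb2 ] where %q is itself a phi,
// the walk below looks through %q and records only non-phi incoming values,
// so the collected Opcodes describe the operations actually feeding the phi.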
11870 for (Value *V : Incoming) { 11871 SmallVectorImpl<Value *> &Opcodes = 11872 PHIToOpcodes.try_emplace(V).first->getSecond(); 11873 if (!Opcodes.empty()) 11874 continue; 11875 SmallVector<Value *, 4> Nodes(1, V); 11876 SmallPtrSet<Value *, 4> Visited; 11877 while (!Nodes.empty()) { 11878 auto *PHI = cast<PHINode>(Nodes.pop_back_val()); 11879 if (!Visited.insert(PHI).second) 11880 continue; 11881 for (Value *V : PHI->incoming_values()) { 11882 if (auto *PHI1 = dyn_cast<PHINode>((V))) { 11883 Nodes.push_back(PHI1); 11884 continue; 11885 } 11886 Opcodes.emplace_back(V); 11887 } 11888 } 11889 } 11890 11891 HaveVectorizedPhiNodes = tryToVectorizeSequence<Value>( 11892 Incoming, Limit, PHICompare, AreCompatiblePHIs, 11893 [this, &R](ArrayRef<Value *> Candidates, bool LimitForRegisterSize) { 11894 return tryToVectorizeList(Candidates, R, LimitForRegisterSize); 11895 }, 11896 /*LimitForRegisterSize=*/true); 11897 Changed |= HaveVectorizedPhiNodes; 11898 VisitedInstrs.insert(Incoming.begin(), Incoming.end()); 11899 } while (HaveVectorizedPhiNodes); 11900 11901 VisitedInstrs.clear(); 11902 11903 SmallVector<Instruction *, 8> PostProcessInstructions; 11904 SmallDenseSet<Instruction *, 4> KeyNodes; 11905 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 11906 // Skip instructions with scalable type. The num of elements is unknown at 11907 // compile-time for scalable type. 11908 if (isa<ScalableVectorType>(it->getType())) 11909 continue; 11910 11911 // Skip instructions marked for the deletion. 11912 if (R.isDeleted(&*it)) 11913 continue; 11914 // We may go through BB multiple times so skip the one we have checked. 11915 if (!VisitedInstrs.insert(&*it).second) { 11916 if (it->use_empty() && KeyNodes.contains(&*it) && 11917 vectorizeSimpleInstructions(PostProcessInstructions, BB, R, 11918 it->isTerminator())) { 11919 // We would like to start over since some instructions are deleted 11920 // and the iterator may become invalid value. 11921 Changed = true; 11922 it = BB->begin(); 11923 e = BB->end(); 11924 } 11925 continue; 11926 } 11927 11928 if (isa<DbgInfoIntrinsic>(it)) 11929 continue; 11930 11931 // Try to vectorize reductions that use PHINodes. 11932 if (PHINode *P = dyn_cast<PHINode>(it)) { 11933 // Check that the PHI is a reduction PHI. 11934 if (P->getNumIncomingValues() == 2) { 11935 // Try to match and vectorize a horizontal reduction. 11936 if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R, 11937 TTI)) { 11938 Changed = true; 11939 it = BB->begin(); 11940 e = BB->end(); 11941 continue; 11942 } 11943 } 11944 // Try to vectorize the incoming values of the PHI, to catch reductions 11945 // that feed into PHIs. 11946 for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) { 11947 // Skip if the incoming block is the current BB for now. Also, bypass 11948 // unreachable IR for efficiency and to avoid crashing. 11949 // TODO: Collect the skipped incoming values and try to vectorize them 11950 // after processing BB. 11951 if (BB == P->getIncomingBlock(I) || 11952 !DT->isReachableFromEntry(P->getIncomingBlock(I))) 11953 continue; 11954 11955 Changed |= vectorizeRootInstruction(nullptr, P->getIncomingValue(I), 11956 P->getIncomingBlock(I), R, TTI); 11957 } 11958 continue; 11959 } 11960 11961 // Ran into an instruction without users, like terminator, or function call 11962 // with ignored return value, store. Ignore unused instructions (basing on 11963 // instruction type, except for CallInst and InvokeInst). 
11964 if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) || 11965 isa<InvokeInst>(it))) { 11966 KeyNodes.insert(&*it); 11967 bool OpsChanged = false; 11968 if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) { 11969 for (auto *V : it->operand_values()) { 11970 // Try to match and vectorize a horizontal reduction. 11971 OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI); 11972 } 11973 } 11974 // Start vectorization of post-process list of instructions from the 11975 // top-tree instructions to try to vectorize as many instructions as 11976 // possible. 11977 OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R, 11978 it->isTerminator()); 11979 if (OpsChanged) { 11980 // We would like to start over since some instructions are deleted 11981 // and the iterator may become invalid value. 11982 Changed = true; 11983 it = BB->begin(); 11984 e = BB->end(); 11985 continue; 11986 } 11987 } 11988 11989 if (isa<InsertElementInst>(it) || isa<CmpInst>(it) || 11990 isa<InsertValueInst>(it)) 11991 PostProcessInstructions.push_back(&*it); 11992 } 11993 11994 return Changed; 11995 } 11996 11997 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) { 11998 auto Changed = false; 11999 for (auto &Entry : GEPs) { 12000 // If the getelementptr list has fewer than two elements, there's nothing 12001 // to do. 12002 if (Entry.second.size() < 2) 12003 continue; 12004 12005 LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length " 12006 << Entry.second.size() << ".\n"); 12007 12008 // Process the GEP list in chunks suitable for the target's supported 12009 // vector size. If a vector register can't hold 1 element, we are done. We 12010 // are trying to vectorize the index computations, so the maximum number of 12011 // elements is based on the size of the index expression, rather than the 12012 // size of the GEP itself (the target's pointer size). 12013 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 12014 unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin()); 12015 if (MaxVecRegSize < EltSize) 12016 continue; 12017 12018 unsigned MaxElts = MaxVecRegSize / EltSize; 12019 for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) { 12020 auto Len = std::min<unsigned>(BE - BI, MaxElts); 12021 ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len); 12022 12023 // Initialize a set a candidate getelementptrs. Note that we use a 12024 // SetVector here to preserve program order. If the index computations 12025 // are vectorizable and begin with loads, we want to minimize the chance 12026 // of having to reorder them later. 12027 SetVector<Value *> Candidates(GEPList.begin(), GEPList.end()); 12028 12029 // Some of the candidates may have already been vectorized after we 12030 // initially collected them. If so, they are marked as deleted, so remove 12031 // them from the set of candidates. 12032 Candidates.remove_if( 12033 [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); }); 12034 12035 // Remove from the set of candidates all pairs of getelementptrs with 12036 // constant differences. Such getelementptrs are likely not good 12037 // candidates for vectorization in a bottom-up phase since one can be 12038 // computed from the other. We also ensure all candidate getelementptr 12039 // indices are unique. 
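// E.g. for getelementptrs %g1 = gep %p, %i and %g2 = gep %p, %i.next with
// %i.next == %i + 1 (names are illustrative only), SCEV proves the pointer
// difference is a constant, so both are removed from the candidate set.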
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
        auto *GEPI = GEPList[I];
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = GEPList[J];
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPI);
            Candidates.remove(GEPJ);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPJ);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;

      // Add the single, non-constant index of each candidate to the bundle. We
      // ensured the indices met these constraints when we originally collected
      // the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try to vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Sort by type, base pointer and value operand. Value operands must be
  // compatible (have the same opcode and the same parent), otherwise it is
  // definitely not profitable to try to vectorize them.
  auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) {
    if (V->getPointerOperandType()->getTypeID() <
        V2->getPointerOperandType()->getTypeID())
      return true;
    if (V->getPointerOperandType()->getTypeID() >
        V2->getPointerOperandType()->getTypeID())
      return false;
    // UndefValues are compatible with all other values.
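    // Returning false for either direction treats an undef-valued store as
    // unordered relative to its neighbor, matching AreCompatibleStores below,
    // which accepts undefs paired with any value.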
    if (isa<UndefValue>(V->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return false;
    if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        DomTreeNodeBase<llvm::BasicBlock> *NodeI1 =
            DT->getNode(I1->getParent());
        DomTreeNodeBase<llvm::BasicBlock> *NodeI2 =
            DT->getNode(I2->getParent());
        assert(NodeI1 && "Should only process reachable instructions");
        assert(NodeI2 && "Should only process reachable instructions");
        assert((NodeI1 == NodeI2) ==
                   (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
               "Different nodes should have different DFS numbers");
        if (NodeI1 != NodeI2)
          return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
        InstructionsState S = getSameOpcode({I1, I2});
        if (S.getOpcode())
          return false;
        return I1->getOpcode() < I2->getOpcode();
      }
    if (isa<Constant>(V->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return false;
    return V->getValueOperand()->getValueID() <
           V2->getValueOperand()->getValueID();
  };

  auto &&AreCompatibleStores = [](StoreInst *V1, StoreInst *V2) {
    if (V1 == V2)
      return true;
    if (V1->getPointerOperandType() != V2->getPointerOperandType())
      return false;
    // Undefs are compatible with any other value.
    if (isa<UndefValue>(V1->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return true;
    if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        if (I1->getParent() != I2->getParent())
          return false;
        InstructionsState S = getSameOpcode({I1, I2});
        return S.getOpcode() > 0;
      }
    if (isa<Constant>(V1->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return true;
    return V1->getValueOperand()->getValueID() ==
           V2->getValueOperand()->getValueID();
  };
  auto Limit = [&R, this](StoreInst *SI) {
    unsigned EltSize = DL->getTypeSizeInBits(SI->getValueOperand()->getType());
    return R.getMinVF(EltSize);
  };

  // Attempt to sort and vectorize each of the store-groups.
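  //
  // Hypothetical IR shape of one such group that may collapse into a single
  // vector store (names are illustrative; the final decision is cost-driven):
  //   store i32 %a, ptr %p
  //   %p1 = getelementptr inbounds i32, ptr %p, i64 1
  //   store i32 %b, ptr %p1
  //   %p2 = getelementptr inbounds i32, ptr %p, i64 2
  //   store i32 %c, ptr %p2
  //   %p3 = getelementptr inbounds i32, ptr %p, i64 3
  //   store i32 %d, ptr %p3
  // =>
  //   store <4 x i32> %vec, ptr %p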
  for (auto &Pair : Stores) {
    if (Pair.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                      << Pair.second.size() << ".\n");

    if (!isValidElementType(Pair.second.front()->getValueOperand()->getType()))
      continue;

    Changed |= tryToVectorizeSequence<StoreInst>(
        Pair.second, Limit, StoreSorter, AreCompatibleStores,
        [this, &R](ArrayRef<StoreInst *> Candidates, bool) {
          return vectorizeStores(Candidates, R);
        },
        /*LimitForRegisterSize=*/false);
  }
  return Changed;
}

char SLPVectorizer::ID = 0;

static const char lv_name[] = "SLP Vectorizer";

INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); }