//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
// "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <memory>

using namespace llvm;
using namespace llvm::PatternMatch;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number"));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
    cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the LLVM benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// \brief Predicate for the element types that the SLP vectorizer supports.
///
/// The most important things to filter here are types which are invalid in
/// LLVM vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_fp128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return false;

    if (BB != I->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (Value *i : VL)
    if (!isa<Constant>(i))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
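/// For example, {%a, %a, %a, %a} is a splat while {%a, %b, %a, %b} is not
/// (the values are purely illustrative).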
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> undef, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y,
///                    <4 x i32> <i32 0, i32 3, i32 5, i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> undef, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> undef, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner then transforms this into a shuffle and vector mul.
static Optional<TargetTransformInfo::ShuffleKind>
isShuffle(ArrayRef<Value *> VL) {
  auto *EI0 = cast<ExtractElementInst>(VL[0]);
  unsigned Size = EI0->getVectorOperandType()->getVectorNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, FirstAlternate, SecondAlternate, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    auto *EI = cast<ExtractElementInst>(VL[I]);
    auto *Vec = EI->getVectorOperand();
    // All vector operands must have the same number of vector elements.
    if (Vec->getType()->getVectorNumElements() != Size)
      return None;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size))
      continue;
    unsigned IntIdx = Idx->getValue().getZExtValue();
    // We can extractelement from an undef vector.
    if (isa<UndefValue>(Vec))
      continue;
    // For correct shuffling we have to have at most two different vector
    // operands in all extractelement instructions.
    if (Vec1 && Vec2 && Vec != Vec1 && Vec != Vec2)
      return None;
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    // Check the shuffle mode for the current operation.
    if (!Vec1)
      Vec1 = Vec;
    else if (Vec != Vec1)
      Vec2 = Vec;
    // Example: shufflevector A, B, <0,5,2,7>
    // I is even and IntIdx for A == I - FirstAlternate shuffle.
    // I is odd and IntIdx for B == I - FirstAlternate shuffle.
    // Example: shufflevector A, B, <4,1,6,3>
    // I is odd and IntIdx for A == I - SecondAlternate shuffle.
    // I is even and IntIdx for B == I - SecondAlternate shuffle.
    const bool IIsOdd = I & 1;
    const bool IIsEven = !IIsOdd;
    const bool CurrVecIsA = Vec == Vec1;
    const bool CurrVecIsB = !CurrVecIsA;
    ShuffleMode CurrentShuffleMode =
        ((IIsEven && CurrVecIsA) || (IIsOdd && CurrVecIsB)) ? FirstAlternate
                                                            : SecondAlternate;
    // Common mode is not set or is the same as the shuffle mode of the
    // current operation - alternate.
    if (CommonShuffleMode == Unknown)
      CommonShuffleMode = CurrentShuffleMode;
    // Common shuffle mode is not the same as the shuffle mode of the current
    // operation - permutation.
    if (CommonShuffleMode != CurrentShuffleMode)
      CommonShuffleMode = Permute;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if ((CommonShuffleMode == FirstAlternate ||
       CommonShuffleMode == SecondAlternate) &&
      Vec2)
    return TargetTransformInfo::SK_Alternate;
  // If Vec2 was never used, we have a permutation of a single vector,
  // otherwise we have a permutation of two vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}

/// \returns the opcode that can be paired with \p Op to create an alternate
/// sequence which can later be merged as a ShuffleVector instruction.
static unsigned getAltOpcode(unsigned Op) {
  switch (Op) {
  case Instruction::FAdd:
    return Instruction::FSub;
  case Instruction::FSub:
    return Instruction::FAdd;
  case Instruction::Add:
    return Instruction::Sub;
  case Instruction::Sub:
    return Instruction::Add;
  default:
    return 0;
  }
}

/// \returns true if \p Value is odd, false otherwise.
static bool isOdd(unsigned Value) {
  return Value & 1;
}

static bool sameOpcodeOrAlt(unsigned Opcode, unsigned AltOpcode,
                            unsigned CheckedOpcode) {
  return Opcode == CheckedOpcode || AltOpcode == CheckedOpcode;
}

/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(Value *OpValue, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (!I)
    return OpValue;
  auto *OpInst = cast<Instruction>(OpValue);
  unsigned OpInstOpcode = OpInst->getOpcode();
  unsigned IOpcode = I->getOpcode();
  if (sameOpcodeOrAlt(OpInstOpcode, getAltOpcode(OpInstOpcode), IOpcode))
    return Op;
  return OpValue;
}

/// \returns true if opcode \p Op can be part of an alternate sequence which
/// can later be merged as a ShuffleVector instruction.
static bool canCombineAsAltInst(unsigned Op) {
  return Op == Instruction::FAdd || Op == Instruction::FSub ||
         Op == Instruction::Sub || Op == Instruction::Add;
}

/// \returns the opcode Instruction::ShuffleVector if the instructions in
/// \p VL form an alternating fadd,fsub / fsub,fadd / add,sub / sub,add
/// sequence (i.e. opcodes such as fadd,fsub,fadd,fsub,...), or 0 otherwise.
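/// A hypothetical bundle in IR form (values purely illustrative):
///   %a0 = fadd float %x0, %y0
///   %a1 = fsub float %x1, %y1
///   %a2 = fadd float %x2, %y2
///   %a3 = fsub float %x3, %y3
/// Every even lane is an fadd and every odd lane is an fsub, so the bundle
/// can later be emitted as one vector fadd, one vector fsub and a
/// shufflevector that blends the two.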
static unsigned isAltInst(ArrayRef<Value *> VL) {
  // Callers guarantee that VL[0] is an instruction.
  Instruction *I0 = cast<Instruction>(VL[0]);
  unsigned Opcode = I0->getOpcode();
  unsigned AltOpcode = getAltOpcode(Opcode);
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || I->getOpcode() != (isOdd(i) ? AltOpcode : Opcode))
      return 0;
  }
  return Instruction::ShuffleVector;
}

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode()) {
      if (canCombineAsAltInst(Opcode) && i == 1)
        return isAltInst(VL);
      return 0;
    }
  }
  return Opcode;
}

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if the Extract{Value,Element} instruction extracts element
/// \p Idx.
static bool matchExtractIndex(Instruction *E, unsigned Idx, unsigned Opcode) {
  assert(Opcode == Instruction::ExtractElement ||
         Opcode == Instruction::ExtractValue);
  if (Opcode == Instruction::ExtractElement) {
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    return CI && CI->getZExtValue() == Idx;
  } else {
    ExtractValueInst *EI = cast<ExtractValueInst>(E);
    return EI->getNumIndices() == 1 && *EI->idx_begin() == Idx;
  }
}

/// \returns True if an in-tree use also needs to be extracted. This refers to
/// a possible scalar operand in a vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    if (hasVectorInstrinsicScalarOpd(ID, 1)) {
      return (CI->getArgOperand(1) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

namespace llvm {
namespace slpvectorizer {
/// Bottom Up SLP Vectorizer.
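///
/// A rough sketch of how the rest of this file drives the class (the method
/// names are the public interface declared below; the exact driver logic
/// appears later in this file):
///   BoUpSLP R(F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE);
///   R.buildTree(Seeds);
///   if (!R.isTreeTinyAndNotFullyVectorizable() &&
///       R.getTreeCost() < -SLPCostThreshold)
///     R.vectorizeTree();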
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;
  typedef MapVector<Value *, SmallVector<Instruction *, 2>>
      ExtraValueToDebugLocsMap;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : NumLoadsWantToKeepOrder(0), NumLoadsWantToChangeOrder(0), F(Func),
        SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC), DB(DB),
        DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    //       data type rather than just register size. For example, x86 AVX
    //       has 256-bit registers, but it does not support integer operations
    //       at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize = TTI->getRegisterBitWidth(true);

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// \brief Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractelement instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  int getSpillCost();

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users
  /// for the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users
  /// for the purpose of scheduling and extraction in the \p UserIgnoreLst,
  /// taking into account (and updating, if required) the list of externally
  /// used values stored in \p ExternallyUsedValues.
  void buildTree(ArrayRef<Value *> Roots,
                 ExtraValueToDebugLocsMap &ExternallyUsedValues,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumLoadsWantToKeepOrder = 0;
    NumLoadsWantToChangeOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
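  /// (A gather sequence here is the insertelement chain produced by Gather()
  /// below when scalars must be collected into a vector; those chains are
  /// what get hoisted and commoned.)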
  void optimizeGatherSequence();

  /// \returns true if it is beneficial to reverse the vector order.
  bool shouldReorder() const {
    return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
  }

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width
  /// of the stored value. Otherwise, the size is the width of the largest
  /// loaded value reaching V. This method is used by the vectorizer to
  /// calculate vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  /// \brief Check if ArrayType or StructType is isomorphic to some VectorType.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable();

  OptimizationRemarkEmitter *getORE() { return ORE; }

private:
  struct TreeEntry;

  /// Checks if all users of \p I are part of the vectorization tree.
  bool areAllUsersVectorized(Instruction *I) const;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, int);

  /// \returns True if the ExtractElement/ExtractValue instructions in \p VL
  /// can be vectorized to use the original vector (or aggregate "bitcast" to
  /// a vector).
  bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue) const;

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This may happen because of cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL, Value *OpValue) const;

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL, Value *OpValue);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even if the tree height is tiny.
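  /// (This is the counterpart of the MinTreeSize option above: small trees
  /// are only worth vectorizing when every entry in them vectorizes.)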
  bool isFullyVectorizableTinyTree();

  /// \brief Reorder commutative operands in an alt shuffle if doing so helps
  /// produce vectorized code.
  void reorderAltShuffleOperands(unsigned Opcode, ArrayRef<Value *> VL,
                                 SmallVectorImpl<Value *> &Left,
                                 SmallVectorImpl<Value *> &Right);

  /// \brief Reorder commutative operands to get a better probability of
  /// generating vectorized code.
  void reorderInputsAccordingToOpcode(unsigned Opcode, ArrayRef<Value *> VL,
                                      SmallVectorImpl<Value *> &Left,
                                      SmallVectorImpl<Value *> &Right);

  struct TreeEntry {
    TreeEntry(std::vector<TreeEntry> &Container)
        : Scalars(), VectorizedValue(nullptr), NeedToGather(0),
          Container(Container) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// Do we need to gather this sequence?
    bool NeedToGather;

    /// Points back to the VectorizableTree.
    ///
    /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has
    /// to be a pointer and needs to be able to initialize the child iterator.
    /// Thus we need a reference back to the container to translate the indices
    /// to entries.
    std::vector<TreeEntry> &Container;

    /// The TreeEntry index containing the user of this entry. We can actually
    /// have multiple users so the data structure is not truly a tree.
    SmallVector<int, 1> UserTreeIndices;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized,
                          int &UserTreeIdx) {
    VectorizableTree.emplace_back(VectorizableTree);
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!getTreeEntry(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      MustGather.insert(VL.begin(), VL.end());
    }

    if (UserTreeIdx >= 0)
      Last->UserTreeIndices.push_back(UserTreeIdx);
    // Update \p UserTreeIdx so that entries built for the operands of this
    // bundle point back here.
    UserTreeIdx = idx;
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  TreeEntry *getTreeEntry(Value *V) {
    auto I = ScalarToTreeEntry.find(V);
    if (I != ScalarToTreeEntry.end())
      return &VectorizableTree[I->second];
    return nullptr;
  }

  const TreeEntry *getTreeEntry(Value *V) const {
    auto I = ScalarToTreeEntry.find(V);
    if (I != ScalarToTreeEntry.end())
      return &VectorizableTree[I->second];
    return nullptr;
  }

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value *, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L)
        : Scalar(S), User(U), Lane(L) {}

    // Which scalar in our function.
    Value *Scalar;

    // The user that uses the scalar.
    llvm::User *User;

    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// Checks if two instructions may access the same memory.
  ///
  /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
  /// is invariant in the calling loop.
  bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
                 Instruction *Inst2) {
    // First check if the result is already in the cache.
    AliasCacheKey key = std::make_pair(Inst1, Inst2);
    Optional<bool> &result = AliasCache[key];
    if (result.hasValue()) {
      return result.getValue();
    }
    MemoryLocation Loc2 = getLocation(Inst2, AA);
    bool aliased = true;
    if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
      // Do the alias check.
      aliased = AA->alias(Loc1, Loc2);
    }
    // Store the result in the cache.
    result = aliased;
    return aliased;
  }

  typedef std::pair<Instruction *, Instruction *> AliasCacheKey;

  /// Cache for alias results.
  /// TODO: consider moving this to the AliasAnalysis itself.
  DenseMap<AliasCacheKey, Optional<bool>> AliasCache;

  /// Removes an instruction from its block and eventually deletes it.
  /// It's like Instruction::eraseFromParent() except that the actual deletion
  /// is delayed until BoUpSLP is destructed.
  /// This is required to ensure that there are no incorrect collisions in the
  /// AliasCache, which can happen if a new instruction is allocated at the
  /// same address as a previously deleted instruction.
  void eraseInstruction(Instruction *I) {
    I->removeFromParent();
    I->dropAllReferences();
    DeletedInstructions.emplace_back(I);
  }

  /// Temporary store for deleted instructions. Instructions will be deleted
  /// eventually when the BoUpSLP is destructed.
  SmallVector<unique_value, 8> DeletedInstructions;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User). External
  /// User can be nullptr, which means that this Internal Scalar will be used
  /// later, after vectorization.
  UserList ExternalUses;

  /// Values used only by @llvm.assume calls.
  SmallPtrSet<const Value *, 32> EphValues;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;

  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;

  /// Contains all scheduling relevant data for an instruction.
  /// A ScheduleData either represents a single instruction or a member of an
  /// instruction bundle (= a group of instructions which is combined into a
  /// vector instruction).
  struct ScheduleData {
    // The initial value for the dependency counters. It means that the
    // dependencies are not calculated yet.
    enum { InvalidDeps = -1 };

    ScheduleData()
        : Inst(nullptr), FirstInBundle(nullptr), NextInBundle(nullptr),
          NextLoadStore(nullptr), SchedulingRegionID(0), SchedulingPriority(0),
          Dependencies(InvalidDeps), UnscheduledDeps(InvalidDeps),
          UnscheduledDepsInBundle(InvalidDeps), IsScheduled(false),
          OpValue(nullptr) {}

    void init(int BlockSchedulingRegionID, Value *OpVal) {
      FirstInBundle = this;
      NextInBundle = nullptr;
      NextLoadStore = nullptr;
      IsScheduled = false;
      SchedulingRegionID = BlockSchedulingRegionID;
      UnscheduledDepsInBundle = UnscheduledDeps;
      clearDependencies();
      OpValue = OpVal;
    }

    /// Returns true if the dependency information has been calculated.
    bool hasValidDependencies() const { return Dependencies != InvalidDeps; }

    /// Returns true for single instructions and for bundle representatives
    /// (= the head of a bundle).
    bool isSchedulingEntity() const { return FirstInBundle == this; }

    /// Returns true if it represents an instruction bundle and not only a
    /// single instruction.
    bool isPartOfBundle() const {
      return NextInBundle != nullptr || FirstInBundle != this;
    }

    /// Returns true if it is ready for scheduling, i.e. it has no more
    /// unscheduled depending instructions/bundles.
    bool isReady() const {
      assert(isSchedulingEntity() &&
             "can't consider non-scheduling entity for ready list");
      return UnscheduledDepsInBundle == 0 && !IsScheduled;
    }

    /// Modifies the number of unscheduled dependencies, also updating it for
    /// the whole bundle.
    int incrementUnscheduledDeps(int Incr) {
      UnscheduledDeps += Incr;
      return FirstInBundle->UnscheduledDepsInBundle += Incr;
    }

    /// Sets the number of unscheduled dependencies to the number of
    /// dependencies.
    void resetUnscheduledDeps() {
      incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
    }

    /// Clears all dependency information.
    void clearDependencies() {
      Dependencies = InvalidDeps;
      resetUnscheduledDeps();
      MemoryDependencies.clear();
    }

    void dump(raw_ostream &os) const {
      if (!isSchedulingEntity()) {
        os << "/ " << *Inst;
      } else if (NextInBundle) {
        os << '[' << *Inst;
        ScheduleData *SD = NextInBundle;
        while (SD) {
          os << ';' << *SD->Inst;
          SD = SD->NextInBundle;
        }
        os << ']';
      } else {
        os << *Inst;
      }
    }

    Instruction *Inst;

    /// Points to the head in an instruction bundle (and always to this for
    /// single instructions).
    ScheduleData *FirstInBundle;

    /// Single linked list of all instructions in a bundle. Null if it is a
    /// single instruction.
    ScheduleData *NextInBundle;

    /// Single linked list of all memory instructions (e.g. load, store, call)
    /// in the block - until the end of the scheduling region.
    ScheduleData *NextLoadStore;

    /// The dependent memory instructions.
    /// This list is derived on demand in calculateDependencies().
    SmallVector<ScheduleData *, 4> MemoryDependencies;

    /// This ScheduleData is in the current scheduling region if this matches
    /// the current SchedulingRegionID of BlockScheduling.
    int SchedulingRegionID;

    /// Used for getting a "good" final ordering of instructions.
    int SchedulingPriority;

    /// The number of dependencies. This is the number of users of the
    /// instruction plus the number of dependent memory instructions (if any).
    /// This value is calculated on demand.
    /// If it is InvalidDeps, the number of dependencies is not calculated yet.
    int Dependencies;

    /// The number of dependencies minus the number of dependencies of already
    /// scheduled instructions. As soon as this is zero, the
    /// instruction/bundle gets ready for scheduling.
    /// Note that this is negative as long as Dependencies is not calculated.
    int UnscheduledDeps;

    /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for
    /// single instructions.
    int UnscheduledDepsInBundle;

    /// True if this instruction is scheduled (or considered as scheduled in
    /// the dry-run).
    bool IsScheduled;

    /// The representative value for the opcode group of this schedule data
    /// (see isOneOf()).
    Value *OpValue;
  };

#ifndef NDEBUG
  friend inline raw_ostream &operator<<(raw_ostream &os,
                                        const BoUpSLP::ScheduleData &SD) {
    SD.dump(os);
    return os;
  }
#endif

  friend struct GraphTraits<BoUpSLP *>;
  friend struct DOTGraphTraits<BoUpSLP *>;

  /// Contains all scheduling data for a basic block.
  struct BlockScheduling {
    BlockScheduling(BasicBlock *BB)
        : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize),
          ScheduleStart(nullptr), ScheduleEnd(nullptr),
          FirstLoadStoreInRegion(nullptr), LastLoadStoreInRegion(nullptr),
          ScheduleRegionSize(0),
          ScheduleRegionSizeLimit(ScheduleRegionSizeBudget),
          // Make sure that the initial SchedulingRegionID is greater than the
          // initial SchedulingRegionID in ScheduleData (which is 0).
          SchedulingRegionID(1) {}

    void clear() {
      ReadyInsts.clear();
      ScheduleStart = nullptr;
      ScheduleEnd = nullptr;
      FirstLoadStoreInRegion = nullptr;
      LastLoadStoreInRegion = nullptr;

      // Reduce the maximum schedule region size by the size of the
      // previous scheduling run.
      ScheduleRegionSizeLimit -= ScheduleRegionSize;
      if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
        ScheduleRegionSizeLimit = MinScheduleRegionSize;
      ScheduleRegionSize = 0;

      // Make a new scheduling region, i.e. all existing ScheduleData is not
      // in the new region yet.
      ++SchedulingRegionID;
    }

    ScheduleData *getScheduleData(Value *V) {
      ScheduleData *SD = ScheduleDataMap[V];
      if (SD && SD->SchedulingRegionID == SchedulingRegionID)
        return SD;
      return nullptr;
    }

    ScheduleData *getScheduleData(Value *V, Value *Key) {
      if (V == Key)
        return getScheduleData(V);
      auto I = ExtraScheduleDataMap.find(V);
      if (I != ExtraScheduleDataMap.end()) {
        ScheduleData *SD = I->second[Key];
        if (SD && SD->SchedulingRegionID == SchedulingRegionID)
          return SD;
      }
      return nullptr;
    }

    bool isInSchedulingRegion(ScheduleData *SD) {
      return SD->SchedulingRegionID == SchedulingRegionID;
    }

    /// Marks an instruction as scheduled and puts all dependent ready
    /// instructions into the ready-list.
    template <typename ReadyListType>
    void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
      SD->IsScheduled = true;
      DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");

      ScheduleData *BundleMember = SD;
      while (BundleMember) {
        if (BundleMember->Inst != BundleMember->OpValue) {
          BundleMember = BundleMember->NextInBundle;
          continue;
        }
        // Handle the def-use chain dependencies.
        for (Use &U : BundleMember->Inst->operands()) {
          auto *I = dyn_cast<Instruction>(U.get());
          if (!I)
            continue;
          doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
            if (OpDef && OpDef->hasValidDependencies() &&
                OpDef->incrementUnscheduledDeps(-1) == 0) {
              // There are no more unscheduled dependencies after
              // decrementing, so we can put the dependent instruction
              // into the ready list.
              ScheduleData *DepBundle = OpDef->FirstInBundle;
              assert(!DepBundle->IsScheduled &&
                     "already scheduled bundle gets ready");
              ReadyList.insert(DepBundle);
              DEBUG(dbgs()
                    << "SLP: gets ready (def): " << *DepBundle << "\n");
            }
          });
        }
        // Handle the memory dependencies.
        for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
          if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (mem): " << *DepBundle
                         << "\n");
          }
        }
        BundleMember = BundleMember->NextInBundle;
      }
    }

    void doForAllOpcodes(Value *V,
                         function_ref<void(ScheduleData *SD)> Action) {
      if (ScheduleData *SD = getScheduleData(V))
        Action(SD);
      auto I = ExtraScheduleDataMap.find(V);
      if (I != ExtraScheduleDataMap.end())
        for (auto &P : I->second)
          if (P.second->SchedulingRegionID == SchedulingRegionID)
            Action(P.second);
    }

    /// Put all instructions into the ReadyList which are ready for scheduling.
    template <typename ReadyListType>
    void initialFillReadyList(ReadyListType &ReadyList) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
        doForAllOpcodes(I, [&](ScheduleData *SD) {
          if (SD->isSchedulingEntity() && SD->isReady()) {
            ReadyList.insert(SD);
            DEBUG(dbgs() << "SLP: initially in ready list: " << *I << "\n");
          }
        });
      }
    }

    /// Checks if a bundle of instructions can be scheduled, i.e. has no
    /// cyclic dependencies. This is only a dry-run, no instructions are
    /// actually moved at this stage.
    bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, Value *OpValue);

    /// Un-bundles a group of instructions.
    void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);

    /// Allocates schedule data chunk.
    ScheduleData *allocateScheduleDataChunks();

    /// Extends the scheduling region so that V is inside the region.
    /// \returns true if the region size is within the limit.
    bool extendSchedulingRegion(Value *V, Value *OpValue);

    /// Initialize the ScheduleData structures for new instructions in the
    /// scheduling region.
    void initScheduleData(Instruction *FromI, Instruction *ToI,
                          ScheduleData *PrevLoadStore,
                          ScheduleData *NextLoadStore);

    /// Updates the dependency information of a bundle and of all
    /// instructions/bundles which depend on the original bundle.
    void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
                               BoUpSLP *SLP);

    /// Sets all instructions in the scheduling region to un-scheduled.
    void resetSchedule();

    BasicBlock *BB;

    /// Simple memory allocation for ScheduleData.
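    /// (Chunked allocation keeps already-allocated ScheduleData at stable
    /// addresses while the region grows; the maps below store raw
    /// ScheduleData pointers, so reallocation would invalidate them.)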
    std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;

    /// The size of a ScheduleData array in ScheduleDataChunks.
    int ChunkSize;

    /// The allocator position in the current chunk, which is the last entry
    /// of ScheduleDataChunks.
    int ChunkPos;

    /// Attaches ScheduleData to Instruction.
    /// Note that the mapping survives during all vectorization iterations,
    /// i.e. ScheduleData structures are recycled.
    DenseMap<Value *, ScheduleData *> ScheduleDataMap;

    /// Attaches ScheduleData to Instruction with the leading key.
    DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>>
        ExtraScheduleDataMap;

    struct ReadyList : SmallVector<ScheduleData *, 8> {
      void insert(ScheduleData *SD) { push_back(SD); }
    };

    /// The ready-list for scheduling (only used for the dry-run).
    ReadyList ReadyInsts;

    /// The first instruction of the scheduling region.
    Instruction *ScheduleStart;

    /// The first instruction _after_ the scheduling region.
    Instruction *ScheduleEnd;

    /// The first memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *FirstLoadStoreInRegion;

    /// The last memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *LastLoadStoreInRegion;

    /// The current size of the scheduling region.
    int ScheduleRegionSize;

    /// The maximum size allowed for the scheduling region.
    int ScheduleRegionSizeLimit;

    /// The ID of the scheduling region. For a new vectorization iteration this
    /// is incremented which "removes" all ScheduleData from the region.
    int SchedulingRegionID;
  };

  /// Attaches the BlockScheduling structures to basic blocks.
  MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;

  /// Performs the "real" scheduling. Done before vectorization is actually
  /// performed in a basic block.
  void scheduleBlock(BlockScheduling *BS);

  /// List of users to ignore during scheduling and that don't need extracting.
  ArrayRef<Value *> UserIgnoreList;

  // Number of load bundles that contain consecutive loads.
  int NumLoadsWantToKeepOrder;

  // Number of load bundles that contain consecutive loads in reversed order.
  int NumLoadsWantToChangeOrder;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  AssumptionCache *AC;
  DemandedBits *DB;
  const DataLayout *DL;
  OptimizationRemarkEmitter *ORE;

  unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
  unsigned MinVecRegSize; // Set by cl::opt (default: 128).

  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;

  /// A map of scalar integer values to the smallest bit width with which they
  /// can legally be represented. The values map to (width, signed) pairs,
  /// where "width" indicates the minimum bit width and "signed" is True if the
  /// value must be signed-extended, rather than zero-extended, back to its
  /// original width.
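  /// (Illustrative example: a chain of i32 operations whose live bits are
  /// known to fit in 8 unsigned bits could map to the pair (8, false).)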
  MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
};
} // end namespace slpvectorizer

template <> struct GraphTraits<BoUpSLP *> {
  typedef BoUpSLP::TreeEntry TreeEntry;

  /// NodeRef has to be a pointer per the GraphWriter.
  typedef TreeEntry *NodeRef;

  /// \brief Add the VectorizableTree to the index iterator to be able to
  /// return TreeEntry pointers.
  struct ChildIteratorType
      : public iterator_adaptor_base<ChildIteratorType,
                                     SmallVector<int, 1>::iterator> {
    std::vector<TreeEntry> &VectorizableTree;

    ChildIteratorType(SmallVector<int, 1>::iterator W,
                      std::vector<TreeEntry> &VT)
        : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}

    NodeRef operator*() { return &VectorizableTree[*I]; }
  };

  static NodeRef getEntryNode(BoUpSLP &R) { return &R.VectorizableTree[0]; }

  static ChildIteratorType child_begin(NodeRef N) {
    return {N->UserTreeIndices.begin(), N->Container};
  }

  static ChildIteratorType child_end(NodeRef N) {
    return {N->UserTreeIndices.end(), N->Container};
  }

  /// For the node iterator we just need to turn the TreeEntry iterator into a
  /// TreeEntry* iterator so that it dereferences to NodeRef.
  typedef pointer_iterator<std::vector<TreeEntry>::iterator> nodes_iterator;

  static nodes_iterator nodes_begin(BoUpSLP *R) {
    return nodes_iterator(R->VectorizableTree.begin());
  }

  static nodes_iterator nodes_end(BoUpSLP *R) {
    return nodes_iterator(R->VectorizableTree.end());
  }

  static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
};

template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
  typedef BoUpSLP::TreeEntry TreeEntry;

  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
    std::string Str;
    raw_string_ostream OS(Str);
    if (isSplat(Entry->Scalars)) {
      OS << "<splat> " << *Entry->Scalars[0];
      return Str;
    }
    for (auto V : Entry->Scalars) {
      OS << *V;
      if (std::any_of(
              R->ExternalUses.begin(), R->ExternalUses.end(),
              [&](const BoUpSLP::ExternalUser &EU) { return EU.Scalar == V; }))
        OS << " <extract>";
      OS << "\n";
    }
    return Str;
  }

  static std::string getNodeAttributes(const TreeEntry *Entry,
                                       const BoUpSLP *) {
    if (Entry->NeedToGather)
      return "color=red";
    return "";
  }
};

} // end namespace llvm

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ArrayRef<Value *> UserIgnoreLst) {
  ExtraValueToDebugLocsMap ExternallyUsedValues;
  buildTree(Roots, ExternallyUsedValues, UserIgnoreLst);
}

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ExtraValueToDebugLocsMap &ExternallyUsedValues,
                        ArrayRef<Value *> UserIgnoreLst) {
  deleteTree();
  UserIgnoreList = UserIgnoreLst;
  if (!allSameType(Roots))
    return;
  buildTree_rec(Roots, 0, -1);

  // Collect the values that we need to extract from the tree.
  for (TreeEntry &EIdx : VectorizableTree) {
    TreeEntry *Entry = &EIdx;

    // No need to handle users of gathered values.
    if (Entry->NeedToGather)
      continue;

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // Check if the scalar is externally used as an extra arg.
      auto ExtI = ExternallyUsedValues.find(Scalar);
      if (ExtI != ExternallyUsedValues.end()) {
        DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " << Lane
                     << " from " << *Scalar << ".\n");
        ExternalUses.emplace_back(Scalar, nullptr, Lane);
        continue;
      }
      for (User *U : Scalar->users()) {
        DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        Instruction *UserInst = dyn_cast<Instruction>(U);
        if (!UserInst)
          continue;

        // Skip in-tree scalars that become vectors.
        if (TreeEntry *UseEntry = getTreeEntry(U)) {
          Value *UseScalar = UseEntry->Scalars[0];
          // Some in-tree scalars will remain as scalar in vectorized
          // instructions. If that is the case, the one in Lane 0 will
          // be used.
          if (UseScalar != U ||
              !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
            DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
                         << ".\n");
            assert(!UseEntry->NeedToGather && "Bad state");
            continue;
          }
        }

        // Ignore users in the user ignore list.
        if (is_contained(UserIgnoreList, UserInst))
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " << Lane
                     << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
      }
    }
  }
}

void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
                            int UserTreeIdx) {
  bool isAltShuffle = false;
  assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }

  unsigned Opcode = getSameOpcode(VL);

  // Check that this shuffle vector refers to the alternate
  // sequence of opcodes.
  if (Opcode == Instruction::ShuffleVector) {
    Instruction *I0 = dyn_cast<Instruction>(VL[0]);
    unsigned Op = I0->getOpcode();
    if (Op != Instruction::ShuffleVector)
      isAltShuffle = true;
  }

  // If all of the operands are identical or constant we have a simple
  // solution.
  if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !Opcode) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Don't vectorize ephemeral values.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (EphValues.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i]
                   << ") is ephemeral.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }

  // Check if this is a duplicate of another entry.
  if (TreeEntry *E = getTreeEntry(VL[0])) {
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }
    }
    // Record the reuse of the tree node. FIXME: currently this is only used
    // to properly draw the graph rather than for the actual vectorization.
    E->UserTreeIndices.push_back(UserTreeIdx);
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i]
                   << ") is already in tree.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }

  // If any of the scalars is marked as a value that needs to stay scalar,
  // then we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  BasicBlock *BB = VL0->getParent();

  if (!DT->isReachableFromEntry(BB)) {
    // Don't go into unreachable blocks. They may contain instructions with
    // dependency cycles which confuse the final scheduling.
    DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  // Check that every instruction appears once in this bundle.
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    for (unsigned j = i + 1; j < e; ++j)
      if (VL[i] == VL[j]) {
        DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }

  auto &BSRef = BlocksSchedules[BB];
  if (!BSRef) {
    BSRef = llvm::make_unique<BlockScheduling>(BB);
  }
  BlockScheduling &BS = *BSRef.get();

  if (!BS.tryScheduleBundle(VL, this, VL0)) {
    DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
    assert((!BS.getScheduleData(VL0) ||
            !BS.getScheduleData(VL0)->isPartOfBundle()) &&
           "tryScheduleBundle should cancelScheduling on failure");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }
  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = dyn_cast<PHINode>(VL0);

    // Check for terminator values (e.g. invoke).
    for (unsigned j = 0; j < VL.size(); ++j)
      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        TerminatorInst *Term = dyn_cast<TerminatorInst>(
            cast<PHINode>(VL[j])->getIncomingValueForBlock(
                PH->getIncomingBlock(i)));
        if (Term) {
          DEBUG(dbgs()
                << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx);
          return;
        }
      }

    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (Value *j : VL)
        Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock(
            PH->getIncomingBlock(i)));

      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    }
    return;
  }
  case Instruction::ExtractValue:
  case Instruction::ExtractElement: {
    bool Reuse = canReuseExtract(VL, VL0);
    if (Reuse) {
      DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
    } else {
      BS.cancelScheduling(VL, VL0);
    }
    newTreeEntry(VL, Reuse, UserTreeIdx);
    return;
  }
  case Instruction::Load: {
    // Check that a vectorized load would load the same memory as a scalar
    // load. For example, we don't want to vectorize loads that are smaller
    // than 8 bits. Even though we have a packed struct {<i2, i2, i2, i2>},
    // LLVM treats loading/storing it as an i8 struct. If we vectorize
    // loads/stores from such a struct, we read/write packed bits disagreeing
    // with the unvectorized version.
    Type *ScalarTy = VL0->getType();

    if (DL->getTypeSizeInBits(ScalarTy) !=
        DL->getTypeAllocSizeInBits(ScalarTy)) {
      BS.cancelScheduling(VL, VL0);
      newTreeEntry(VL, false, UserTreeIdx);
      DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
      return;
    }

    // Make sure all loads in the bundle are simple - we can't vectorize
    // atomic or volatile loads. (Note: iterate over the whole bundle; the
    // pair-wise bound below only applies to the consecutiveness check.)
    for (unsigned i = 0, e = VL.size(); i < e; ++i) {
      LoadInst *L = cast<LoadInst>(VL[i]);
      if (!L->isSimple()) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
        return;
      }
    }

    // Check if the loads are consecutive, reversed, or neither.
    // TODO: What we really want is to sort the loads, but for now, check
    // the two likely directions.
    bool Consecutive = true;
    bool ReverseConsecutive = true;
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
      if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
        Consecutive = false;
        break;
      } else {
        ReverseConsecutive = false;
      }
    }

    if (Consecutive) {
      ++NumLoadsWantToKeepOrder;
      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a vector of loads.\n");
      return;
    }

    // If none of the load pairs were consecutive when checked in order,
    // check the reverse order.
    if (ReverseConsecutive)
      for (unsigned i = VL.size() - 1; i > 0; --i)
        if (!isConsecutiveAccess(VL[i], VL[i - 1], *DL, *SE)) {
          ReverseConsecutive = false;
          break;
        }

    BS.cancelScheduling(VL, VL0);
    newTreeEntry(VL, false, UserTreeIdx);

    if (ReverseConsecutive) {
      ++NumLoadsWantToChangeOrder;
      DEBUG(dbgs() << "SLP: Gathering reversed loads.\n");
    } else {
      DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
    }
    return;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();
    for (unsigned i = 0; i < VL.size(); ++i) {
      Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
      if (Ty != SrcTy || !isValidElementType(Ty)) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
        return;
      }
    }
    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a vector of casts.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (Value *j : VL)
        Operands.push_back(cast<Instruction>(j)->getOperand(i));

      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    }
    return;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Check that all of the compares have the same predicate.
    CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
    Type *ComparedTy = VL0->getOperand(0)->getType();
    for (unsigned i = 1, e = VL.size(); i < e; ++i) {
      CmpInst *Cmp = cast<CmpInst>(VL[i]);
      if (Cmp->getPredicate() != P0 ||
          Cmp->getOperand(0)->getType() != ComparedTy) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
        return;
      }
    }

    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a vector of compares.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (Value *j : VL)
        Operands.push_back(cast<Instruction>(j)->getOperand(i));

      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    }
    return;
  }
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a vector of bin op.\n");

    // Sort operands of the instructions so that each side is more likely to
    // have the same opcode.
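    // (Illustration with made-up values: for the bundle
    //  {%a = add %x, 1; %b = add 2, %y}, reordering the commutative operands
    //  gives Left = {%x, %y} and Right = {1, 2}, so each side is more
    //  homogeneous and more likely to vectorize.)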
      if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
        ValueList Left, Right;
        reorderInputsAccordingToOpcode(VL0->getOpcode(), VL, Left, Right);
        buildTree_rec(Left, Depth + 1, UserTreeIdx);
        buildTree_rec(Right, Depth + 1, UserTreeIdx);
        return;
      }

      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));

        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      }
      return;
    }
    case Instruction::GetElementPtr: {
      // We don't combine GEPs with complicated (nested) indexing.
      for (unsigned j = 0; j < VL.size(); ++j) {
        if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
          DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx);
          return;
        }
      }

      // We can't combine several GEPs into one vector if they operate on
      // different types.
      Type *Ty0 = VL0->getOperand(0)->getType();
      for (unsigned j = 0; j < VL.size(); ++j) {
        Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
        if (Ty0 != CurTy) {
          DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx);
          return;
        }
      }

      // We don't combine GEPs with non-constant indexes.
      for (unsigned j = 0; j < VL.size(); ++j) {
        auto Op = cast<Instruction>(VL[j])->getOperand(1);
        if (!isa<ConstantInt>(Op)) {
          DEBUG(
              dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx);
          return;
        }
      }

      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
      for (unsigned i = 0, e = 2; i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));

        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      }
      return;
    }
    case Instruction::Store: {
      // Check if the stores are consecutive or if we need to swizzle them.
      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
        if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx);
          DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
          return;
        }

      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a vector of stores.\n");

      ValueList Operands;
      for (Value *j : VL)
        Operands.push_back(cast<Instruction>(j)->getOperand(0));

      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      return;
    }
    case Instruction::Call: {
      // Check if the calls are all to the same vectorizable intrinsic.
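      // E.g. four calls to @llvm.fabs.f32 may become one call to
      // @llvm.fabs.v4f32 of the vectorized operand (illustration only;
      // profitability is decided later by the cost model).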
      CallInst *CI = cast<CallInst>(VL0);
      // Check if this is an Intrinsic call or something that can be
      // represented by an intrinsic call.
      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      if (!isTriviallyVectorizable(ID)) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
        return;
      }
      Function *Int = CI->getCalledFunction();
      Value *A1I = nullptr;
      if (hasVectorInstrinsicScalarOpd(ID, 1))
        A1I = CI->getArgOperand(1);
      for (unsigned i = 1, e = VL.size(); i != e; ++i) {
        CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
        if (!CI2 || CI2->getCalledFunction() != Int ||
            getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
            !CI->hasIdenticalOperandBundleSchema(*CI2)) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx);
          DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
                       << "\n");
          return;
        }
        // ctlz, cttz and powi are special intrinsics whose second argument
        // should be the same in order for them to be vectorized.
        if (hasVectorInstrinsicScalarOpd(ID, 1)) {
          Value *A1J = CI2->getArgOperand(1);
          if (A1I != A1J) {
            BS.cancelScheduling(VL, VL0);
            newTreeEntry(VL, false, UserTreeIdx);
            DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
                         << " argument " << A1I << "!=" << A1J << "\n");
            return;
          }
        }
        // Verify that the bundle operands are identical between the two calls.
        if (CI->hasOperandBundles() &&
            !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
                        CI->op_begin() + CI->getBundleOperandsEndIndex(),
                        CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx);
          DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI
                       << "!=" << *VL[i] << '\n');
          return;
        }
      }

      newTreeEntry(VL, true, UserTreeIdx);
      for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL) {
          CallInst *CI2 = cast<CallInst>(j);
          Operands.push_back(CI2->getArgOperand(i));
        }
        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      }
      return;
    }
    case Instruction::ShuffleVector: {
      // If this is not an alternate sequence of opcodes like add-sub
      // then do not vectorize this instruction.
      if (!isAltShuffle) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: ShuffleVector is not vectorized.\n");
        return;
      }
      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");

      // Reorder operands if reordering would enable vectorization.
      if (isa<BinaryOperator>(VL0)) {
        ValueList Left, Right;
        reorderAltShuffleOperands(VL0->getOpcode(), VL, Left, Right);
        buildTree_rec(Left, Depth + 1, UserTreeIdx);
        buildTree_rec(Right, Depth + 1, UserTreeIdx);
        return;
      }

      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));

        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      }
      return;
    }
    default:
      BS.cancelScheduling(VL, VL0);
      newTreeEntry(VL, false, UserTreeIdx);
      DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
      return;
  }
}

unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
  unsigned N;
  Type *EltTy;
  auto *ST = dyn_cast<StructType>(T);
  if (ST) {
    N = ST->getNumElements();
    EltTy = *ST->element_begin();
  } else {
    N = cast<ArrayType>(T)->getNumElements();
    EltTy = cast<ArrayType>(T)->getElementType();
  }
  if (!isValidElementType(EltTy))
    return 0;
  uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N));
  if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize ||
      VTSize != DL.getTypeStoreSizeInBits(T))
    return 0;
  if (ST) {
    // Check that struct is homogeneous.
    for (const auto *Ty : ST->elements())
      if (Ty != EltTy)
        return 0;
  }
  return N;
}

bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue) const {
  Instruction *E0 = cast<Instruction>(OpValue);
  assert(E0->getOpcode() == Instruction::ExtractElement ||
         E0->getOpcode() == Instruction::ExtractValue);
  assert(E0->getOpcode() == getSameOpcode(VL) && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *Vec = E0->getOperand(0);

  // We have to extract from a vector/aggregate with the same number of
  // elements.
  unsigned NElts;
  if (E0->getOpcode() == Instruction::ExtractValue) {
    const DataLayout &DL = E0->getModule()->getDataLayout();
    NElts = canMapToVector(Vec->getType(), DL);
    if (!NElts)
      return false;
    // Check if the load can be rewritten as a load of a vector.
    LoadInst *LI = dyn_cast<LoadInst>(Vec);
    if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
      return false;
  } else {
    NElts = Vec->getType()->getVectorNumElements();
  }

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    Instruction *Inst = cast<Instruction>(VL[I]);
    if (!matchExtractIndex(Inst, I, Inst->getOpcode()))
      return false;
    if (Inst->getOperand(0) != Vec)
      return false;
  }

  return true;
}

bool BoUpSLP::areAllUsersVectorized(Instruction *I) const {
  return I->hasOneUse() ||
         std::all_of(I->user_begin(), I->user_end(), [this](User *U) {
           return ScalarToTreeEntry.count(U) > 0;
         });
}

int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value *> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0]))
    ScalarTy = CI->getOperand(0)->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  // If we have computed a smaller type for the expression, update VecTy so
  // that the costs will be accurate.
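  // For instance (hypothetical): if the roots are i32 values of which only
  // the low 8 bits are demanded, MinBWs may record a width of 8 and the
  // costs below are then computed on <N x i8> instead of <N x i32>.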
  if (MinBWs.count(VL[0]))
    VecTy = VectorType::get(
        IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size());

  if (E->NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isSplat(VL)) {
      return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    }
    if (getSameOpcode(VL) == Instruction::ExtractElement) {
      Optional<TargetTransformInfo::ShuffleKind> ShuffleKind = isShuffle(VL);
      if (ShuffleKind.hasValue()) {
        int Cost = TTI->getShuffleCost(ShuffleKind.getValue(), VecTy);
        for (auto *V : VL) {
          // If all users of the instruction are going to be vectorized and
          // this instruction itself is not going to be vectorized, consider
          // this instruction as dead and remove its cost from the final cost
          // of the vectorized tree.
          if (areAllUsersVectorized(cast<Instruction>(V)) &&
              !ScalarToTreeEntry.count(V)) {
            auto *IO = cast<ConstantInt>(
                cast<ExtractElementInst>(V)->getIndexOperand());
            Cost -= TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                            IO->getZExtValue());
          }
        }
        return Cost;
      }
    }
    return getGatherCost(E->Scalars);
  }
  unsigned Opcode = getSameOpcode(VL);
  assert(Opcode && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
  Instruction *VL0 = cast<Instruction>(VL[0]);
  switch (Opcode) {
  case Instruction::PHI: {
    return 0;
  }
  case Instruction::ExtractValue:
  case Instruction::ExtractElement: {
    if (canReuseExtract(VL, VL0)) {
      int DeadCost = 0;
      for (unsigned i = 0, e = VL.size(); i < e; ++i) {
        Instruction *EI = cast<Instruction>(VL[i]);
        // If all users are going to be vectorized, the instruction can be
        // considered dead.
        // Similarly, if it has only one user, it will be vectorized for sure.
        if (areAllUsersVectorized(EI))
          // Take credit for the instruction that will become dead.
          DeadCost +=
              TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
      }
      return -DeadCost;
    }
    return getGatherCost(VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();

    // Calculate the cost of this instruction.
    int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
                                                       VL0->getType(), SrcTy,
                                                       VL0);

    VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
    int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy, VL0);
    return VecCost - ScalarCost;
  }
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select: {
    // Calculate the cost of this instruction.
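    // The result is VecCost - ScalarCost; e.g. with hypothetical costs of 1
    // per scalar compare/select and 1 for the vector form, a 4-wide bundle
    // yields 1 - 4 = -3, i.e. a saving of three instructions.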
    VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
    int ScalarCost = VecTy->getNumElements() *
        TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty(), VL0);
    int VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy, VL0);
    return VecCost - ScalarCost;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Certain instructions can be cheaper to vectorize if they have a
    // constant second vector operand.
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_UniformConstantValue;
    TargetTransformInfo::OperandValueProperties Op1VP =
        TargetTransformInfo::OP_None;
    TargetTransformInfo::OperandValueProperties Op2VP =
        TargetTransformInfo::OP_None;

    // If all operands are exactly the same ConstantInt then set the
    // operand kind to OK_UniformConstantValue.
    // If instead not all operands are constants, then set the operand kind
    // to OK_AnyValue. If all operands are constants but not the same,
    // then set the operand kind to OK_NonUniformConstantValue.
    ConstantInt *CInt = nullptr;
    for (unsigned i = 0; i < VL.size(); ++i) {
      const Instruction *I = cast<Instruction>(VL[i]);
      if (!isa<ConstantInt>(I->getOperand(1))) {
        Op2VK = TargetTransformInfo::OK_AnyValue;
        break;
      }
      if (i == 0) {
        CInt = cast<ConstantInt>(I->getOperand(1));
        continue;
      }
      if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
          CInt != cast<ConstantInt>(I->getOperand(1)))
        Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
    }
    // FIXME: Currently the cost model modification for division by a power
    // of 2 is only handled for X86 and AArch64. Add support for other
    // targets.
    if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt &&
        CInt->getValue().isPowerOf2())
      Op2VP = TargetTransformInfo::OP_PowerOf2;

    SmallVector<const Value *, 4> Operands(VL0->operand_values());
    int ScalarCost =
        VecTy->getNumElements() *
        TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK, Op1VP,
                                    Op2VP, Operands);
    int VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK,
                                              Op1VP, Op2VP, Operands);
    return VecCost - ScalarCost;
  }
  case Instruction::GetElementPtr: {
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_UniformConstantValue;

    int ScalarCost =
        VecTy->getNumElements() *
        TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
    int VecCost =
        TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);

    return VecCost - ScalarCost;
  }
  case Instruction::Load: {
    // Cost of wide load - cost of scalar loads.
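    // E.g. for a 4-wide bundle with a hypothetical cost of 1 per load,
    // this evaluates to 1 - 4 = -3, a net saving of three scalar loads.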
    unsigned alignment = cast<LoadInst>(VL0)->getAlignment();
    int ScalarLdCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0, VL0);
    int VecLdCost = TTI->getMemoryOpCost(Instruction::Load,
                                         VecTy, alignment, 0, VL0);
    return VecLdCost - ScalarLdCost;
  }
  case Instruction::Store: {
    // We know that we can merge the stores. Calculate the cost.
    unsigned alignment = cast<StoreInst>(VL0)->getAlignment();
    int ScalarStCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Store, ScalarTy, alignment, 0, VL0);
    int VecStCost = TTI->getMemoryOpCost(Instruction::Store,
                                         VecTy, alignment, 0, VL0);
    return VecStCost - ScalarStCost;
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

    // Calculate the cost of the scalar and vector calls.
    SmallVector<Type *, 4> ScalarTys;
    for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op)
      ScalarTys.push_back(CI->getArgOperand(op)->getType());

    FastMathFlags FMF;
    if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
      FMF = FPMO->getFastMathFlags();

    int ScalarCallCost = VecTy->getNumElements() *
        TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF);

    SmallVector<Value *, 4> Args(CI->arg_operands());
    int VecCallCost = TTI->getIntrinsicInstrCost(ID, CI->getType(), Args, FMF,
                                                 VecTy->getNumElements());

    DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
                 << " (" << VecCallCost << "-" << ScalarCallCost << ")"
                 << " for " << *CI << "\n");

    return VecCallCost - ScalarCallCost;
  }
  case Instruction::ShuffleVector: {
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_AnyValue;
    int ScalarCost = 0;
    int VecCost = 0;
    for (Value *i : VL) {
      // cast<> asserts on failure, so checking its result for null here
      // would be dead code; every value in VL is an instruction.
      Instruction *I = cast<Instruction>(i);
      ScalarCost +=
          TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
    }
    // VecCost is equal to the sum of the cost of creating 2 vectors
    // and the cost of creating the shuffle.
    Instruction *I0 = cast<Instruction>(VL[0]);
    VecCost =
        TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK);
    Instruction *I1 = cast<Instruction>(VL[1]);
    VecCost +=
        TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK);
    VecCost +=
        TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0);
    return VecCost - ScalarCost;
  }
  default:
    llvm_unreachable("Unknown instruction");
  }
}

bool BoUpSLP::isFullyVectorizableTinyTree() {
  DEBUG(dbgs() << "SLP: Check whether the tree with height "
               << VectorizableTree.size() << " is fully vectorizable.\n");

  // We only handle trees of heights 1 and 2.
  if (VectorizableTree.size() == 1 && !VectorizableTree[0].NeedToGather)
    return true;

  if (VectorizableTree.size() != 2)
    return false;

  // Handle splat and all-constants stores.
  if (!VectorizableTree[0].NeedToGather &&
      (allConstant(VectorizableTree[1].Scalars) ||
       isSplat(VectorizableTree[1].Scalars)))
    return true;

  // Gathering cost would be too much for tiny trees.
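  // E.g. a two-entry tree whose second entry gathers four unrelated scalars
  // would need several insertelement instructions just to form one operand,
  // which is unlikely to pay off at this size; reject such trees below.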
  if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
    return false;

  return true;
}

bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() {
  // We can vectorize the tree if its size is greater than or equal to the
  // minimum size specified by the MinTreeSize command line option.
  if (VectorizableTree.size() >= MinTreeSize)
    return false;

  // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
  // can vectorize it if we can prove it fully vectorizable.
  if (isFullyVectorizableTinyTree())
    return false;

  assert((!VectorizableTree.empty() ||
          ExternalUses.empty()) &&
         "We shouldn't have any external users");

  // Otherwise, we can't vectorize the tree. It is both tiny and not fully
  // vectorizable.
  return true;
}

int BoUpSLP::getSpillCost() {
  // Walk from the bottom of the tree to the top, tracking which values are
  // live. When we see a call instruction that is not part of our tree,
  // query TTI to see if there is a cost to keeping values live over it
  // (for example, if spills and fills are required).
  unsigned BundleWidth = VectorizableTree.front().Scalars.size();
  int Cost = 0;

  SmallPtrSet<Instruction *, 4> LiveValues;
  Instruction *PrevInst = nullptr;

  for (const auto &N : VectorizableTree) {
    Instruction *Inst = dyn_cast<Instruction>(N.Scalars[0]);
    if (!Inst)
      continue;

    if (!PrevInst) {
      PrevInst = Inst;
      continue;
    }

    // Update LiveValues.
    LiveValues.erase(PrevInst);
    for (auto &J : PrevInst->operands()) {
      if (isa<Instruction>(&*J) && getTreeEntry(&*J))
        LiveValues.insert(cast<Instruction>(&*J));
    }

    DEBUG(
      dbgs() << "SLP: #LV: " << LiveValues.size();
      for (auto *X : LiveValues)
        dbgs() << " " << X->getName();
      dbgs() << ", Looking at ";
      Inst->dump();
    );

    // Now find the sequence of instructions between PrevInst and Inst.
    BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
                                 PrevInstIt =
                                     PrevInst->getIterator().getReverse();
    while (InstIt != PrevInstIt) {
      if (PrevInstIt == PrevInst->getParent()->rend()) {
        PrevInstIt = Inst->getParent()->rbegin();
        continue;
      }

      if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) {
        SmallVector<Type *, 4> V;
        for (auto *II : LiveValues)
          V.push_back(VectorType::get(II->getType(), BundleWidth));
        Cost += TTI->getCostOfKeepingLiveOverCall(V);
      }

      ++PrevInstIt;
    }

    PrevInst = Inst;
  }

  return Cost;
}

int BoUpSLP::getTreeCost() {
  int Cost = 0;
  DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
               << VectorizableTree.size() << ".\n");

  unsigned BundleWidth = VectorizableTree[0].Scalars.size();

  for (TreeEntry &TE : VectorizableTree) {
    int C = getEntryCost(&TE);
    DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
                 << *TE.Scalars[0] << ".\n");
    Cost += C;
  }

  SmallSet<Value *, 16> ExtractCostCalculated;
  int ExtractCost = 0;
  for (ExternalUser &EU : ExternalUses) {
    // We only add extract cost once for the same scalar.
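    // (The same scalar can appear in ExternalUses once per external user,
    // but a single extractelement can feed all of those users.)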
    if (!ExtractCostCalculated.insert(EU.Scalar).second)
      continue;

    // Uses by ephemeral values are free (because the ephemeral value will be
    // removed prior to code generation, and so the extraction will be
    // removed as well).
    if (EphValues.count(EU.User))
      continue;

    // If we plan to rewrite the tree in a smaller type, we will need to sign
    // extend the extracted value back to the original type. Here, we account
    // for the extract and the added cost of the sign extend if needed.
    auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth);
    auto *ScalarRoot = VectorizableTree[0].Scalars[0];
    if (MinBWs.count(ScalarRoot)) {
      auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
      auto Extend =
          MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
      VecTy = VectorType::get(MinTy, BundleWidth);
      ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
                                                   VecTy, EU.Lane);
    } else {
      ExtractCost +=
          TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
    }
  }

  int SpillCost = getSpillCost();
  Cost += SpillCost + ExtractCost;

  std::string Str;
  {
    raw_string_ostream OS(Str);
    OS << "SLP: Spill Cost = " << SpillCost << ".\n"
       << "SLP: Extract Cost = " << ExtractCost << ".\n"
       << "SLP: Total Cost = " << Cost << ".\n";
  }
  DEBUG(dbgs() << Str);

  if (ViewSLPTree)
    ViewGraph(this, "SLP" + F->getName(), false, Str);

  return Cost;
}

int BoUpSLP::getGatherCost(Type *Ty) {
  int Cost = 0;
  for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
    Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
  return Cost;
}

int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  return getGatherCost(VecTy);
}

// Reorder commutative operations in an alternate shuffle if the resulting
// vectors are consecutive loads. This would allow us to vectorize the tree.
// If we have something like -
//   load a[0] - load b[0]
//   load b[1] + load a[1]
//   load a[2] - load b[2]
//   load a[3] + load b[3]
// reordering the second pair (load b[1], load a[1]) would allow us to
// vectorize this code.
void BoUpSLP::reorderAltShuffleOperands(unsigned Opcode, ArrayRef<Value *> VL,
                                        SmallVectorImpl<Value *> &Left,
                                        SmallVectorImpl<Value *> &Right) {
  // Push the left and right operands of the binary operation into Left and
  // Right.
  unsigned AltOpcode = getAltOpcode(Opcode);
  (void)AltOpcode;
  for (Value *V : VL) {
    auto *I = cast<Instruction>(V);
    assert(sameOpcodeOrAlt(Opcode, AltOpcode, I->getOpcode()) &&
           "Incorrect instruction in vector");
    Left.push_back(I->getOperand(0));
    Right.push_back(I->getOperand(1));
  }

  // Reorder if we have a commutative operation and consecutive accesses
  // are on either side of the alternate instructions.
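  // Continuing the example above: if Left[j] is load b[1] and Right[j + 1]
  // is load b[2] (consecutive accesses), swapping the operands of lane j or
  // lane j + 1 moves the b[] loads onto one side so that each side can form
  // a consecutive chain.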
  for (unsigned j = 0; j < VL.size() - 1; ++j) {
    if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
        Instruction *VL1 = cast<Instruction>(VL[j]);
        Instruction *VL2 = cast<Instruction>(VL[j + 1]);
        if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j], Right[j]);
          continue;
        } else if (VL2->isCommutative() &&
                   isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
        // else unchanged
      }
    }
    if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
        Instruction *VL1 = cast<Instruction>(VL[j]);
        Instruction *VL2 = cast<Instruction>(VL[j + 1]);
        if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j], Right[j]);
          continue;
        } else if (VL2->isCommutative() &&
                   isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
        // else unchanged
      }
    }
  }
}

// Return true if I should be commuted before adding its left and right
// operands to the arrays Left and Right.
//
// The vectorizer is trying either to have all elements on one side be
// instructions with the same opcode to enable further vectorization, or to
// have a splat to lower the vectorization cost.
static bool shouldReorderOperands(
    int i, unsigned Opcode, Instruction &I, ArrayRef<Value *> Left,
    ArrayRef<Value *> Right, bool AllSameOpcodeLeft, bool AllSameOpcodeRight,
    bool SplatLeft, bool SplatRight, Value *&VLeft, Value *&VRight) {
  VLeft = I.getOperand(0);
  VRight = I.getOperand(1);
  // If we have "SplatRight", try to see if commuting is needed to preserve it.
  if (SplatRight) {
    if (VRight == Right[i - 1])
      // Preserve SplatRight.
      return false;
    if (VLeft == Right[i - 1]) {
      // Commuting would preserve SplatRight, but we don't want to break
      // SplatLeft either, i.e. preserve the original order if possible.
      // (FIXME: why do we care?)
      if (SplatLeft && VLeft == Left[i - 1])
        return false;
      return true;
    }
  }
  // Symmetrically handle SplatLeft.
  if (SplatLeft) {
    if (VLeft == Left[i - 1])
      // Preserve SplatLeft.
      return false;
    if (VRight == Left[i - 1])
      return true;
  }

  Instruction *ILeft = dyn_cast<Instruction>(VLeft);
  Instruction *IRight = dyn_cast<Instruction>(VRight);

  // If we have "AllSameOpcodeRight", try to see if the left operand preserves
  // it and not the right; in this case we want to commute.
  if (AllSameOpcodeRight) {
    unsigned RightPrevOpcode = cast<Instruction>(Right[i - 1])->getOpcode();
    if (IRight && RightPrevOpcode == IRight->getOpcode())
      // Do not commute, a match on the right preserves AllSameOpcodeRight.
      return false;
    if (ILeft && RightPrevOpcode == ILeft->getOpcode()) {
      // We have a match and may want to commute, but first check if there is
      // not also a match on the existing operands on the Left to preserve
      // AllSameOpcodeLeft, i.e. preserve the original order if possible.
      // (FIXME: why do we care?)
      if (AllSameOpcodeLeft && ILeft &&
          cast<Instruction>(Left[i - 1])->getOpcode() == ILeft->getOpcode())
        return false;
      return true;
    }
  }
  // Symmetrically handle AllSameOpcodeLeft.
  if (AllSameOpcodeLeft) {
    unsigned LeftPrevOpcode = cast<Instruction>(Left[i - 1])->getOpcode();
    if (ILeft && LeftPrevOpcode == ILeft->getOpcode())
      return false;
    if (IRight && LeftPrevOpcode == IRight->getOpcode())
      return true;
  }
  return false;
}

void BoUpSLP::reorderInputsAccordingToOpcode(unsigned Opcode,
                                             ArrayRef<Value *> VL,
                                             SmallVectorImpl<Value *> &Left,
                                             SmallVectorImpl<Value *> &Right) {
  if (VL.size()) {
    // Peel the first iteration out of the loop since there's nothing
    // interesting to do anyway and it simplifies the checks in the loop.
    auto *I = cast<Instruction>(VL[0]);
    Value *VLeft = I->getOperand(0);
    Value *VRight = I->getOperand(1);
    if (!isa<Instruction>(VRight) && isa<Instruction>(VLeft))
      // Favor having an instruction to the right. FIXME: why?
      std::swap(VLeft, VRight);
    Left.push_back(VLeft);
    Right.push_back(VRight);
  }

  // Keep track if we have instructions with all the same opcode on one side.
  bool AllSameOpcodeLeft = isa<Instruction>(Left[0]);
  bool AllSameOpcodeRight = isa<Instruction>(Right[0]);
  // Keep track if we have one side with all the same value (broadcast).
  bool SplatLeft = true;
  bool SplatRight = true;

  for (unsigned i = 1, e = VL.size(); i != e; ++i) {
    Instruction *I = cast<Instruction>(VL[i]);
    assert(((I->getOpcode() == Opcode && I->isCommutative()) ||
            (I->getOpcode() != Opcode && Instruction::isCommutative(Opcode))) &&
           "Can only process commutative instruction");
    // Commute to favor either a splat or maximizing having the same opcodes
    // on one side.
    Value *VLeft;
    Value *VRight;
    if (shouldReorderOperands(i, Opcode, *I, Left, Right, AllSameOpcodeLeft,
                              AllSameOpcodeRight, SplatLeft, SplatRight, VLeft,
                              VRight)) {
      Left.push_back(VRight);
      Right.push_back(VLeft);
    } else {
      Left.push_back(VLeft);
      Right.push_back(VRight);
    }
    // Update Splat* and AllSameOpcode* after the insertion.
    SplatRight = SplatRight && (Right[i - 1] == Right[i]);
    SplatLeft = SplatLeft && (Left[i - 1] == Left[i]);
    AllSameOpcodeLeft = AllSameOpcodeLeft && isa<Instruction>(Left[i]) &&
                        (cast<Instruction>(Left[i - 1])->getOpcode() ==
                         cast<Instruction>(Left[i])->getOpcode());
    AllSameOpcodeRight = AllSameOpcodeRight && isa<Instruction>(Right[i]) &&
                         (cast<Instruction>(Right[i - 1])->getOpcode() ==
                          cast<Instruction>(Right[i])->getOpcode());
  }

  // If one operand ends up being a broadcast, return this operand order.
  if (SplatRight || SplatLeft)
    return;

  // Finally check if we can get a longer vectorizable chain by reordering
  // without breaking the good operand order detected above.
  // E.g. if we have something like -
  //   load a[0]  load b[0]
  //   load b[1]  load a[1]
  //   load a[2]  load b[2]
  //   load a[3]  load b[3]
  // reordering the second pair (load b[1], load a[1]) would allow us to
  // vectorize this code and we still retain the AllSameOpcode property.
  // FIXME: This load reordering might break AllSameOpcode in some rare cases
  // such as -
  //   add a[0],c[0]  load b[0]
  //   add a[1],c[2]  load b[1]
  //   b[2]           load b[2]
  //   add a[3],c[3]  load b[3]
  for (unsigned j = 0; j < VL.size() - 1; ++j) {
    if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
        if (isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
      }
    }
    if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
        if (isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
      }
    }
    // else unchanged
  }
}

void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL, Value *OpValue) {
  // Get the basic block this bundle is in. All instructions in the bundle
  // should be in this block.
  auto *Front = cast<Instruction>(OpValue);
  auto *BB = Front->getParent();
  const unsigned Opcode = cast<Instruction>(OpValue)->getOpcode();
  const unsigned AltOpcode = getAltOpcode(Opcode);
  assert(all_of(make_range(VL.begin(), VL.end()), [=](Value *V) -> bool {
    return !sameOpcodeOrAlt(Opcode, AltOpcode,
                            cast<Instruction>(V)->getOpcode()) ||
           cast<Instruction>(V)->getParent() == BB;
  }));

  // The last instruction in the bundle in program order.
  Instruction *LastInst = nullptr;

  // Find the last instruction. The common case should be that BB has been
  // scheduled, and the last instruction is VL.back(). So we start with
  // VL.back() and iterate over schedule data until we reach the end of the
  // bundle. The end of the bundle is marked by null ScheduleData.
  if (BlocksSchedules.count(BB)) {
    auto *Bundle =
        BlocksSchedules[BB]->getScheduleData(isOneOf(OpValue, VL.back()));
    if (Bundle && Bundle->isPartOfBundle())
      for (; Bundle; Bundle = Bundle->NextInBundle)
        if (Bundle->OpValue == Bundle->Inst)
          LastInst = Bundle->Inst;
  }

  // LastInst can still be null at this point if there's either not an entry
  // for BB in BlocksSchedules or there's no ScheduleData available for
  // VL.back(). This can be the case if buildTree_rec aborts for various
  // reasons (e.g., the maximum recursion depth is reached, the maximum region
  // size is reached, etc.). ScheduleData is initialized in the scheduling
  // "dry-run".
  //
  // If this happens, we can still find the last instruction by brute force.
  // We iterate forwards from Front (inclusive) until we either see all
  // instructions in the bundle or reach the end of the block. If Front is the
  // last instruction in program order, LastInst will be set to Front, and we
  // will visit all the remaining instructions in the block.
  //
  // One of the reasons we exit early from buildTree_rec is to place an upper
  // bound on compile-time. Thus, taking an additional compile-time hit here is
  // not ideal. However, this should be exceedingly rare since it requires that
  // we both exit early from buildTree_rec and that the bundle be out-of-order
  // (causing us to iterate all the way to the end of the block).
  if (!LastInst) {
    SmallPtrSet<Value *, 16> Bundle(VL.begin(), VL.end());
    for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
      if (Bundle.erase(&I) &&
          sameOpcodeOrAlt(Opcode, AltOpcode, I.getOpcode()))
        LastInst = &I;
      if (Bundle.empty())
        break;
    }
  }

  // Set the insertion point after the last instruction in the bundle. Set the
  // debug location to Front.
  Builder.SetInsertPoint(BB, ++LastInst->getIterator());
  Builder.SetCurrentDebugLocation(Front->getDebugLoc());
}

Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
  Value *Vec = UndefValue::get(Ty);
  // Generate the 'InsertElement' instruction.
  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
    if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(Insrt);
      CSEBlocks.insert(Insrt->getParent());

      // Add to our 'need-to-extract' list.
      if (TreeEntry *E = getTreeEntry(VL[i])) {
        // Find which lane we need to extract.
        int FoundLane = -1;
        for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for?
          if (E->Scalars[Lane] == VL[i]) {
            FoundLane = Lane;
            break;
          }
        }
        assert(FoundLane >= 0 && "Could not find the correct lane");
        ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
      }
    }
  }

  return Vec;
}

Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL, Value *OpValue) const {
  if (const TreeEntry *En = getTreeEntry(OpValue)) {
    if (En->isSame(VL) && En->VectorizedValue)
      return En->VectorizedValue;
  }
  return nullptr;
}

Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  if (TreeEntry *E = getTreeEntry(VL[0]))
    if (E->isSame(VL))
      return vectorizeTree(E);

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  return Gather(VL, VecTy);
}

Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
  Type *ScalarTy = VL0->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());

  if (E->NeedToGather) {
    setInsertPointAfterBundle(E->Scalars, VL0);
    auto *V = Gather(E->Scalars, VecTy);
    E->VectorizedValue = V;
    return V;
  }

  unsigned Opcode = getSameOpcode(E->Scalars);

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = cast<PHINode>(VL0);
    Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
    Builder.SetCurrentDebugLocation(PH->getDebugLoc());
    PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
    E->VectorizedValue = NewPhi;

    // PHINodes may have multiple entries from the same block. We want to
    // visit every block once.
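    // This happens e.g. when a switch has several case edges to the same
    // successor: the PHI lists that block once per edge, and every such
    // edge must be given the same incoming vector value.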
    SmallSet<BasicBlock *, 4> VisitedBBs;

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      BasicBlock *IBB = PH->getIncomingBlock(i);

      if (!VisitedBBs.insert(IBB).second) {
        NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
        continue;
      }

      // Prepare the operand vector.
      for (Value *V : E->Scalars)
        Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(IBB));

      Builder.SetInsertPoint(IBB->getTerminator());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      Value *Vec = vectorizeTree(Operands);
      NewPhi->addIncoming(Vec, IBB);
    }

    assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
           "Invalid number of incoming values");
    return NewPhi;
  }

  case Instruction::ExtractElement: {
    if (canReuseExtract(E->Scalars, VL0)) {
      Value *V = VL0->getOperand(0);
      E->VectorizedValue = V;
      return V;
    }
    setInsertPointAfterBundle(E->Scalars, VL0);
    auto *V = Gather(E->Scalars, VecTy);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::ExtractValue: {
    if (canReuseExtract(E->Scalars, VL0)) {
      LoadInst *LI = cast<LoadInst>(VL0->getOperand(0));
      Builder.SetInsertPoint(LI);
      PointerType *PtrTy =
          PointerType::get(VecTy, LI->getPointerAddressSpace());
      Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
      LoadInst *V = Builder.CreateAlignedLoad(Ptr, LI->getAlignment());
      E->VectorizedValue = V;
      return propagateMetadata(V, E->Scalars);
    }
    setInsertPointAfterBundle(E->Scalars, VL0);
    auto *V = Gather(E->Scalars, VecTy);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    ValueList INVL;
    for (Value *V : E->Scalars)
      INVL.push_back(cast<Instruction>(V)->getOperand(0));

    setInsertPointAfterBundle(E->Scalars, VL0);

    Value *InVec = vectorizeTree(INVL);

    if (Value *V = alreadyVectorized(E->Scalars, VL0))
      return V;

    CastInst *CI = cast<CastInst>(VL0);
    Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::FCmp:
  case Instruction::ICmp: {
    ValueList LHSV, RHSV;
    for (Value *V : E->Scalars) {
      LHSV.push_back(cast<Instruction>(V)->getOperand(0));
      RHSV.push_back(cast<Instruction>(V)->getOperand(1));
    }

    setInsertPointAfterBundle(E->Scalars, VL0);

    Value *L = vectorizeTree(LHSV);
    Value *R = vectorizeTree(RHSV);

    if (Value *V = alreadyVectorized(E->Scalars, VL0))
      return V;

    CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
    Value *V;
    if (Opcode == Instruction::FCmp)
      V = Builder.CreateFCmp(P0, L, R);
    else
      V = Builder.CreateICmp(P0, L, R);

    E->VectorizedValue = V;
    propagateIRFlags(E->VectorizedValue, E->Scalars, VL0);
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::Select: {
    ValueList TrueVec, FalseVec, CondVec;
    for (Value *V : E->Scalars) {
      CondVec.push_back(cast<Instruction>(V)->getOperand(0));
      TrueVec.push_back(cast<Instruction>(V)->getOperand(1));
      FalseVec.push_back(cast<Instruction>(V)->getOperand(2));
    }

    setInsertPointAfterBundle(E->Scalars, VL0);

    Value *Cond = vectorizeTree(CondVec);
    Value *True = vectorizeTree(TrueVec);
    Value *False = vectorizeTree(FalseVec);

    if (Value *V = alreadyVectorized(E->Scalars, VL0))
      return V;

    Value *V = Builder.CreateSelect(Cond, True, False);
    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    ValueList LHSVL, RHSVL;
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative())
      reorderInputsAccordingToOpcode(VL0->getOpcode(), E->Scalars, LHSVL,
                                     RHSVL);
    else
      for (Value *V : E->Scalars) {
        auto *I = cast<Instruction>(V);
        LHSVL.push_back(I->getOperand(0));
        RHSVL.push_back(I->getOperand(1));
      }

    setInsertPointAfterBundle(E->Scalars, VL0);

    Value *LHS = vectorizeTree(LHSVL);
    Value *RHS = vectorizeTree(RHSVL);

    if (Value *V = alreadyVectorized(E->Scalars, VL0))
      return V;

    BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
    Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
    E->VectorizedValue = V;
    propagateIRFlags(E->VectorizedValue, E->Scalars, VL0);
    ++NumVectorInstructions;

    if (Instruction *I = dyn_cast<Instruction>(V))
      return propagateMetadata(I, E->Scalars);

    return V;
  }
  case Instruction::Load: {
    // Loads are inserted at the head of the tree because we don't want to
    // sink them all the way down past store instructions.
    setInsertPointAfterBundle(E->Scalars, VL0);

    LoadInst *LI = cast<LoadInst>(VL0);
    Type *ScalarLoadTy = LI->getType();
    unsigned AS = LI->getPointerAddressSpace();

    Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));

    // The pointer operand uses an in-tree scalar, so we add the new BitCast
    // to the ExternalUses list to make sure that an extract will be
    // generated in the future.
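    // (For example, if the pointer is itself a scalar in the tree, the
    // scalar pointer instruction may be erased after vectorization, so the
    // new BitCast user must be recorded to keep an extract alive.)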
    Value *PO = LI->getPointerOperand();
    if (getTreeEntry(PO))
      ExternalUses.push_back(ExternalUser(PO, cast<User>(VecPtr), 0));

    unsigned Alignment = LI->getAlignment();
    LI = Builder.CreateLoad(VecPtr);
    if (!Alignment) {
      Alignment = DL->getABITypeAlignment(ScalarLoadTy);
    }
    LI->setAlignment(Alignment);
    E->VectorizedValue = LI;
    ++NumVectorInstructions;
    return propagateMetadata(LI, E->Scalars);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(VL0);
    unsigned Alignment = SI->getAlignment();
    unsigned AS = SI->getPointerAddressSpace();

    ValueList ValueOp;
    for (Value *V : E->Scalars)
      ValueOp.push_back(cast<StoreInst>(V)->getValueOperand());

    setInsertPointAfterBundle(E->Scalars, VL0);

    Value *VecValue = vectorizeTree(ValueOp);
    Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));
    StoreInst *S = Builder.CreateStore(VecValue, VecPtr);

    // The pointer operand uses an in-tree scalar, so we add the new BitCast
    // to the ExternalUses list to make sure that an extract will be
    // generated in the future.
    Value *PO = SI->getPointerOperand();
    if (getTreeEntry(PO))
      ExternalUses.push_back(ExternalUser(PO, cast<User>(VecPtr), 0));

    if (!Alignment) {
      Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType());
    }
    S->setAlignment(Alignment);
    E->VectorizedValue = S;
    ++NumVectorInstructions;
    return propagateMetadata(S, E->Scalars);
  }
  case Instruction::GetElementPtr: {
    setInsertPointAfterBundle(E->Scalars, VL0);

    ValueList Op0VL;
    for (Value *V : E->Scalars)
      Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0));

    Value *Op0 = vectorizeTree(Op0VL);

    std::vector<Value *> OpVecs;
    for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
         ++j) {
      ValueList OpVL;
      for (Value *V : E->Scalars)
        OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j));

      Value *OpVec = vectorizeTree(OpVL);
      OpVecs.push_back(OpVec);
    }

    Value *V = Builder.CreateGEP(
        cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs);
    E->VectorizedValue = V;
    ++NumVectorInstructions;

    if (Instruction *I = dyn_cast<Instruction>(V))
      return propagateMetadata(I, E->Scalars);

    return V;
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    setInsertPointAfterBundle(E->Scalars, VL0);
    Function *FI;
    Intrinsic::ID IID = Intrinsic::not_intrinsic;
    Value *ScalarArg = nullptr;
    if (CI && (FI = CI->getCalledFunction())) {
      IID = FI->getIntrinsicID();
    }
    std::vector<Value *> OpVecs;
    for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
      ValueList OpVL;
      // ctlz, cttz and powi are special intrinsics whose second argument is
      // a scalar. This argument should not be vectorized.
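      // E.g. a call @llvm.powi.f32(float %x, i32 3) becomes
      // @llvm.powi.v4f32(<4 x float> %v, i32 3): the exponent remains a
      // single scalar operand (illustration only).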
      if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) {
        CallInst *CEI = cast<CallInst>(VL0);
        ScalarArg = CEI->getArgOperand(j);
        OpVecs.push_back(CEI->getArgOperand(j));
        continue;
      }
      for (Value *V : E->Scalars) {
        CallInst *CEI = cast<CallInst>(V);
        OpVL.push_back(CEI->getArgOperand(j));
      }

      Value *OpVec = vectorizeTree(OpVL);
      DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
      OpVecs.push_back(OpVec);
    }

    Module *M = F->getParent();
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) };
    Function *CF = Intrinsic::getDeclaration(M, ID, Tys);
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);

    // The scalar argument uses an in-tree scalar, so we add the new
    // vectorized call to the ExternalUses list to make sure that an extract
    // will be generated in the future.
    if (ScalarArg && getTreeEntry(ScalarArg))
      ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));

    E->VectorizedValue = V;
    propagateIRFlags(E->VectorizedValue, E->Scalars, VL0);
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::ShuffleVector: {
    ValueList LHSVL, RHSVL;
    assert(isa<BinaryOperator>(VL0) && "Invalid Shuffle Vector Operand");
    reorderAltShuffleOperands(VL0->getOpcode(), E->Scalars, LHSVL, RHSVL);
    setInsertPointAfterBundle(E->Scalars, VL0);

    Value *LHS = vectorizeTree(LHSVL);
    Value *RHS = vectorizeTree(RHSVL);

    if (Value *V = alreadyVectorized(E->Scalars, VL0))
      return V;

    // Create a vector of LHS op1 RHS.
    BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0);
    Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS);

    // Create a vector of LHS op2 RHS.
    Instruction *VL1 = cast<Instruction>(E->Scalars[1]);
    BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1);
    Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS);

    // Create shuffle to take alternate operations from the vector.
    // Also, gather up odd and even scalar ops to propagate IR flags to
    // each vector operation.
    ValueList OddScalars, EvenScalars;
    unsigned e = E->Scalars.size();
    SmallVector<Constant *, 8> Mask(e);
    for (unsigned i = 0; i < e; ++i) {
      if (isOdd(i)) {
        Mask[i] = Builder.getInt32(e + i);
        OddScalars.push_back(E->Scalars[i]);
      } else {
        Mask[i] = Builder.getInt32(i);
        EvenScalars.push_back(E->Scalars[i]);
      }
    }

    Value *ShuffleMask = ConstantVector::get(Mask);
    propagateIRFlags(V0, EvenScalars);
    propagateIRFlags(V1, OddScalars);

    Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
    E->VectorizedValue = V;
    ++NumVectorInstructions;
    if (Instruction *I = dyn_cast<Instruction>(V))
      return propagateMetadata(I, E->Scalars);

    return V;
  }
  default:
    llvm_unreachable("unknown inst");
  }
  return nullptr;
}

Value *BoUpSLP::vectorizeTree() {
  ExtraValueToDebugLocsMap ExternallyUsedValues;
  return vectorizeTree(ExternallyUsedValues);
}

Value *
BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
  // All blocks must be scheduled before any instructions are inserted.
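  // (Scheduling may reorder instructions inside each block, so insertion
  // points chosen before scheduling could otherwise become stale.)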
  for (auto &BSIter : BlocksSchedules) {
    scheduleBlock(BSIter.second.get());
  }

  Builder.SetInsertPoint(&F->getEntryBlock().front());
  auto *VectorRoot = vectorizeTree(&VectorizableTree[0]);

  // If the vectorized tree can be rewritten in a smaller type, we truncate the
  // vectorized root. InstCombine will then rewrite the entire expression. We
  // sign extend the extracted values below.
  auto *ScalarRoot = VectorizableTree[0].Scalars[0];
  if (MinBWs.count(ScalarRoot)) {
    if (auto *I = dyn_cast<Instruction>(VectorRoot))
      Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
    auto BundleWidth = VectorizableTree[0].Scalars.size();
    auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
    auto *VecTy = VectorType::get(MinTy, BundleWidth);
    auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
    VectorizableTree[0].VectorizedValue = Trunc;
  }

  DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values.\n");

  // If necessary, sign-extend or zero-extend the extracted value Ex back to
  // the larger type specified by ScalarType.
  auto extend = [&](Value *ScalarRoot, Value *Ex, Type *ScalarType) {
    if (!MinBWs.count(ScalarRoot))
      return Ex;
    if (MinBWs[ScalarRoot].second)
      return Builder.CreateSExt(Ex, ScalarType);
    return Builder.CreateZExt(Ex, ScalarType);
  };

  // Extract all of the elements with the external uses.
  for (const auto &ExternalUse : ExternalUses) {
    Value *Scalar = ExternalUse.Scalar;
    llvm::User *User = ExternalUse.User;

    // Skip users that we already RAUWed. This happens when one instruction
    // has multiple uses of the same value.
    if (User && !is_contained(Scalar->users(), User))
      continue;
    TreeEntry *E = getTreeEntry(Scalar);
    assert(E && "Invalid scalar");
    assert(!E->NeedToGather && "Extracting from a gather list");

    Value *Vec = E->VectorizedValue;
    assert(Vec && "Can't find vectorizable value");

    Value *Lane = Builder.getInt32(ExternalUse.Lane);
    // If User == nullptr, the Scalar is used as an extra arg. Generate an
    // ExtractElement instruction and update the record for this scalar in
    // ExternallyUsedValues.
    if (!User) {
      assert(ExternallyUsedValues.count(Scalar) &&
             "Scalar with nullptr as an external user must be registered in "
             "ExternallyUsedValues map");
      if (auto *VecI = dyn_cast<Instruction>(Vec)) {
        Builder.SetInsertPoint(VecI->getParent(),
                               std::next(VecI->getIterator()));
      } else {
        Builder.SetInsertPoint(&F->getEntryBlock().front());
      }
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      Ex = extend(ScalarRoot, Ex, Scalar->getType());
      CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
      auto &Locs = ExternallyUsedValues[Scalar];
      ExternallyUsedValues.insert({Ex, Locs});
      ExternallyUsedValues.erase(Scalar);
      continue;
    }

    // Generate extracts for out-of-tree users.
    // Find the insertion point for the extractelement lane.
    if (auto *VecI = dyn_cast<Instruction>(Vec)) {
      if (PHINode *PH = dyn_cast<PHINode>(User)) {
        for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
          if (PH->getIncomingValue(i) == Scalar) {
            TerminatorInst *IncomingTerminator =
                PH->getIncomingBlock(i)->getTerminator();
            if (isa<CatchSwitchInst>(IncomingTerminator)) {
              Builder.SetInsertPoint(VecI->getParent(),
                                     std::next(VecI->getIterator()));
            } else {
              Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
            }
            Value *Ex = Builder.CreateExtractElement(Vec, Lane);
            Ex = extend(ScalarRoot, Ex, Scalar->getType());
            CSEBlocks.insert(PH->getIncomingBlock(i));
            PH->setOperand(i, Ex);
          }
        }
      } else {
        Builder.SetInsertPoint(cast<Instruction>(User));
        Value *Ex = Builder.CreateExtractElement(Vec, Lane);
        Ex = extend(ScalarRoot, Ex, Scalar->getType());
        CSEBlocks.insert(cast<Instruction>(User)->getParent());
        User->replaceUsesOfWith(Scalar, Ex);
      }
    } else {
      Builder.SetInsertPoint(&F->getEntryBlock().front());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      Ex = extend(ScalarRoot, Ex, Scalar->getType());
      CSEBlocks.insert(&F->getEntryBlock());
      User->replaceUsesOfWith(Scalar, Ex);
    }

    DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
  }

  // For each vectorized value:
  for (TreeEntry &EIdx : VectorizableTree) {
    TreeEntry *Entry = &EIdx;

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];
      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      assert(Entry->VectorizedValue && "Can't find vectorizable value");

      Type *Ty = Scalar->getType();
      if (!Ty->isVoidTy()) {
#ifndef NDEBUG
        for (User *U : Scalar->users()) {
          DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");

          // It is legal to replace users in the ignorelist by undef.
          assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) &&
                 "Replacing out-of-tree value with undef");
        }
#endif
        Value *Undef = UndefValue::get(Ty);
        Scalar->replaceAllUsesWith(Undef);
      }
      DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
      eraseInstruction(cast<Instruction>(Scalar));
    }
  }

  Builder.ClearInsertionPoint();

  return VectorizableTree[0].VectorizedValue;
}

void BoUpSLP::optimizeGatherSequence() {
  DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
               << " gather sequence instructions.\n");
  // LICM InsertElementInst sequences.
  for (Instruction *it : GatherSeq) {
    InsertElementInst *Insert = dyn_cast<InsertElementInst>(it);

    if (!Insert)
      continue;

    // Check if this block is inside a loop.
    Loop *L = LI->getLoopFor(Insert->getParent());
    if (!L)
      continue;

    // Check if it has a preheader.
    BasicBlock *PreHeader = L->getLoopPreheader();
    if (!PreHeader)
      continue;

    // If the vector or the element that we insert into it are
    // instructions that are defined inside the loop, then we can't
    // hoist this instruction.
    Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
    Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
    if (CurrVec && L->contains(CurrVec))
      continue;
    if (NewElem && L->contains(NewElem))
      continue;

    // We can hoist this instruction. Move it to the pre-header.
    Insert->moveBefore(PreHeader->getTerminator());
  }

  // Make a list of all reachable blocks in our CSE queue.
  SmallVector<const DomTreeNode *, 8> CSEWorkList;
  CSEWorkList.reserve(CSEBlocks.size());
  for (BasicBlock *BB : CSEBlocks)
    if (DomTreeNode *N = DT->getNode(BB)) {
      assert(DT->isReachableFromEntry(N));
      CSEWorkList.push_back(N);
    }

  // Sort blocks by domination. This ensures we visit a block after all blocks
  // dominating it are visited.
  std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(),
                   [this](const DomTreeNode *A, const DomTreeNode *B) {
                     return DT->properlyDominates(A, B);
                   });

  // Perform an O(N^2) search over the gather sequences and merge identical
  // instructions. TODO: We can further optimize this scan if we split the
  // instructions into different buckets based on the insert lane.
  SmallVector<Instruction *, 16> Visited;
  for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
    assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
           "Worklist not sorted properly!");
    BasicBlock *BB = (*I)->getBlock();
    // For all instructions in blocks containing gather sequences:
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
      Instruction *In = &*it++;
      if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
        continue;

      // Check if we can replace this instruction with any of the
      // visited instructions.
      for (Instruction *v : Visited) {
        if (In->isIdenticalTo(v) &&
            DT->dominates(v->getParent(), In->getParent())) {
          In->replaceAllUsesWith(v);
          eraseInstruction(In);
          In = nullptr;
          break;
        }
      }
      if (In) {
        assert(!is_contained(Visited, In));
        Visited.push_back(In);
      }
    }
  }
  CSEBlocks.clear();
  GatherSeq.clear();
}

// Groups the instructions into a bundle (which is then a single scheduling
// entity) and schedules instructions until the bundle gets ready.
bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL,
                                                 BoUpSLP *SLP,
                                                 Value *OpValue) {
  if (isa<PHINode>(OpValue))
    return true;

  // Initialize the instruction bundle.
  Instruction *OldScheduleEnd = ScheduleEnd;
  ScheduleData *PrevInBundle = nullptr;
  ScheduleData *Bundle = nullptr;
  bool ReSchedule = false;
  DEBUG(dbgs() << "SLP: bundle: " << *OpValue << "\n");

  // Make sure that the scheduling region contains all
  // instructions of the bundle.
  for (Value *V : VL) {
    if (!extendSchedulingRegion(V, OpValue))
      return false;
  }

  for (Value *V : VL) {
    ScheduleData *BundleMember = getScheduleData(V);
    assert(BundleMember &&
           "no ScheduleData for bundle member (maybe not in same basic block)");
    if (BundleMember->IsScheduled) {
      // A bundle member was scheduled as a single instruction before and now
      // needs to be scheduled as part of the bundle. We just get rid of the
      // existing schedule.
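      // (The reset itself happens lazily: ReSchedule is only recorded here;
      // once the bundle is fully formed below, resetSchedule() clears the
      // scheduled flags and the ready list is refilled before the bundle's
      // dependencies are recalculated.)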
      DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
                   << " was already scheduled\n");
      ReSchedule = true;
    }
    assert(BundleMember->isSchedulingEntity() &&
           "bundle member already part of other bundle");
    if (PrevInBundle) {
      PrevInBundle->NextInBundle = BundleMember;
    } else {
      Bundle = BundleMember;
    }
    BundleMember->UnscheduledDepsInBundle = 0;
    Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;

    // Group the instructions into a bundle.
    BundleMember->FirstInBundle = Bundle;
    PrevInBundle = BundleMember;
  }
  if (ScheduleEnd != OldScheduleEnd) {
    // The scheduling region got new instructions at the lower end (or it is a
    // new region for the first bundle). This makes it necessary to
    // recalculate all dependencies.
    // It is seldom that this needs to be done a second time after adding the
    // initial bundle to the region.
    for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
      doForAllOpcodes(I, [](ScheduleData *SD) {
        SD->clearDependencies();
      });
    }
    ReSchedule = true;
  }
  if (ReSchedule) {
    resetSchedule();
    initialFillReadyList(ReadyInsts);
  }

  DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
               << BB->getName() << "\n");

  calculateDependencies(Bundle, true, SLP);

  // Now try to schedule the new bundle. As soon as the bundle is "ready" it
  // means that there are no cyclic dependencies and we can schedule it.
  // Note that it's important that we don't "schedule" the bundle yet (see
  // cancelScheduling).
  while (!Bundle->isReady() && !ReadyInsts.empty()) {
    ScheduleData *pickedSD = ReadyInsts.back();
    ReadyInsts.pop_back();

    if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) {
      schedule(pickedSD, ReadyInsts);
    }
  }
  if (!Bundle->isReady()) {
    cancelScheduling(VL, OpValue);
    return false;
  }
  return true;
}

void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
                                                Value *OpValue) {
  if (isa<PHINode>(OpValue))
    return;

  ScheduleData *Bundle = getScheduleData(OpValue);
  DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
  assert(!Bundle->IsScheduled &&
         "Can't cancel bundle which is already scheduled");
  assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
         "tried to unbundle something which is not a bundle");

  // Un-bundle: make single instructions out of the bundle.
  ScheduleData *BundleMember = Bundle;
  while (BundleMember) {
    assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
    BundleMember->FirstInBundle = BundleMember;
    ScheduleData *Next = BundleMember->NextInBundle;
    BundleMember->NextInBundle = nullptr;
    BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
    if (BundleMember->UnscheduledDepsInBundle == 0) {
      ReadyInsts.insert(BundleMember);
    }
    BundleMember = Next;
  }
}

BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
  // Allocate a new ScheduleData for the instruction.
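  // ScheduleData objects are handed out from chunked arrays of ChunkSize
  // elements; chunks are never freed or moved, so ScheduleData pointers
  // remain stable while the pool grows (a single growing vector would
  // invalidate them on reallocation).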
  if (ChunkPos >= ChunkSize) {
    ScheduleDataChunks.push_back(llvm::make_unique<ScheduleData[]>(ChunkSize));
    ChunkPos = 0;
  }
  return &(ScheduleDataChunks.back()[ChunkPos++]);
}

bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V,
                                                      Value *OpValue) {
  if (getScheduleData(V, isOneOf(OpValue, V)))
    return true;
  Instruction *I = dyn_cast<Instruction>(V);
  assert(I && "bundle member must be an instruction");
  assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
  auto &&CheckScheduleForI = [this, OpValue](Instruction *I) -> bool {
    ScheduleData *ISD = getScheduleData(I);
    if (!ISD)
      return false;
    assert(isInSchedulingRegion(ISD) &&
           "ScheduleData not in scheduling region");
    ScheduleData *SD = allocateScheduleDataChunks();
    SD->Inst = I;
    SD->init(SchedulingRegionID, OpValue);
    ExtraScheduleDataMap[I][OpValue] = SD;
    return true;
  };
  if (CheckScheduleForI(I))
    return true;
  if (!ScheduleStart) {
    // It's the first instruction in the new region.
    initScheduleData(I, I->getNextNode(), nullptr, nullptr);
    ScheduleStart = I;
    ScheduleEnd = I->getNextNode();
    if (isOneOf(OpValue, I) != I)
      CheckScheduleForI(I);
    assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
    DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
    return true;
  }
  // Search up and down at the same time, because we don't know if the new
  // instruction is above or below the existing scheduling region.
  BasicBlock::reverse_iterator UpIter =
      ++ScheduleStart->getIterator().getReverse();
  BasicBlock::reverse_iterator UpperEnd = BB->rend();
  BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
  BasicBlock::iterator LowerEnd = BB->end();
  for (;;) {
    if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
      DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
      return false;
    }

    if (UpIter != UpperEnd) {
      if (&*UpIter == I) {
        initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
        ScheduleStart = I;
        if (isOneOf(OpValue, I) != I)
          CheckScheduleForI(I);
        DEBUG(dbgs() << "SLP: extend schedule region start to " << *I << "\n");
        return true;
      }
      UpIter++;
    }
    if (DownIter != LowerEnd) {
      if (&*DownIter == I) {
        initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
                         nullptr);
        ScheduleEnd = I->getNextNode();
        if (isOneOf(OpValue, I) != I)
          CheckScheduleForI(I);
        assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
        DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n");
        return true;
      }
      DownIter++;
    }
    assert((UpIter != UpperEnd || DownIter != LowerEnd) &&
           "instruction not found in block");
  }
  return true;
}

void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
                                                Instruction *ToI,
                                                ScheduleData *PrevLoadStore,
                                                ScheduleData *NextLoadStore) {
  ScheduleData *CurrentLoadStore = PrevLoadStore;
  for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
    ScheduleData *SD = ScheduleDataMap[I];
    if (!SD) {
      // Allocate a new ScheduleData for the instruction.
      SD = allocateScheduleDataChunks();
      ScheduleDataMap[I] = SD;
      SD->Inst = I;
    }
    assert(!isInSchedulingRegion(SD) &&
           "new ScheduleData already in scheduling region");
    SD->init(SchedulingRegionID, I);

    if (I->mayReadOrWriteMemory()) {
      // Update the linked list of memory accessing instructions.
      if (CurrentLoadStore) {
        CurrentLoadStore->NextLoadStore = SD;
      } else {
        FirstLoadStoreInRegion = SD;
      }
      CurrentLoadStore = SD;
    }
  }
  if (NextLoadStore) {
    if (CurrentLoadStore)
      CurrentLoadStore->NextLoadStore = NextLoadStore;
  } else {
    LastLoadStoreInRegion = CurrentLoadStore;
  }
}

void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
                                                     bool InsertInReadyList,
                                                     BoUpSLP *SLP) {
  assert(SD->isSchedulingEntity());

  SmallVector<ScheduleData *, 10> WorkList;
  WorkList.push_back(SD);

  while (!WorkList.empty()) {
    ScheduleData *SD = WorkList.back();
    WorkList.pop_back();

    ScheduleData *BundleMember = SD;
    while (BundleMember) {
      assert(isInSchedulingRegion(BundleMember));
      if (!BundleMember->hasValidDependencies()) {
        DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n");
        BundleMember->Dependencies = 0;
        BundleMember->resetUnscheduledDeps();

        // Handle def-use chain dependencies.
        if (BundleMember->OpValue != BundleMember->Inst) {
          ScheduleData *UseSD = getScheduleData(BundleMember->Inst);
          if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
            BundleMember->Dependencies++;
            ScheduleData *DestBundle = UseSD->FirstInBundle;
            if (!DestBundle->IsScheduled)
              BundleMember->incrementUnscheduledDeps(1);
            if (!DestBundle->hasValidDependencies())
              WorkList.push_back(DestBundle);
          }
        } else {
          for (User *U : BundleMember->Inst->users()) {
            if (isa<Instruction>(U)) {
              ScheduleData *UseSD = getScheduleData(U);
              if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
                BundleMember->Dependencies++;
                ScheduleData *DestBundle = UseSD->FirstInBundle;
                if (!DestBundle->IsScheduled)
                  BundleMember->incrementUnscheduledDeps(1);
                if (!DestBundle->hasValidDependencies())
                  WorkList.push_back(DestBundle);
              }
            } else {
              // It is unclear whether this can ever happen, but we need to
              // be safe. This unresolvable dependency keeps the
              // instruction/bundle from ever becoming ready, which
              // eventually disables its vectorization.
              BundleMember->Dependencies++;
              BundleMember->incrementUnscheduledDeps(1);
            }
          }
        }

        // Handle the memory dependencies.
        ScheduleData *DepDest = BundleMember->NextLoadStore;
        if (DepDest) {
          Instruction *SrcInst = BundleMember->Inst;
          MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
          bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
          unsigned numAliased = 0;
          unsigned DistToSrc = 1;

          while (DepDest) {
            assert(isInSchedulingRegion(DepDest));

            // We have two limits to reduce the complexity:
            // 1) AliasedCheckLimit: It's a small limit to reduce calls to
            //    SLP->isAliased (which is the expensive part in this loop).
            // 2) MaxMemDepDistance: It's for very large blocks and it aborts
            //    the whole loop (even if the loop is fast, it's quadratic).
            //    It's important for the loop break condition (see below) to
            //    check this limit even between two read-only instructions.
            if (DistToSrc >= MaxMemDepDistance ||
                ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
                 (numAliased >= AliasedCheckLimit ||
                  SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {

              // We increment the counter only if the locations are aliased
              // (instead of counting all alias checks). This gives a better
              // balance between reduced runtime and accurate dependencies.
              numAliased++;

              DepDest->MemoryDependencies.push_back(BundleMember);
              BundleMember->Dependencies++;
              ScheduleData *DestBundle = DepDest->FirstInBundle;
              if (!DestBundle->IsScheduled) {
                BundleMember->incrementUnscheduledDeps(1);
              }
              if (!DestBundle->hasValidDependencies()) {
                WorkList.push_back(DestBundle);
              }
            }
            DepDest = DepDest->NextLoadStore;

            // An example explaining the loop break condition: Let's assume
            // our starting instruction is i0 and MaxMemDepDistance = 3.
            //
            //    +--------v--v--v
            //    i0,i1,i2,i3,i4,i5,i6,i7,i8
            //    +--------^--^--^
            //
            // MaxMemDepDistance lets us stop alias-checking at i3 and we add
            // dependencies from i0 to i3,i4,.. (even if they are not
            // aliased). Previously we already added dependencies from i3 to
            // i6,i7,i8 (because of MaxMemDepDistance). As we added a
            // dependency from i0 to i3, we have transitive dependencies from
            // i0 to i6,i7,i8 and we can abort this loop at i6.
            if (DistToSrc >= 2 * MaxMemDepDistance)
              break;
            DistToSrc++;
          }
        }
      }
      BundleMember = BundleMember->NextInBundle;
    }
    if (InsertInReadyList && SD->isReady()) {
      ReadyInsts.push_back(SD);
      DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n");
    }
  }
}

void BoUpSLP::BlockScheduling::resetSchedule() {
  assert(ScheduleStart &&
         "tried to reset schedule on block which has not been scheduled");
  for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
    doForAllOpcodes(I, [&](ScheduleData *SD) {
      assert(isInSchedulingRegion(SD) &&
             "ScheduleData not in scheduling region");
      SD->IsScheduled = false;
      SD->resetUnscheduledDeps();
    });
  }
  ReadyInsts.clear();
}

void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
  if (!BS->ScheduleStart)
    return;

  DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");

  BS->resetSchedule();

  // For the real scheduling we use a more sophisticated ready-list: it is
  // sorted by the original instruction location. This lets the final schedule
  // be as close as possible to the original instruction order.
  struct ScheduleDataCompare {
    bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
      return SD2->SchedulingPriority < SD1->SchedulingPriority;
    }
  };
  std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;

  // Ensure that all dependency data is updated and fill the ready-list with
  // initial instructions.
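  // The priority recorded below is simply the instruction's position in the
  // original order (for a bundle, the shared FirstInBundle entry is updated
  // as each member is visited). The comparator above pops ready entries in
  // ascending priority, which keeps the final schedule close to the source
  // order.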
  int Idx = 0;
  int NumToSchedule = 0;
  for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
       I = I->getNextNode()) {
    BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) {
      assert(SD->isPartOfBundle() ==
                 (getTreeEntry(SD->Inst) != nullptr) &&
             "scheduler and vectorizer bundle mismatch");
      SD->FirstInBundle->SchedulingPriority = Idx++;
      if (SD->isSchedulingEntity()) {
        BS->calculateDependencies(SD, false, this);
        NumToSchedule++;
      }
    });
  }
  BS->initialFillReadyList(ReadyInsts);

  Instruction *LastScheduledInst = BS->ScheduleEnd;

  // Do the "real" scheduling.
  while (!ReadyInsts.empty()) {
    ScheduleData *picked = *ReadyInsts.begin();
    ReadyInsts.erase(ReadyInsts.begin());

    // Move the scheduled instruction(s) to their dedicated places, if not
    // there yet.
    ScheduleData *BundleMember = picked;
    while (BundleMember) {
      Instruction *pickedInst = BundleMember->Inst;
      if (LastScheduledInst->getNextNode() != pickedInst) {
        BS->BB->getInstList().remove(pickedInst);
        BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
                                     pickedInst);
      }
      LastScheduledInst = pickedInst;
      BundleMember = BundleMember->NextInBundle;
    }

    BS->schedule(picked, ReadyInsts);
    NumToSchedule--;
  }
  assert(NumToSchedule == 0 && "could not schedule all instructions");

  // Avoid duplicate scheduling of the block.
  BS->ScheduleStart = nullptr;
}

unsigned BoUpSLP::getVectorElementSize(Value *V) {
  // If V is a store, just return the width of the stored value without
  // traversing the expression tree. This is the common case.
  if (auto *Store = dyn_cast<StoreInst>(V))
    return DL->getTypeSizeInBits(Store->getValueOperand()->getType());

  // If V is not a store, we can traverse the expression tree to find loads
  // that feed it. The type of the loaded value may indicate a more suitable
  // width than V's type. We want to base the vector element size on the width
  // of memory operations where possible.
  SmallVector<Instruction *, 16> Worklist;
  SmallPtrSet<Instruction *, 16> Visited;
  if (auto *I = dyn_cast<Instruction>(V))
    Worklist.push_back(I);

  // Traverse the expression tree in bottom-up order looking for loads. If we
  // encounter an instruction we don't yet handle, we give up.
  auto MaxWidth = 0u;
  auto FoundUnknownInst = false;
  while (!Worklist.empty() && !FoundUnknownInst) {
    auto *I = Worklist.pop_back_val();
    Visited.insert(I);

    // We should only be looking at scalar instructions here. If the current
    // instruction has a vector type, give up.
    auto *Ty = I->getType();
    if (isa<VectorType>(Ty))
      FoundUnknownInst = true;

    // If the current instruction is a load, update MaxWidth to reflect the
    // width of the loaded value.
    else if (isa<LoadInst>(I))
      MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty));

    // Otherwise, we need to visit the operands of the instruction. We only
    // handle the interesting cases from buildTree here. If an operand is an
    // instruction we haven't yet visited, we add it to the worklist.
    else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
             isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) {
      for (Use &U : I->operands())
        if (auto *J = dyn_cast<Instruction>(U.get()))
          if (!Visited.count(J))
            Worklist.push_back(J);
    }

    // If we don't yet handle the instruction, give up.
    else
      FoundUnknownInst = true;
  }

  // If we didn't encounter a memory access in the expression tree, or if we
  // gave up for some reason, just return the width of V.
  if (!MaxWidth || FoundUnknownInst)
    return DL->getTypeSizeInBits(V->getType());

  // Otherwise, return the maximum width we found.
  return MaxWidth;
}

// Determine if a value V in a vectorizable expression Expr can be demoted to a
// smaller type with a truncation. We collect the values that will be demoted
// in ToDemote and additional roots that require investigating in Roots.
static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
                                  SmallVectorImpl<Value *> &ToDemote,
                                  SmallVectorImpl<Value *> &Roots) {
  // We can always demote constants.
  if (isa<Constant>(V)) {
    ToDemote.push_back(V);
    return true;
  }

  // If the value is not an instruction in the expression with only one use, it
  // cannot be demoted.
  auto *I = dyn_cast<Instruction>(V);
  if (!I || !I->hasOneUse() || !Expr.count(I))
    return false;

  switch (I->getOpcode()) {

  // We can always demote truncations and extensions. Since truncations can
  // seed additional demotion, we save the truncated value.
  case Instruction::Trunc:
    Roots.push_back(I->getOperand(0));
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::SExt:
    break;

  // We can demote certain binary operations if we can demote both of their
  // operands.
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
        !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
      return false;
    break;

  // We can demote selects if we can demote their true and false values.
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
        !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
      return false;
    break;
  }

  // We can demote phis if we can demote all their incoming operands. Note that
  // we don't need to worry about cycles since we ensure single use above.
  case Instruction::PHI: {
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
        return false;
    break;
  }

  // Otherwise, conservatively give up.
  default:
    return false;
  }

  // Record the value that we can demote.
  ToDemote.push_back(V);
  return true;
}

void BoUpSLP::computeMinimumValueSizes() {
  // If there are no external uses, the expression tree must be rooted by a
  // store. We can't demote in-memory values, so there is nothing to do here.
  if (ExternalUses.empty())
    return;

  // We only attempt to truncate integer expressions.
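  // An illustrative sketch (hypothetical IR): in an expression such as
  //
  //   %x  = load i8, i8* %p
  //   %xe = zext i8 %x to i32
  //   %a  = add i32 %xe, 1
  //   %t  = trunc i32 %a to i8
  //
  // the add needs no more than 8 meaningful bits, so the vectorized tree
  // could be computed in <N x i8> and extended once at the root; the
  // analysis below records such decisions in MinBWs.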
  auto &TreeRoot = VectorizableTree[0].Scalars;
  auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
  if (!TreeRootIT)
    return;

  // If the expression is not rooted by a store, these roots should have
  // external uses. We will rely on InstCombine to rewrite the expression in
  // the narrower type. However, InstCombine only rewrites single-use values.
  // This means that if a tree entry other than a root is used externally, it
  // must have multiple uses and InstCombine will not rewrite it. The code
  // below ensures that only the roots are used externally.
  SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
  for (auto &EU : ExternalUses)
    if (!Expr.erase(EU.Scalar))
      return;
  if (!Expr.empty())
    return;

  // Collect the scalar values of the vectorizable expression. We will use this
  // context to determine which values can be demoted. If we see a truncation,
  // we mark it as seeding another demotion.
  for (auto &Entry : VectorizableTree)
    Expr.insert(Entry.Scalars.begin(), Entry.Scalars.end());

  // Ensure the roots of the vectorizable tree don't form a cycle. They must
  // have a single external user that is not in the vectorizable tree.
  for (auto *Root : TreeRoot)
    if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
      return;

  // Conservatively determine if we can actually truncate the roots of the
  // expression. Collect the values that can be demoted in ToDemote and
  // additional roots that require investigating in Roots.
  SmallVector<Value *, 32> ToDemote;
  SmallVector<Value *, 4> Roots;
  for (auto *Root : TreeRoot)
    if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
      return;

  // The maximum bit width required to represent all the values that can be
  // demoted without loss of precision. It would be safe to truncate the roots
  // of the expression to this width.
  auto MaxBitWidth = 8u;

  // We first check if all the bits of the roots are demanded. If they're not,
  // we can truncate the roots to this narrower type.
  for (auto *Root : TreeRoot) {
    auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
    MaxBitWidth = std::max<unsigned>(
        Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
  }

  // True if the roots can be zero-extended back to their original type, rather
  // than sign-extended. We know that if the leading bits are not demanded, we
  // can safely zero-extend. So we initialize IsKnownPositive to True.
  bool IsKnownPositive = true;

  // If all the bits of the roots are demanded, we can try a little harder to
  // compute a narrower type. This can happen, for example, if the roots are
  // getelementptr indices. InstCombine promotes these indices to the pointer
  // width. Thus, all their bits are technically demanded even though the
  // address computation might be vectorized in a smaller type.
  //
  // We start by looking at each entry that can be demoted. We compute the
  // maximum bit width required to store the scalar by using ValueTracking to
  // compute the number of high-order bits we can truncate.
  if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType())) {
    MaxBitWidth = 8u;

    // Determine if the sign bit of all the roots is known to be zero. If not,
    // IsKnownPositive is set to False.
    IsKnownPositive = all_of(TreeRoot, [&](Value *R) {
      KnownBits Known = computeKnownBits(R, *DL);
      return Known.isNonNegative();
    });

    // Determine the maximum number of bits required to store the scalar
    // values.
    for (auto *Scalar : ToDemote) {
      auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, 0, DT);
      auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
      MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
    }

    // If we can't prove that the sign bit is zero, we must add one to the
    // maximum bit width to account for the unknown sign bit. This preserves
    // the existing sign bit so we can safely sign-extend the root back to the
    // original type. Otherwise, if we know the sign bit is zero, we will
    // zero-extend the root instead.
    //
    // FIXME: This is somewhat suboptimal, as there will be cases where adding
    //        one to the maximum bit width will yield a larger-than-necessary
    //        type. In general, we need to add an extra bit only if we can't
    //        prove that the upper bit of the original type is equal to the
    //        upper bit of the proposed smaller type. If these two bits are
    //        the same (either zero or one) we know that sign-extending from
    //        the smaller type will result in the same value. Here, since we
    //        can't yet prove this, we are just making the proposed smaller
    //        type larger to ensure correctness.
    if (!IsKnownPositive)
      ++MaxBitWidth;
  }

  // Round MaxBitWidth up to the next power-of-two.
  if (!isPowerOf2_64(MaxBitWidth))
    MaxBitWidth = NextPowerOf2(MaxBitWidth);

  // If the maximum bit width we compute is less than the width of the roots'
  // type, we can proceed with the narrowing. Otherwise, do nothing.
  if (MaxBitWidth >= TreeRootIT->getBitWidth())
    return;

  // If we can truncate the root, we must collect additional values that might
  // be demoted as a result. That is, those seeded by truncations we will
  // modify.
  while (!Roots.empty())
    collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);

  // Finally, map the values we can demote to the maximum bit width we
  // computed.
  for (auto *Scalar : ToDemote)
    MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
}

namespace {
/// The SLPVectorizer Pass.
struct SLPVectorizer : public FunctionPass {
  SLPVectorizerPass Impl;

  /// Pass identification, replacement for typeid
  static char ID;

  explicit SLPVectorizer() : FunctionPass(ID) {
    initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override {
    return false;
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

    return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    FunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }
};
} // end anonymous namespace

PreservedAnalyses SLPVectorizerPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
  auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
  auto *AA = &AM.getResult<AAManager>(F);
  auto *LI = &AM.getResult<LoopAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
  auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<AAManager>();
  PA.preserve<GlobalsAA>();
  return PA;
}

bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
                                TargetTransformInfo *TTI_,
                                TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
                                LoopInfo *LI_, DominatorTree *DT_,
                                AssumptionCache *AC_, DemandedBits *DB_,
                                OptimizationRemarkEmitter *ORE_) {
  SE = SE_;
  TTI = TTI_;
  TLI = TLI_;
  AA = AA_;
  LI = LI_;
  DT = DT_;
  AC = AC_;
  DB = DB_;
  DL = &F.getParent()->getDataLayout();

  Stores.clear();
  GEPs.clear();
  bool Changed = false;

  // If the target claims to have no vector registers, don't attempt
  // vectorization.
  if (!TTI->getNumberOfRegisters(true))
    return false;

  // Don't vectorize when the attribute NoImplicitFloat is used.
  if (F.hasFnAttribute(Attribute::NoImplicitFloat))
    return false;

  DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");

  // Use the bottom-up SLP vectorizer to construct chains that start with
  // store instructions.
  BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);

  // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
  // delete instructions.

  // Scan the blocks in the function in post order.
  for (auto BB : post_order(&F.getEntryBlock())) {
    collectSeedInstructions(BB);

    // Vectorize trees that end at stores.
    if (!Stores.empty()) {
      DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
                   << " underlying objects.\n");
      Changed |= vectorizeStoreChains(R);
    }

    // Vectorize trees that end at reductions.
    Changed |= vectorizeChainsInBlock(BB, R);

    // Vectorize the index computations of getelementptr instructions. This
    // is primarily intended to catch gather-like idioms ending at
    // non-consecutive loads.
    if (!GEPs.empty()) {
      DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
                   << " underlying objects.\n");
      Changed |= vectorizeGEPIndices(BB, R);
    }
  }

  if (Changed) {
    R.optimizeGatherSequence();
    DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
    DEBUG(verifyFunction(F));
  }
  return Changed;
}

/// \brief Check that the values in the slice of the VL array still exist in
/// the WeakTrackingVH array.
/// Vectorization of part of the VL array may cause later values in the VL
/// array to become invalid. We track when this has happened in the
/// WeakTrackingVH array.
static bool hasValueBeenRAUWed(ArrayRef<Value *> VL,
                               ArrayRef<WeakTrackingVH> VH, unsigned SliceBegin,
                               unsigned SliceSize) {
  VL = VL.slice(SliceBegin, SliceSize);
  VH = VH.slice(SliceBegin, SliceSize);
  return !std::equal(VL.begin(), VL.end(), VH.begin());
}

bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
                                            unsigned VecRegSize) {
  unsigned ChainLen = Chain.size();
  DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
               << "\n");
  unsigned Sz = R.getVectorElementSize(Chain[0]);
  unsigned VF = VecRegSize / Sz;

  if (!isPowerOf2_32(Sz) || VF < 2)
    return false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakTrackingVH, 8> TrackValues(Chain.begin(), Chain.end());

  bool Changed = false;
  // Look for profitable vectorizable trees at all offsets, starting at zero.
  for (unsigned i = 0, e = ChainLen; i < e; ++i) {
    if (i + VF > e)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
                 << "\n");
    ArrayRef<Value *> Operands = Chain.slice(i, VF);

    R.buildTree(Operands);
    if (R.isTreeTinyAndNotFullyVectorizable())
      continue;

    R.computeMinimumValueSizes();

    int Cost = R.getTreeCost();

    DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
    if (Cost < -SLPCostThreshold) {
      DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
      using namespace ore;
      R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
                                          cast<StoreInst>(Chain[i]))
                       << "Stores SLP vectorized with cost " << NV("Cost", Cost)
                       << " and with tree size "
                       << NV("TreeSize", R.getTreeSize()));

      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
                                        BoUpSLP &R) {
  SetVector<StoreInst *> Heads, Tails;
  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;

  // We may run into multiple chains that merge into a single chain. We mark
  // the stores that we vectorized so that we don't visit the same store twice.
  BoUpSLP::ValueSet VectorizedStores;
  bool Changed = false;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  SmallVector<unsigned, 16> IndexQueue;
  for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
    IndexQueue.clear();
    // If a store has multiple consecutive store candidates, search the Stores
    // array according to the sequence: from i+1 to e, then from i-1 to 0.
    // Pairing with the immediately succeeding or preceding candidate usually
    // creates the best chance to find an SLP vectorization opportunity.
    unsigned j = 0;
    for (j = i + 1; j < e; ++j)
      IndexQueue.push_back(j);
    for (j = i; j > 0; --j)
      IndexQueue.push_back(j - 1);

    for (auto &k : IndexQueue) {
      if (isConsecutiveAccess(Stores[i], Stores[k], *DL, *SE)) {
        Tails.insert(Stores[k]);
        Heads.insert(Stores[i]);
        ConsecutiveChain[Stores[i]] = Stores[k];
        break;
      }
    }
  }

  // For stores that start but don't end a link in the chain:
  for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instruction that starts a chain. Now follow the chain
    // and try to vectorize it.
    BoUpSLP::ValueList Operands;
    StoreInst *I = *it;
    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (VectorizedStores.count(I))
        break;
      Operands.push_back(I);
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    // FIXME: Is division-by-2 the correct step? Should we assert that the
    // register size is a power-of-2?
    for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize();
         Size /= 2) {
      if (vectorizeStoreChain(Operands, R, Size)) {
        // Mark the vectorized stores so that we don't vectorize them again.
        VectorizedStores.insert(Operands.begin(), Operands.end());
        Changed = true;
        break;
      }
    }
  }

  return Changed;
}

void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
  // Initialize the collections. We will make a single pass over the block.
  Stores.clear();
  GEPs.clear();

  // Visit the store and getelementptr instructions in BB and organize them in
  // Stores and GEPs according to the underlying objects of their pointer
  // operands.
  for (Instruction &I : *BB) {
    // Ignore store instructions that are volatile or have a pointer operand
    // that doesn't point to a scalar type.
    if (auto *SI = dyn_cast<StoreInst>(&I)) {
      if (!SI->isSimple())
        continue;
      if (!isValidElementType(SI->getValueOperand()->getType()))
        continue;
      Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
    }

    // Ignore getelementptr instructions that have more than one index, a
    // constant index, or a pointer operand that doesn't point to a scalar
    // type.
    else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
      auto Idx = GEP->idx_begin()->get();
      if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
        continue;
      if (!isValidElementType(Idx->getType()))
        continue;
      if (GEP->getType()->isVectorTy())
        continue;
      GEPs[GetUnderlyingObject(GEP->getPointerOperand(), *DL)].push_back(GEP);
    }
  }
}

bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
  if (!A || !B)
    return false;
  Value *VL[] = { A, B };
  return tryToVectorizeList(VL, R, None, true);
}

bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
                                           ArrayRef<Value *> BuildVector,
                                           bool AllowReorder) {
  if (VL.size() < 2)
    return false;

  DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " << VL.size()
               << ".\n");

  // Check that all of the parts are scalar instructions of the same type.
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;

  unsigned Opcode0 = I0->getOpcode();

  unsigned Sz = R.getVectorElementSize(I0);
  unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz);
  unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
  if (MaxVF < 2)
    return false;

  for (Value *V : VL) {
    Type *Ty = V->getType();
    if (!isValidElementType(Ty))
      return false;
    Instruction *Inst = dyn_cast<Instruction>(V);
    if (!Inst || Inst->getOpcode() != Opcode0)
      return false;
  }

  bool Changed = false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakTrackingVH, 8> TrackValues(VL.begin(), VL.end());

  unsigned NextInst = 0, MaxInst = VL.size();
  for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF;
       VF /= 2) {
    // No actual vectorization should happen if the number of parts is the
    // same as the provided vectorization factor (i.e. the scalar type is
    // used for vector code during codegen).
    auto *VecTy = VectorType::get(VL[0]->getType(), VF);
    if (TTI->getNumberOfParts(VecTy) == VF)
      continue;
    for (unsigned I = NextInst; I < MaxInst; ++I) {
      unsigned OpsWidth = 0;

      if (I + VF > MaxInst)
        OpsWidth = MaxInst - I;
      else
        OpsWidth = VF;

      if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
        break;

      // Check that a previous iteration of this loop did not delete the
      // Value.
      if (hasValueBeenRAUWed(VL, TrackValues, I, OpsWidth))
        continue;

      DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
                   << "\n");
      ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);

      ArrayRef<Value *> BuildVectorSlice;
      if (!BuildVector.empty())
        BuildVectorSlice = BuildVector.slice(I, OpsWidth);

      R.buildTree(Ops, BuildVectorSlice);
      // TODO: check if we can allow reordering for more cases.
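      // An illustrative case for the reorder below: for the pair
      // {a[1] + b[1], a[0] + b[0]} the operand loads become consecutive only
      // in the opposite order, so retrying with {Ops[1], Ops[0]} can turn a
      // gather of the operands into wide consecutive loads.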
      if (AllowReorder && R.shouldReorder()) {
        // Conceptually, there is nothing actually preventing us from trying
        // to reorder a larger list. In fact, we do exactly this when
        // vectorizing reductions. However, at this point, we only expect to
        // get here when there are exactly two operations.
        assert(Ops.size() == 2);
        assert(BuildVectorSlice.empty());
        Value *ReorderedOps[] = {Ops[1], Ops[0]};
        R.buildTree(ReorderedOps, None);
      }
      if (R.isTreeTinyAndNotFullyVectorizable())
        continue;

      R.computeMinimumValueSizes();
      int Cost = R.getTreeCost();

      if (Cost < -SLPCostThreshold) {
        DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
        R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
                                            cast<Instruction>(Ops[0]))
                         << "SLP vectorized with cost " << ore::NV("Cost", Cost)
                         << " and with tree size "
                         << ore::NV("TreeSize", R.getTreeSize()));

        Value *VectorizedRoot = R.vectorizeTree();

        // Reconstruct the build vector by extracting the vectorized root.
        // This way we handle the case where some elements of the vector are
        // undefined.
        // (return (insertelement <4 x i32>
        //             (insertelement undef (opd0) 0) (opd1) 2))
        if (!BuildVectorSlice.empty()) {
          // The insert point is the last build vector instruction. The
          // vectorized root will precede it. This guarantees that we get an
          // instruction. The vectorized tree could have been constant folded.
          Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back());
          unsigned VecIdx = 0;
          for (auto &V : BuildVectorSlice) {
            IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
                                        ++BasicBlock::iterator(InsertAfter));
            Instruction *I = cast<Instruction>(V);
            assert(isa<InsertElementInst>(I) || isa<InsertValueInst>(I));
            Instruction *Extract =
                cast<Instruction>(Builder.CreateExtractElement(
                    VectorizedRoot, Builder.getInt32(VecIdx++)));
            I->setOperand(1, Extract);
            I->removeFromParent();
            I->insertAfter(Extract);
            InsertAfter = I;
          }
        }
        // Move to the next bundle.
        I += VF - 1;
        NextInst = I + 1;
        Changed = true;
      }
    }
  }

  return Changed;
}

bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
  if (!I)
    return false;

  if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I))
    return false;

  Value *P = I->getParent();

  // Vectorize in current basic block only.
  auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
  auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
  if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
    return false;

  // Try to vectorize I.
  if (tryToVectorizePair(Op0, Op1, R))
    return true;

  auto *A = dyn_cast<BinaryOperator>(Op0);
  auto *B = dyn_cast<BinaryOperator>(Op1);
  // Try to skip B.
  if (B && B->hasOneUse()) {
    auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
    auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
    if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R))
      return true;
    if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R))
      return true;
  }

  // Try to skip A.
  if (A && A->hasOneUse()) {
    auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
    auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
    if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R))
      return true;
    if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R))
      return true;
  }
  return false;
}

/// \brief Generate a shuffle mask to be used in a reduction tree.
///
/// \param VecLen The length of the vector to be reduced.
/// \param NumEltsToRdx The number of elements that should be reduced in the
///        vector.
/// \param IsPairwise Whether the reduction is a pairwise or splitting
///        reduction. A pairwise reduction will generate a mask of
///        <0,2,...> or <1,3,..> while a splitting reduction will generate
///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
/// \param IsLeft True will generate a mask of even elements, odd otherwise.
static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
                                   bool IsPairwise, bool IsLeft,
                                   IRBuilder<> &Builder) {
  assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");

  SmallVector<Constant *, 32> ShuffleMask(
      VecLen, UndefValue::get(Builder.getInt32Ty()));

  if (IsPairwise)
    // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
  else
    // Move the upper half of the vector to the lower half.
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);

  return ConstantVector::get(ShuffleMask);
}

namespace {
/// Model horizontal reductions.
///
/// A horizontal reduction is a tree of reduction operations (currently add and
/// fadd) that has operations that can be put into a vector as its leaf.
/// For example, this tree:
///
///   mul mul mul mul
///     \  /    \  /
///      +       +
///       \     /
///          +
///
/// This tree has "mul" as its reduced values and "+" as its reduction
/// operations. A reduction might be feeding into a store or a binary operation
/// feeding a phi.
///
///      ...
///      \  /
///       +
///       |
///    phi +=
///
/// Or:
///
///      ...
///      \  /
///       +
///       |
///     *p =
///
class HorizontalReduction {
  SmallVector<Value *, 16> ReductionOps;
  SmallVector<Value *, 32> ReducedVals;
  // Use map vector to make stable output.
  MapVector<Instruction *, Value *> ExtraArgs;

  /// Contains info about the operation, like its opcode, left and right
  /// operands.
  struct OperationData {
    /// true if the operation is a reduced value, false if reduction operation.
    bool IsReducedValue = false;
    /// Opcode of the instruction.
    unsigned Opcode = 0;
    /// Left operand of the reduction operation.
    Value *LHS = nullptr;
    /// Right operand of the reduction operation.
    Value *RHS = nullptr;

    /// Checks if the reduction operation can be vectorized.
    bool isVectorizable() const {
      return LHS && RHS &&
             // We currently only support adds.
             (Opcode == Instruction::Add || Opcode == Instruction::FAdd);
    }

  public:
    explicit OperationData() = default;
    /// Constructor for reduced values. They are identified by opcode only and
    /// don't have associated LHS/RHS values.
    explicit OperationData(Value *V) : IsReducedValue(true) {
      if (auto *I = dyn_cast<Instruction>(V))
        Opcode = I->getOpcode();
    }
    /// Constructor for binary reduction operations with the opcode and its
    /// left and right operands.
    OperationData(unsigned Opcode, Value *LHS, Value *RHS)
        : IsReducedValue(false), Opcode(Opcode), LHS(LHS), RHS(RHS) {}
    explicit operator bool() const { return Opcode; }
    /// Get the index of the first operand.
    unsigned getFirstOperandIndex() const {
      assert(!!*this && "The opcode is not set.");
      return 0;
    }
    /// Total number of operands in the reduction operation.
    unsigned getNumberOfOperands() const {
      assert(!IsReducedValue && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      return 2;
    }
    /// Expected number of uses for reduction operations/reduced values.
    unsigned getRequiredNumberOfUses() const {
      assert(!IsReducedValue && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      return 1;
    }
    /// Checks if the instruction is associative and can be vectorized.
    bool isAssociative(Instruction *I) const {
      assert(!IsReducedValue && *this && LHS && RHS &&
             "Expected reduction operation.");
      return I->isAssociative();
    }
    /// Checks if the reduction operation can be vectorized.
    bool isVectorizable(Instruction *I) const {
      return isVectorizable() && isAssociative(I);
    }

    /// Checks if two operation data are both a reduction op or both a reduced
    /// value.
    bool operator==(const OperationData &OD) {
      assert(((IsReducedValue != OD.IsReducedValue) ||
              ((!LHS == !OD.LHS) && (!RHS == !OD.RHS))) &&
             "One of the comparing operations is incorrect.");
      return this == &OD ||
             (IsReducedValue == OD.IsReducedValue && Opcode == OD.Opcode);
    }
    bool operator!=(const OperationData &OD) { return !(*this == OD); }
    void clear() {
      IsReducedValue = false;
      Opcode = 0;
      LHS = nullptr;
      RHS = nullptr;
    }
    /// Get the opcode of the reduction operation.
    unsigned getOpcode() const {
      assert(isVectorizable() && "Expected vectorizable operation.");
      return Opcode;
    }
    Value *getLHS() const { return LHS; }
    Value *getRHS() const { return RHS; }
    /// Creates a reduction operation with the current opcode.
    Value *createOp(IRBuilder<> &Builder, const Twine &Name = "") const {
      assert(!IsReducedValue &&
             (Opcode == Instruction::FAdd || Opcode == Instruction::Add) &&
             "Expected add|fadd reduction operation.");
      return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, LHS, RHS,
                                 Name);
    }
  };

  Instruction *ReductionRoot = nullptr;

  /// The operation data of the reduction operation.
  OperationData ReductionData;
  /// The operation data of the values we perform a reduction on.
  OperationData ReducedValueData;
  /// Should we model this reduction as a pairwise reduction tree or a tree
  /// that splits the vector in halves and adds those halves.
  bool IsPairwiseReduction = false;

  /// Checks if the ParentStackElem.first should be marked as a reduction
  /// operation with an extra argument or as extra argument itself.
  void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem,
                    Value *ExtraArg) {
    if (ExtraArgs.count(ParentStackElem.first)) {
      ExtraArgs[ParentStackElem.first] = nullptr;
      // We ran into something like:
      // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg.
      // The whole ParentStackElem.first should be considered as an extra
      // value in this case.
      // Do not perform analysis of the remaining operands of the
      // ParentStackElem.first instruction; this whole instruction is an
      // extra argument.
      ParentStackElem.second = ParentStackElem.first->getNumOperands();
    } else {
      // We ran into something like:
      // ParentStackElem.first += ... + ExtraArg + ...
      ExtraArgs[ParentStackElem.first] = ExtraArg;
    }
  }

  static OperationData getOperationData(Value *V) {
    if (!V)
      return OperationData();

    Value *LHS;
    Value *RHS;
    if (m_BinOp(m_Value(LHS), m_Value(RHS)).match(V))
      return OperationData(cast<BinaryOperator>(V)->getOpcode(), LHS, RHS);
    return OperationData(V);
  }

public:
  HorizontalReduction() = default;

  /// \brief Try to find a reduction tree.
  bool matchAssociativeReduction(PHINode *Phi, Instruction *B) {
    assert((!Phi || is_contained(Phi->operands(), B)) &&
           "The phi needs to use the binary operator");

    ReductionData = getOperationData(B);

    // We could have an initial reduction that is not an add.
    //   r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted in the first '+'.
    if (Phi) {
      if (ReductionData.getLHS() == Phi) {
        Phi = nullptr;
        B = dyn_cast<Instruction>(ReductionData.getRHS());
        ReductionData = getOperationData(B);
      } else if (ReductionData.getRHS() == Phi) {
        Phi = nullptr;
        B = dyn_cast<Instruction>(ReductionData.getLHS());
        ReductionData = getOperationData(B);
      }
    }

    if (!ReductionData.isVectorizable(B))
      return false;

    Type *Ty = B->getType();
    if (!isValidElementType(Ty))
      return false;

    ReducedValueData.clear();
    ReductionRoot = B;

    // Post-order traverse the reduction tree starting at B. We only handle
    // true trees containing only binary operators.
    SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
    Stack.push_back(std::make_pair(B, ReductionData.getFirstOperandIndex()));
    const unsigned NUses = ReductionData.getRequiredNumberOfUses();
    while (!Stack.empty()) {
      Instruction *TreeN = Stack.back().first;
      unsigned EdgeToVisit = Stack.back().second++;
      OperationData OpData = getOperationData(TreeN);
      bool IsReducedValue = OpData != ReductionData;

      // Postorder visit.
      if (IsReducedValue || EdgeToVisit == OpData.getNumberOfOperands()) {
        if (IsReducedValue)
          ReducedVals.push_back(TreeN);
        else {
          auto I = ExtraArgs.find(TreeN);
          if (I != ExtraArgs.end() && !I->second) {
            // Check if TreeN is an extra argument of its parent operation.
            if (Stack.size() <= 1) {
              // TreeN can't be an extra argument as it is a root reduction
              // operation.
              return false;
            }
            // Yes, TreeN is an extra argument, do not add it to a list of
            // reduction operations.
            // Stack[Stack.size() - 2] always points to the parent operation.
            markExtraArg(Stack[Stack.size() - 2], TreeN);
            ExtraArgs.erase(TreeN);
          } else
            ReductionOps.push_back(TreeN);
        }
        // Retract.
        Stack.pop_back();
        continue;
      }

      // Visit left or right.
      Value *NextV = TreeN->getOperand(EdgeToVisit);
      if (NextV != Phi) {
        auto *I = dyn_cast<Instruction>(NextV);
        OpData = getOperationData(I);
        // Continue analysis if the next operand is a reduction operation or
        // (possibly) a reduced value. If the reduced value opcode is not set
        // yet, the first operation encountered that differs from the reduction
        // operation defines the reduced value class.
        if (I && (!ReducedValueData || OpData == ReducedValueData ||
                  OpData == ReductionData)) {
          // Only handle trees in the current basic block.
          if (I->getParent() != B->getParent()) {
            // I is an extra argument for TreeN (its parent operation).
            markExtraArg(Stack.back(), I);
            continue;
          }

          // Each tree node needs to have the minimal number of uses, except
          // for the ultimate reduction.
          if (!I->hasNUses(NUses) && I != B) {
            // I is an extra argument for TreeN (its parent operation).
            markExtraArg(Stack.back(), I);
            continue;
          }

          if (OpData == ReductionData) {
            // We need to be able to reassociate the reduction operations.
            if (!OpData.isAssociative(I)) {
              // I is an extra argument for TreeN (its parent operation).
              markExtraArg(Stack.back(), I);
              continue;
            }
          } else if (ReducedValueData &&
                     ReducedValueData != OpData) {
            // Make sure that the opcodes of the operations that we are going
            // to reduce match.
            // I is an extra argument for TreeN (its parent operation).
            markExtraArg(Stack.back(), I);
            continue;
          } else if (!ReducedValueData)
            ReducedValueData = OpData;

          Stack.push_back(std::make_pair(I, OpData.getFirstOperandIndex()));
          continue;
        }
      }
      // NextV is an extra argument for TreeN (its parent operation).
      markExtraArg(Stack.back(), NextV);
    }
    return true;
  }

  /// \brief Attempt to vectorize the tree found by
  /// matchAssociativeReduction.
  bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
    if (ReducedVals.empty())
      return false;

    // If there is a sufficient number of reduction values, reduce
    // to a nearby power-of-2. We can safely generate oversized
    // vectors and rely on the backend to split them to legal sizes.
    unsigned NumReducedVals = ReducedVals.size();
    if (NumReducedVals < 4)
      return false;

    unsigned ReduxWidth = PowerOf2Floor(NumReducedVals);

    Value *VectorizedTree = nullptr;
    IRBuilder<> Builder(ReductionRoot);
    FastMathFlags Unsafe;
    Unsafe.setUnsafeAlgebra();
    Builder.setFastMathFlags(Unsafe);
    unsigned i = 0;

    BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
    // The same extra argument may be used several times, so log each attempt
    // to use it.
    for (auto &Pair : ExtraArgs)
      ExternallyUsedValues[Pair.second].push_back(Pair.first);
    while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) {
      auto VL = makeArrayRef(&ReducedVals[i], ReduxWidth);
      V.buildTree(VL, ExternallyUsedValues, ReductionOps);
      if (V.shouldReorder()) {
        SmallVector<Value *, 8> Reversed(VL.rbegin(), VL.rend());
        V.buildTree(Reversed, ExternallyUsedValues, ReductionOps);
      }
      if (V.isTreeTinyAndNotFullyVectorizable())
        break;

      V.computeMinimumValueSizes();

      // Estimate cost.
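      // Proceed only while the combined cost of the vectorized tree and the
      // horizontal reduction stays below -SLPCostThreshold; a negative cost
      // means the vector form is expected to be cheaper than the scalar one.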
      int Cost =
          V.getTreeCost() + getReductionCost(TTI, ReducedVals[i], ReduxWidth);
      if (Cost >= -SLPCostThreshold)
        break;

      DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
                   << ". (HorRdx)\n");
      auto *I0 = cast<Instruction>(VL[0]);
      V.getORE()->emit(
          OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction", I0)
          << "Vectorized horizontal reduction with cost "
          << ore::NV("Cost", Cost) << " and with tree size "
          << ore::NV("TreeSize", V.getTreeSize()));

      // Vectorize a tree.
      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
      Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);

      // Emit a reduction.
      Value *ReducedSubTree =
          emitReduction(VectorizedRoot, Builder, ReduxWidth, ReductionOps, TTI);
      if (VectorizedTree) {
        Builder.SetCurrentDebugLocation(Loc);
        OperationData VectReductionData(ReductionData.getOpcode(),
                                        VectorizedTree, ReducedSubTree);
        VectorizedTree = VectReductionData.createOp(Builder, "bin.rdx");
        propagateIRFlags(VectorizedTree, ReductionOps);
      } else
        VectorizedTree = ReducedSubTree;
      i += ReduxWidth;
      ReduxWidth = PowerOf2Floor(NumReducedVals - i);
    }

    if (VectorizedTree) {
      // Finish the reduction.
      for (; i < NumReducedVals; ++i) {
        auto *I = cast<Instruction>(ReducedVals[i]);
        Builder.SetCurrentDebugLocation(I->getDebugLoc());
        OperationData VectReductionData(ReductionData.getOpcode(),
                                        VectorizedTree, I);
        VectorizedTree = VectReductionData.createOp(Builder);
        propagateIRFlags(VectorizedTree, ReductionOps);
      }
      for (auto &Pair : ExternallyUsedValues) {
        assert(!Pair.second.empty() &&
               "At least one DebugLoc must be inserted");
        // Add each externally used value to the final reduction.
        for (auto *I : Pair.second) {
          Builder.SetCurrentDebugLocation(I->getDebugLoc());
          OperationData VectReductionData(ReductionData.getOpcode(),
                                          VectorizedTree, Pair.first);
          VectorizedTree = VectReductionData.createOp(Builder, "bin.extra");
          propagateIRFlags(VectorizedTree, I);
        }
      }
      // Update users.
      ReductionRoot->replaceAllUsesWith(VectorizedTree);
    }
    return VectorizedTree != nullptr;
  }

  unsigned numReductionValues() const {
    return ReducedVals.size();
  }

private:
  /// \brief Calculate the cost of a reduction.
  int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal,
                       unsigned ReduxWidth) {
    Type *ScalarTy = FirstReducedVal->getType();
    Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);

    int PairwiseRdxCost =
        TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy,
                                        /*IsPairwiseForm=*/true);
    int SplittingRdxCost =
        TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy,
                                        /*IsPairwiseForm=*/false);

    IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
    int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;

    int ScalarReduxCost =
        (ReduxWidth - 1) *
        TTI->getArithmeticInstrCost(ReductionData.getOpcode(), ScalarTy);

    DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
                 << " for reduction that starts with " << *FirstReducedVal
                 << " (It is a "
"pairwise" : "splitting") 4981 << " reduction)\n"); 4982 4983 return VecReduxCost - ScalarReduxCost; 4984 } 4985 4986 /// \brief Emit a horizontal reduction of the vectorized value. 4987 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 4988 unsigned ReduxWidth, ArrayRef<Value *> RedOps, 4989 const TargetTransformInfo *TTI) { 4990 assert(VectorizedValue && "Need to have a vectorized tree node"); 4991 assert(isPowerOf2_32(ReduxWidth) && 4992 "We only handle power-of-two reductions for now"); 4993 4994 if (!IsPairwiseReduction) 4995 return createSimpleTargetReduction( 4996 Builder, TTI, ReductionData.getOpcode(), VectorizedValue, 4997 TargetTransformInfo::ReductionFlags(), RedOps); 4998 4999 Value *TmpVec = VectorizedValue; 5000 for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) { 5001 Value *LeftMask = 5002 createRdxShuffleMask(ReduxWidth, i, true, true, Builder); 5003 Value *RightMask = 5004 createRdxShuffleMask(ReduxWidth, i, true, false, Builder); 5005 5006 Value *LeftShuf = Builder.CreateShuffleVector( 5007 TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l"); 5008 Value *RightShuf = Builder.CreateShuffleVector( 5009 TmpVec, UndefValue::get(TmpVec->getType()), (RightMask), 5010 "rdx.shuf.r"); 5011 OperationData VectReductionData(ReductionData.getOpcode(), LeftShuf, 5012 RightShuf); 5013 TmpVec = VectReductionData.createOp(Builder, "bin.rdx"); 5014 propagateIRFlags(TmpVec, RedOps); 5015 } 5016 5017 // The result is in the first element of the vector. 5018 return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 5019 } 5020 }; 5021 } // end anonymous namespace 5022 5023 /// \brief Recognize construction of vectors like 5024 /// %ra = insertelement <4 x float> undef, float %s0, i32 0 5025 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 5026 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 5027 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 5028 /// starting from the last insertelement instruction. 5029 /// 5030 /// Returns true if it matches 5031 /// 5032 static bool findBuildVector(InsertElementInst *LastInsertElem, 5033 SmallVectorImpl<Value *> &BuildVector, 5034 SmallVectorImpl<Value *> &BuildVectorOpds) { 5035 Value *V = nullptr; 5036 do { 5037 BuildVector.push_back(LastInsertElem); 5038 BuildVectorOpds.push_back(LastInsertElem->getOperand(1)); 5039 V = LastInsertElem->getOperand(0); 5040 if (isa<UndefValue>(V)) 5041 break; 5042 LastInsertElem = dyn_cast<InsertElementInst>(V); 5043 if (!LastInsertElem || !LastInsertElem->hasOneUse()) 5044 return false; 5045 } while (true); 5046 std::reverse(BuildVector.begin(), BuildVector.end()); 5047 std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end()); 5048 return true; 5049 } 5050 5051 /// \brief Like findBuildVector, but looks for construction of aggregate. 5052 /// 5053 /// \return true if it matches. 
static bool findBuildAggregate(InsertValueInst *IV,
                               SmallVectorImpl<Value *> &BuildVector,
                               SmallVectorImpl<Value *> &BuildVectorOpds) {
  Value *V;
  do {
    BuildVector.push_back(IV);
    BuildVectorOpds.push_back(IV->getInsertedValueOperand());
    V = IV->getAggregateOperand();
    if (isa<UndefValue>(V))
      break;
    IV = dyn_cast<InsertValueInst>(V);
    if (!IV || !IV->hasOneUse())
      return false;
  } while (true);
  std::reverse(BuildVector.begin(), BuildVector.end());
  std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end());
  return true;
}

/// Sort predicate that groups values with the same type together by comparing
/// the addresses of their types.
static bool PhiTypeSorterFunc(Value *V, Value *V2) {
  return V->getType() < V2->getType();
}

/// \brief Try and get a reduction value from a phi node.
///
/// Given a phi node \p P in a block \p ParentBB, consider possible reductions
/// if they come from either \p ParentBB or a containing loop latch.
///
/// \returns A candidate reduction value if possible, or \code nullptr \endcode
/// if not possible.
static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
                                BasicBlock *ParentBB, LoopInfo *LI) {
  // There are situations where the reduction value is not dominated by the
  // reduction phi. Vectorizing such cases has been reported to cause
  // miscompiles. See PR25787.
  auto DominatedReduxValue = [&](Value *R) {
    return isa<Instruction>(R) &&
           DT->dominates(P->getParent(), cast<Instruction>(R)->getParent());
  };

  Value *Rdx = nullptr;

  // Return the incoming value if it comes from the same BB as the phi node.
  if (P->getIncomingBlock(0) == ParentBB) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == ParentBB) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  // Otherwise, check whether we have a loop latch to look at.
  Loop *BBL = LI->getLoopFor(ParentBB);
  if (!BBL)
    return nullptr;
  BasicBlock *BBLatch = BBL->getLoopLatch();
  if (!BBLatch)
    return nullptr;

  // There is a loop latch, return the incoming value if it comes from
  // that. This reduction pattern occasionally turns up.
  if (P->getIncomingBlock(0) == BBLatch) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == BBLatch) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  return nullptr;
}

/// Attempt to match and vectorize a horizontal reduction.
/// If it is legal to match a horizontal reduction feeding the phi node \a P
/// with reduction operators \a Root (or one of its operands) in a basic block
/// \a BB, then check if it can be done. If a horizontal reduction is not found
/// and the root instruction is a binary operation, vectorization of its
/// operands is attempted.
/// \returns true if a horizontal reduction was matched and reduced, or if the
/// operands of one of the binary instructions were vectorized.
/// \returns false if a horizontal reduction was not matched (or not possible)
/// or no vectorization of any binary operation feeding \a Root was
/// performed.
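/// A typical candidate, sketched in IR (illustrative only):
///   %add1 = fadd fast float %x0, %x1
///   %add2 = fadd fast float %add1, %x2
///   %add3 = fadd fast float %add2, %x3
/// where %add3 is \a Root (and may feed the phi \a P); the chain is matched
/// by HorizontalReduction::matchAssociativeReduction and vectorized by
/// tryToReduce.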
static bool tryToVectorizeHorReductionOrInstOperands(
    PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
    TargetTransformInfo *TTI,
    const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
  if (!ShouldVectorizeHor)
    return false;

  if (!Root)
    return false;

  if (Root->getParent() != BB || isa<PHINode>(Root))
    return false;
  // Start analysis from the Root instruction. If a horizontal reduction is
  // found, try to vectorize it. If it is not a horizontal reduction or
  // vectorization is not possible or not effective, and the currently analyzed
  // instruction is a binary operation, try to vectorize the operands, using
  // pre-order DFS traversal order. If the operands were not vectorized, repeat
  // the same procedure considering each operand as a possible root of the
  // horizontal reduction.
  // Interrupt the process if the Root instruction itself was vectorized or all
  // sub-trees no deeper than RecursionMaxDepth were analyzed/vectorized.
  SmallVector<std::pair<WeakTrackingVH, unsigned>, 8> Stack(1, {Root, 0});
  SmallSet<Value *, 8> VisitedInstrs;
  bool Res = false;
  while (!Stack.empty()) {
    Value *V;
    unsigned Level;
    std::tie(V, Level) = Stack.pop_back_val();
    if (!V)
      continue;
    auto *Inst = dyn_cast<Instruction>(V);
    if (!Inst)
      continue;
    if (auto *BI = dyn_cast<BinaryOperator>(Inst)) {
      HorizontalReduction HorRdx;
      if (HorRdx.matchAssociativeReduction(P, BI)) {
        if (HorRdx.tryToReduce(R, TTI)) {
          Res = true;
          // Set P to nullptr to avoid re-analysis of phi node in
          // matchAssociativeReduction function unless this is the root node.
          P = nullptr;
          continue;
        }
      }
      if (P) {
        Inst = dyn_cast<Instruction>(BI->getOperand(0));
        if (Inst == P)
          Inst = dyn_cast<Instruction>(BI->getOperand(1));
        if (!Inst) {
          // Set P to nullptr to avoid re-analysis of phi node in
          // matchAssociativeReduction function unless this is the root node.
          P = nullptr;
          continue;
        }
      }
    }
    // Set P to nullptr to avoid re-analysis of phi node in
    // matchAssociativeReduction function unless this is the root node.
    P = nullptr;
    if (Vectorize(Inst, R)) {
      Res = true;
      continue;
    }

    // Try to vectorize operands.
    // Continue analysis for the instruction from the same basic block only to
    // save compile time.
    if (++Level < RecursionMaxDepth)
      for (auto *Op : Inst->operand_values())
        if (VisitedInstrs.insert(Op).second)
          if (auto *I = dyn_cast<Instruction>(Op))
            if (!isa<PHINode>(Inst) && I->getParent() == BB)
              Stack.emplace_back(Op, Level);
  }
  return Res;
}

bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
                                                 BasicBlock *BB, BoUpSLP &R,
                                                 TargetTransformInfo *TTI) {
  if (!V)
    return false;
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  if (!isa<BinaryOperator>(I))
    P = nullptr;
  // Try to match and vectorize a horizontal reduction.
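  // The callback below is the fallback used by the traversal when no
  // horizontal reduction is matched at a node: it attempts plain SLP
  // vectorization of the instruction via tryToVectorize.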
  auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool {
    return tryToVectorize(I, R);
  };
  return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI,
                                                  ExtraVectorization);
}

bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
                                                 BasicBlock *BB, BoUpSLP &R) {
  const DataLayout &DL = BB->getModule()->getDataLayout();
  if (!R.canMapToVector(IVI->getType(), DL))
    return false;

  SmallVector<Value *, 16> BuildVector;
  SmallVector<Value *, 16> BuildVectorOpds;
  if (!findBuildAggregate(IVI, BuildVector, BuildVectorOpds))
    return false;

  DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
  return tryToVectorizeList(BuildVectorOpds, R, BuildVector, false);
}

bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
                                                   BasicBlock *BB, BoUpSLP &R) {
  SmallVector<Value *, 16> BuildVector;
  SmallVector<Value *, 16> BuildVectorOpds;
  if (!findBuildVector(IEI, BuildVector, BuildVectorOpds))
    return false;

  // Vectorize starting with the build vector operands, ignoring the
  // BuildVector instructions for the purpose of scheduling and user
  // extraction.
  return tryToVectorizeList(BuildVectorOpds, R, BuildVector);
}

bool SLPVectorizerPass::vectorizeCmpInst(CmpInst *CI, BasicBlock *BB,
                                         BoUpSLP &R) {
  if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R))
    return true;

  bool OpsChanged = false;
  for (int Idx = 0; Idx < 2; ++Idx) {
    OpsChanged |=
        vectorizeRootInstruction(nullptr, CI->getOperand(Idx), BB, R, TTI);
  }
  return OpsChanged;
}

bool SLPVectorizerPass::vectorizeSimpleInstructions(
    SmallVectorImpl<WeakVH> &Instructions, BasicBlock *BB, BoUpSLP &R) {
  bool OpsChanged = false;
  for (auto &VH : reverse(Instructions)) {
    auto *I = dyn_cast_or_null<Instruction>(VH);
    if (!I)
      continue;
    if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I))
      OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
    else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I))
      OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
    else if (auto *CI = dyn_cast<CmpInst>(I))
      OpsChanged |= vectorizeCmpInst(CI, BB, R);
  }
  Instructions.clear();
  return OpsChanged;
}

bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallSet<Value *, 16> VisitedInstrs;

  bool HaveVectorizedPhiNodes = true;
  while (HaveVectorizedPhiNodes) {
    HaveVectorizedPhiNodes = false;

    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (Instruction &I : *BB) {
      PHINode *P = dyn_cast<PHINode>(&I);
      if (!P)
        break;

      if (!VisitedInstrs.count(P))
        Incoming.push_back(P);
    }

    // Sort by type.
    std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);

    // Try to vectorize elements based on their type.
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
                                           E = Incoming.end();
         IncIt != E;) {

      // Look for the next elements with the same type.
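      // [IncIt, SameTypeIt) below delimits a run of PHIs with identical type;
      // that run becomes the candidate bundle handed to tryToVectorizeList.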
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts
                   << ")\n");
      // The order in which the phi nodes appear in the program does not matter.
      // So allow tryToVectorizeList to reorder them if it is beneficial. This
      // is done when there are exactly two elements since tryToVectorizeList
      // asserts that there are only two values when AllowReorder is true.
      bool AllowReorder = NumElts == 2;
      if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R,
                                            None, AllowReorder)) {
        // Success: start over because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  SmallVector<WeakVH, 8> PostProcessInstructions;
  SmallDenseSet<Instruction *, 4> KeyNodes;
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
    // We may go through BB multiple times, so skip the ones we have already
    // checked.
    if (!VisitedInstrs.insert(&*it).second) {
      if (it->use_empty() && KeyNodes.count(&*it) > 0 &&
          vectorizeSimpleInstructions(PostProcessInstructions, BB, R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;

      // Try to match and vectorize a horizontal reduction.
      if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
                                   TTI)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
      continue;
    }

    // Ran into an instruction without users, such as a terminator, a function
    // call with an ignored return value, or a store. Ignore unused
    // instructions (based on the instruction type, except for CallInst and
    // InvokeInst).
    if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
                            isa<InvokeInst>(it))) {
      KeyNodes.insert(&*it);
      bool OpsChanged = false;
      if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
        for (auto *V : it->operand_values()) {
          // Try to match and vectorize a horizontal reduction.
          OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
        }
      }
      // Start vectorization of post-process list of instructions from the
      // top-tree instructions to try to vectorize as many instructions as
      // possible.
      OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R);
      if (OpsChanged) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
    }

    if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
        isa<InsertValueInst>(it))
      PostProcessInstructions.push_back(&*it);
  }

  return Changed;
}

bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
  auto Changed = false;
  for (auto &Entry : GEPs) {
    // If the getelementptr list has fewer than two elements, there's nothing
    // to do.
    if (Entry.second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
                 << Entry.second.size() << ".\n");

    // We process the getelementptr list in chunks of 16 (like we do for
    // stores) to minimize compile-time.
    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += 16) {
      auto Len = std::min<unsigned>(BE - BI, 16);
      auto GEPList = makeArrayRef(&Entry.second[BI], Len);

      // Initialize a set of candidate getelementptrs. Note that we use a
      // SetVector here to preserve program order. If the index computations
      // are vectorizable and begin with loads, we want to minimize the chance
      // of having to reorder them later.
      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

      // Some of the candidates may have already been vectorized after we
      // initially collected them. If so, the WeakTrackingVHs will have
      // nullified the values, so remove them from the set of candidates.
      Candidates.remove(nullptr);

      // Remove from the set of candidates all pairs of getelementptrs with
      // constant differences. Such getelementptrs are likely not good
      // candidates for vectorization in a bottom-up phase since one can be
      // computed from the other. We also ensure all candidate getelementptr
      // indices are unique.
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
        auto *GEPI = cast<GetElementPtrInst>(GEPList[I]);
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = cast<GetElementPtrInst>(GEPList[J]);
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPList[I]);
            Candidates.remove(GEPList[J]);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPList[J]);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;

      // Add the single, non-constant index of each candidate to the bundle. We
      // ensured the indices met these constraints when we originally collected
      // the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try and vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      //   ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel.
      // It's likely that detecting this pattern in a bottom-up phase will be
      // simpler and less costly than building a full-blown top-down phase
      // beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e;
       ++it) {
    if (it->second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                 << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
    // TODO: The limit of 16 inhibits greater vectorization factors.
    //       For example, AVX2 supports v32i8. Increasing this limit, however,
    //       may cause a significant compile-time increase.
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len), R);
    }
  }
  return Changed;
}

char SLPVectorizer::ID = 0;

static const char lv_name[] = "SLP Vectorizer";

INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

namespace llvm {
Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
} // end namespace llvm
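
// Usage sketch (not part of the pass itself): with the command-line options
// declared above, the legacy pass can be exercised through opt, e.g.:
//   opt -slp-vectorizer -S input.ll
//   opt -slp-vectorizer -slp-threshold=-10 -slp-vectorize-hor=true -S input.ll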