//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <memory>

using namespace llvm;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
    cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// \brief Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return false;

    if (BB != I->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (Value *i : VL)
    if (!isa<Constant>(i))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}
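
// Illustrative example (names are made up for this sketch): an alternating
// scalar sequence such as
//   c0 = a0 + b0;  c1 = a1 - b1;  c2 = a2 + b2;  c3 = a3 - b3;
// can be vectorized as one vector add, one vector sub, and a shufflevector
// that picks the even lanes of the add and the odd lanes of the sub. The
// helpers below recognize such alternating opcode patterns.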
///\returns Opcode that can be clubbed with \p Op to create an alternate
/// sequence which can later be merged as a ShuffleVector instruction.
static unsigned getAltOpcode(unsigned Op) {
  switch (Op) {
  case Instruction::FAdd:
    return Instruction::FSub;
  case Instruction::FSub:
    return Instruction::FAdd;
  case Instruction::Add:
    return Instruction::Sub;
  case Instruction::Sub:
    return Instruction::Add;
  default:
    return 0;
  }
}

///\returns bool representing if Opcode \p Op can be part
/// of an alternate sequence which can later be merged as
/// a ShuffleVector instruction.
static bool canCombineAsAltInst(unsigned Op) {
  return Op == Instruction::FAdd || Op == Instruction::FSub ||
         Op == Instruction::Sub || Op == Instruction::Add;
}

/// \returns Instruction::ShuffleVector if the instructions in \p VL form an
/// alternating fadd,fsub / fsub,fadd / add,sub / sub,add sequence
/// (e.g. opcodes of fadd,fsub,fadd,fsub...), or 0 otherwise.
static unsigned isAltInst(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  unsigned Opcode = I0->getOpcode();
  unsigned AltOpcode = getAltOpcode(Opcode);
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || I->getOpcode() != ((i & 1) ? AltOpcode : Opcode))
      return 0;
  }
  return Instruction::ShuffleVector;
}

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode()) {
      if (canCombineAsAltInst(Opcode) && i == 1)
        return isAltInst(VL);
      return 0;
    }
  }
  return Opcode;
}

/// Get the intersection (logical and) of all of the potential IR flags
/// of each scalar operation (VL) that will be converted into a vector (I).
/// Flag set: NSW, NUW, exact, and all of fast-math.
static void propagateIRFlags(Value *I, ArrayRef<Value *> VL) {
  if (auto *VecOp = dyn_cast<Instruction>(I)) {
    if (auto *I0 = dyn_cast<Instruction>(VL[0])) {
      // VecOp is initialized to the 0th scalar, so start counting from index
      // '1'.
      VecOp->copyIRFlags(I0);
      for (int i = 1, e = VL.size(); i < e; ++i) {
        if (auto *Scalar = dyn_cast<Instruction>(VL[i]))
          VecOp->andIRFlags(Scalar);
      }
    }
  }
}

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static bool matchExtractIndex(Instruction *E, unsigned Idx, unsigned Opcode) {
  assert(Opcode == Instruction::ExtractElement ||
         Opcode == Instruction::ExtractValue);
  if (Opcode == Instruction::ExtractElement) {
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    return CI && CI->getZExtValue() == Idx;
  } else {
    ExtractValueInst *EI = cast<ExtractValueInst>(E);
    return EI->getNumIndices() == 1 && *EI->idx_begin() == Idx;
  }
}

/// \returns True if an in-tree use also needs to be extracted. This refers to
/// a possible scalar operand in a vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {

  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    if (hasVectorInstrinsicScalarOpd(ID, 1)) {
      return (CI->getArgOperand(1) == Scalar);
    }
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

namespace llvm {
namespace slpvectorizer {
/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;
  typedef MapVector<Value *, SmallVector<Instruction *, 2>>
      ExtraValueToDebugLocsMap;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL)
      : NumLoadsWantToKeepOrder(0), NumLoadsWantToChangeOrder(0), F(Func),
        SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC), DB(DB),
        DL(DL), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize = TTI->getRegisterBitWidth(true);

    MinVecRegSize = MinVectorRegSizeOption;
  }

  /// \brief Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();
  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractvalue instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);
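
  // Typical driver usage (illustrative sketch; the variable names here are
  // not part of this file):
  //   BoUpSLP R(F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL);
  //   R.buildTree(Bundle);
  //   R.computeMinimumValueSizes();
  //   if (R.getTreeCost() < -SLPCostThreshold)
  //     R.vectorizeTree();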
  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  int getSpillCost();

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);
  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst, taking
  /// into account (and updating, if required) the list of externally used
  /// values stored in \p ExternallyUsedValues.
  void buildTree(ArrayRef<Value *> Roots,
                 ExtraValueToDebugLocsMap &ExternallyUsedValues,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumLoadsWantToKeepOrder = 0;
    NumLoadsWantToChangeOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
  }

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// \returns true if it is beneficial to reverse the vector order.
  bool shouldReorder() const {
    return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
  }

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  /// \brief Check if ArrayType or StructType is isomorphic to some VectorType.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable();

private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, int);

  /// \returns True if the ExtractElement/ExtractValue instructions in VL can
  /// be vectorized to use the original vector (or aggregate "bitcast" to a
  /// vector).
  bool canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const;

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This may happen in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL) const;

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even if the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  /// Reorder commutative operands in an alt shuffle if they result in
  /// vectorized code.
  void reorderAltShuffleOperands(ArrayRef<Value *> VL,
                                 SmallVectorImpl<Value *> &Left,
                                 SmallVectorImpl<Value *> &Right);
  /// Reorder commutative operands to get a better probability of
  /// generating vectorized code.
  void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                      SmallVectorImpl<Value *> &Left,
                                      SmallVectorImpl<Value *> &Right);
  struct TreeEntry {
    TreeEntry(std::vector<TreeEntry> &Container)
        : Scalars(), VectorizedValue(nullptr), NeedToGather(0),
          Container(Container) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// Do we need to gather this sequence?
    bool NeedToGather;

    /// Points back to the VectorizableTree.
    ///
    /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has
    /// to be a pointer and needs to be able to initialize the child iterator.
    /// Thus we need a reference back to the container to translate the indices
    /// to entries.
    std::vector<TreeEntry> &Container;

    /// The TreeEntry index containing the user of this entry. We can actually
    /// have multiple users so the data structure is not truly a tree.
    SmallVector<int, 1> UserTreeIndices;
  };
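
  // Illustrative layout (not normative): VectorizableTree[0] holds the bundle
  // of root scalars passed to buildTree(); each recursive buildTree_rec call
  // appends a new entry for an operand bundle and records the index of its
  // user entry in UserTreeIndices. Because an entry can have several users,
  // the "tree" is really a DAG indexed by position in VectorizableTree.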
  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized,
                          int &UserTreeIdx) {
    VectorizableTree.emplace_back(VectorizableTree);
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      MustGather.insert(VL.begin(), VL.end());
    }

    if (UserTreeIdx >= 0)
      Last->UserTreeIndices.push_back(UserTreeIdx);
    UserTreeIdx = idx;
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value*, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser (Value *S, llvm::User *U, int L) :
      Scalar(S), User(U), Lane(L){}
    // Which scalar in our function.
    Value *Scalar;
    // Which user uses the scalar.
    llvm::User *User;
    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// Checks if two instructions may access the same memory.
  ///
  /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
  /// is invariant in the calling loop.
  bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
                 Instruction *Inst2) {

    // First check if the result is already in the cache.
    AliasCacheKey key = std::make_pair(Inst1, Inst2);
    Optional<bool> &result = AliasCache[key];
    if (result.hasValue()) {
      return result.getValue();
    }
    MemoryLocation Loc2 = getLocation(Inst2, AA);
    bool aliased = true;
    if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
      // Do the alias check.
      aliased = AA->alias(Loc1, Loc2);
    }
    // Store the result in the cache.
    result = aliased;
    return aliased;
  }

  typedef std::pair<Instruction *, Instruction *> AliasCacheKey;

  /// Cache for alias results.
  /// TODO: consider moving this to the AliasAnalysis itself.
  DenseMap<AliasCacheKey, Optional<bool>> AliasCache;

  /// Removes an instruction from its block and eventually deletes it.
  /// It's like Instruction::eraseFromParent() except that the actual deletion
  /// is delayed until BoUpSLP is destructed.
  /// This is required to ensure that there are no incorrect collisions in the
  /// AliasCache, which can happen if a new instruction is allocated at the
  /// same address as a previously deleted instruction.
  void eraseInstruction(Instruction *I) {
    I->removeFromParent();
    I->dropAllReferences();
    DeletedInstructions.push_back(std::unique_ptr<Instruction>(I));
  }

  /// Temporary store for deleted instructions. Instructions will be deleted
  /// eventually when the BoUpSLP is destructed.
  SmallVector<std::unique_ptr<Instruction>, 8> DeletedInstructions;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User). External User
  /// can be nullptr, which means that this Internal Scalar will be used later,
  /// after vectorization.
  UserList ExternalUses;

  /// Values used only by @llvm.assume calls.
  SmallPtrSet<const Value *, 32> EphValues;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;
  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;

  /// Contains all scheduling relevant data for an instruction.
  /// A ScheduleData either represents a single instruction or a member of an
  /// instruction bundle (= a group of instructions which is combined into a
  /// vector instruction).
  struct ScheduleData {

    // The initial value for the dependency counters. It means that the
    // dependencies are not calculated yet.
    enum { InvalidDeps = -1 };

    ScheduleData()
        : Inst(nullptr), FirstInBundle(nullptr), NextInBundle(nullptr),
          NextLoadStore(nullptr), SchedulingRegionID(0), SchedulingPriority(0),
          Dependencies(InvalidDeps), UnscheduledDeps(InvalidDeps),
          UnscheduledDepsInBundle(InvalidDeps), IsScheduled(false) {}

    void init(int BlockSchedulingRegionID) {
      FirstInBundle = this;
      NextInBundle = nullptr;
      NextLoadStore = nullptr;
      IsScheduled = false;
      SchedulingRegionID = BlockSchedulingRegionID;
      UnscheduledDepsInBundle = UnscheduledDeps;
      clearDependencies();
    }

    /// Returns true if the dependency information has been calculated.
    bool hasValidDependencies() const { return Dependencies != InvalidDeps; }

    /// Returns true for single instructions and for bundle representatives
    /// (= the head of a bundle).
    bool isSchedulingEntity() const { return FirstInBundle == this; }

    /// Returns true if it represents an instruction bundle and not only a
    /// single instruction.
    bool isPartOfBundle() const {
      return NextInBundle != nullptr || FirstInBundle != this;
    }

    /// Returns true if it is ready for scheduling, i.e. it has no more
    /// unscheduled depending instructions/bundles.
    bool isReady() const {
      assert(isSchedulingEntity() &&
             "can't consider non-scheduling entity for ready list");
      return UnscheduledDepsInBundle == 0 && !IsScheduled;
    }

    /// Modifies the number of unscheduled dependencies, also updating it for
    /// the whole bundle.
    int incrementUnscheduledDeps(int Incr) {
      UnscheduledDeps += Incr;
      return FirstInBundle->UnscheduledDepsInBundle += Incr;
    }

    /// Sets the number of unscheduled dependencies to the number of
    /// dependencies.
    void resetUnscheduledDeps() {
      incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
    }

    /// Clears all dependency information.
    void clearDependencies() {
      Dependencies = InvalidDeps;
      resetUnscheduledDeps();
      MemoryDependencies.clear();
    }

    void dump(raw_ostream &os) const {
      if (!isSchedulingEntity()) {
        os << "/ " << *Inst;
      } else if (NextInBundle) {
        os << '[' << *Inst;
        ScheduleData *SD = NextInBundle;
        while (SD) {
          os << ';' << *SD->Inst;
          SD = SD->NextInBundle;
        }
        os << ']';
      } else {
        os << *Inst;
      }
    }

    Instruction *Inst;

    /// Points to the head in an instruction bundle (and always to this for
    /// single instructions).
    ScheduleData *FirstInBundle;

    /// Singly linked list of all instructions in a bundle. Null if it is a
    /// single instruction.
    ScheduleData *NextInBundle;

    /// Singly linked list of all memory instructions (e.g. load, store, call)
    /// in the block - until the end of the scheduling region.
    ScheduleData *NextLoadStore;

    /// The dependent memory instructions.
    /// This list is derived on demand in calculateDependencies().
    SmallVector<ScheduleData *, 4> MemoryDependencies;

    /// This ScheduleData is in the current scheduling region if this matches
    /// the current SchedulingRegionID of BlockScheduling.
    int SchedulingRegionID;

    /// Used for getting a "good" final ordering of instructions.
    int SchedulingPriority;

    /// The number of dependencies. Consists of the number of users of the
    /// instruction plus the number of dependent memory instructions (if any).
    /// This value is calculated on demand.
    /// If InvalidDeps, the number of dependencies is not calculated yet.
    int Dependencies;

    /// The number of dependencies minus the number of dependencies of
    /// scheduled instructions. As soon as this is zero, the instruction/bundle
    /// gets ready for scheduling.
    /// Note that this is negative as long as Dependencies is not calculated.
    int UnscheduledDeps;

    /// The sum of UnscheduledDeps in a bundle. Equals UnscheduledDeps for
    /// single instructions.
    int UnscheduledDepsInBundle;

    /// True if this instruction is scheduled (or considered as scheduled in
    /// the dry-run).
    bool IsScheduled;
  };

#ifndef NDEBUG
  friend inline raw_ostream &operator<<(raw_ostream &os,
                                        const BoUpSLP::ScheduleData &SD) {
    SD.dump(os);
    return os;
  }
#endif
  friend struct GraphTraits<BoUpSLP *>;
  friend struct DOTGraphTraits<BoUpSLP *>;

  /// Contains all scheduling data for a basic block.
  ///
  struct BlockScheduling {

    BlockScheduling(BasicBlock *BB)
        : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize),
          ScheduleStart(nullptr), ScheduleEnd(nullptr),
          FirstLoadStoreInRegion(nullptr), LastLoadStoreInRegion(nullptr),
          ScheduleRegionSize(0),
          ScheduleRegionSizeLimit(ScheduleRegionSizeBudget),
          // Make sure that the initial SchedulingRegionID is greater than the
          // initial SchedulingRegionID in ScheduleData (which is 0).
          SchedulingRegionID(1) {}

    void clear() {
      ReadyInsts.clear();
      ScheduleStart = nullptr;
      ScheduleEnd = nullptr;
      FirstLoadStoreInRegion = nullptr;
      LastLoadStoreInRegion = nullptr;

      // Reduce the maximum schedule region size by the size of the
      // previous scheduling run.
      ScheduleRegionSizeLimit -= ScheduleRegionSize;
      if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
        ScheduleRegionSizeLimit = MinScheduleRegionSize;
      ScheduleRegionSize = 0;

      // Make a new scheduling region, i.e. all existing ScheduleData is not
      // in the new region yet.
      ++SchedulingRegionID;
    }

    ScheduleData *getScheduleData(Value *V) {
      ScheduleData *SD = ScheduleDataMap[V];
      if (SD && SD->SchedulingRegionID == SchedulingRegionID)
        return SD;
      return nullptr;
    }

    bool isInSchedulingRegion(ScheduleData *SD) {
      return SD->SchedulingRegionID == SchedulingRegionID;
    }
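
    // Scheduling sketch (illustrative summary of the machinery below): every
    // bundle starts out with UnscheduledDepsInBundle unscheduled
    // dependencies; schedule() decrements the counters of everything that
    // depends on a just-scheduled bundle, and any bundle whose counter drops
    // to zero is pushed onto the ready list, giving a simple list-scheduling
    // order within the region.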
    /// Marks an instruction as scheduled and puts all dependent ready
    /// instructions into the ready-list.
    template <typename ReadyListType>
    void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
      SD->IsScheduled = true;
      DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");

      ScheduleData *BundleMember = SD;
      while (BundleMember) {
        // Handle the def-use chain dependencies.
        for (Use &U : BundleMember->Inst->operands()) {
          ScheduleData *OpDef = getScheduleData(U.get());
          if (OpDef && OpDef->hasValidDependencies() &&
              OpDef->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = OpDef->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (def): " << *DepBundle << "\n");
          }
        }
        // Handle the memory dependencies.
        for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
          if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (mem): " << *DepBundle << "\n");
          }
        }
        BundleMember = BundleMember->NextInBundle;
      }
    }

    /// Put all instructions into the ReadyList which are ready for scheduling.
    template <typename ReadyListType>
    void initialFillReadyList(ReadyListType &ReadyList) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
        ScheduleData *SD = getScheduleData(I);
        if (SD->isSchedulingEntity() && SD->isReady()) {
          ReadyList.insert(SD);
          DEBUG(dbgs() << "SLP: initially in ready list: " << *I << "\n");
        }
      }
    }

    /// Checks if a bundle of instructions can be scheduled, i.e. has no
    /// cyclic dependencies. This is only a dry-run, no instructions are
    /// actually moved at this stage.
    bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP);

    /// Un-bundles a group of instructions.
    void cancelScheduling(ArrayRef<Value *> VL);

    /// Extends the scheduling region so that V is inside the region.
    /// \returns true if the region size is within the limit.
    bool extendSchedulingRegion(Value *V);

    /// Initialize the ScheduleData structures for new instructions in the
    /// scheduling region.
    void initScheduleData(Instruction *FromI, Instruction *ToI,
                          ScheduleData *PrevLoadStore,
                          ScheduleData *NextLoadStore);

    /// Updates the dependency information of a bundle and of all instructions/
    /// bundles which depend on the original bundle.
    void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
                               BoUpSLP *SLP);

    /// Sets all instructions in the scheduling region to un-scheduled.
    void resetSchedule();

    BasicBlock *BB;

    /// Simple memory allocation for ScheduleData.
    std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;

    /// The size of a ScheduleData array in ScheduleDataChunks.
    int ChunkSize;

    /// The allocator position in the current chunk, which is the last entry
    /// of ScheduleDataChunks.
    int ChunkPos;

    /// Attaches ScheduleData to Instruction.
    /// Note that the mapping survives during all vectorization iterations,
    /// i.e. ScheduleData structures are recycled.
    DenseMap<Value *, ScheduleData *> ScheduleDataMap;

    struct ReadyList : SmallVector<ScheduleData *, 8> {
      void insert(ScheduleData *SD) { push_back(SD); }
    };

    /// The ready-list for scheduling (only used for the dry-run).
    ReadyList ReadyInsts;

    /// The first instruction of the scheduling region.
    Instruction *ScheduleStart;

    /// The first instruction _after_ the scheduling region.
    Instruction *ScheduleEnd;

    /// The first memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *FirstLoadStoreInRegion;

    /// The last memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *LastLoadStoreInRegion;

    /// The current size of the scheduling region.
    int ScheduleRegionSize;

    /// The maximum size allowed for the scheduling region.
    int ScheduleRegionSizeLimit;

    /// The ID of the scheduling region. For a new vectorization iteration this
    /// is incremented which "removes" all ScheduleData from the region.
    int SchedulingRegionID;
  };

  /// Attaches the BlockScheduling structures to basic blocks.
  MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;

  /// Performs the "real" scheduling. Done before vectorization is actually
  /// performed in a basic block.
  void scheduleBlock(BlockScheduling *BS);

  /// List of users to ignore during scheduling and that don't need extracting.
  ArrayRef<Value *> UserIgnoreList;

  // Number of load bundles that contain consecutive loads.
  int NumLoadsWantToKeepOrder;

  // Number of load bundles that contain consecutive loads in reversed order.
  int NumLoadsWantToChangeOrder;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  AssumptionCache *AC;
  DemandedBits *DB;
  const DataLayout *DL;
  unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
  unsigned MinVecRegSize; // Set by cl::opt (default: 128).
  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;

  /// A map of scalar integer values to the smallest bit width with which they
  /// can legally be represented. The values map to (width, signed) pairs,
  /// where "width" indicates the minimum bit width and "signed" is True if the
  /// value must be sign-extended, rather than zero-extended, back to its
  /// original width.
  MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
};
} // end namespace slpvectorizer

template <> struct GraphTraits<BoUpSLP *> {
  typedef BoUpSLP::TreeEntry TreeEntry;

  /// NodeRef has to be a pointer per the GraphWriter.
  typedef TreeEntry *NodeRef;

  /// \brief Add the VectorizableTree to the index iterator to be able to
  /// return TreeEntry pointers.
  struct ChildIteratorType
      : public iterator_adaptor_base<ChildIteratorType,
                                     SmallVector<int, 1>::iterator> {

    std::vector<TreeEntry> &VectorizableTree;

    ChildIteratorType(SmallVector<int, 1>::iterator W,
                      std::vector<TreeEntry> &VT)
        : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}

    NodeRef operator*() { return &VectorizableTree[*I]; }
  };

  static NodeRef getEntryNode(BoUpSLP &R) { return &R.VectorizableTree[0]; }

  static ChildIteratorType child_begin(NodeRef N) {
    return {N->UserTreeIndices.begin(), N->Container};
  }
  static ChildIteratorType child_end(NodeRef N) {
    return {N->UserTreeIndices.end(), N->Container};
  }

  /// For the node iterator we just need to turn the TreeEntry iterator into a
  /// TreeEntry* iterator so that it dereferences to NodeRef.
  typedef pointer_iterator<std::vector<TreeEntry>::iterator> nodes_iterator;

  static nodes_iterator nodes_begin(BoUpSLP *R) {
    return nodes_iterator(R->VectorizableTree.begin());
  }
  static nodes_iterator nodes_end(BoUpSLP *R) {
    return nodes_iterator(R->VectorizableTree.end());
  }

  static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
};

template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
  typedef BoUpSLP::TreeEntry TreeEntry;

  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
    std::string Str;
    raw_string_ostream OS(Str);
    if (isSplat(Entry->Scalars)) {
      OS << "<splat> " << *Entry->Scalars[0];
      return Str;
    }
    for (auto V : Entry->Scalars) {
      OS << *V;
      if (std::any_of(
              R->ExternalUses.begin(), R->ExternalUses.end(),
              [&](const BoUpSLP::ExternalUser &EU) { return EU.Scalar == V; }))
        OS << " <extract>";
      OS << "\n";
    }
    return Str;
  }

  static std::string getNodeAttributes(const TreeEntry *Entry,
                                       const BoUpSLP *) {
    if (Entry->NeedToGather)
      return "color=red";
    return "";
  }
};

} // end namespace llvm

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ArrayRef<Value *> UserIgnoreLst) {
  ExtraValueToDebugLocsMap ExternallyUsedValues;
  buildTree(Roots, ExternallyUsedValues, UserIgnoreLst);
}

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ExtraValueToDebugLocsMap &ExternallyUsedValues,
                        ArrayRef<Value *> UserIgnoreLst) {
  deleteTree();
  UserIgnoreList = UserIgnoreLst;
  if (!allSameType(Roots))
    return;
  buildTree_rec(Roots, 0, -1);

  // Collect the values that we need to extract from the tree.
  for (TreeEntry &EIdx : VectorizableTree) {
    TreeEntry *Entry = &EIdx;

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      // Check if the scalar is externally used as an extra arg.
      auto ExtI = ExternallyUsedValues.find(Scalar);
      if (ExtI != ExternallyUsedValues.end()) {
        DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.emplace_back(Scalar, nullptr, Lane);
        continue;
      }
      for (User *U : Scalar->users()) {
        DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        Instruction *UserInst = dyn_cast<Instruction>(U);
        if (!UserInst)
          continue;

        // Skip in-tree scalars that become vectors.
        if (ScalarToTreeEntry.count(U)) {
          int Idx = ScalarToTreeEntry[U];
          TreeEntry *UseEntry = &VectorizableTree[Idx];
          Value *UseScalar = UseEntry->Scalars[0];
          // Some in-tree scalars will remain as scalar in vectorized
          // instructions. If that is the case, the one in Lane 0 will
          // be used.
          if (UseScalar != U ||
              !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
            DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
                         << ".\n");
            assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
            continue;
          }
        }

        // Ignore users in the user ignore list.
        if (is_contained(UserIgnoreList, UserInst))
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
      }
    }
  }
}

void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
                            int UserTreeIdx) {
  bool isAltShuffle = false;
  assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  unsigned Opcode = getSameOpcode(VL);

  // Check that this shuffle vector refers to the alternate
  // sequence of opcodes.
  if (Opcode == Instruction::ShuffleVector) {
    Instruction *I0 = dyn_cast<Instruction>(VL[0]);
    unsigned Op = I0->getOpcode();
    if (Op != Instruction::ShuffleVector)
      isAltShuffle = true;
  }

  // If all of the operands are identical or constant we have a simple
  // solution.
  if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !Opcode) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Don't vectorize ephemeral values.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (EphValues.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is ephemeral.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }

  // Check if this is a duplicate of another entry.
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }
    }
    // Record the reuse of the tree node. FIXME, currently this is only used to
    // properly draw the graph rather than for the actual vectorization.
    E->UserTreeIndices.push_back(UserTreeIdx);
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is already in tree.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }

  // If any of the scalars is marked as a value that needs to stay scalar then
  // we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  BasicBlock *BB = cast<Instruction>(VL0)->getParent();

  if (!DT->isReachableFromEntry(BB)) {
    // Don't go into unreachable blocks. They may contain instructions with
    // dependency cycles which confuse the final scheduling.
    DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  // Check that every instruction appears once in this bundle.
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    for (unsigned j = i+1; j < e; ++j)
      if (VL[i] == VL[j]) {
        DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }

  auto &BSRef = BlocksSchedules[BB];
  if (!BSRef) {
    BSRef = llvm::make_unique<BlockScheduling>(BB);
  }
  BlockScheduling &BS = *BSRef.get();

  if (!BS.tryScheduleBundle(VL, this)) {
    DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
    assert((!BS.getScheduleData(VL[0]) ||
            !BS.getScheduleData(VL[0])->isPartOfBundle()) &&
           "tryScheduleBundle should cancelScheduling on failure");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }
  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = dyn_cast<PHINode>(VL0);

    // Check for terminator values (e.g. invoke).
    for (unsigned j = 0; j < VL.size(); ++j)
      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        TerminatorInst *Term = dyn_cast<TerminatorInst>(
            cast<PHINode>(VL[j])->getIncomingValueForBlock(
                PH->getIncomingBlock(i)));
        if (Term) {
          DEBUG(dbgs()
                << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false, UserTreeIdx);
          return;
        }
      }

    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (Value *j : VL)
        Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock(
            PH->getIncomingBlock(i)));

      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    }
    return;
  }
  case Instruction::ExtractValue:
  case Instruction::ExtractElement: {
    bool Reuse = canReuseExtract(VL, Opcode);
    if (Reuse) {
      DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
    } else {
      BS.cancelScheduling(VL);
    }
    newTreeEntry(VL, Reuse, UserTreeIdx);
    return;
  }
  case Instruction::Load: {
    // Check that a vectorized load would load the same memory as a scalar
    // load.
    // For example, we don't want to vectorize loads that are smaller than
    // 8 bits. Even though we have a packed struct {<i2, i2, i2, i2>}, LLVM
    // treats loading/storing it as an i8 struct. If we vectorize loads/stores
    // from such a struct, we read/write packed bits disagreeing with the
    // unvectorized version.
    Type *ScalarTy = VL[0]->getType();

    if (DL->getTypeSizeInBits(ScalarTy) !=
        DL->getTypeAllocSizeInBits(ScalarTy)) {
      BS.cancelScheduling(VL);
      newTreeEntry(VL, false, UserTreeIdx);
      DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
      return;
    }

    // Make sure all loads in the bundle are simple - we can't vectorize
    // atomic or volatile loads.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
      LoadInst *L = cast<LoadInst>(VL[i]);
      if (!L->isSimple()) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
        return;
      }
    }

    // Check if the loads are consecutive, reversed, or neither.
    // TODO: What we really want is to sort the loads, but for now, check
    // the two likely directions.
    bool Consecutive = true;
    bool ReverseConsecutive = true;
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
      if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
        Consecutive = false;
        break;
      } else {
        ReverseConsecutive = false;
      }
    }

    if (Consecutive) {
      ++NumLoadsWantToKeepOrder;
      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a vector of loads.\n");
      return;
    }
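
    // Illustrative example: loads of p[0], p[1], p[2], p[3] (in that order)
    // are consecutive and keep their order; loads of p[3], p[2], p[1], p[0]
    // are reverse-consecutive and are only counted below so that
    // shouldReorder() can suggest retrying with the bundle reversed.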
    // If none of the load pairs were consecutive when checked in order,
    // check the reverse order.
    if (ReverseConsecutive)
      for (unsigned i = VL.size() - 1; i > 0; --i)
        if (!isConsecutiveAccess(VL[i], VL[i - 1], *DL, *SE)) {
          ReverseConsecutive = false;
          break;
        }

    BS.cancelScheduling(VL);
    newTreeEntry(VL, false, UserTreeIdx);

    if (ReverseConsecutive) {
      ++NumLoadsWantToChangeOrder;
      DEBUG(dbgs() << "SLP: Gathering reversed loads.\n");
    } else {
      DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
    }
    return;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();
    for (unsigned i = 0; i < VL.size(); ++i) {
      Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
      if (Ty != SrcTy || !isValidElementType(Ty)) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
        return;
      }
    }
    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a vector of casts.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (Value *j : VL)
        Operands.push_back(cast<Instruction>(j)->getOperand(i));

      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    }
    return;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Check that all of the compares have the same predicate.
    CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
    Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
    for (unsigned i = 1, e = VL.size(); i < e; ++i) {
      CmpInst *Cmp = cast<CmpInst>(VL[i]);
      if (Cmp->getPredicate() != P0 ||
          Cmp->getOperand(0)->getType() != ComparedTy) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
        return;
      }
    }

    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a vector of compares.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (Value *j : VL)
        Operands.push_back(cast<Instruction>(j)->getOperand(i));

      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    }
    return;
  }
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a vector of bin op.\n");
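
    // Illustrative example: for a bundle { a[0] + x, y + a[1] }, reordering
    // the commutative operands to { a[0] + x, a[1] + y } puts the consecutive
    // loads a[0] and a[1] into the same operand bundle, which is more likely
    // to vectorize.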
    // Sort operands of the instructions so that each side is more likely to
    // have the same opcode.
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
      ValueList Left, Right;
      reorderInputsAccordingToOpcode(VL, Left, Right);
      buildTree_rec(Left, Depth + 1, UserTreeIdx);
      buildTree_rec(Right, Depth + 1, UserTreeIdx);
      return;
    }

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (Value *j : VL)
        Operands.push_back(cast<Instruction>(j)->getOperand(i));

      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    }
    return;
  }
  case Instruction::GetElementPtr: {
    // We don't combine GEPs with complicated (nested) indexing.
    for (unsigned j = 0; j < VL.size(); ++j) {
      if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
        DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }
    }

    // We can't combine several GEPs into one vector if they operate on
    // different types.
    Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
    for (unsigned j = 0; j < VL.size(); ++j) {
      Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
      if (Ty0 != CurTy) {
        DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }
    }

    // We don't combine GEPs with non-constant indexes.
    for (unsigned j = 0; j < VL.size(); ++j) {
      auto Op = cast<Instruction>(VL[j])->getOperand(1);
      if (!isa<ConstantInt>(Op)) {
        DEBUG(
            dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }
    }

    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
    for (unsigned i = 0, e = 2; i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (Value *j : VL)
        Operands.push_back(cast<Instruction>(j)->getOperand(i));

      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    }
    return;
  }
  case Instruction::Store: {
    // Check if the stores are consecutive or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
      if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
        return;
      }

    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a vector of stores.\n");

    ValueList Operands;
    for (Value *j : VL)
      Operands.push_back(cast<Instruction>(j)->getOperand(0));

    buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    return;
  }
  case Instruction::Call: {
    // Check if the calls are all to the same vectorizable intrinsic.
    CallInst *CI = cast<CallInst>(VL[0]);
    // Check if this is an Intrinsic call or something that can be
    // represented by an intrinsic call.
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    if (!isTriviallyVectorizable(ID)) {
      BS.cancelScheduling(VL);
      newTreeEntry(VL, false, UserTreeIdx);
      DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
      return;
    }
    Function *Int = CI->getCalledFunction();
    Value *A1I = nullptr;
    if (hasVectorInstrinsicScalarOpd(ID, 1))
      A1I = CI->getArgOperand(1);
    for (unsigned i = 1, e = VL.size(); i != e; ++i) {
      CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
      if (!CI2 || CI2->getCalledFunction() != Int ||
          getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
          !CI->hasIdenticalOperandBundleSchema(*CI2)) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
                     << "\n");
        return;
      }
      // ctlz, cttz and powi are special intrinsics whose second argument
      // should be the same in order for them to be vectorized.
      if (hasVectorInstrinsicScalarOpd(ID, 1)) {
        Value *A1J = CI2->getArgOperand(1);
        if (A1I != A1J) {
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false, UserTreeIdx);
          DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
                       << " argument " << A1I << "!=" << A1J
                       << "\n");
          return;
        }
      }
      // Verify that the bundle operands are identical between the two calls.
      if (CI->hasOperandBundles() &&
          !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
                      CI->op_begin() + CI->getBundleOperandsEndIndex(),
                      CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI
                     << "!=" << *VL[i] << '\n');
        return;
      }
    }

    newTreeEntry(VL, true, UserTreeIdx);
    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (Value *j : VL) {
        CallInst *CI2 = dyn_cast<CallInst>(j);
        Operands.push_back(CI2->getArgOperand(i));
      }
      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    }
    return;
  }
  case Instruction::ShuffleVector: {
    // If this is not an alternate sequence of opcodes like add-sub
    // then do not vectorize this instruction.
    if (!isAltShuffle) {
      BS.cancelScheduling(VL);
      newTreeEntry(VL, false, UserTreeIdx);
      DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
      return;
    }
    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");

    // Reorder operands if reordering would enable vectorization.
    if (isa<BinaryOperator>(VL0)) {
      ValueList Left, Right;
      reorderAltShuffleOperands(VL, Left, Right);
      buildTree_rec(Left, Depth + 1, UserTreeIdx);
      buildTree_rec(Right, Depth + 1, UserTreeIdx);
      return;
    }

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
1616 for (Value *j : VL) 1617 Operands.push_back(cast<Instruction>(j)->getOperand(i)); 1618 1619 buildTree_rec(Operands, Depth + 1, UserTreeIdx); 1620 } 1621 return; 1622 } 1623 default: 1624 BS.cancelScheduling(VL); 1625 newTreeEntry(VL, false, UserTreeIdx); 1626 DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); 1627 return; 1628 } 1629 } 1630 1631 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const { 1632 unsigned N; 1633 Type *EltTy; 1634 auto *ST = dyn_cast<StructType>(T); 1635 if (ST) { 1636 N = ST->getNumElements(); 1637 EltTy = *ST->element_begin(); 1638 } else { 1639 N = cast<ArrayType>(T)->getNumElements(); 1640 EltTy = cast<ArrayType>(T)->getElementType(); 1641 } 1642 if (!isValidElementType(EltTy)) 1643 return 0; 1644 uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N)); 1645 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T)) 1646 return 0; 1647 if (ST) { 1648 // Check that struct is homogeneous. 1649 for (const auto *Ty : ST->elements()) 1650 if (Ty != EltTy) 1651 return 0; 1652 } 1653 return N; 1654 } 1655 1656 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const { 1657 assert(Opcode == Instruction::ExtractElement || 1658 Opcode == Instruction::ExtractValue); 1659 assert(Opcode == getSameOpcode(VL) && "Invalid opcode"); 1660 // Check if all of the extracts come from the same vector and from the 1661 // correct offset. 1662 Value *VL0 = VL[0]; 1663 Instruction *E0 = cast<Instruction>(VL0); 1664 Value *Vec = E0->getOperand(0); 1665 1666 // We have to extract from a vector/aggregate with the same number of elements. 1667 unsigned NElts; 1668 if (Opcode == Instruction::ExtractValue) { 1669 const DataLayout &DL = E0->getModule()->getDataLayout(); 1670 NElts = canMapToVector(Vec->getType(), DL); 1671 if (!NElts) 1672 return false; 1673 // Check if load can be rewritten as load of vector. 1674 LoadInst *LI = dyn_cast<LoadInst>(Vec); 1675 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 1676 return false; 1677 } else { 1678 NElts = Vec->getType()->getVectorNumElements(); 1679 } 1680 1681 if (NElts != VL.size()) 1682 return false; 1683 1684 // Check that all of the indices extract from the correct offset. 1685 if (!matchExtractIndex(E0, 0, Opcode)) 1686 return false; 1687 1688 for (unsigned i = 1, e = VL.size(); i < e; ++i) { 1689 Instruction *E = cast<Instruction>(VL[i]); 1690 if (!matchExtractIndex(E, i, Opcode)) 1691 return false; 1692 if (E->getOperand(0) != Vec) 1693 return false; 1694 } 1695 1696 return true; 1697 } 1698 1699 int BoUpSLP::getEntryCost(TreeEntry *E) { 1700 ArrayRef<Value*> VL = E->Scalars; 1701 1702 Type *ScalarTy = VL[0]->getType(); 1703 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 1704 ScalarTy = SI->getValueOperand()->getType(); 1705 else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0])) 1706 ScalarTy = CI->getOperand(0)->getType(); 1707 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 1708 1709 // If we have computed a smaller type for the expression, update VecTy so 1710 // that the costs will be accurate. 
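  // Illustrative sketch (the concrete widths are assumptions for exposition):
  // if the scalars are nominally i32 but MinBWs records that 8 bits suffice,
  // the costs below are queried for <N x i8> rather than <N x i32>, which can
  // make the vectorized form considerably cheaper.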
1711 if (MinBWs.count(VL[0])) 1712 VecTy = VectorType::get( 1713 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size()); 1714 1715 if (E->NeedToGather) { 1716 if (allConstant(VL)) 1717 return 0; 1718 if (isSplat(VL)) { 1719 return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0); 1720 } 1721 return getGatherCost(E->Scalars); 1722 } 1723 unsigned Opcode = getSameOpcode(VL); 1724 assert(Opcode && allSameType(VL) && allSameBlock(VL) && "Invalid VL"); 1725 Instruction *VL0 = cast<Instruction>(VL[0]); 1726 switch (Opcode) { 1727 case Instruction::PHI: { 1728 return 0; 1729 } 1730 case Instruction::ExtractValue: 1731 case Instruction::ExtractElement: { 1732 if (canReuseExtract(VL, Opcode)) { 1733 int DeadCost = 0; 1734 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 1735 Instruction *E = cast<Instruction>(VL[i]); 1736 // If all users are going to be vectorized, instruction can be 1737 // considered as dead. 1738 // The same, if have only one user, it will be vectorized for sure. 1739 if (E->hasOneUse() || 1740 std::all_of(E->user_begin(), E->user_end(), [this](User *U) { 1741 return ScalarToTreeEntry.count(U) > 0; 1742 })) 1743 // Take credit for instruction that will become dead. 1744 DeadCost += 1745 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i); 1746 } 1747 return -DeadCost; 1748 } 1749 return getGatherCost(VecTy); 1750 } 1751 case Instruction::ZExt: 1752 case Instruction::SExt: 1753 case Instruction::FPToUI: 1754 case Instruction::FPToSI: 1755 case Instruction::FPExt: 1756 case Instruction::PtrToInt: 1757 case Instruction::IntToPtr: 1758 case Instruction::SIToFP: 1759 case Instruction::UIToFP: 1760 case Instruction::Trunc: 1761 case Instruction::FPTrunc: 1762 case Instruction::BitCast: { 1763 Type *SrcTy = VL0->getOperand(0)->getType(); 1764 1765 // Calculate the cost of this instruction. 1766 int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(), 1767 VL0->getType(), SrcTy, VL0); 1768 1769 VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size()); 1770 int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy, VL0); 1771 return VecCost - ScalarCost; 1772 } 1773 case Instruction::FCmp: 1774 case Instruction::ICmp: 1775 case Instruction::Select: { 1776 // Calculate the cost of this instruction. 1777 VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size()); 1778 int ScalarCost = VecTy->getNumElements() * 1779 TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty(), VL0); 1780 int VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy, VL0); 1781 return VecCost - ScalarCost; 1782 } 1783 case Instruction::Add: 1784 case Instruction::FAdd: 1785 case Instruction::Sub: 1786 case Instruction::FSub: 1787 case Instruction::Mul: 1788 case Instruction::FMul: 1789 case Instruction::UDiv: 1790 case Instruction::SDiv: 1791 case Instruction::FDiv: 1792 case Instruction::URem: 1793 case Instruction::SRem: 1794 case Instruction::FRem: 1795 case Instruction::Shl: 1796 case Instruction::LShr: 1797 case Instruction::AShr: 1798 case Instruction::And: 1799 case Instruction::Or: 1800 case Instruction::Xor: { 1801 // Certain instructions can be cheaper to vectorize if they have a 1802 // constant second vector operand. 
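    // Worked example (hypothetical scalar bundles, for exposition only):
    //   { sdiv %a, 4 ; sdiv %b, 4 ; sdiv %c, 4 ; sdiv %d, 4 }
    //     -> Op2VK = OK_UniformConstantValue, Op2VP = OP_PowerOf2
    //   { sdiv %a, 4 ; sdiv %b, 6 ; sdiv %c, 8 ; sdiv %d, 2 }
    //     -> Op2VK = OK_NonUniformConstantValue
    //   { sdiv %a, %x ; ... }
    //     -> Op2VK = OK_AnyValue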
1803 TargetTransformInfo::OperandValueKind Op1VK = 1804 TargetTransformInfo::OK_AnyValue; 1805 TargetTransformInfo::OperandValueKind Op2VK = 1806 TargetTransformInfo::OK_UniformConstantValue; 1807 TargetTransformInfo::OperandValueProperties Op1VP = 1808 TargetTransformInfo::OP_None; 1809 TargetTransformInfo::OperandValueProperties Op2VP = 1810 TargetTransformInfo::OP_None; 1811 1812 // If all operands are exactly the same ConstantInt then set the 1813 // operand kind to OK_UniformConstantValue. 1814 // If instead not all operands are constants, then set the operand kind 1815 // to OK_AnyValue. If all operands are constants but not the same, 1816 // then set the operand kind to OK_NonUniformConstantValue. 1817 ConstantInt *CInt = nullptr; 1818 for (unsigned i = 0; i < VL.size(); ++i) { 1819 const Instruction *I = cast<Instruction>(VL[i]); 1820 if (!isa<ConstantInt>(I->getOperand(1))) { 1821 Op2VK = TargetTransformInfo::OK_AnyValue; 1822 break; 1823 } 1824 if (i == 0) { 1825 CInt = cast<ConstantInt>(I->getOperand(1)); 1826 continue; 1827 } 1828 if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && 1829 CInt != cast<ConstantInt>(I->getOperand(1))) 1830 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 1831 } 1832 // FIXME: Currently cost of model modification for division by power of 1833 // 2 is handled for X86 and AArch64. Add support for other targets. 1834 if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt && 1835 CInt->getValue().isPowerOf2()) 1836 Op2VP = TargetTransformInfo::OP_PowerOf2; 1837 1838 int ScalarCost = VecTy->getNumElements() * 1839 TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, 1840 Op2VK, Op1VP, Op2VP); 1841 int VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK, 1842 Op1VP, Op2VP); 1843 return VecCost - ScalarCost; 1844 } 1845 case Instruction::GetElementPtr: { 1846 TargetTransformInfo::OperandValueKind Op1VK = 1847 TargetTransformInfo::OK_AnyValue; 1848 TargetTransformInfo::OperandValueKind Op2VK = 1849 TargetTransformInfo::OK_UniformConstantValue; 1850 1851 int ScalarCost = 1852 VecTy->getNumElements() * 1853 TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK); 1854 int VecCost = 1855 TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK); 1856 1857 return VecCost - ScalarCost; 1858 } 1859 case Instruction::Load: { 1860 // Cost of wide load - cost of scalar loads. 1861 unsigned alignment = dyn_cast<LoadInst>(VL0)->getAlignment(); 1862 int ScalarLdCost = VecTy->getNumElements() * 1863 TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0, VL0); 1864 int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, 1865 VecTy, alignment, 0, VL0); 1866 return VecLdCost - ScalarLdCost; 1867 } 1868 case Instruction::Store: { 1869 // We know that we can merge the stores. Calculate the cost. 1870 unsigned alignment = dyn_cast<StoreInst>(VL0)->getAlignment(); 1871 int ScalarStCost = VecTy->getNumElements() * 1872 TTI->getMemoryOpCost(Instruction::Store, ScalarTy, alignment, 0, VL0); 1873 int VecStCost = TTI->getMemoryOpCost(Instruction::Store, 1874 VecTy, alignment, 0, VL0); 1875 return VecStCost - ScalarStCost; 1876 } 1877 case Instruction::Call: { 1878 CallInst *CI = cast<CallInst>(VL0); 1879 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 1880 1881 // Calculate the cost of the scalar and vector calls. 
1882 SmallVector<Type*, 4> ScalarTys; 1883 for (unsigned op = 0, opc = CI->getNumArgOperands(); op!= opc; ++op) 1884 ScalarTys.push_back(CI->getArgOperand(op)->getType()); 1885 1886 FastMathFlags FMF; 1887 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 1888 FMF = FPMO->getFastMathFlags(); 1889 1890 int ScalarCallCost = VecTy->getNumElements() * 1891 TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF); 1892 1893 SmallVector<Value *, 4> Args(CI->arg_operands()); 1894 int VecCallCost = TTI->getIntrinsicInstrCost(ID, CI->getType(), Args, FMF, 1895 VecTy->getNumElements()); 1896 1897 DEBUG(dbgs() << "SLP: Call cost "<< VecCallCost - ScalarCallCost 1898 << " (" << VecCallCost << "-" << ScalarCallCost << ")" 1899 << " for " << *CI << "\n"); 1900 1901 return VecCallCost - ScalarCallCost; 1902 } 1903 case Instruction::ShuffleVector: { 1904 TargetTransformInfo::OperandValueKind Op1VK = 1905 TargetTransformInfo::OK_AnyValue; 1906 TargetTransformInfo::OperandValueKind Op2VK = 1907 TargetTransformInfo::OK_AnyValue; 1908 int ScalarCost = 0; 1909 int VecCost = 0; 1910 for (Value *i : VL) { 1911 Instruction *I = cast<Instruction>(i); 1912 if (!I) 1913 break; 1914 ScalarCost += 1915 TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK); 1916 } 1917 // VecCost is equal to sum of the cost of creating 2 vectors 1918 // and the cost of creating shuffle. 1919 Instruction *I0 = cast<Instruction>(VL[0]); 1920 VecCost = 1921 TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK); 1922 Instruction *I1 = cast<Instruction>(VL[1]); 1923 VecCost += 1924 TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK); 1925 VecCost += 1926 TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0); 1927 return VecCost - ScalarCost; 1928 } 1929 default: 1930 llvm_unreachable("Unknown instruction"); 1931 } 1932 } 1933 1934 bool BoUpSLP::isFullyVectorizableTinyTree() { 1935 DEBUG(dbgs() << "SLP: Check whether the tree with height " << 1936 VectorizableTree.size() << " is fully vectorizable .\n"); 1937 1938 // We only handle trees of heights 1 and 2. 1939 if (VectorizableTree.size() == 1 && !VectorizableTree[0].NeedToGather) 1940 return true; 1941 1942 if (VectorizableTree.size() != 2) 1943 return false; 1944 1945 // Handle splat and all-constants stores. 1946 if (!VectorizableTree[0].NeedToGather && 1947 (allConstant(VectorizableTree[1].Scalars) || 1948 isSplat(VectorizableTree[1].Scalars))) 1949 return true; 1950 1951 // Gathering cost would be too much for tiny trees. 1952 if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather) 1953 return false; 1954 1955 return true; 1956 } 1957 1958 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() { 1959 1960 // We can vectorize the tree if its size is greater than or equal to the 1961 // minimum size specified by the MinTreeSize command line option. 1962 if (VectorizableTree.size() >= MinTreeSize) 1963 return false; 1964 1965 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we 1966 // can vectorize it if we can prove it fully vectorizable. 1967 if (isFullyVectorizableTinyTree()) 1968 return false; 1969 1970 assert(VectorizableTree.empty() 1971 ? ExternalUses.empty() 1972 : true && "We shouldn't have any external users"); 1973 1974 // Otherwise, we can't vectorize the tree. It is both tiny and not fully 1975 // vectorizable. 1976 return true; 1977 } 1978 1979 int BoUpSLP::getSpillCost() { 1980 // Walk from the bottom of the tree to the top, tracking which values are 1981 // live. 
When we see a call instruction that is not part of our tree, 1982 // query TTI to see if there is a cost to keeping values live over it 1983 // (for example, if spills and fills are required). 1984 unsigned BundleWidth = VectorizableTree.front().Scalars.size(); 1985 int Cost = 0; 1986 1987 SmallPtrSet<Instruction*, 4> LiveValues; 1988 Instruction *PrevInst = nullptr; 1989 1990 for (const auto &N : VectorizableTree) { 1991 Instruction *Inst = dyn_cast<Instruction>(N.Scalars[0]); 1992 if (!Inst) 1993 continue; 1994 1995 if (!PrevInst) { 1996 PrevInst = Inst; 1997 continue; 1998 } 1999 2000 // Update LiveValues. 2001 LiveValues.erase(PrevInst); 2002 for (auto &J : PrevInst->operands()) { 2003 if (isa<Instruction>(&*J) && ScalarToTreeEntry.count(&*J)) 2004 LiveValues.insert(cast<Instruction>(&*J)); 2005 } 2006 2007 DEBUG( 2008 dbgs() << "SLP: #LV: " << LiveValues.size(); 2009 for (auto *X : LiveValues) 2010 dbgs() << " " << X->getName(); 2011 dbgs() << ", Looking at "; 2012 Inst->dump(); 2013 ); 2014 2015 // Now find the sequence of instructions between PrevInst and Inst. 2016 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(), 2017 PrevInstIt = 2018 PrevInst->getIterator().getReverse(); 2019 while (InstIt != PrevInstIt) { 2020 if (PrevInstIt == PrevInst->getParent()->rend()) { 2021 PrevInstIt = Inst->getParent()->rbegin(); 2022 continue; 2023 } 2024 2025 if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) { 2026 SmallVector<Type*, 4> V; 2027 for (auto *II : LiveValues) 2028 V.push_back(VectorType::get(II->getType(), BundleWidth)); 2029 Cost += TTI->getCostOfKeepingLiveOverCall(V); 2030 } 2031 2032 ++PrevInstIt; 2033 } 2034 2035 PrevInst = Inst; 2036 } 2037 2038 return Cost; 2039 } 2040 2041 int BoUpSLP::getTreeCost() { 2042 int Cost = 0; 2043 DEBUG(dbgs() << "SLP: Calculating cost for tree of size " << 2044 VectorizableTree.size() << ".\n"); 2045 2046 unsigned BundleWidth = VectorizableTree[0].Scalars.size(); 2047 2048 for (TreeEntry &TE : VectorizableTree) { 2049 int C = getEntryCost(&TE); 2050 DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with " 2051 << *TE.Scalars[0] << ".\n"); 2052 Cost += C; 2053 } 2054 2055 SmallSet<Value *, 16> ExtractCostCalculated; 2056 int ExtractCost = 0; 2057 for (ExternalUser &EU : ExternalUses) { 2058 // We only add extract cost once for the same scalar. 2059 if (!ExtractCostCalculated.insert(EU.Scalar).second) 2060 continue; 2061 2062 // Uses by ephemeral values are free (because the ephemeral value will be 2063 // removed prior to code generation, and so the extraction will be 2064 // removed as well). 2065 if (EphValues.count(EU.User)) 2066 continue; 2067 2068 // If we plan to rewrite the tree in a smaller type, we will need to sign 2069 // extend the extracted value back to the original type. Here, we account 2070 // for the extract and the added cost of the sign extend if needed. 2071 auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth); 2072 auto *ScalarRoot = VectorizableTree[0].Scalars[0]; 2073 if (MinBWs.count(ScalarRoot)) { 2074 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 2075 auto Extend = 2076 MinBWs[ScalarRoot].second ? 
Instruction::SExt : Instruction::ZExt; 2077 VecTy = VectorType::get(MinTy, BundleWidth); 2078 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), 2079 VecTy, EU.Lane); 2080 } else { 2081 ExtractCost += 2082 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane); 2083 } 2084 } 2085 2086 int SpillCost = getSpillCost(); 2087 Cost += SpillCost + ExtractCost; 2088 2089 std::string Str; 2090 { 2091 raw_string_ostream OS(Str); 2092 OS << "SLP: Spill Cost = " << SpillCost << ".\n" 2093 << "SLP: Extract Cost = " << ExtractCost << ".\n" 2094 << "SLP: Total Cost = " << Cost << ".\n"; 2095 } 2096 DEBUG(dbgs() << Str); 2097 2098 if (ViewSLPTree) 2099 ViewGraph(this, "SLP" + F->getName(), false, Str); 2100 2101 return Cost; 2102 } 2103 2104 int BoUpSLP::getGatherCost(Type *Ty) { 2105 int Cost = 0; 2106 for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i) 2107 Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i); 2108 return Cost; 2109 } 2110 2111 int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) { 2112 // Find the type of the operands in VL. 2113 Type *ScalarTy = VL[0]->getType(); 2114 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 2115 ScalarTy = SI->getValueOperand()->getType(); 2116 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 2117 // Find the cost of inserting/extracting values from the vector. 2118 return getGatherCost(VecTy); 2119 } 2120 2121 // Reorder commutative operations in alternate shuffle if the resulting vectors 2122 // are consecutive loads. This would allow us to vectorize the tree. 2123 // If we have something like- 2124 // load a[0] - load b[0] 2125 // load b[1] + load a[1] 2126 // load a[2] - load b[2] 2127 // load a[3] + load b[3] 2128 // Reordering the second load b[1] load a[1] would allow us to vectorize this 2129 // code. 2130 void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL, 2131 SmallVectorImpl<Value *> &Left, 2132 SmallVectorImpl<Value *> &Right) { 2133 // Push left and right operands of binary operation into Left and Right 2134 for (Value *i : VL) { 2135 Left.push_back(cast<Instruction>(i)->getOperand(0)); 2136 Right.push_back(cast<Instruction>(i)->getOperand(1)); 2137 } 2138 2139 // Reorder if we have a commutative operation and consecutive access 2140 // are on either side of the alternate instructions. 
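  // Worked trace of the example above (a sketch; the loads are the ones from
  // the comment, not from a real test): the initial split produces
  //   Left  = { a[0], b[1], a[2], a[3] }   Right = { b[0], a[1], b[2], b[3] }
  // At j == 0, Left[0] (load a[0]) and Right[1] (load a[1]) are consecutive
  // and VL[1] is the commutative add, so Left[1] and Right[1] are swapped,
  // giving
  //   Left  = { a[0], a[1], a[2], a[3] }   Right = { b[0], b[1], b[2], b[3] }
  // i.e. two chains of consecutive loads that can be vectorized.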
2141   for (unsigned j = 0; j < VL.size() - 1; ++j) {
2142     if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
2143       if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
2144         Instruction *VL1 = cast<Instruction>(VL[j]);
2145         Instruction *VL2 = cast<Instruction>(VL[j + 1]);
2146         if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
2147           std::swap(Left[j], Right[j]);
2148           continue;
2149         } else if (VL2->isCommutative() &&
2150                    isConsecutiveAccess(L, L1, *DL, *SE)) {
2151           std::swap(Left[j + 1], Right[j + 1]);
2152           continue;
2153         }
2154         // else unchanged
2155       }
2156     }
2157     if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
2158       if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
2159         Instruction *VL1 = cast<Instruction>(VL[j]);
2160         Instruction *VL2 = cast<Instruction>(VL[j + 1]);
2161         if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
2162           std::swap(Left[j], Right[j]);
2163           continue;
2164         } else if (VL2->isCommutative() &&
2165                    isConsecutiveAccess(L, L1, *DL, *SE)) {
2166           std::swap(Left[j + 1], Right[j + 1]);
2167           continue;
2168         }
2169         // else unchanged
2170       }
2171     }
2172   }
2173 }
2174
2175 // Return true if I should be commuted before adding its left and right
2176 // operands to the arrays Left and Right.
2177 //
2178 // The vectorizer is trying either to have all elements on one side be
2179 // instructions with the same opcode, to enable further vectorization, or to
2180 // have a splat to lower the vectorizing cost.
2181 static bool shouldReorderOperands(int i, Instruction &I,
2182                                   SmallVectorImpl<Value *> &Left,
2183                                   SmallVectorImpl<Value *> &Right,
2184                                   bool AllSameOpcodeLeft,
2185                                   bool AllSameOpcodeRight, bool SplatLeft,
2186                                   bool SplatRight) {
2187   Value *VLeft = I.getOperand(0);
2188   Value *VRight = I.getOperand(1);
2189   // If we have "SplatRight", try to see if commuting is needed to preserve it.
2190   if (SplatRight) {
2191     if (VRight == Right[i - 1])
2192       // Preserve SplatRight
2193       return false;
2194     if (VLeft == Right[i - 1]) {
2195       // Commuting would preserve SplatRight, but we don't want to break
2196       // SplatLeft either, i.e. preserve the original order if possible.
2197       // (FIXME: why do we care?)
2198       if (SplatLeft && VLeft == Left[i - 1])
2199         return false;
2200       return true;
2201     }
2202   }
2203   // Symmetrically handle the left side (SplatLeft).
2204   if (SplatLeft) {
2205     if (VLeft == Left[i - 1])
2206       // Preserve SplatLeft
2207       return false;
2208     if (VRight == Left[i - 1])
2209       return true;
2210   }
2211
2212   Instruction *ILeft = dyn_cast<Instruction>(VLeft);
2213   Instruction *IRight = dyn_cast<Instruction>(VRight);
2214
2215   // If we have "AllSameOpcodeRight", try to see if the left operand preserves
2216   // it while the right one does not; in that case we want to commute.
2217   if (AllSameOpcodeRight) {
2218     unsigned RightPrevOpcode = cast<Instruction>(Right[i - 1])->getOpcode();
2219     if (IRight && RightPrevOpcode == IRight->getOpcode())
2220       // Do not commute, a match on the right preserves AllSameOpcodeRight
2221       return false;
2222     if (ILeft && RightPrevOpcode == ILeft->getOpcode()) {
2223       // We have a match and may want to commute, but first check if there is
2224       // not also a match on the existing operands on the Left to preserve
2225       // AllSameOpcodeLeft, i.e. preserve the original order if possible.
2226       // (FIXME: why do we care?)
2227       if (AllSameOpcodeLeft && ILeft &&
2228           cast<Instruction>(Left[i - 1])->getOpcode() == ILeft->getOpcode())
2229         return false;
2230       return true;
2231     }
2232   }
2233   // Symmetrically handle Left side.
2234   if (AllSameOpcodeLeft) {
2235     unsigned LeftPrevOpcode = cast<Instruction>(Left[i - 1])->getOpcode();
2236     if (ILeft && LeftPrevOpcode == ILeft->getOpcode())
2237       return false;
2238     if (IRight && LeftPrevOpcode == IRight->getOpcode())
2239       return true;
2240   }
2241   return false;
2242 }
2243
2244 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
2245                                              SmallVectorImpl<Value *> &Left,
2246                                              SmallVectorImpl<Value *> &Right) {
2247
2248   if (VL.size()) {
2249     // Peel the first iteration out of the loop since there's nothing
2250     // interesting to do anyway and it simplifies the checks in the loop.
2251     auto VLeft = cast<Instruction>(VL[0])->getOperand(0);
2252     auto VRight = cast<Instruction>(VL[0])->getOperand(1);
2253     if (!isa<Instruction>(VRight) && isa<Instruction>(VLeft))
2254       // Favor having an instruction on the right. FIXME: why?
2255       std::swap(VLeft, VRight);
2256     Left.push_back(VLeft);
2257     Right.push_back(VRight);
2258   }
2259
2260   // Keep track of whether we have instructions with all the same opcode on one side.
2261   bool AllSameOpcodeLeft = isa<Instruction>(Left[0]);
2262   bool AllSameOpcodeRight = isa<Instruction>(Right[0]);
2263   // Keep track of whether one side has all the same value (a broadcast).
2264   bool SplatLeft = true;
2265   bool SplatRight = true;
2266
2267   for (unsigned i = 1, e = VL.size(); i != e; ++i) {
2268     Instruction *I = cast<Instruction>(VL[i]);
2269     assert(I->isCommutative() && "Can only process commutative instruction");
2270     // Commute to favor either a splat or maximizing having the same opcodes on
2271     // one side.
2272     if (shouldReorderOperands(i, *I, Left, Right, AllSameOpcodeLeft,
2273                               AllSameOpcodeRight, SplatLeft, SplatRight)) {
2274       Left.push_back(I->getOperand(1));
2275       Right.push_back(I->getOperand(0));
2276     } else {
2277       Left.push_back(I->getOperand(0));
2278       Right.push_back(I->getOperand(1));
2279     }
2280     // Update Splat* and AllSameOpcode* after the insertion.
2281     SplatRight = SplatRight && (Right[i - 1] == Right[i]);
2282     SplatLeft = SplatLeft && (Left[i - 1] == Left[i]);
2283     AllSameOpcodeLeft = AllSameOpcodeLeft && isa<Instruction>(Left[i]) &&
2284                         (cast<Instruction>(Left[i - 1])->getOpcode() ==
2285                          cast<Instruction>(Left[i])->getOpcode());
2286     AllSameOpcodeRight = AllSameOpcodeRight && isa<Instruction>(Right[i]) &&
2287                          (cast<Instruction>(Right[i - 1])->getOpcode() ==
2288                           cast<Instruction>(Right[i])->getOpcode());
2289   }
2290
2291   // If one operand ends up being a broadcast, return this operand order.
2292   if (SplatRight || SplatLeft)
2293     return;
2294
2295   // Finally check if we can get a longer vectorizable chain by reordering
2296   // without breaking the good operand order detected above.
2297   // E.g. If we have something like-
2298   // load a[0]  load b[0]
2299   // load b[1]  load a[1]
2300   // load a[2]  load b[2]
2301   // load a[3]  load b[3]
2302   // Reordering the second load b[1] load a[1] would allow us to vectorize
2303   // this code and still retain the AllSameOpcode property.
2304 // FIXME: This load reordering might break AllSameOpcode in some rare cases 2305 // such as- 2306 // add a[0],c[0] load b[0] 2307 // add a[1],c[2] load b[1] 2308 // b[2] load b[2] 2309 // add a[3],c[3] load b[3] 2310 for (unsigned j = 0; j < VL.size() - 1; ++j) { 2311 if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) { 2312 if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) { 2313 if (isConsecutiveAccess(L, L1, *DL, *SE)) { 2314 std::swap(Left[j + 1], Right[j + 1]); 2315 continue; 2316 } 2317 } 2318 } 2319 if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) { 2320 if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) { 2321 if (isConsecutiveAccess(L, L1, *DL, *SE)) { 2322 std::swap(Left[j + 1], Right[j + 1]); 2323 continue; 2324 } 2325 } 2326 } 2327 // else unchanged 2328 } 2329 } 2330 2331 void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) { 2332 2333 // Get the basic block this bundle is in. All instructions in the bundle 2334 // should be in this block. 2335 auto *Front = cast<Instruction>(VL.front()); 2336 auto *BB = Front->getParent(); 2337 assert(all_of(make_range(VL.begin(), VL.end()), [&](Value *V) -> bool { 2338 return cast<Instruction>(V)->getParent() == BB; 2339 })); 2340 2341 // The last instruction in the bundle in program order. 2342 Instruction *LastInst = nullptr; 2343 2344 // Find the last instruction. The common case should be that BB has been 2345 // scheduled, and the last instruction is VL.back(). So we start with 2346 // VL.back() and iterate over schedule data until we reach the end of the 2347 // bundle. The end of the bundle is marked by null ScheduleData. 2348 if (BlocksSchedules.count(BB)) { 2349 auto *Bundle = BlocksSchedules[BB]->getScheduleData(VL.back()); 2350 if (Bundle && Bundle->isPartOfBundle()) 2351 for (; Bundle; Bundle = Bundle->NextInBundle) 2352 LastInst = Bundle->Inst; 2353 } 2354 2355 // LastInst can still be null at this point if there's either not an entry 2356 // for BB in BlocksSchedules or there's no ScheduleData available for 2357 // VL.back(). This can be the case if buildTree_rec aborts for various 2358 // reasons (e.g., the maximum recursion depth is reached, the maximum region 2359 // size is reached, etc.). ScheduleData is initialized in the scheduling 2360 // "dry-run". 2361 // 2362 // If this happens, we can still find the last instruction by brute force. We 2363 // iterate forwards from Front (inclusive) until we either see all 2364 // instructions in the bundle or reach the end of the block. If Front is the 2365 // last instruction in program order, LastInst will be set to Front, and we 2366 // will visit all the remaining instructions in the block. 2367 // 2368 // One of the reasons we exit early from buildTree_rec is to place an upper 2369 // bound on compile-time. Thus, taking an additional compile-time hit here is 2370 // not ideal. However, this should be exceedingly rare since it requires that 2371 // we both exit early from buildTree_rec and that the bundle be out-of-order 2372 // (causing us to iterate all the way to the end of the block). 2373 if (!LastInst) { 2374 SmallPtrSet<Value *, 16> Bundle(VL.begin(), VL.end()); 2375 for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) { 2376 if (Bundle.erase(&I)) 2377 LastInst = &I; 2378 if (Bundle.empty()) 2379 break; 2380 } 2381 } 2382 2383 // Set the insertion point after the last instruction in the bundle. Set the 2384 // debug location to Front. 
2385 Builder.SetInsertPoint(BB, ++LastInst->getIterator()); 2386 Builder.SetCurrentDebugLocation(Front->getDebugLoc()); 2387 } 2388 2389 Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) { 2390 Value *Vec = UndefValue::get(Ty); 2391 // Generate the 'InsertElement' instruction. 2392 for (unsigned i = 0; i < Ty->getNumElements(); ++i) { 2393 Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i)); 2394 if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) { 2395 GatherSeq.insert(Insrt); 2396 CSEBlocks.insert(Insrt->getParent()); 2397 2398 // Add to our 'need-to-extract' list. 2399 if (ScalarToTreeEntry.count(VL[i])) { 2400 int Idx = ScalarToTreeEntry[VL[i]]; 2401 TreeEntry *E = &VectorizableTree[Idx]; 2402 // Find which lane we need to extract. 2403 int FoundLane = -1; 2404 for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) { 2405 // Is this the lane of the scalar that we are looking for ? 2406 if (E->Scalars[Lane] == VL[i]) { 2407 FoundLane = Lane; 2408 break; 2409 } 2410 } 2411 assert(FoundLane >= 0 && "Could not find the correct lane"); 2412 ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane)); 2413 } 2414 } 2415 } 2416 2417 return Vec; 2418 } 2419 2420 Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const { 2421 SmallDenseMap<Value*, int>::const_iterator Entry 2422 = ScalarToTreeEntry.find(VL[0]); 2423 if (Entry != ScalarToTreeEntry.end()) { 2424 int Idx = Entry->second; 2425 const TreeEntry *En = &VectorizableTree[Idx]; 2426 if (En->isSame(VL) && En->VectorizedValue) 2427 return En->VectorizedValue; 2428 } 2429 return nullptr; 2430 } 2431 2432 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) { 2433 if (ScalarToTreeEntry.count(VL[0])) { 2434 int Idx = ScalarToTreeEntry[VL[0]]; 2435 TreeEntry *E = &VectorizableTree[Idx]; 2436 if (E->isSame(VL)) 2437 return vectorizeTree(E); 2438 } 2439 2440 Type *ScalarTy = VL[0]->getType(); 2441 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 2442 ScalarTy = SI->getValueOperand()->getType(); 2443 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 2444 2445 return Gather(VL, VecTy); 2446 } 2447 2448 Value *BoUpSLP::vectorizeTree(TreeEntry *E) { 2449 IRBuilder<>::InsertPointGuard Guard(Builder); 2450 2451 if (E->VectorizedValue) { 2452 DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); 2453 return E->VectorizedValue; 2454 } 2455 2456 Instruction *VL0 = cast<Instruction>(E->Scalars[0]); 2457 Type *ScalarTy = VL0->getType(); 2458 if (StoreInst *SI = dyn_cast<StoreInst>(VL0)) 2459 ScalarTy = SI->getValueOperand()->getType(); 2460 VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size()); 2461 2462 if (E->NeedToGather) { 2463 setInsertPointAfterBundle(E->Scalars); 2464 auto *V = Gather(E->Scalars, VecTy); 2465 E->VectorizedValue = V; 2466 return V; 2467 } 2468 2469 unsigned Opcode = getSameOpcode(E->Scalars); 2470 2471 switch (Opcode) { 2472 case Instruction::PHI: { 2473 PHINode *PH = dyn_cast<PHINode>(VL0); 2474 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI()); 2475 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 2476 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 2477 E->VectorizedValue = NewPhi; 2478 2479 // PHINodes may have multiple entries from the same block. We want to 2480 // visit every block once. 
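    // For illustration (hypothetical IR): a phi fed by a switch that branches
    // to the same successor from several cases, e.g.
    //   %p = phi i32 [ %x, %bb1 ], [ %x, %bb1 ], [ %y, %bb2 ]
    // lists %bb1 twice; VisitedBBs below ensures the incoming value for %bb1
    // is vectorized once and the already-created vector is reused for the
    // duplicate entry.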
2481 SmallSet<BasicBlock*, 4> VisitedBBs; 2482 2483 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 2484 ValueList Operands; 2485 BasicBlock *IBB = PH->getIncomingBlock(i); 2486 2487 if (!VisitedBBs.insert(IBB).second) { 2488 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 2489 continue; 2490 } 2491 2492 // Prepare the operand vector. 2493 for (Value *V : E->Scalars) 2494 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(IBB)); 2495 2496 Builder.SetInsertPoint(IBB->getTerminator()); 2497 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 2498 Value *Vec = vectorizeTree(Operands); 2499 NewPhi->addIncoming(Vec, IBB); 2500 } 2501 2502 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 2503 "Invalid number of incoming values"); 2504 return NewPhi; 2505 } 2506 2507 case Instruction::ExtractElement: { 2508 if (canReuseExtract(E->Scalars, Instruction::ExtractElement)) { 2509 Value *V = VL0->getOperand(0); 2510 E->VectorizedValue = V; 2511 return V; 2512 } 2513 setInsertPointAfterBundle(E->Scalars); 2514 auto *V = Gather(E->Scalars, VecTy); 2515 E->VectorizedValue = V; 2516 return V; 2517 } 2518 case Instruction::ExtractValue: { 2519 if (canReuseExtract(E->Scalars, Instruction::ExtractValue)) { 2520 LoadInst *LI = cast<LoadInst>(VL0->getOperand(0)); 2521 Builder.SetInsertPoint(LI); 2522 PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace()); 2523 Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy); 2524 LoadInst *V = Builder.CreateAlignedLoad(Ptr, LI->getAlignment()); 2525 E->VectorizedValue = V; 2526 return propagateMetadata(V, E->Scalars); 2527 } 2528 setInsertPointAfterBundle(E->Scalars); 2529 auto *V = Gather(E->Scalars, VecTy); 2530 E->VectorizedValue = V; 2531 return V; 2532 } 2533 case Instruction::ZExt: 2534 case Instruction::SExt: 2535 case Instruction::FPToUI: 2536 case Instruction::FPToSI: 2537 case Instruction::FPExt: 2538 case Instruction::PtrToInt: 2539 case Instruction::IntToPtr: 2540 case Instruction::SIToFP: 2541 case Instruction::UIToFP: 2542 case Instruction::Trunc: 2543 case Instruction::FPTrunc: 2544 case Instruction::BitCast: { 2545 ValueList INVL; 2546 for (Value *V : E->Scalars) 2547 INVL.push_back(cast<Instruction>(V)->getOperand(0)); 2548 2549 setInsertPointAfterBundle(E->Scalars); 2550 2551 Value *InVec = vectorizeTree(INVL); 2552 2553 if (Value *V = alreadyVectorized(E->Scalars)) 2554 return V; 2555 2556 CastInst *CI = dyn_cast<CastInst>(VL0); 2557 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy); 2558 E->VectorizedValue = V; 2559 ++NumVectorInstructions; 2560 return V; 2561 } 2562 case Instruction::FCmp: 2563 case Instruction::ICmp: { 2564 ValueList LHSV, RHSV; 2565 for (Value *V : E->Scalars) { 2566 LHSV.push_back(cast<Instruction>(V)->getOperand(0)); 2567 RHSV.push_back(cast<Instruction>(V)->getOperand(1)); 2568 } 2569 2570 setInsertPointAfterBundle(E->Scalars); 2571 2572 Value *L = vectorizeTree(LHSV); 2573 Value *R = vectorizeTree(RHSV); 2574 2575 if (Value *V = alreadyVectorized(E->Scalars)) 2576 return V; 2577 2578 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 2579 Value *V; 2580 if (Opcode == Instruction::FCmp) 2581 V = Builder.CreateFCmp(P0, L, R); 2582 else 2583 V = Builder.CreateICmp(P0, L, R); 2584 2585 E->VectorizedValue = V; 2586 propagateIRFlags(E->VectorizedValue, E->Scalars); 2587 ++NumVectorInstructions; 2588 return V; 2589 } 2590 case Instruction::Select: { 2591 ValueList TrueVec, FalseVec, CondVec; 2592 for (Value *V : 
E->Scalars) { 2593 CondVec.push_back(cast<Instruction>(V)->getOperand(0)); 2594 TrueVec.push_back(cast<Instruction>(V)->getOperand(1)); 2595 FalseVec.push_back(cast<Instruction>(V)->getOperand(2)); 2596 } 2597 2598 setInsertPointAfterBundle(E->Scalars); 2599 2600 Value *Cond = vectorizeTree(CondVec); 2601 Value *True = vectorizeTree(TrueVec); 2602 Value *False = vectorizeTree(FalseVec); 2603 2604 if (Value *V = alreadyVectorized(E->Scalars)) 2605 return V; 2606 2607 Value *V = Builder.CreateSelect(Cond, True, False); 2608 E->VectorizedValue = V; 2609 ++NumVectorInstructions; 2610 return V; 2611 } 2612 case Instruction::Add: 2613 case Instruction::FAdd: 2614 case Instruction::Sub: 2615 case Instruction::FSub: 2616 case Instruction::Mul: 2617 case Instruction::FMul: 2618 case Instruction::UDiv: 2619 case Instruction::SDiv: 2620 case Instruction::FDiv: 2621 case Instruction::URem: 2622 case Instruction::SRem: 2623 case Instruction::FRem: 2624 case Instruction::Shl: 2625 case Instruction::LShr: 2626 case Instruction::AShr: 2627 case Instruction::And: 2628 case Instruction::Or: 2629 case Instruction::Xor: { 2630 ValueList LHSVL, RHSVL; 2631 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) 2632 reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL); 2633 else 2634 for (Value *V : E->Scalars) { 2635 LHSVL.push_back(cast<Instruction>(V)->getOperand(0)); 2636 RHSVL.push_back(cast<Instruction>(V)->getOperand(1)); 2637 } 2638 2639 setInsertPointAfterBundle(E->Scalars); 2640 2641 Value *LHS = vectorizeTree(LHSVL); 2642 Value *RHS = vectorizeTree(RHSVL); 2643 2644 if (Value *V = alreadyVectorized(E->Scalars)) 2645 return V; 2646 2647 BinaryOperator *BinOp = cast<BinaryOperator>(VL0); 2648 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS); 2649 E->VectorizedValue = V; 2650 propagateIRFlags(E->VectorizedValue, E->Scalars); 2651 ++NumVectorInstructions; 2652 2653 if (Instruction *I = dyn_cast<Instruction>(V)) 2654 return propagateMetadata(I, E->Scalars); 2655 2656 return V; 2657 } 2658 case Instruction::Load: { 2659 // Loads are inserted at the head of the tree because we don't want to 2660 // sink them all the way down past store instructions. 2661 setInsertPointAfterBundle(E->Scalars); 2662 2663 LoadInst *LI = cast<LoadInst>(VL0); 2664 Type *ScalarLoadTy = LI->getType(); 2665 unsigned AS = LI->getPointerAddressSpace(); 2666 2667 Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(), 2668 VecTy->getPointerTo(AS)); 2669 2670 // The pointer operand uses an in-tree scalar so we add the new BitCast to 2671 // ExternalUses list to make sure that an extract will be generated in the 2672 // future. 
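    // Sketch of the situation handled here (hypothetical): if the pointer
    // operand of this load is itself one of the scalars being vectorized, the
    // bitcast created above still consumes the scalar pointer. Recording it
    // as an ExternalUser makes vectorizeTree() emit an extractelement for
    // that lane, so the bitcast keeps a valid operand once the scalar is
    // erased.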
2673 if (ScalarToTreeEntry.count(LI->getPointerOperand())) 2674 ExternalUses.push_back( 2675 ExternalUser(LI->getPointerOperand(), cast<User>(VecPtr), 0)); 2676 2677 unsigned Alignment = LI->getAlignment(); 2678 LI = Builder.CreateLoad(VecPtr); 2679 if (!Alignment) { 2680 Alignment = DL->getABITypeAlignment(ScalarLoadTy); 2681 } 2682 LI->setAlignment(Alignment); 2683 E->VectorizedValue = LI; 2684 ++NumVectorInstructions; 2685 return propagateMetadata(LI, E->Scalars); 2686 } 2687 case Instruction::Store: { 2688 StoreInst *SI = cast<StoreInst>(VL0); 2689 unsigned Alignment = SI->getAlignment(); 2690 unsigned AS = SI->getPointerAddressSpace(); 2691 2692 ValueList ValueOp; 2693 for (Value *V : E->Scalars) 2694 ValueOp.push_back(cast<StoreInst>(V)->getValueOperand()); 2695 2696 setInsertPointAfterBundle(E->Scalars); 2697 2698 Value *VecValue = vectorizeTree(ValueOp); 2699 Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(), 2700 VecTy->getPointerTo(AS)); 2701 StoreInst *S = Builder.CreateStore(VecValue, VecPtr); 2702 2703 // The pointer operand uses an in-tree scalar so we add the new BitCast to 2704 // ExternalUses list to make sure that an extract will be generated in the 2705 // future. 2706 if (ScalarToTreeEntry.count(SI->getPointerOperand())) 2707 ExternalUses.push_back( 2708 ExternalUser(SI->getPointerOperand(), cast<User>(VecPtr), 0)); 2709 2710 if (!Alignment) { 2711 Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType()); 2712 } 2713 S->setAlignment(Alignment); 2714 E->VectorizedValue = S; 2715 ++NumVectorInstructions; 2716 return propagateMetadata(S, E->Scalars); 2717 } 2718 case Instruction::GetElementPtr: { 2719 setInsertPointAfterBundle(E->Scalars); 2720 2721 ValueList Op0VL; 2722 for (Value *V : E->Scalars) 2723 Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0)); 2724 2725 Value *Op0 = vectorizeTree(Op0VL); 2726 2727 std::vector<Value *> OpVecs; 2728 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e; 2729 ++j) { 2730 ValueList OpVL; 2731 for (Value *V : E->Scalars) 2732 OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j)); 2733 2734 Value *OpVec = vectorizeTree(OpVL); 2735 OpVecs.push_back(OpVec); 2736 } 2737 2738 Value *V = Builder.CreateGEP( 2739 cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs); 2740 E->VectorizedValue = V; 2741 ++NumVectorInstructions; 2742 2743 if (Instruction *I = dyn_cast<Instruction>(V)) 2744 return propagateMetadata(I, E->Scalars); 2745 2746 return V; 2747 } 2748 case Instruction::Call: { 2749 CallInst *CI = cast<CallInst>(VL0); 2750 setInsertPointAfterBundle(E->Scalars); 2751 Function *FI; 2752 Intrinsic::ID IID = Intrinsic::not_intrinsic; 2753 Value *ScalarArg = nullptr; 2754 if (CI && (FI = CI->getCalledFunction())) { 2755 IID = FI->getIntrinsicID(); 2756 } 2757 std::vector<Value *> OpVecs; 2758 for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) { 2759 ValueList OpVL; 2760 // ctlz,cttz and powi are special intrinsics whose second argument is 2761 // a scalar. This argument should not be vectorized. 
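      // Illustration only (hypothetical IR): four scalar calls of the form
      //   %r = call float @llvm.powi.f32(float %x, i32 3)
      // become a single
      //   %rv = call <4 x float> @llvm.powi.v4f32(<4 x float> %xv, i32 3)
      // i.e. the exponent stays a scalar i32 operand and is passed through
      // unchanged instead of being gathered into a vector.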
2762 if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) { 2763 CallInst *CEI = cast<CallInst>(E->Scalars[0]); 2764 ScalarArg = CEI->getArgOperand(j); 2765 OpVecs.push_back(CEI->getArgOperand(j)); 2766 continue; 2767 } 2768 for (Value *V : E->Scalars) { 2769 CallInst *CEI = cast<CallInst>(V); 2770 OpVL.push_back(CEI->getArgOperand(j)); 2771 } 2772 2773 Value *OpVec = vectorizeTree(OpVL); 2774 DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); 2775 OpVecs.push_back(OpVec); 2776 } 2777 2778 Module *M = F->getParent(); 2779 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 2780 Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) }; 2781 Function *CF = Intrinsic::getDeclaration(M, ID, Tys); 2782 SmallVector<OperandBundleDef, 1> OpBundles; 2783 CI->getOperandBundlesAsDefs(OpBundles); 2784 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); 2785 2786 // The scalar argument uses an in-tree scalar so we add the new vectorized 2787 // call to ExternalUses list to make sure that an extract will be 2788 // generated in the future. 2789 if (ScalarArg && ScalarToTreeEntry.count(ScalarArg)) 2790 ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0)); 2791 2792 E->VectorizedValue = V; 2793 propagateIRFlags(E->VectorizedValue, E->Scalars); 2794 ++NumVectorInstructions; 2795 return V; 2796 } 2797 case Instruction::ShuffleVector: { 2798 ValueList LHSVL, RHSVL; 2799 assert(isa<BinaryOperator>(VL0) && "Invalid Shuffle Vector Operand"); 2800 reorderAltShuffleOperands(E->Scalars, LHSVL, RHSVL); 2801 setInsertPointAfterBundle(E->Scalars); 2802 2803 Value *LHS = vectorizeTree(LHSVL); 2804 Value *RHS = vectorizeTree(RHSVL); 2805 2806 if (Value *V = alreadyVectorized(E->Scalars)) 2807 return V; 2808 2809 // Create a vector of LHS op1 RHS 2810 BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0); 2811 Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS); 2812 2813 // Create a vector of LHS op2 RHS 2814 Instruction *VL1 = cast<Instruction>(E->Scalars[1]); 2815 BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1); 2816 Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS); 2817 2818 // Create shuffle to take alternate operations from the vector. 2819 // Also, gather up odd and even scalar ops to propagate IR flags to 2820 // each vector operation. 2821 ValueList OddScalars, EvenScalars; 2822 unsigned e = E->Scalars.size(); 2823 SmallVector<Constant *, 8> Mask(e); 2824 for (unsigned i = 0; i < e; ++i) { 2825 if (i & 1) { 2826 Mask[i] = Builder.getInt32(e + i); 2827 OddScalars.push_back(E->Scalars[i]); 2828 } else { 2829 Mask[i] = Builder.getInt32(i); 2830 EvenScalars.push_back(E->Scalars[i]); 2831 } 2832 } 2833 2834 Value *ShuffleMask = ConstantVector::get(Mask); 2835 propagateIRFlags(V0, EvenScalars); 2836 propagateIRFlags(V1, OddScalars); 2837 2838 Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask); 2839 E->VectorizedValue = V; 2840 ++NumVectorInstructions; 2841 if (Instruction *I = dyn_cast<Instruction>(V)) 2842 return propagateMetadata(I, E->Scalars); 2843 2844 return V; 2845 } 2846 default: 2847 llvm_unreachable("unknown inst"); 2848 } 2849 return nullptr; 2850 } 2851 2852 Value *BoUpSLP::vectorizeTree() { 2853 ExtraValueToDebugLocsMap ExternallyUsedValues; 2854 return vectorizeTree(ExternallyUsedValues); 2855 } 2856 2857 Value * 2858 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { 2859 2860 // All blocks must be scheduled before any instructions are inserted. 
2861 for (auto &BSIter : BlocksSchedules) { 2862 scheduleBlock(BSIter.second.get()); 2863 } 2864 2865 Builder.SetInsertPoint(&F->getEntryBlock().front()); 2866 auto *VectorRoot = vectorizeTree(&VectorizableTree[0]); 2867 2868 // If the vectorized tree can be rewritten in a smaller type, we truncate the 2869 // vectorized root. InstCombine will then rewrite the entire expression. We 2870 // sign extend the extracted values below. 2871 auto *ScalarRoot = VectorizableTree[0].Scalars[0]; 2872 if (MinBWs.count(ScalarRoot)) { 2873 if (auto *I = dyn_cast<Instruction>(VectorRoot)) 2874 Builder.SetInsertPoint(&*++BasicBlock::iterator(I)); 2875 auto BundleWidth = VectorizableTree[0].Scalars.size(); 2876 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 2877 auto *VecTy = VectorType::get(MinTy, BundleWidth); 2878 auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy); 2879 VectorizableTree[0].VectorizedValue = Trunc; 2880 } 2881 2882 DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values .\n"); 2883 2884 // If necessary, sign-extend or zero-extend ScalarRoot to the larger type 2885 // specified by ScalarType. 2886 auto extend = [&](Value *ScalarRoot, Value *Ex, Type *ScalarType) { 2887 if (!MinBWs.count(ScalarRoot)) 2888 return Ex; 2889 if (MinBWs[ScalarRoot].second) 2890 return Builder.CreateSExt(Ex, ScalarType); 2891 return Builder.CreateZExt(Ex, ScalarType); 2892 }; 2893 2894 // Extract all of the elements with the external uses. 2895 for (const auto &ExternalUse : ExternalUses) { 2896 Value *Scalar = ExternalUse.Scalar; 2897 llvm::User *User = ExternalUse.User; 2898 2899 // Skip users that we already RAUW. This happens when one instruction 2900 // has multiple uses of the same value. 2901 if (User && !is_contained(Scalar->users(), User)) 2902 continue; 2903 assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar"); 2904 2905 int Idx = ScalarToTreeEntry[Scalar]; 2906 TreeEntry *E = &VectorizableTree[Idx]; 2907 assert(!E->NeedToGather && "Extracting from a gather list"); 2908 2909 Value *Vec = E->VectorizedValue; 2910 assert(Vec && "Can't find vectorizable value"); 2911 2912 Value *Lane = Builder.getInt32(ExternalUse.Lane); 2913 // If User == nullptr, the Scalar is used as extra arg. Generate 2914 // ExtractElement instruction and update the record for this scalar in 2915 // ExternallyUsedValues. 2916 if (!User) { 2917 assert(ExternallyUsedValues.count(Scalar) && 2918 "Scalar with nullptr as an external user must be registered in " 2919 "ExternallyUsedValues map"); 2920 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 2921 Builder.SetInsertPoint(VecI->getParent(), 2922 std::next(VecI->getIterator())); 2923 } else { 2924 Builder.SetInsertPoint(&F->getEntryBlock().front()); 2925 } 2926 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 2927 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 2928 CSEBlocks.insert(cast<Instruction>(Scalar)->getParent()); 2929 auto &Locs = ExternallyUsedValues[Scalar]; 2930 ExternallyUsedValues.insert({Ex, Locs}); 2931 ExternallyUsedValues.erase(Scalar); 2932 continue; 2933 } 2934 2935 // Generate extracts for out-of-tree users. 2936 // Find the insertion point for the extractelement lane. 
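    // Minimal sketch (hypothetical IR): if lane 2 of the bundle, say %a2, is
    // still needed by a scalar user outside the tree, an
    //   %ex = extractelement <4 x i32> %vec, i32 2
    // is emitted and the user is rewritten to consume %ex instead of %a2; the
    // code below only decides where that extract may legally be placed (phi
    // users, catchswitch-terminated predecessors, entry-block fallback).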
2937 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 2938 if (PHINode *PH = dyn_cast<PHINode>(User)) { 2939 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { 2940 if (PH->getIncomingValue(i) == Scalar) { 2941 TerminatorInst *IncomingTerminator = 2942 PH->getIncomingBlock(i)->getTerminator(); 2943 if (isa<CatchSwitchInst>(IncomingTerminator)) { 2944 Builder.SetInsertPoint(VecI->getParent(), 2945 std::next(VecI->getIterator())); 2946 } else { 2947 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); 2948 } 2949 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 2950 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 2951 CSEBlocks.insert(PH->getIncomingBlock(i)); 2952 PH->setOperand(i, Ex); 2953 } 2954 } 2955 } else { 2956 Builder.SetInsertPoint(cast<Instruction>(User)); 2957 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 2958 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 2959 CSEBlocks.insert(cast<Instruction>(User)->getParent()); 2960 User->replaceUsesOfWith(Scalar, Ex); 2961 } 2962 } else { 2963 Builder.SetInsertPoint(&F->getEntryBlock().front()); 2964 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 2965 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 2966 CSEBlocks.insert(&F->getEntryBlock()); 2967 User->replaceUsesOfWith(Scalar, Ex); 2968 } 2969 2970 DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 2971 } 2972 2973 // For each vectorized value: 2974 for (TreeEntry &EIdx : VectorizableTree) { 2975 TreeEntry *Entry = &EIdx; 2976 2977 // For each lane: 2978 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 2979 Value *Scalar = Entry->Scalars[Lane]; 2980 // No need to handle users of gathered values. 2981 if (Entry->NeedToGather) 2982 continue; 2983 2984 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 2985 2986 Type *Ty = Scalar->getType(); 2987 if (!Ty->isVoidTy()) { 2988 #ifndef NDEBUG 2989 for (User *U : Scalar->users()) { 2990 DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 2991 2992 assert((ScalarToTreeEntry.count(U) || 2993 // It is legal to replace users in the ignorelist by undef. 2994 is_contained(UserIgnoreList, U)) && 2995 "Replacing out-of-tree value with undef"); 2996 } 2997 #endif 2998 Value *Undef = UndefValue::get(Ty); 2999 Scalar->replaceAllUsesWith(Undef); 3000 } 3001 DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 3002 eraseInstruction(cast<Instruction>(Scalar)); 3003 } 3004 } 3005 3006 Builder.ClearInsertionPoint(); 3007 3008 return VectorizableTree[0].VectorizedValue; 3009 } 3010 3011 void BoUpSLP::optimizeGatherSequence() { 3012 DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size() 3013 << " gather sequences instructions.\n"); 3014 // LICM InsertElementInst sequences. 3015 for (Instruction *it : GatherSeq) { 3016 InsertElementInst *Insert = dyn_cast<InsertElementInst>(it); 3017 3018 if (!Insert) 3019 continue; 3020 3021 // Check if this block is inside a loop. 3022 Loop *L = LI->getLoopFor(Insert->getParent()); 3023 if (!L) 3024 continue; 3025 3026 // Check if it has a preheader. 3027 BasicBlock *PreHeader = L->getLoopPreheader(); 3028 if (!PreHeader) 3029 continue; 3030 3031 // If the vector or the element that we insert into it are 3032 // instructions that are defined in this basic block then we can't 3033 // hoist this instruction. 
3034 Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0)); 3035 Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1)); 3036 if (CurrVec && L->contains(CurrVec)) 3037 continue; 3038 if (NewElem && L->contains(NewElem)) 3039 continue; 3040 3041 // We can hoist this instruction. Move it to the pre-header. 3042 Insert->moveBefore(PreHeader->getTerminator()); 3043 } 3044 3045 // Make a list of all reachable blocks in our CSE queue. 3046 SmallVector<const DomTreeNode *, 8> CSEWorkList; 3047 CSEWorkList.reserve(CSEBlocks.size()); 3048 for (BasicBlock *BB : CSEBlocks) 3049 if (DomTreeNode *N = DT->getNode(BB)) { 3050 assert(DT->isReachableFromEntry(N)); 3051 CSEWorkList.push_back(N); 3052 } 3053 3054 // Sort blocks by domination. This ensures we visit a block after all blocks 3055 // dominating it are visited. 3056 std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(), 3057 [this](const DomTreeNode *A, const DomTreeNode *B) { 3058 return DT->properlyDominates(A, B); 3059 }); 3060 3061 // Perform O(N^2) search over the gather sequences and merge identical 3062 // instructions. TODO: We can further optimize this scan if we split the 3063 // instructions into different buckets based on the insert lane. 3064 SmallVector<Instruction *, 16> Visited; 3065 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 3066 assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 3067 "Worklist not sorted properly!"); 3068 BasicBlock *BB = (*I)->getBlock(); 3069 // For all instructions in blocks containing gather sequences: 3070 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) { 3071 Instruction *In = &*it++; 3072 if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In)) 3073 continue; 3074 3075 // Check if we can replace this instruction with any of the 3076 // visited instructions. 3077 for (Instruction *v : Visited) { 3078 if (In->isIdenticalTo(v) && 3079 DT->dominates(v->getParent(), In->getParent())) { 3080 In->replaceAllUsesWith(v); 3081 eraseInstruction(In); 3082 In = nullptr; 3083 break; 3084 } 3085 } 3086 if (In) { 3087 assert(!is_contained(Visited, In)); 3088 Visited.push_back(In); 3089 } 3090 } 3091 } 3092 CSEBlocks.clear(); 3093 GatherSeq.clear(); 3094 } 3095 3096 // Groups the instructions to a bundle (which is then a single scheduling entity) 3097 // and schedules instructions until the bundle gets ready. 3098 bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, 3099 BoUpSLP *SLP) { 3100 if (isa<PHINode>(VL[0])) 3101 return true; 3102 3103 // Initialize the instruction bundle. 3104 Instruction *OldScheduleEnd = ScheduleEnd; 3105 ScheduleData *PrevInBundle = nullptr; 3106 ScheduleData *Bundle = nullptr; 3107 bool ReSchedule = false; 3108 DEBUG(dbgs() << "SLP: bundle: " << *VL[0] << "\n"); 3109 3110 // Make sure that the scheduling region contains all 3111 // instructions of the bundle. 3112 for (Value *V : VL) { 3113 if (!extendSchedulingRegion(V)) 3114 return false; 3115 } 3116 3117 for (Value *V : VL) { 3118 ScheduleData *BundleMember = getScheduleData(V); 3119 assert(BundleMember && 3120 "no ScheduleData for bundle member (maybe not in same basic block)"); 3121 if (BundleMember->IsScheduled) { 3122 // A bundle member was scheduled as single instruction before and now 3123 // needs to be scheduled as part of the bundle. We just get rid of the 3124 // existing schedule. 
3125 DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember 3126 << " was already scheduled\n"); 3127 ReSchedule = true; 3128 } 3129 assert(BundleMember->isSchedulingEntity() && 3130 "bundle member already part of other bundle"); 3131 if (PrevInBundle) { 3132 PrevInBundle->NextInBundle = BundleMember; 3133 } else { 3134 Bundle = BundleMember; 3135 } 3136 BundleMember->UnscheduledDepsInBundle = 0; 3137 Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps; 3138 3139 // Group the instructions to a bundle. 3140 BundleMember->FirstInBundle = Bundle; 3141 PrevInBundle = BundleMember; 3142 } 3143 if (ScheduleEnd != OldScheduleEnd) { 3144 // The scheduling region got new instructions at the lower end (or it is a 3145 // new region for the first bundle). This makes it necessary to 3146 // recalculate all dependencies. 3147 // It is seldom that this needs to be done a second time after adding the 3148 // initial bundle to the region. 3149 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3150 ScheduleData *SD = getScheduleData(I); 3151 SD->clearDependencies(); 3152 } 3153 ReSchedule = true; 3154 } 3155 if (ReSchedule) { 3156 resetSchedule(); 3157 initialFillReadyList(ReadyInsts); 3158 } 3159 3160 DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block " 3161 << BB->getName() << "\n"); 3162 3163 calculateDependencies(Bundle, true, SLP); 3164 3165 // Now try to schedule the new bundle. As soon as the bundle is "ready" it 3166 // means that there are no cyclic dependencies and we can schedule it. 3167 // Note that's important that we don't "schedule" the bundle yet (see 3168 // cancelScheduling). 3169 while (!Bundle->isReady() && !ReadyInsts.empty()) { 3170 3171 ScheduleData *pickedSD = ReadyInsts.back(); 3172 ReadyInsts.pop_back(); 3173 3174 if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) { 3175 schedule(pickedSD, ReadyInsts); 3176 } 3177 } 3178 if (!Bundle->isReady()) { 3179 cancelScheduling(VL); 3180 return false; 3181 } 3182 return true; 3183 } 3184 3185 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL) { 3186 if (isa<PHINode>(VL[0])) 3187 return; 3188 3189 ScheduleData *Bundle = getScheduleData(VL[0]); 3190 DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); 3191 assert(!Bundle->IsScheduled && 3192 "Can't cancel bundle which is already scheduled"); 3193 assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() && 3194 "tried to unbundle something which is not a bundle"); 3195 3196 // Un-bundle: make single instructions out of the bundle. 3197 ScheduleData *BundleMember = Bundle; 3198 while (BundleMember) { 3199 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); 3200 BundleMember->FirstInBundle = BundleMember; 3201 ScheduleData *Next = BundleMember->NextInBundle; 3202 BundleMember->NextInBundle = nullptr; 3203 BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps; 3204 if (BundleMember->UnscheduledDepsInBundle == 0) { 3205 ReadyInsts.insert(BundleMember); 3206 } 3207 BundleMember = Next; 3208 } 3209 } 3210 3211 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V) { 3212 if (getScheduleData(V)) 3213 return true; 3214 Instruction *I = dyn_cast<Instruction>(V); 3215 assert(I && "bundle member must be an instruction"); 3216 assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled"); 3217 if (!ScheduleStart) { 3218 // It's the first instruction in the new region. 
3219 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 3220 ScheduleStart = I; 3221 ScheduleEnd = I->getNextNode(); 3222 assert(ScheduleEnd && "tried to vectorize a TerminatorInst?"); 3223 DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 3224 return true; 3225 } 3226 // Search up and down at the same time, because we don't know if the new 3227 // instruction is above or below the existing scheduling region. 3228 BasicBlock::reverse_iterator UpIter = 3229 ++ScheduleStart->getIterator().getReverse(); 3230 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 3231 BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); 3232 BasicBlock::iterator LowerEnd = BB->end(); 3233 for (;;) { 3234 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 3235 DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 3236 return false; 3237 } 3238 3239 if (UpIter != UpperEnd) { 3240 if (&*UpIter == I) { 3241 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 3242 ScheduleStart = I; 3243 DEBUG(dbgs() << "SLP: extend schedule region start to " << *I << "\n"); 3244 return true; 3245 } 3246 UpIter++; 3247 } 3248 if (DownIter != LowerEnd) { 3249 if (&*DownIter == I) { 3250 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 3251 nullptr); 3252 ScheduleEnd = I->getNextNode(); 3253 assert(ScheduleEnd && "tried to vectorize a TerminatorInst?"); 3254 DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); 3255 return true; 3256 } 3257 DownIter++; 3258 } 3259 assert((UpIter != UpperEnd || DownIter != LowerEnd) && 3260 "instruction not found in block"); 3261 } 3262 return true; 3263 } 3264 3265 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 3266 Instruction *ToI, 3267 ScheduleData *PrevLoadStore, 3268 ScheduleData *NextLoadStore) { 3269 ScheduleData *CurrentLoadStore = PrevLoadStore; 3270 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 3271 ScheduleData *SD = ScheduleDataMap[I]; 3272 if (!SD) { 3273 // Allocate a new ScheduleData for the instruction. 3274 if (ChunkPos >= ChunkSize) { 3275 ScheduleDataChunks.push_back( 3276 llvm::make_unique<ScheduleData[]>(ChunkSize)); 3277 ChunkPos = 0; 3278 } 3279 SD = &(ScheduleDataChunks.back()[ChunkPos++]); 3280 ScheduleDataMap[I] = SD; 3281 SD->Inst = I; 3282 } 3283 assert(!isInSchedulingRegion(SD) && 3284 "new ScheduleData already in scheduling region"); 3285 SD->init(SchedulingRegionID); 3286 3287 if (I->mayReadOrWriteMemory()) { 3288 // Update the linked list of memory accessing instructions. 
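// Loads and stores are chained in program order through NextLoadStore so that
// calculateDependencies() can walk only the memory instructions of the region.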
3289 if (CurrentLoadStore) { 3290 CurrentLoadStore->NextLoadStore = SD; 3291 } else { 3292 FirstLoadStoreInRegion = SD; 3293 } 3294 CurrentLoadStore = SD; 3295 } 3296 } 3297 if (NextLoadStore) { 3298 if (CurrentLoadStore) 3299 CurrentLoadStore->NextLoadStore = NextLoadStore; 3300 } else { 3301 LastLoadStoreInRegion = CurrentLoadStore; 3302 } 3303 } 3304 3305 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, 3306 bool InsertInReadyList, 3307 BoUpSLP *SLP) { 3308 assert(SD->isSchedulingEntity()); 3309 3310 SmallVector<ScheduleData *, 10> WorkList; 3311 WorkList.push_back(SD); 3312 3313 while (!WorkList.empty()) { 3314 ScheduleData *SD = WorkList.back(); 3315 WorkList.pop_back(); 3316 3317 ScheduleData *BundleMember = SD; 3318 while (BundleMember) { 3319 assert(isInSchedulingRegion(BundleMember)); 3320 if (!BundleMember->hasValidDependencies()) { 3321 3322 DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n"); 3323 BundleMember->Dependencies = 0; 3324 BundleMember->resetUnscheduledDeps(); 3325 3326 // Handle def-use chain dependencies. 3327 for (User *U : BundleMember->Inst->users()) { 3328 if (isa<Instruction>(U)) { 3329 ScheduleData *UseSD = getScheduleData(U); 3330 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 3331 BundleMember->Dependencies++; 3332 ScheduleData *DestBundle = UseSD->FirstInBundle; 3333 if (!DestBundle->IsScheduled) { 3334 BundleMember->incrementUnscheduledDeps(1); 3335 } 3336 if (!DestBundle->hasValidDependencies()) { 3337 WorkList.push_back(DestBundle); 3338 } 3339 } 3340 } else { 3341 // I'm not sure if this can ever happen. But we need to be safe. 3342 // This lets the instruction/bundle never be scheduled and 3343 // eventually disable vectorization. 3344 BundleMember->Dependencies++; 3345 BundleMember->incrementUnscheduledDeps(1); 3346 } 3347 } 3348 3349 // Handle the memory dependencies. 3350 ScheduleData *DepDest = BundleMember->NextLoadStore; 3351 if (DepDest) { 3352 Instruction *SrcInst = BundleMember->Inst; 3353 MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA); 3354 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory(); 3355 unsigned numAliased = 0; 3356 unsigned DistToSrc = 1; 3357 3358 while (DepDest) { 3359 assert(isInSchedulingRegion(DepDest)); 3360 3361 // We have two limits to reduce the complexity: 3362 // 1) AliasedCheckLimit: It's a small limit to reduce calls to 3363 // SLP->isAliased (which is the expensive part in this loop). 3364 // 2) MaxMemDepDistance: It's for very large blocks and it aborts 3365 // the whole loop (even if the loop is fast, it's quadratic). 3366 // It's important for the loop break condition (see below) to 3367 // check this limit even between two read-only instructions. 3368 if (DistToSrc >= MaxMemDepDistance || 3369 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) && 3370 (numAliased >= AliasedCheckLimit || 3371 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) { 3372 3373 // We increment the counter only if the locations are aliased 3374 // (instead of counting all alias checks). This gives a better 3375 // balance between reduced runtime and accurate dependencies. 
3376 numAliased++; 3377 3378 DepDest->MemoryDependencies.push_back(BundleMember); 3379 BundleMember->Dependencies++; 3380 ScheduleData *DestBundle = DepDest->FirstInBundle; 3381 if (!DestBundle->IsScheduled) { 3382 BundleMember->incrementUnscheduledDeps(1); 3383 } 3384 if (!DestBundle->hasValidDependencies()) { 3385 WorkList.push_back(DestBundle); 3386 } 3387 } 3388 DepDest = DepDest->NextLoadStore; 3389 3390 // Example, explaining the loop break condition: Let's assume our 3391 // starting instruction is i0 and MaxMemDepDistance = 3. 3392 // 3393 // +--------v--v--v 3394 // i0,i1,i2,i3,i4,i5,i6,i7,i8 3395 // +--------^--^--^ 3396 // 3397 // MaxMemDepDistance let us stop alias-checking at i3 and we add 3398 // dependencies from i0 to i3,i4,.. (even if they are not aliased). 3399 // Previously we already added dependencies from i3 to i6,i7,i8 3400 // (because of MaxMemDepDistance). As we added a dependency from 3401 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 3402 // and we can abort this loop at i6. 3403 if (DistToSrc >= 2 * MaxMemDepDistance) 3404 break; 3405 DistToSrc++; 3406 } 3407 } 3408 } 3409 BundleMember = BundleMember->NextInBundle; 3410 } 3411 if (InsertInReadyList && SD->isReady()) { 3412 ReadyInsts.push_back(SD); 3413 DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n"); 3414 } 3415 } 3416 } 3417 3418 void BoUpSLP::BlockScheduling::resetSchedule() { 3419 assert(ScheduleStart && 3420 "tried to reset schedule on block which has not been scheduled"); 3421 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3422 ScheduleData *SD = getScheduleData(I); 3423 assert(isInSchedulingRegion(SD)); 3424 SD->IsScheduled = false; 3425 SD->resetUnscheduledDeps(); 3426 } 3427 ReadyInsts.clear(); 3428 } 3429 3430 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 3431 3432 if (!BS->ScheduleStart) 3433 return; 3434 3435 DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 3436 3437 BS->resetSchedule(); 3438 3439 // For the real scheduling we use a more sophisticated ready-list: it is 3440 // sorted by the original instruction location. This lets the final schedule 3441 // be as close as possible to the original instruction order. 3442 struct ScheduleDataCompare { 3443 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const { 3444 return SD2->SchedulingPriority < SD1->SchedulingPriority; 3445 } 3446 }; 3447 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 3448 3449 // Ensure that all dependency data is updated and fill the ready-list with 3450 // initial instructions. 3451 int Idx = 0; 3452 int NumToSchedule = 0; 3453 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 3454 I = I->getNextNode()) { 3455 ScheduleData *SD = BS->getScheduleData(I); 3456 assert( 3457 SD->isPartOfBundle() == (ScalarToTreeEntry.count(SD->Inst) != 0) && 3458 "scheduler and vectorizer have different opinion on what is a bundle"); 3459 SD->FirstInBundle->SchedulingPriority = Idx++; 3460 if (SD->isSchedulingEntity()) { 3461 BS->calculateDependencies(SD, false, this); 3462 NumToSchedule++; 3463 } 3464 } 3465 BS->initialFillReadyList(ReadyInsts); 3466 3467 Instruction *LastScheduledInst = BS->ScheduleEnd; 3468 3469 // Do the "real" scheduling. 3470 while (!ReadyInsts.empty()) { 3471 ScheduleData *picked = *ReadyInsts.begin(); 3472 ReadyInsts.erase(ReadyInsts.begin()); 3473 3474 // Move the scheduled instruction(s) to their dedicated places, if not 3475 // there yet. 
3476 ScheduleData *BundleMember = picked;
3477 while (BundleMember) {
3478 Instruction *pickedInst = BundleMember->Inst;
3479 if (LastScheduledInst->getNextNode() != pickedInst) {
3480 BS->BB->getInstList().remove(pickedInst);
3481 BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
3482 pickedInst);
3483 }
3484 LastScheduledInst = pickedInst;
3485 BundleMember = BundleMember->NextInBundle;
3486 }
3487
3488 BS->schedule(picked, ReadyInsts);
3489 NumToSchedule--;
3490 }
3491 assert(NumToSchedule == 0 && "could not schedule all instructions");
3492
3493 // Avoid duplicate scheduling of the block.
3494 BS->ScheduleStart = nullptr;
3495 }
3496
3497 unsigned BoUpSLP::getVectorElementSize(Value *V) {
3498 // If V is a store, just return the width of the stored value without
3499 // traversing the expression tree. This is the common case.
3500 if (auto *Store = dyn_cast<StoreInst>(V))
3501 return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
3502
3503 // If V is not a store, we can traverse the expression tree to find loads
3504 // that feed it. The type of the loaded value may indicate a more suitable
3505 // width than V's type. We want to base the vector element size on the width
3506 // of memory operations where possible.
3507 SmallVector<Instruction *, 16> Worklist;
3508 SmallPtrSet<Instruction *, 16> Visited;
3509 if (auto *I = dyn_cast<Instruction>(V))
3510 Worklist.push_back(I);
3511
3512 // Traverse the expression tree in bottom-up order looking for loads. If we
3513 // encounter an instruction we don't yet handle, we give up.
3514 auto MaxWidth = 0u;
3515 auto FoundUnknownInst = false;
3516 while (!Worklist.empty() && !FoundUnknownInst) {
3517 auto *I = Worklist.pop_back_val();
3518 Visited.insert(I);
3519
3520 // We should only be looking at scalar instructions here. If the current
3521 // instruction has a vector type, give up.
3522 auto *Ty = I->getType();
3523 if (isa<VectorType>(Ty))
3524 FoundUnknownInst = true;
3525
3526 // If the current instruction is a load, update MaxWidth to reflect the
3527 // width of the loaded value.
3528 else if (isa<LoadInst>(I))
3529 MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty));
3530
3531 // Otherwise, we need to visit the operands of the instruction. We only
3532 // handle the interesting cases from buildTree here. If an operand is an
3533 // instruction we haven't yet visited, we add it to the worklist.
3534 else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
3535 isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) {
3536 for (Use &U : I->operands())
3537 if (auto *J = dyn_cast<Instruction>(U.get()))
3538 if (!Visited.count(J))
3539 Worklist.push_back(J);
3540 }
3541
3542 // If we don't yet handle the instruction, give up.
3543 else
3544 FoundUnknownInst = true;
3545 }
3546
3547 // If we didn't encounter a memory access in the expression tree, or if we
3548 // gave up for some reason, just return the width of V.
3549 if (!MaxWidth || FoundUnknownInst)
3550 return DL->getTypeSizeInBits(V->getType());
3551
3552 // Otherwise, return the maximum width we found.
3553 return MaxWidth;
3554 }
3555
3556 // Determine if a value V in a vectorizable expression Expr can be demoted to a
3557 // smaller type with a truncation. We collect the values that will be demoted
3558 // in ToDemote and additional roots that require investigating in Roots.
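// Returning false means the value must keep its original width; when that
// happens for a tree root, computeMinimumValueSizes() gives up on demotion
// entirely.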
3559 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr, 3560 SmallVectorImpl<Value *> &ToDemote, 3561 SmallVectorImpl<Value *> &Roots) { 3562 3563 // We can always demote constants. 3564 if (isa<Constant>(V)) { 3565 ToDemote.push_back(V); 3566 return true; 3567 } 3568 3569 // If the value is not an instruction in the expression with only one use, it 3570 // cannot be demoted. 3571 auto *I = dyn_cast<Instruction>(V); 3572 if (!I || !I->hasOneUse() || !Expr.count(I)) 3573 return false; 3574 3575 switch (I->getOpcode()) { 3576 3577 // We can always demote truncations and extensions. Since truncations can 3578 // seed additional demotion, we save the truncated value. 3579 case Instruction::Trunc: 3580 Roots.push_back(I->getOperand(0)); 3581 case Instruction::ZExt: 3582 case Instruction::SExt: 3583 break; 3584 3585 // We can demote certain binary operations if we can demote both of their 3586 // operands. 3587 case Instruction::Add: 3588 case Instruction::Sub: 3589 case Instruction::Mul: 3590 case Instruction::And: 3591 case Instruction::Or: 3592 case Instruction::Xor: 3593 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) || 3594 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots)) 3595 return false; 3596 break; 3597 3598 // We can demote selects if we can demote their true and false values. 3599 case Instruction::Select: { 3600 SelectInst *SI = cast<SelectInst>(I); 3601 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) || 3602 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots)) 3603 return false; 3604 break; 3605 } 3606 3607 // We can demote phis if we can demote all their incoming operands. Note that 3608 // we don't need to worry about cycles since we ensure single use above. 3609 case Instruction::PHI: { 3610 PHINode *PN = cast<PHINode>(I); 3611 for (Value *IncValue : PN->incoming_values()) 3612 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots)) 3613 return false; 3614 break; 3615 } 3616 3617 // Otherwise, conservatively give up. 3618 default: 3619 return false; 3620 } 3621 3622 // Record the value that we can demote. 3623 ToDemote.push_back(V); 3624 return true; 3625 } 3626 3627 void BoUpSLP::computeMinimumValueSizes() { 3628 // If there are no external uses, the expression tree must be rooted by a 3629 // store. We can't demote in-memory values, so there is nothing to do here. 3630 if (ExternalUses.empty()) 3631 return; 3632 3633 // We only attempt to truncate integer expressions. 3634 auto &TreeRoot = VectorizableTree[0].Scalars; 3635 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 3636 if (!TreeRootIT) 3637 return; 3638 3639 // If the expression is not rooted by a store, these roots should have 3640 // external uses. We will rely on InstCombine to rewrite the expression in 3641 // the narrower type. However, InstCombine only rewrites single-use values. 3642 // This means that if a tree entry other than a root is used externally, it 3643 // must have multiple uses and InstCombine will not rewrite it. The code 3644 // below ensures that only the roots are used externally. 3645 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end()); 3646 for (auto &EU : ExternalUses) 3647 if (!Expr.erase(EU.Scalar)) 3648 return; 3649 if (!Expr.empty()) 3650 return; 3651 3652 // Collect the scalar values of the vectorizable expression. We will use this 3653 // context to determine which values can be demoted. If we see a truncation, 3654 // we mark it as seeding another demotion. 
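// After the loop below, Expr contains every scalar in the tree;
// collectValuesToDemote() only follows operands that are members of this set.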
3655 for (auto &Entry : VectorizableTree) 3656 Expr.insert(Entry.Scalars.begin(), Entry.Scalars.end()); 3657 3658 // Ensure the roots of the vectorizable tree don't form a cycle. They must 3659 // have a single external user that is not in the vectorizable tree. 3660 for (auto *Root : TreeRoot) 3661 if (!Root->hasOneUse() || Expr.count(*Root->user_begin())) 3662 return; 3663 3664 // Conservatively determine if we can actually truncate the roots of the 3665 // expression. Collect the values that can be demoted in ToDemote and 3666 // additional roots that require investigating in Roots. 3667 SmallVector<Value *, 32> ToDemote; 3668 SmallVector<Value *, 4> Roots; 3669 for (auto *Root : TreeRoot) 3670 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots)) 3671 return; 3672 3673 // The maximum bit width required to represent all the values that can be 3674 // demoted without loss of precision. It would be safe to truncate the roots 3675 // of the expression to this width. 3676 auto MaxBitWidth = 8u; 3677 3678 // We first check if all the bits of the roots are demanded. If they're not, 3679 // we can truncate the roots to this narrower type. 3680 for (auto *Root : TreeRoot) { 3681 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 3682 MaxBitWidth = std::max<unsigned>( 3683 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth); 3684 } 3685 3686 // True if the roots can be zero-extended back to their original type, rather 3687 // than sign-extended. We know that if the leading bits are not demanded, we 3688 // can safely zero-extend. So we initialize IsKnownPositive to True. 3689 bool IsKnownPositive = true; 3690 3691 // If all the bits of the roots are demanded, we can try a little harder to 3692 // compute a narrower type. This can happen, for example, if the roots are 3693 // getelementptr indices. InstCombine promotes these indices to the pointer 3694 // width. Thus, all their bits are technically demanded even though the 3695 // address computation might be vectorized in a smaller type. 3696 // 3697 // We start by looking at each entry that can be demoted. We compute the 3698 // maximum bit width required to store the scalar by using ValueTracking to 3699 // compute the number of high-order bits we can truncate. 3700 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType())) { 3701 MaxBitWidth = 8u; 3702 3703 // Determine if the sign bit of all the roots is known to be zero. If not, 3704 // IsKnownPositive is set to False. 3705 IsKnownPositive = all_of(TreeRoot, [&](Value *R) { 3706 bool KnownZero = false; 3707 bool KnownOne = false; 3708 ComputeSignBit(R, KnownZero, KnownOne, *DL); 3709 return KnownZero; 3710 }); 3711 3712 // Determine the maximum number of bits required to store the scalar 3713 // values. 3714 for (auto *Scalar : ToDemote) { 3715 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, 0, DT); 3716 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType()); 3717 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth); 3718 } 3719 3720 // If we can't prove that the sign bit is zero, we must add one to the 3721 // maximum bit width to account for the unknown sign bit. This preserves 3722 // the existing sign bit so we can safely sign-extend the root back to the 3723 // original type. Otherwise, if we know the sign bit is zero, we will 3724 // zero-extend the root instead. 
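// For example, if ValueTracking reports 25 sign bits for an i32 scalar, only
// 32 - 25 = 7 bits are significant, so MaxBitWidth stays at the floor of 8;
// if the sign bit might be set, it is bumped to 9 below and then rounded up
// to 16.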
3725 //
3726 // FIXME: This is somewhat suboptimal, as there will be cases where adding
3727 // one to the maximum bit width will yield a larger-than-necessary
3728 // type. In general, we need to add an extra bit only if we can't
3729 // prove that the upper bit of the original type is equal to the
3730 // upper bit of the proposed smaller type. If these two bits are the
3731 // same (either zero or one) we know that sign-extending from the
3732 // smaller type will result in the same value. Here, since we can't
3733 // yet prove this, we are just making the proposed smaller type
3734 // larger to ensure correctness.
3735 if (!IsKnownPositive)
3736 ++MaxBitWidth;
3737 }
3738
3739 // Round MaxBitWidth up to the next power-of-two.
3740 if (!isPowerOf2_64(MaxBitWidth))
3741 MaxBitWidth = NextPowerOf2(MaxBitWidth);
3742
3743 // If the maximum bit width we compute is less than the width of the roots'
3744 // type, we can proceed with the narrowing. Otherwise, do nothing.
3745 if (MaxBitWidth >= TreeRootIT->getBitWidth())
3746 return;
3747
3748 // If we can truncate the root, we must collect additional values that might
3749 // be demoted as a result. That is, those seeded by truncations we will
3750 // modify.
3751 while (!Roots.empty())
3752 collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
3753
3754 // Finally, map the values we can demote to the maximum bit width we computed.
3755 for (auto *Scalar : ToDemote)
3756 MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
3757 }
3758
3759 namespace {
3760 /// The SLPVectorizer Pass.
3761 struct SLPVectorizer : public FunctionPass {
3762 SLPVectorizerPass Impl;
3763
3764 /// Pass identification, replacement for typeid
3765 static char ID;
3766
3767 explicit SLPVectorizer() : FunctionPass(ID) {
3768 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
3769 }
3770
3771
3772 bool doInitialization(Module &M) override {
3773 return false;
3774 }
3775
3776 bool runOnFunction(Function &F) override {
3777 if (skipFunction(F))
3778 return false;
3779
3780 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
3781 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
3782 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
3783 auto *TLI = TLIP ?
&TLIP->getTLI() : nullptr; 3784 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 3785 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 3786 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 3787 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 3788 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 3789 3790 return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB); 3791 } 3792 3793 void getAnalysisUsage(AnalysisUsage &AU) const override { 3794 FunctionPass::getAnalysisUsage(AU); 3795 AU.addRequired<AssumptionCacheTracker>(); 3796 AU.addRequired<ScalarEvolutionWrapperPass>(); 3797 AU.addRequired<AAResultsWrapperPass>(); 3798 AU.addRequired<TargetTransformInfoWrapperPass>(); 3799 AU.addRequired<LoopInfoWrapperPass>(); 3800 AU.addRequired<DominatorTreeWrapperPass>(); 3801 AU.addRequired<DemandedBitsWrapperPass>(); 3802 AU.addPreserved<LoopInfoWrapperPass>(); 3803 AU.addPreserved<DominatorTreeWrapperPass>(); 3804 AU.addPreserved<AAResultsWrapperPass>(); 3805 AU.addPreserved<GlobalsAAWrapperPass>(); 3806 AU.setPreservesCFG(); 3807 } 3808 }; 3809 } // end anonymous namespace 3810 3811 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) { 3812 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); 3813 auto *TTI = &AM.getResult<TargetIRAnalysis>(F); 3814 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); 3815 auto *AA = &AM.getResult<AAManager>(F); 3816 auto *LI = &AM.getResult<LoopAnalysis>(F); 3817 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 3818 auto *AC = &AM.getResult<AssumptionAnalysis>(F); 3819 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F); 3820 3821 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB); 3822 if (!Changed) 3823 return PreservedAnalyses::all(); 3824 3825 PreservedAnalyses PA; 3826 PA.preserveSet<CFGAnalyses>(); 3827 PA.preserve<AAManager>(); 3828 PA.preserve<GlobalsAA>(); 3829 return PA; 3830 } 3831 3832 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, 3833 TargetTransformInfo *TTI_, 3834 TargetLibraryInfo *TLI_, AliasAnalysis *AA_, 3835 LoopInfo *LI_, DominatorTree *DT_, 3836 AssumptionCache *AC_, DemandedBits *DB_) { 3837 SE = SE_; 3838 TTI = TTI_; 3839 TLI = TLI_; 3840 AA = AA_; 3841 LI = LI_; 3842 DT = DT_; 3843 AC = AC_; 3844 DB = DB_; 3845 DL = &F.getParent()->getDataLayout(); 3846 3847 Stores.clear(); 3848 GEPs.clear(); 3849 bool Changed = false; 3850 3851 // If the target claims to have no vector registers don't attempt 3852 // vectorization. 3853 if (!TTI->getNumberOfRegisters(true)) 3854 return false; 3855 3856 // Don't vectorize when the attribute NoImplicitFloat is used. 3857 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 3858 return false; 3859 3860 DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 3861 3862 // Use the bottom up slp vectorizer to construct chains that start with 3863 // store instructions. 3864 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL); 3865 3866 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 3867 // delete instructions. 3868 3869 // Scan the blocks in the function in post order. 3870 for (auto BB : post_order(&F.getEntryBlock())) { 3871 collectSeedInstructions(BB); 3872 3873 // Vectorize trees that end at stores. 
3874 if (!Stores.empty()) { 3875 DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() 3876 << " underlying objects.\n"); 3877 Changed |= vectorizeStoreChains(R); 3878 } 3879 3880 // Vectorize trees that end at reductions. 3881 Changed |= vectorizeChainsInBlock(BB, R); 3882 3883 // Vectorize the index computations of getelementptr instructions. This 3884 // is primarily intended to catch gather-like idioms ending at 3885 // non-consecutive loads. 3886 if (!GEPs.empty()) { 3887 DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() 3888 << " underlying objects.\n"); 3889 Changed |= vectorizeGEPIndices(BB, R); 3890 } 3891 } 3892 3893 if (Changed) { 3894 R.optimizeGatherSequence(); 3895 DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 3896 DEBUG(verifyFunction(F)); 3897 } 3898 return Changed; 3899 } 3900 3901 /// \brief Check that the Values in the slice in VL array are still existent in 3902 /// the WeakVH array. 3903 /// Vectorization of part of the VL array may cause later values in the VL array 3904 /// to become invalid. We track when this has happened in the WeakVH array. 3905 static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, ArrayRef<WeakVH> VH, 3906 unsigned SliceBegin, unsigned SliceSize) { 3907 VL = VL.slice(SliceBegin, SliceSize); 3908 VH = VH.slice(SliceBegin, SliceSize); 3909 return !std::equal(VL.begin(), VL.end(), VH.begin()); 3910 } 3911 3912 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, 3913 unsigned VecRegSize) { 3914 unsigned ChainLen = Chain.size(); 3915 DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen 3916 << "\n"); 3917 unsigned Sz = R.getVectorElementSize(Chain[0]); 3918 unsigned VF = VecRegSize / Sz; 3919 3920 if (!isPowerOf2_32(Sz) || VF < 2) 3921 return false; 3922 3923 // Keep track of values that were deleted by vectorizing in the loop below. 3924 SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end()); 3925 3926 bool Changed = false; 3927 // Look for profitable vectorizable trees at all offsets, starting at zero. 3928 for (unsigned i = 0, e = ChainLen; i < e; ++i) { 3929 if (i + VF > e) 3930 break; 3931 3932 // Check that a previous iteration of this loop did not delete the Value. 3933 if (hasValueBeenRAUWed(Chain, TrackValues, i, VF)) 3934 continue; 3935 3936 DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i 3937 << "\n"); 3938 ArrayRef<Value *> Operands = Chain.slice(i, VF); 3939 3940 R.buildTree(Operands); 3941 if (R.isTreeTinyAndNotFullyVectorizable()) 3942 continue; 3943 3944 R.computeMinimumValueSizes(); 3945 3946 int Cost = R.getTreeCost(); 3947 3948 DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n"); 3949 if (Cost < -SLPCostThreshold) { 3950 DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n"); 3951 R.vectorizeTree(); 3952 3953 // Move to the next bundle. 3954 i += VF - 1; 3955 Changed = true; 3956 } 3957 } 3958 3959 return Changed; 3960 } 3961 3962 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores, 3963 BoUpSLP &R) { 3964 SetVector<StoreInst *> Heads, Tails; 3965 SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain; 3966 3967 // We may run into multiple chains that merge into a single chain. We mark the 3968 // stores that we vectorized so that we don't visit the same store twice. 3969 BoUpSLP::ValueSet VectorizedStores; 3970 bool Changed = false; 3971 3972 // Do a quadratic search on all of the given stores and find 3973 // all of the pairs of stores that follow each other. 
3974 SmallVector<unsigned, 16> IndexQueue; 3975 for (unsigned i = 0, e = Stores.size(); i < e; ++i) { 3976 IndexQueue.clear(); 3977 // If a store has multiple consecutive store candidates, search Stores 3978 // array according to the sequence: from i+1 to e, then from i-1 to 0. 3979 // This is because usually pairing with immediate succeeding or preceding 3980 // candidate create the best chance to find slp vectorization opportunity. 3981 unsigned j = 0; 3982 for (j = i + 1; j < e; ++j) 3983 IndexQueue.push_back(j); 3984 for (j = i; j > 0; --j) 3985 IndexQueue.push_back(j - 1); 3986 3987 for (auto &k : IndexQueue) { 3988 if (isConsecutiveAccess(Stores[i], Stores[k], *DL, *SE)) { 3989 Tails.insert(Stores[k]); 3990 Heads.insert(Stores[i]); 3991 ConsecutiveChain[Stores[i]] = Stores[k]; 3992 break; 3993 } 3994 } 3995 } 3996 3997 // For stores that start but don't end a link in the chain: 3998 for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end(); 3999 it != e; ++it) { 4000 if (Tails.count(*it)) 4001 continue; 4002 4003 // We found a store instr that starts a chain. Now follow the chain and try 4004 // to vectorize it. 4005 BoUpSLP::ValueList Operands; 4006 StoreInst *I = *it; 4007 // Collect the chain into a list. 4008 while (Tails.count(I) || Heads.count(I)) { 4009 if (VectorizedStores.count(I)) 4010 break; 4011 Operands.push_back(I); 4012 // Move to the next value in the chain. 4013 I = ConsecutiveChain[I]; 4014 } 4015 4016 // FIXME: Is division-by-2 the correct step? Should we assert that the 4017 // register size is a power-of-2? 4018 for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize(); 4019 Size /= 2) { 4020 if (vectorizeStoreChain(Operands, R, Size)) { 4021 // Mark the vectorized stores so that we don't vectorize them again. 4022 VectorizedStores.insert(Operands.begin(), Operands.end()); 4023 Changed = true; 4024 break; 4025 } 4026 } 4027 } 4028 4029 return Changed; 4030 } 4031 4032 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) { 4033 4034 // Initialize the collections. We will make a single pass over the block. 4035 Stores.clear(); 4036 GEPs.clear(); 4037 4038 // Visit the store and getelementptr instructions in BB and organize them in 4039 // Stores and GEPs according to the underlying objects of their pointer 4040 // operands. 4041 for (Instruction &I : *BB) { 4042 4043 // Ignore store instructions that are volatile or have a pointer operand 4044 // that doesn't point to a scalar type. 4045 if (auto *SI = dyn_cast<StoreInst>(&I)) { 4046 if (!SI->isSimple()) 4047 continue; 4048 if (!isValidElementType(SI->getValueOperand()->getType())) 4049 continue; 4050 Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI); 4051 } 4052 4053 // Ignore getelementptr instructions that have more than one index, a 4054 // constant index, or a pointer operand that doesn't point to a scalar 4055 // type. 
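// GEPs that produce a vector of pointers are skipped as well.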
4056 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 4057 auto Idx = GEP->idx_begin()->get(); 4058 if (GEP->getNumIndices() > 1 || isa<Constant>(Idx)) 4059 continue; 4060 if (!isValidElementType(Idx->getType())) 4061 continue; 4062 if (GEP->getType()->isVectorTy()) 4063 continue; 4064 GEPs[GetUnderlyingObject(GEP->getPointerOperand(), *DL)].push_back(GEP); 4065 } 4066 } 4067 } 4068 4069 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) { 4070 if (!A || !B) 4071 return false; 4072 Value *VL[] = { A, B }; 4073 return tryToVectorizeList(VL, R, None, true); 4074 } 4075 4076 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 4077 ArrayRef<Value *> BuildVector, 4078 bool AllowReorder) { 4079 if (VL.size() < 2) 4080 return false; 4081 4082 DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " << VL.size() 4083 << ".\n"); 4084 4085 // Check that all of the parts are scalar instructions of the same type. 4086 Instruction *I0 = dyn_cast<Instruction>(VL[0]); 4087 if (!I0) 4088 return false; 4089 4090 unsigned Opcode0 = I0->getOpcode(); 4091 4092 unsigned Sz = R.getVectorElementSize(I0); 4093 unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz); 4094 unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF); 4095 if (MaxVF < 2) 4096 return false; 4097 4098 for (Value *V : VL) { 4099 Type *Ty = V->getType(); 4100 if (!isValidElementType(Ty)) 4101 return false; 4102 Instruction *Inst = dyn_cast<Instruction>(V); 4103 if (!Inst || Inst->getOpcode() != Opcode0) 4104 return false; 4105 } 4106 4107 bool Changed = false; 4108 4109 // Keep track of values that were deleted by vectorizing in the loop below. 4110 SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end()); 4111 4112 unsigned NextInst = 0, MaxInst = VL.size(); 4113 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; 4114 VF /= 2) { 4115 // No actual vectorization should happen, if number of parts is the same as 4116 // provided vectorization factor (i.e. the scalar type is used for vector 4117 // code during codegen). 4118 auto *VecTy = VectorType::get(VL[0]->getType(), VF); 4119 if (TTI->getNumberOfParts(VecTy) == VF) 4120 continue; 4121 for (unsigned I = NextInst; I < MaxInst; ++I) { 4122 unsigned OpsWidth = 0; 4123 4124 if (I + VF > MaxInst) 4125 OpsWidth = MaxInst - I; 4126 else 4127 OpsWidth = VF; 4128 4129 if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2) 4130 break; 4131 4132 // Check that a previous iteration of this loop did not delete the Value. 4133 if (hasValueBeenRAUWed(VL, TrackValues, I, OpsWidth)) 4134 continue; 4135 4136 DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations " 4137 << "\n"); 4138 ArrayRef<Value *> Ops = VL.slice(I, OpsWidth); 4139 4140 ArrayRef<Value *> BuildVectorSlice; 4141 if (!BuildVector.empty()) 4142 BuildVectorSlice = BuildVector.slice(I, OpsWidth); 4143 4144 R.buildTree(Ops, BuildVectorSlice); 4145 // TODO: check if we can allow reordering for more cases. 4146 if (AllowReorder && R.shouldReorder()) { 4147 // Conceptually, there is nothing actually preventing us from trying to 4148 // reorder a larger list. In fact, we do exactly this when vectorizing 4149 // reductions. However, at this point, we only expect to get here when 4150 // there are exactly two operations. 
4151 assert(Ops.size() == 2); 4152 assert(BuildVectorSlice.empty()); 4153 Value *ReorderedOps[] = {Ops[1], Ops[0]}; 4154 R.buildTree(ReorderedOps, None); 4155 } 4156 if (R.isTreeTinyAndNotFullyVectorizable()) 4157 continue; 4158 4159 R.computeMinimumValueSizes(); 4160 int Cost = R.getTreeCost(); 4161 4162 if (Cost < -SLPCostThreshold) { 4163 DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); 4164 Value *VectorizedRoot = R.vectorizeTree(); 4165 4166 // Reconstruct the build vector by extracting the vectorized root. This 4167 // way we handle the case where some elements of the vector are 4168 // undefined. 4169 // (return (inserelt <4 xi32> (insertelt undef (opd0) 0) (opd1) 2)) 4170 if (!BuildVectorSlice.empty()) { 4171 // The insert point is the last build vector instruction. The 4172 // vectorized root will precede it. This guarantees that we get an 4173 // instruction. The vectorized tree could have been constant folded. 4174 Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back()); 4175 unsigned VecIdx = 0; 4176 for (auto &V : BuildVectorSlice) { 4177 IRBuilder<NoFolder> Builder(InsertAfter->getParent(), 4178 ++BasicBlock::iterator(InsertAfter)); 4179 Instruction *I = cast<Instruction>(V); 4180 assert(isa<InsertElementInst>(I) || isa<InsertValueInst>(I)); 4181 Instruction *Extract = 4182 cast<Instruction>(Builder.CreateExtractElement( 4183 VectorizedRoot, Builder.getInt32(VecIdx++))); 4184 I->setOperand(1, Extract); 4185 I->removeFromParent(); 4186 I->insertAfter(Extract); 4187 InsertAfter = I; 4188 } 4189 } 4190 // Move to the next bundle. 4191 I += VF - 1; 4192 NextInst = I + 1; 4193 Changed = true; 4194 } 4195 } 4196 } 4197 4198 return Changed; 4199 } 4200 4201 bool SLPVectorizerPass::tryToVectorize(BinaryOperator *V, BoUpSLP &R) { 4202 if (!V) 4203 return false; 4204 4205 Value *P = V->getParent(); 4206 4207 // Vectorize in current basic block only. 4208 auto *Op0 = dyn_cast<Instruction>(V->getOperand(0)); 4209 auto *Op1 = dyn_cast<Instruction>(V->getOperand(1)); 4210 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P) 4211 return false; 4212 4213 // Try to vectorize V. 4214 if (tryToVectorizePair(Op0, Op1, R)) 4215 return true; 4216 4217 auto *A = dyn_cast<BinaryOperator>(Op0); 4218 auto *B = dyn_cast<BinaryOperator>(Op1); 4219 // Try to skip B. 4220 if (B && B->hasOneUse()) { 4221 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); 4222 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); 4223 if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R)) 4224 return true; 4225 if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R)) 4226 return true; 4227 } 4228 4229 // Try to skip A. 4230 if (A && A->hasOneUse()) { 4231 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 4232 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 4233 if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R)) 4234 return true; 4235 if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R)) 4236 return true; 4237 } 4238 return false; 4239 } 4240 4241 /// \brief Generate a shuffle mask to be used in a reduction tree. 4242 /// 4243 /// \param VecLen The length of the vector to be reduced. 4244 /// \param NumEltsToRdx The number of elements that should be reduced in the 4245 /// vector. 4246 /// \param IsPairwise Whether the reduction is a pairwise or splitting 4247 /// reduction. 
A pairwise reduction will generate a mask of 4248 /// <0,2,...> or <1,3,..> while a splitting reduction will generate 4249 /// <2,3, undef,undef> for a vector of 4 and NumElts = 2. 4250 /// \param IsLeft True will generate a mask of even elements, odd otherwise. 4251 static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx, 4252 bool IsPairwise, bool IsLeft, 4253 IRBuilder<> &Builder) { 4254 assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask"); 4255 4256 SmallVector<Constant *, 32> ShuffleMask( 4257 VecLen, UndefValue::get(Builder.getInt32Ty())); 4258 4259 if (IsPairwise) 4260 // Build a mask of 0, 2, ... (left) or 1, 3, ... (right). 4261 for (unsigned i = 0; i != NumEltsToRdx; ++i) 4262 ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft); 4263 else 4264 // Move the upper half of the vector to the lower half. 4265 for (unsigned i = 0; i != NumEltsToRdx; ++i) 4266 ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i); 4267 4268 return ConstantVector::get(ShuffleMask); 4269 } 4270 4271 namespace { 4272 /// Model horizontal reductions. 4273 /// 4274 /// A horizontal reduction is a tree of reduction operations (currently add and 4275 /// fadd) that has operations that can be put into a vector as its leaf. 4276 /// For example, this tree: 4277 /// 4278 /// mul mul mul mul 4279 /// \ / \ / 4280 /// + + 4281 /// \ / 4282 /// + 4283 /// This tree has "mul" as its reduced values and "+" as its reduction 4284 /// operations. A reduction might be feeding into a store or a binary operation 4285 /// feeding a phi. 4286 /// ... 4287 /// \ / 4288 /// + 4289 /// | 4290 /// phi += 4291 /// 4292 /// Or: 4293 /// ... 4294 /// \ / 4295 /// + 4296 /// | 4297 /// *p = 4298 /// 4299 class HorizontalReduction { 4300 SmallVector<Value *, 16> ReductionOps; 4301 SmallVector<Value *, 32> ReducedVals; 4302 // Use map vector to make stable output. 4303 MapVector<Instruction *, Value *> ExtraArgs; 4304 4305 BinaryOperator *ReductionRoot = nullptr; 4306 4307 /// The opcode of the reduction. 4308 Instruction::BinaryOps ReductionOpcode = Instruction::BinaryOpsEnd; 4309 /// The opcode of the values we perform a reduction on. 4310 unsigned ReducedValueOpcode = 0; 4311 /// Should we model this reduction as a pairwise reduction tree or a tree that 4312 /// splits the vector in halves and adds those halves. 4313 bool IsPairwiseReduction = false; 4314 4315 /// Checks if the ParentStackElem.first should be marked as a reduction 4316 /// operation with an extra argument or as extra argument itself. 4317 void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem, 4318 Value *ExtraArg) { 4319 if (ExtraArgs.count(ParentStackElem.first)) { 4320 ExtraArgs[ParentStackElem.first] = nullptr; 4321 // We ran into something like: 4322 // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg. 4323 // The whole ParentStackElem.first should be considered as an extra value 4324 // in this case. 4325 // Do not perform analysis of remaining operands of ParentStackElem.first 4326 // instruction, this whole instruction is an extra argument. 4327 ParentStackElem.second = ParentStackElem.first->getNumOperands(); 4328 } else { 4329 // We ran into something like: 4330 // ParentStackElem.first += ... + ExtraArg + ... 4331 ExtraArgs[ParentStackElem.first] = ExtraArg; 4332 } 4333 } 4334 4335 public: 4336 HorizontalReduction() = default; 4337 4338 /// \brief Try to find a reduction tree. 
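/// \returns true if \p B roots a tree of associative reduction operations
/// (currently only integer or floating-point adds) whose leaves are recorded
/// as the reduced values.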
4339 bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B) {
4340 assert((!Phi || is_contained(Phi->operands(), B)) &&
4341 "The phi needs to use the binary operator");
4342
4343 // We could have an initial reduction that is not an add.
4344 // r *= v1 + v2 + v3 + v4
4345 // In such a case, start looking for a tree rooted at the first '+'.
4346 if (Phi) {
4347 if (B->getOperand(0) == Phi) {
4348 Phi = nullptr;
4349 B = dyn_cast<BinaryOperator>(B->getOperand(1));
4350 } else if (B->getOperand(1) == Phi) {
4351 Phi = nullptr;
4352 B = dyn_cast<BinaryOperator>(B->getOperand(0));
4353 }
4354 }
4355
4356 if (!B)
4357 return false;
4358
4359 Type *Ty = B->getType();
4360 if (!isValidElementType(Ty))
4361 return false;
4362
4363 ReductionOpcode = B->getOpcode();
4364 ReducedValueOpcode = 0;
4365 ReductionRoot = B;
4366
4367 // We currently only support adds.
4368 if ((ReductionOpcode != Instruction::Add &&
4369 ReductionOpcode != Instruction::FAdd) ||
4370 !B->isAssociative())
4371 return false;
4372
4373 // Post order traverse the reduction tree starting at B. We only handle true
4374 // trees containing only binary operators or selects.
4375 SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
4376 Stack.push_back(std::make_pair(B, 0));
4377 while (!Stack.empty()) {
4378 Instruction *TreeN = Stack.back().first;
4379 unsigned EdgeToVisit = Stack.back().second++;
4380 bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;
4381
4382 // Postorder visit.
4383 if (EdgeToVisit == 2 || IsReducedValue) {
4384 if (IsReducedValue)
4385 ReducedVals.push_back(TreeN);
4386 else {
4387 auto I = ExtraArgs.find(TreeN);
4388 if (I != ExtraArgs.end() && !I->second) {
4389 // Check if TreeN is an extra argument of its parent operation.
4390 if (Stack.size() <= 1) {
4391 // TreeN can't be an extra argument as it is a root reduction
4392 // operation.
4393 return false;
4394 }
4395 // Yes, TreeN is an extra argument, do not add it to a list of
4396 // reduction operations.
4397 // Stack[Stack.size() - 2] always points to the parent operation.
4398 markExtraArg(Stack[Stack.size() - 2], TreeN);
4399 ExtraArgs.erase(TreeN);
4400 } else
4401 ReductionOps.push_back(TreeN);
4402 }
4403 // Retract.
4404 Stack.pop_back();
4405 continue;
4406 }
4407
4408 // Visit left or right.
4409 Value *NextV = TreeN->getOperand(EdgeToVisit);
4410 if (NextV != Phi) {
4411 auto *I = dyn_cast<Instruction>(NextV);
4412 // Continue the analysis if the next operand is a reduction operation or
4413 // (possibly) a reduced value. If the reduced value opcode is not set yet,
4414 // the first operation we meet that differs from the reduction operation
4415 // determines the reduced value class.
4416 if (I && (!ReducedValueOpcode || I->getOpcode() == ReducedValueOpcode ||
4417 I->getOpcode() == ReductionOpcode)) {
4418 // Only handle trees in the current basic block.
4419 if (I->getParent() != B->getParent()) {
4420 // I is an extra argument for TreeN (its parent operation).
4421 markExtraArg(Stack.back(), I);
4422 continue;
4423 }
4424
4425 // Each tree node needs to have one user except for the ultimate
4426 // reduction.
4427 if (!I->hasOneUse() && I != B) {
4428 // I is an extra argument for TreeN (its parent operation).
4429 markExtraArg(Stack.back(), I);
4430 continue;
4431 }
4432
4433 if (I->getOpcode() == ReductionOpcode) {
4434 // We need to be able to reassociate the reduction operations.
4435 if (!I->isAssociative()) {
4436 // I is an extra argument for TreeN (its parent operation).
4437 markExtraArg(Stack.back(), I); 4438 continue; 4439 } 4440 } else if (ReducedValueOpcode && 4441 ReducedValueOpcode != I->getOpcode()) { 4442 // Make sure that the opcodes of the operations that we are going to 4443 // reduce match. 4444 // I is an extra argument for TreeN (its parent operation). 4445 markExtraArg(Stack.back(), I); 4446 continue; 4447 } else if (!ReducedValueOpcode) 4448 ReducedValueOpcode = I->getOpcode(); 4449 4450 Stack.push_back(std::make_pair(I, 0)); 4451 continue; 4452 } 4453 } 4454 // NextV is an extra argument for TreeN (its parent operation). 4455 markExtraArg(Stack.back(), NextV); 4456 } 4457 return true; 4458 } 4459 4460 /// \brief Attempt to vectorize the tree found by 4461 /// matchAssociativeReduction. 4462 bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) { 4463 if (ReducedVals.empty()) 4464 return false; 4465 4466 // If there is a sufficient number of reduction values, reduce 4467 // to a nearby power-of-2. Can safely generate oversized 4468 // vectors and rely on the backend to split them to legal sizes. 4469 unsigned NumReducedVals = ReducedVals.size(); 4470 if (NumReducedVals < 4) 4471 return false; 4472 4473 unsigned ReduxWidth = PowerOf2Floor(NumReducedVals); 4474 4475 Value *VectorizedTree = nullptr; 4476 IRBuilder<> Builder(ReductionRoot); 4477 FastMathFlags Unsafe; 4478 Unsafe.setUnsafeAlgebra(); 4479 Builder.setFastMathFlags(Unsafe); 4480 unsigned i = 0; 4481 4482 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues; 4483 // The same extra argument may be used several time, so log each attempt 4484 // to use it. 4485 for (auto &Pair : ExtraArgs) 4486 ExternallyUsedValues[Pair.second].push_back(Pair.first); 4487 while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) { 4488 auto VL = makeArrayRef(&ReducedVals[i], ReduxWidth); 4489 V.buildTree(VL, ExternallyUsedValues, ReductionOps); 4490 if (V.shouldReorder()) { 4491 SmallVector<Value *, 8> Reversed(VL.rbegin(), VL.rend()); 4492 V.buildTree(Reversed, ExternallyUsedValues, ReductionOps); 4493 } 4494 if (V.isTreeTinyAndNotFullyVectorizable()) 4495 break; 4496 4497 V.computeMinimumValueSizes(); 4498 4499 // Estimate cost. 4500 int Cost = 4501 V.getTreeCost() + getReductionCost(TTI, ReducedVals[i], ReduxWidth); 4502 if (Cost >= -SLPCostThreshold) 4503 break; 4504 4505 DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost 4506 << ". (HorRdx)\n"); 4507 4508 // Vectorize a tree. 4509 DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc(); 4510 Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues); 4511 4512 // Emit a reduction. 4513 Value *ReducedSubTree = 4514 emitReduction(VectorizedRoot, Builder, ReduxWidth, ReductionOps); 4515 if (VectorizedTree) { 4516 Builder.SetCurrentDebugLocation(Loc); 4517 VectorizedTree = Builder.CreateBinOp(ReductionOpcode, VectorizedTree, 4518 ReducedSubTree, "bin.rdx"); 4519 propagateIRFlags(VectorizedTree, ReductionOps); 4520 } else 4521 VectorizedTree = ReducedSubTree; 4522 i += ReduxWidth; 4523 ReduxWidth = PowerOf2Floor(NumReducedVals - i); 4524 } 4525 4526 if (VectorizedTree) { 4527 // Finish the reduction. 
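// Fold the remaining scalar reduced values and any collected extra arguments
// into the result one binary operation at a time.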
4528 for (; i < NumReducedVals; ++i) { 4529 auto *I = cast<Instruction>(ReducedVals[i]); 4530 Builder.SetCurrentDebugLocation(I->getDebugLoc()); 4531 VectorizedTree = 4532 Builder.CreateBinOp(ReductionOpcode, VectorizedTree, I); 4533 propagateIRFlags(VectorizedTree, ReductionOps); 4534 } 4535 for (auto &Pair : ExternallyUsedValues) { 4536 assert(!Pair.second.empty() && 4537 "At least one DebugLoc must be inserted"); 4538 // Add each externally used value to the final reduction. 4539 for (auto *I : Pair.second) { 4540 Builder.SetCurrentDebugLocation(I->getDebugLoc()); 4541 VectorizedTree = Builder.CreateBinOp(ReductionOpcode, VectorizedTree, 4542 Pair.first, "bin.extra"); 4543 propagateIRFlags(VectorizedTree, I); 4544 } 4545 } 4546 // Update users. 4547 ReductionRoot->replaceAllUsesWith(VectorizedTree); 4548 } 4549 return VectorizedTree != nullptr; 4550 } 4551 4552 unsigned numReductionValues() const { 4553 return ReducedVals.size(); 4554 } 4555 4556 private: 4557 /// \brief Calculate the cost of a reduction. 4558 int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal, 4559 unsigned ReduxWidth) { 4560 Type *ScalarTy = FirstReducedVal->getType(); 4561 Type *VecTy = VectorType::get(ScalarTy, ReduxWidth); 4562 4563 int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true); 4564 int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false); 4565 4566 IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost; 4567 int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost; 4568 4569 int ScalarReduxCost = 4570 (ReduxWidth - 1) * 4571 TTI->getArithmeticInstrCost(ReductionOpcode, ScalarTy); 4572 4573 DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost 4574 << " for reduction that starts with " << *FirstReducedVal 4575 << " (It is a " 4576 << (IsPairwiseReduction ? "pairwise" : "splitting") 4577 << " reduction)\n"); 4578 4579 return VecReduxCost - ScalarReduxCost; 4580 } 4581 4582 /// \brief Emit a horizontal reduction of the vectorized value. 4583 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 4584 unsigned ReduxWidth, ArrayRef<Value *> RedOps) { 4585 assert(VectorizedValue && "Need to have a vectorized tree node"); 4586 assert(isPowerOf2_32(ReduxWidth) && 4587 "We only handle power-of-two reductions for now"); 4588 4589 Value *TmpVec = VectorizedValue; 4590 for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) { 4591 if (IsPairwiseReduction) { 4592 Value *LeftMask = 4593 createRdxShuffleMask(ReduxWidth, i, true, true, Builder); 4594 Value *RightMask = 4595 createRdxShuffleMask(ReduxWidth, i, true, false, Builder); 4596 4597 Value *LeftShuf = Builder.CreateShuffleVector( 4598 TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l"); 4599 Value *RightShuf = Builder.CreateShuffleVector( 4600 TmpVec, UndefValue::get(TmpVec->getType()), (RightMask), 4601 "rdx.shuf.r"); 4602 TmpVec = Builder.CreateBinOp(ReductionOpcode, LeftShuf, RightShuf, 4603 "bin.rdx"); 4604 } else { 4605 Value *UpperHalf = 4606 createRdxShuffleMask(ReduxWidth, i, false, false, Builder); 4607 Value *Shuf = Builder.CreateShuffleVector( 4608 TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf"); 4609 TmpVec = Builder.CreateBinOp(ReductionOpcode, TmpVec, Shuf, "bin.rdx"); 4610 } 4611 propagateIRFlags(TmpVec, RedOps); 4612 } 4613 4614 // The result is in the first element of the vector. 
4615 return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 4616 } 4617 }; 4618 } // end anonymous namespace 4619 4620 /// \brief Recognize construction of vectors like 4621 /// %ra = insertelement <4 x float> undef, float %s0, i32 0 4622 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 4623 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 4624 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 4625 /// 4626 /// Returns true if it matches 4627 /// 4628 static bool findBuildVector(InsertElementInst *FirstInsertElem, 4629 SmallVectorImpl<Value *> &BuildVector, 4630 SmallVectorImpl<Value *> &BuildVectorOpds) { 4631 if (!isa<UndefValue>(FirstInsertElem->getOperand(0))) 4632 return false; 4633 4634 InsertElementInst *IE = FirstInsertElem; 4635 while (true) { 4636 BuildVector.push_back(IE); 4637 BuildVectorOpds.push_back(IE->getOperand(1)); 4638 4639 if (IE->use_empty()) 4640 return false; 4641 4642 InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back()); 4643 if (!NextUse) 4644 return true; 4645 4646 // If this isn't the final use, make sure the next insertelement is the only 4647 // use. It's OK if the final constructed vector is used multiple times 4648 if (!IE->hasOneUse()) 4649 return false; 4650 4651 IE = NextUse; 4652 } 4653 4654 return false; 4655 } 4656 4657 /// \brief Like findBuildVector, but looks backwards for construction of aggregate. 4658 /// 4659 /// \return true if it matches. 4660 static bool findBuildAggregate(InsertValueInst *IV, 4661 SmallVectorImpl<Value *> &BuildVector, 4662 SmallVectorImpl<Value *> &BuildVectorOpds) { 4663 Value *V; 4664 do { 4665 BuildVector.push_back(IV); 4666 BuildVectorOpds.push_back(IV->getInsertedValueOperand()); 4667 V = IV->getAggregateOperand(); 4668 if (isa<UndefValue>(V)) 4669 break; 4670 IV = dyn_cast<InsertValueInst>(V); 4671 if (!IV || !IV->hasOneUse()) 4672 return false; 4673 } while (true); 4674 std::reverse(BuildVector.begin(), BuildVector.end()); 4675 std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end()); 4676 return true; 4677 } 4678 4679 static bool PhiTypeSorterFunc(Value *V, Value *V2) { 4680 return V->getType() < V2->getType(); 4681 } 4682 4683 /// \brief Try and get a reduction value from a phi node. 4684 /// 4685 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 4686 /// if they come from either \p ParentBB or a containing loop latch. 4687 /// 4688 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 4689 /// if not possible. 4690 static Value *getReductionValue(const DominatorTree *DT, PHINode *P, 4691 BasicBlock *ParentBB, LoopInfo *LI) { 4692 // There are situations where the reduction value is not dominated by the 4693 // reduction phi. Vectorizing such cases has been reported to cause 4694 // miscompiles. See PR25787. 4695 auto DominatedReduxValue = [&](Value *R) { 4696 return ( 4697 dyn_cast<Instruction>(R) && 4698 DT->dominates(P->getParent(), dyn_cast<Instruction>(R)->getParent())); 4699 }; 4700 4701 Value *Rdx = nullptr; 4702 4703 // Return the incoming value if it comes from the same BB as the phi node. 4704 if (P->getIncomingBlock(0) == ParentBB) { 4705 Rdx = P->getIncomingValue(0); 4706 } else if (P->getIncomingBlock(1) == ParentBB) { 4707 Rdx = P->getIncomingValue(1); 4708 } 4709 4710 if (Rdx && DominatedReduxValue(Rdx)) 4711 return Rdx; 4712 4713 // Otherwise, check whether we have a loop latch to look at. 
4714 Loop *BBL = LI->getLoopFor(ParentBB);
4715 if (!BBL)
4716 return nullptr;
4717 BasicBlock *BBLatch = BBL->getLoopLatch();
4718 if (!BBLatch)
4719 return nullptr;
4720
4721 // There is a loop latch, return the incoming value if it comes from
4722 // that. This reduction pattern occasionally turns up.
4723 if (P->getIncomingBlock(0) == BBLatch) {
4724 Rdx = P->getIncomingValue(0);
4725 } else if (P->getIncomingBlock(1) == BBLatch) {
4726 Rdx = P->getIncomingValue(1);
4727 }
4728
4729 if (Rdx && DominatedReduxValue(Rdx))
4730 return Rdx;
4731
4732 return nullptr;
4733 }
4734
4735 namespace {
4736 /// Tracks an instruction and its children.
4737 class WeakVHWithLevel final : public CallbackVH {
4738 /// Operand index of the instruction currently being analyzed.
4739 unsigned Level = 0;
4740 /// Is this the instruction that should be vectorized, or are we now
4741 /// processing children (i.e. operands of this instruction) for potential
4742 /// vectorization?
4743 bool IsInitial = true;
4744
4745 public:
4746 explicit WeakVHWithLevel() = default;
4747 WeakVHWithLevel(Value *V) : CallbackVH(V) {}
4748 /// Restart the children analysis each time the value is replaced by a new instruction.
4749 void allUsesReplacedWith(Value *New) override {
4750 setValPtr(New);
4751 Level = 0;
4752 IsInitial = true;
4753 }
4754 /// Check if the instruction was not deleted during vectorization.
4755 bool isValid() const { return !getValPtr(); }
4756 /// Should the instruction itself be vectorized?
4757 bool isInitial() const { return IsInitial; }
4758 /// Try to vectorize children.
4759 void clearInitial() { IsInitial = false; }
4760 /// Are all children processed already?
4761 bool isFinal() const {
4762 assert(getValPtr() &&
4763 (isa<Instruction>(getValPtr()) &&
4764 cast<Instruction>(getValPtr())->getNumOperands() >= Level));
4765 return getValPtr() &&
4766 cast<Instruction>(getValPtr())->getNumOperands() == Level;
4767 }
4768 /// Get the next child operand.
4769 Value *nextOperand() {
4770 assert(getValPtr() && isa<Instruction>(getValPtr()) &&
4771 cast<Instruction>(getValPtr())->getNumOperands() > Level);
4772 return cast<Instruction>(getValPtr())->getOperand(Level++);
4773 }
4774 virtual ~WeakVHWithLevel() = default;
4775 };
4776 } // namespace
4777
4778 /// \brief Attempt to match and vectorize a horizontal reduction.
4779 /// If it is legal to match a horizontal reduction feeding
4780 /// the phi node P, rooted at the reduction operation Root in basic block BB,
4781 /// then check whether it can be done and do it.
4782 /// \returns true if a horizontal reduction was matched and reduced.
4783 /// \returns false if a horizontal reduction was not matched.
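/// The operands of Root are walked iteratively with an explicit stack bounded
/// by RecursionMaxDepth; binary operators that do not form a reduction are
/// handed to the Vectorize callback instead.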
static bool canBeVectorized(
    PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
    TargetTransformInfo *TTI,
    const function_ref<bool(BinaryOperator *, BoUpSLP &)> Vectorize) {
  if (!ShouldVectorizeHor)
    return false;

  if (!Root)
    return false;

  if (Root->getParent() != BB)
    return false;
  SmallVector<WeakVHWithLevel, 8> Stack(1, Root);
  SmallSet<Value *, 8> VisitedInstrs;
  bool Res = false;
  while (!Stack.empty()) {
    Value *V = Stack.back();
    if (!V) {
      Stack.pop_back();
      continue;
    }
    auto *Inst = dyn_cast<Instruction>(V);
    if (!Inst || isa<PHINode>(Inst)) {
      Stack.pop_back();
      continue;
    }
    if (Stack.back().isInitial()) {
      Stack.back().clearInitial();
      if (auto *BI = dyn_cast<BinaryOperator>(Inst)) {
        HorizontalReduction HorRdx;
        if (HorRdx.matchAssociativeReduction(P, BI)) {
          if (HorRdx.tryToReduce(R, TTI)) {
            Res = true;
            P = nullptr;
            continue;
          }
        }
        if (P) {
          Inst = dyn_cast<Instruction>(BI->getOperand(0));
          if (Inst == P)
            Inst = dyn_cast<Instruction>(BI->getOperand(1));
          if (!Inst) {
            P = nullptr;
            continue;
          }
        }
      }
      P = nullptr;
      if (Vectorize(dyn_cast<BinaryOperator>(Inst), R)) {
        Res = true;
        continue;
      }
    }
    if (Stack.back().isFinal()) {
      Stack.pop_back();
      continue;
    }

    if (auto *NextV = dyn_cast<Instruction>(Stack.back().nextOperand()))
      if (NextV->getParent() == BB && VisitedInstrs.insert(NextV).second &&
          Stack.size() < RecursionMaxDepth)
        Stack.push_back(NextV);
  }
  return Res;
}

bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
                                                 BasicBlock *BB, BoUpSLP &R,
                                                 TargetTransformInfo *TTI) {
  if (!V)
    return false;
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  if (!isa<BinaryOperator>(I))
    P = nullptr;
  // Try to match and vectorize a horizontal reduction.
  return canBeVectorized(P, I, BB, R, TTI,
                         [this](BinaryOperator *BI, BoUpSLP &R) -> bool {
                           return tryToVectorize(BI, R);
                         });
}

bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallSet<Value *, 16> VisitedInstrs;

  bool HaveVectorizedPhiNodes = true;
  while (HaveVectorizedPhiNodes) {
    HaveVectorizedPhiNodes = false;

    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (Instruction &I : *BB) {
      PHINode *P = dyn_cast<PHINode>(&I);
      if (!P)
        break;

      if (!VisitedInstrs.count(P))
        Incoming.push_back(P);
    }

    // Sort by type.
    std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);

    // Try to vectorize elements based on their type.
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
                                           E = Incoming.end();
         IncIt != E;) {

      // Look for the next elements with the same type.
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
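      // At this point [IncIt, SameTypeIt) is a maximal run of PHIs with the
      // same type (for example, several i32 PHIs that could form a <4 x i32>
      // bundle); the whole run is handed to tryToVectorizeList below.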
      unsigned NumElts = (SameTypeIt - IncIt);
      DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts
                   << ")\n");
      // The order in which the phi nodes appear in the program does not
      // matter. So allow tryToVectorizeList to reorder them if it is
      // beneficial. This is done when there are exactly two elements since
      // tryToVectorizeList asserts that there are only two values when
      // AllowReorder is true.
      bool AllowReorder = NumElts == 2;
      if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R,
                                            None, AllowReorder)) {
        // Success, start over because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
    // We may go through BB multiple times so skip the ones we have checked.
    if (!VisitedInstrs.insert(&*it).second)
      continue;

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;

      // Try to match and vectorize a horizontal reduction.
      if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
                                   TTI)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
      continue;
    }

    if (ShouldStartVectorizeHorAtStore) {
      if (StoreInst *SI = dyn_cast<StoreInst>(it)) {
        // Try to match and vectorize a horizontal reduction.
        if (vectorizeRootInstruction(nullptr, SI->getValueOperand(), BB, R,
                                     TTI)) {
          Changed = true;
          it = BB->begin();
          e = BB->end();
          continue;
        }
      }
    }

    // Try to vectorize horizontal reductions feeding into a return.
    if (ReturnInst *RI = dyn_cast<ReturnInst>(it)) {
      if (RI->getNumOperands() != 0) {
        // Try to match and vectorize a horizontal reduction.
        if (vectorizeRootInstruction(nullptr, RI->getOperand(0), BB, R, TTI)) {
          Changed = true;
          it = BB->begin();
          e = BB->end();
          continue;
        }
      }
    }

    // Try to vectorize trees that start at compare instructions.
    if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
      if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
        Changed = true;
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        it = BB->begin();
        e = BB->end();
        continue;
      }

      for (int I = 0; I < 2; ++I) {
        if (vectorizeRootInstruction(nullptr, CI->getOperand(I), BB, R, TTI)) {
          Changed = true;
          // We would like to start over since some instructions are deleted
          // and the iterator may become invalid.
          it = BB->begin();
          e = BB->end();
          break;
        }
      }
      continue;
    }

    // Try to vectorize trees that start at insertelement instructions.
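    // Illustrative shape of the pattern handled here (see findBuildVector):
    //   %v0 = insertelement <4 x float> undef, float %a, i32 0
    //   %v1 = insertelement <4 x float> %v0, float %b, i32 1
    //   ...
    // The scalar operands %a, %b, ... become the candidate bundle.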
    if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) {
      SmallVector<Value *, 16> BuildVector;
      SmallVector<Value *, 16> BuildVectorOpds;
      if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds))
        continue;

      // Vectorize starting with the build vector operands ignoring the
      // BuildVector instructions for the purpose of scheduling and user
      // extraction.
      if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }

      continue;
    }

    // Try to vectorize trees that start at insertvalue instructions feeding
    // into a store.
    if (StoreInst *SI = dyn_cast<StoreInst>(it)) {
      if (InsertValueInst *LastInsertValue =
              dyn_cast<InsertValueInst>(SI->getValueOperand())) {
        const DataLayout &DL = BB->getModule()->getDataLayout();
        if (R.canMapToVector(SI->getValueOperand()->getType(), DL)) {
          SmallVector<Value *, 16> BuildVector;
          SmallVector<Value *, 16> BuildVectorOpds;
          if (!findBuildAggregate(LastInsertValue, BuildVector,
                                  BuildVectorOpds))
            continue;

          DEBUG(dbgs() << "SLP: store of array mappable to vector: " << *SI
                       << "\n");
          if (tryToVectorizeList(BuildVectorOpds, R, BuildVector, false)) {
            Changed = true;
            it = BB->begin();
            e = BB->end();
          }
          continue;
        }
      }
    }
  }

  return Changed;
}

bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
  auto Changed = false;
  for (auto &Entry : GEPs) {

    // If the getelementptr list has fewer than two elements, there's nothing
    // to do.
    if (Entry.second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
                 << Entry.second.size() << ".\n");

    // We process the getelementptr list in chunks of 16 (like we do for
    // stores) to minimize compile-time.
    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += 16) {
      auto Len = std::min<unsigned>(BE - BI, 16);
      auto GEPList = makeArrayRef(&Entry.second[BI], Len);

      // Initialize a set of candidate getelementptrs. Note that we use a
      // SetVector here to preserve program order. If the index computations
      // are vectorizable and begin with loads, we want to minimize the chance
      // of having to reorder them later.
      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

      // Some of the candidates may have already been vectorized after we
      // initially collected them. If so, the WeakVHs will have nullified the
      // values, so remove them from the set of candidates.
      Candidates.remove(nullptr);

      // Remove from the set of candidates all pairs of getelementptrs with
      // constant differences. Such getelementptrs are likely not good
      // candidates for vectorization in a bottom-up phase since one can be
      // computed from the other. We also ensure all candidate getelementptr
      // indices are unique.
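      //
      // For example (illustrative only), given
      //   %gep1 = getelementptr inbounds i32, i32* %ptr, i64 %i
      //   %gep2 = getelementptr inbounds i32, i32* %ptr, i64 %j
      // with %j == %i + 1, the SCEV difference of the two addresses is a
      // constant, so the pair is dropped from Candidates below.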
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
        auto *GEPI = cast<GetElementPtrInst>(GEPList[I]);
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = cast<GetElementPtrInst>(GEPList[J]);
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPList[I]);
            Candidates.remove(GEPList[J]);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPList[J]);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;

      // Add the single, non-constant index of each candidate to the bundle. We
      // ensured the indices met these constraints when we originally collected
      // the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try and vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e;
       ++it) {
    if (it->second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                 << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
    // TODO: The limit of 16 inhibits greater vectorization factors.
    //       For example, AVX2 supports v32i8. Increasing this limit, however,
    //       may cause a significant compile-time increase.
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len), R);
    }
  }
  return Changed;
}

char SLPVectorizer::ID = 0;
static const char lv_name[] = "SLP Vectorizer";
INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

namespace llvm {
Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
} // namespace llvm
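
// A minimal usage sketch (illustrative only; the function name addSLP is made
// up and not part of this file): clients driving the legacy pass manager can
// schedule the pass through the factory above:
//
//   #include "llvm/IR/LegacyPassManager.h"
//   #include "llvm/Transforms/Vectorize.h"
//
//   void addSLP(llvm::legacy::PassManagerBase &PM) {
//     PM.add(llvm::createSLPVectorizerPass());
//   }
//
// From the command line, opt exposes the same pass as -slp-vectorizer
// (SV_NAME above), tunable via flags such as -slp-threshold.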