//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable
// tree was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <memory>

using namespace llvm;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number"));

static cl::opt<bool>
    ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                       cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
    MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
                           cl::desc("Attempt to vectorize for this register "
                                    "size in bits"));
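
// Usage sketch (illustrative, not normative): with the legacy pass manager
// the pass and the knobs declared in this file can be exercised from 'opt',
// e.g.
//   opt -slp-vectorizer -slp-threshold=-5 -slp-max-reg-size=256 -S in.ll
// A negative threshold makes the pass more aggressive, since a tree is only
// vectorized when its computed cost is smaller than -SLPCostThreshold.
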
/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
    ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000),
                             cl::Hidden,
                             cl::desc("Limit the size of the SLP scheduling "
                                      "region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// \brief Predicate for the element types that the SLP vectorizer supports.
///
/// The most important things to filter here are types which are invalid in
/// LLVM vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_fp128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return false;

    if (BB != I->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (Value *i : VL)
    if (!isa<Constant>(i))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns the opcode that can be clubbed with \p Op to create an alternate
/// sequence which can later be merged as a ShuffleVector instruction.
static unsigned getAltOpcode(unsigned Op) {
  switch (Op) {
  case Instruction::FAdd:
    return Instruction::FSub;
  case Instruction::FSub:
    return Instruction::FAdd;
  case Instruction::Add:
    return Instruction::Sub;
  case Instruction::Sub:
    return Instruction::Add;
  default:
    return 0;
  }
}
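
// For illustration, getAltOpcode() and isAltInst() (below) accept a bundle
// whose opcodes alternate, e.g. the IR sequence
//   %a0 = fadd float %x0, %y0
//   %a1 = fsub float %x1, %y1
//   %a2 = fadd float %x2, %y2
//   %a3 = fsub float %x3, %y3
// which can be emitted as one vector fadd, one vector fsub, and a
// shufflevector (e.g. mask <0, 5, 2, 7>) blending the even and odd lanes.
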
/// \returns true if \p Value is odd, false otherwise.
static bool isOdd(unsigned Value) {
  return Value & 1;
}

/// \returns true if Opcode \p Op can be part of an alternate sequence which
/// can later be merged as a ShuffleVector instruction.
static bool canCombineAsAltInst(unsigned Op) {
  return Op == Instruction::FAdd || Op == Instruction::FSub ||
         Op == Instruction::Sub || Op == Instruction::Add;
}

/// \returns Instruction::ShuffleVector if the instructions in \p VL form an
/// alternating fadd/fsub, fsub/fadd, add/sub, or sub/add sequence (i.e.
/// opcodes of the form fadd, fsub, fadd, fsub, ...), and 0 otherwise.
static unsigned isAltInst(ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  unsigned Opcode = I0->getOpcode();
  unsigned AltOpcode = getAltOpcode(Opcode);
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || I->getOpcode() != (isOdd(i) ? AltOpcode : Opcode))
      return 0;
  }
  return Instruction::ShuffleVector;
}

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode()) {
      if (canCombineAsAltInst(Opcode) && i == 1)
        return isAltInst(VL);
      return 0;
    }
  }
  return Opcode;
}

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if the Extract{Value,Element} instruction extracts element
/// \p Idx.
static bool matchExtractIndex(Instruction *E, unsigned Idx, unsigned Opcode) {
  assert(Opcode == Instruction::ExtractElement ||
         Opcode == Instruction::ExtractValue);
  if (Opcode == Instruction::ExtractElement) {
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    return CI && CI->getZExtValue() == Idx;
  } else {
    ExtractValueInst *EI = cast<ExtractValueInst>(E);
    return EI->getNumIndices() == 1 && *EI->idx_begin() == Idx;
  }
}

/// \returns True if an in-tree use also needs an extract. This refers to a
/// possible scalar operand in a vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    if (hasVectorInstrinsicScalarOpd(ID, 1)) {
      return (CI->getArgOperand(1) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}
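
// Example for the scalar-operand case above (illustrative): in
//   %r = call float @llvm.powi.f32(float %x, i32 %n)
// the exponent %n must stay scalar in a vectorized call, so if %n is itself
// an in-tree vectorized scalar it has to be re-extracted from the vector.
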
/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

namespace llvm {
namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;
  typedef MapVector<Value *, SmallVector<Instruction *, 2>>
      ExtraValueToDebugLocsMap;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : NumLoadsWantToKeepOrder(0), NumLoadsWantToChangeOrder(0), F(Func),
        SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC), DB(DB),
        DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize = TTI->getRegisterBitWidth(true);

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// \brief Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractelement instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  int getSpillCost();

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);
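
  // A typical driving sequence (sketch only; it mirrors how the rest of this
  // file uses the class, and is not itself part of the API):
  //   R.buildTree(Ops);
  //   if (R.isTreeTinyAndNotFullyVectorizable()) ... // give up
  //   R.computeMinimumValueSizes();
  //   if (R.getTreeCost() < -SLPCostThreshold)
  //     R.vectorizeTree();
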
  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst, taking
  /// into account (and updating, if required) the list of externally used
  /// values stored in \p ExternallyUsedValues.
  void buildTree(ArrayRef<Value *> Roots,
                 ExtraValueToDebugLocsMap &ExternallyUsedValues,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumLoadsWantToKeepOrder = 0;
    NumLoadsWantToChangeOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// \returns true if it is beneficial to reverse the vector order.
  bool shouldReorder() const {
    return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
  }

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  /// \brief Check if ArrayType or StructType is isomorphic to some VectorType.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable();

  OptimizationRemarkEmitter *getORE() { return ORE; }

private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, int);

  /// \returns True if the ExtractElement/ExtractValue instructions in VL can
  /// be vectorized to use the original vector (or aggregate "bitcast" to a
  /// vector).
  bool canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const;

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This can happen in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL, Value *OpValue) const;

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);
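
  // For example (illustrative): gathering four scalars into a <4 x i32> is
  // modeled by getGatherCost as the sum of four insertelement costs as
  // reported by TTI.
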
  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even if the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  /// \brief Reorder commutative operands in an alt shuffle if they result in
  /// vectorized code.
  void reorderAltShuffleOperands(ArrayRef<Value *> VL,
                                 SmallVectorImpl<Value *> &Left,
                                 SmallVectorImpl<Value *> &Right);

  /// \brief Reorder commutative operands to get a better probability of
  /// generating vectorized code.
  void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                      SmallVectorImpl<Value *> &Left,
                                      SmallVectorImpl<Value *> &Right);

  struct TreeEntry {
    TreeEntry(std::vector<TreeEntry> &Container)
        : Scalars(), VectorizedValue(nullptr), NeedToGather(0),
          Container(Container) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// Do we need to gather this sequence?
    bool NeedToGather;

    /// Points back to the VectorizableTree.
    ///
    /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has
    /// to be a pointer and needs to be able to initialize the child iterator.
    /// Thus we need a reference back to the container to translate the indices
    /// to entries.
    std::vector<TreeEntry> &Container;

    /// The TreeEntry index containing the user of this entry. We can actually
    /// have multiple users so the data structure is not truly a tree.
    SmallVector<int, 1> UserTreeIndices;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized,
                          int &UserTreeIdx) {
    VectorizableTree.emplace_back(VectorizableTree);
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!getTreeEntry(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      MustGather.insert(VL.begin(), VL.end());
    }

    if (UserTreeIdx >= 0)
      Last->UserTreeIndices.push_back(UserTreeIdx);
    UserTreeIdx = idx;
    return Last;
  }
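
  // Shape of the tree, by example (illustrative): for a root bundle of two
  // consecutive stores {st0, st1} whose stored values are {a0 + b0, a1 + b1},
  // entry 0 holds {st0, st1}, entry 1 holds the {add, add} bundle with
  // UserTreeIndices = {0}, and the operands of the adds form further entries
  // (or gathers) below that.
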
  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  TreeEntry *getTreeEntry(Value *V) {
    auto I = ScalarToTreeEntry.find(V);
    if (I != ScalarToTreeEntry.end())
      return &VectorizableTree[I->second];
    return nullptr;
  }

  const TreeEntry *getTreeEntry(Value *V) const {
    auto I = ScalarToTreeEntry.find(V);
    if (I != ScalarToTreeEntry.end())
      return &VectorizableTree[I->second];
    return nullptr;
  }

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value *, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L)
        : Scalar(S), User(U), Lane(L) {}

    // Which scalar in our function.
    Value *Scalar;

    // The user that uses the scalar.
    llvm::User *User;

    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// Checks if two instructions may access the same memory.
  ///
  /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
  /// is invariant in the calling loop.
  bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
                 Instruction *Inst2) {
    // First check if the result is already in the cache.
    AliasCacheKey key = std::make_pair(Inst1, Inst2);
    Optional<bool> &result = AliasCache[key];
    if (result.hasValue()) {
      return result.getValue();
    }
    MemoryLocation Loc2 = getLocation(Inst2, AA);
    bool aliased = true;
    if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
      // Do the alias check.
      aliased = AA->alias(Loc1, Loc2);
    }
    // Store the result in the cache.
    result = aliased;
    return aliased;
  }

  typedef std::pair<Instruction *, Instruction *> AliasCacheKey;

  /// Cache for alias results.
  /// TODO: consider moving this to the AliasAnalysis itself.
  DenseMap<AliasCacheKey, Optional<bool>> AliasCache;

  /// Removes an instruction from its block and eventually deletes it.
  /// It's like Instruction::eraseFromParent() except that the actual deletion
  /// is delayed until BoUpSLP is destructed.
  /// This is required to ensure that there are no incorrect collisions in the
  /// AliasCache, which can happen if a new instruction is allocated at the
  /// same address as a previously deleted instruction.
  void eraseInstruction(Instruction *I) {
    I->removeFromParent();
    I->dropAllReferences();
    DeletedInstructions.emplace_back(I);
  }

  /// Temporary store for deleted instructions. Instructions will be deleted
  /// eventually when the BoUpSLP is destructed.
  SmallVector<unique_value, 8> DeletedInstructions;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User). External User
  /// can be nullptr, it means that this Internal Scalar will be used later,
  /// after vectorization.
  UserList ExternalUses;

  /// Values used only by @llvm.assume calls.
  SmallPtrSet<const Value *, 32> EphValues;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;

  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;
  /// Contains all scheduling relevant data for an instruction.
  /// A ScheduleData either represents a single instruction or a member of an
  /// instruction bundle (= a group of instructions which is combined into a
  /// vector instruction).
  struct ScheduleData {
    // The initial value for the dependency counters. It means that the
    // dependencies are not calculated yet.
    enum { InvalidDeps = -1 };

    ScheduleData()
        : Inst(nullptr), FirstInBundle(nullptr), NextInBundle(nullptr),
          NextLoadStore(nullptr), SchedulingRegionID(0), SchedulingPriority(0),
          Dependencies(InvalidDeps), UnscheduledDeps(InvalidDeps),
          UnscheduledDepsInBundle(InvalidDeps), IsScheduled(false) {}

    void init(int BlockSchedulingRegionID) {
      FirstInBundle = this;
      NextInBundle = nullptr;
      NextLoadStore = nullptr;
      IsScheduled = false;
      SchedulingRegionID = BlockSchedulingRegionID;
      UnscheduledDepsInBundle = UnscheduledDeps;
      clearDependencies();
    }

    /// Returns true if the dependency information has been calculated.
    bool hasValidDependencies() const { return Dependencies != InvalidDeps; }

    /// Returns true for single instructions and for bundle representatives
    /// (= the head of a bundle).
    bool isSchedulingEntity() const { return FirstInBundle == this; }

    /// Returns true if it represents an instruction bundle and not only a
    /// single instruction.
    bool isPartOfBundle() const {
      return NextInBundle != nullptr || FirstInBundle != this;
    }

    /// Returns true if it is ready for scheduling, i.e. it has no more
    /// unscheduled depending instructions/bundles.
    bool isReady() const {
      assert(isSchedulingEntity() &&
             "can't consider non-scheduling entity for ready list");
      return UnscheduledDepsInBundle == 0 && !IsScheduled;
    }

    /// Modifies the number of unscheduled dependencies, also updating it for
    /// the whole bundle.
    int incrementUnscheduledDeps(int Incr) {
      UnscheduledDeps += Incr;
      return FirstInBundle->UnscheduledDepsInBundle += Incr;
    }

    /// Sets the number of unscheduled dependencies to the number of
    /// dependencies.
    void resetUnscheduledDeps() {
      incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
    }

    /// Clears all dependency information.
    void clearDependencies() {
      Dependencies = InvalidDeps;
      resetUnscheduledDeps();
      MemoryDependencies.clear();
    }

    void dump(raw_ostream &os) const {
      if (!isSchedulingEntity()) {
        os << "/ " << *Inst;
      } else if (NextInBundle) {
        os << '[' << *Inst;
        ScheduleData *SD = NextInBundle;
        while (SD) {
          os << ';' << *SD->Inst;
          SD = SD->NextInBundle;
        }
        os << ']';
      } else {
        os << *Inst;
      }
    }

    Instruction *Inst;

    /// Points to the head in an instruction bundle (and always to this for
    /// single instructions).
    ScheduleData *FirstInBundle;

    /// Singly linked list of all instructions in a bundle. Null if it is a
    /// single instruction.
    ScheduleData *NextInBundle;

    /// Singly linked list of all memory instructions (e.g. load, store, call)
    /// in the block - until the end of the scheduling region.
    ScheduleData *NextLoadStore;

    /// The dependent memory instructions.
    /// This list is derived on demand in calculateDependencies().
    SmallVector<ScheduleData *, 4> MemoryDependencies;
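
    // Illustrative example: for an aliasing pair such as
    //   store i32 %x, i32* %p
    //   %v = load i32, i32* %p
    // calculateDependencies() links the two ScheduleData nodes through this
    // list, so the scheduler can never reorder the two accesses.
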
    /// This ScheduleData is in the current scheduling region if this matches
    /// the current SchedulingRegionID of BlockScheduling.
    int SchedulingRegionID;

    /// Used for getting a "good" final ordering of instructions.
    int SchedulingPriority;

    /// The number of dependencies. Consists of the number of users of the
    /// instruction plus the number of dependent memory instructions (if any).
    /// This value is calculated on demand.
    /// If InvalidDeps, the number of dependencies is not calculated yet.
    int Dependencies;

    /// The number of dependencies minus the number of dependencies of
    /// scheduled instructions. As soon as this is zero, the
    /// instruction/bundle gets ready for scheduling.
    /// Note that this is negative as long as Dependencies is not calculated.
    int UnscheduledDeps;

    /// The sum of UnscheduledDeps in a bundle. Equal to UnscheduledDeps for
    /// single instructions.
    int UnscheduledDepsInBundle;

    /// True if this instruction is scheduled (or considered as scheduled in
    /// the dry-run).
    bool IsScheduled;
  };

#ifndef NDEBUG
  friend inline raw_ostream &operator<<(raw_ostream &os,
                                        const BoUpSLP::ScheduleData &SD) {
    SD.dump(os);
    return os;
  }
#endif

  friend struct GraphTraits<BoUpSLP *>;
  friend struct DOTGraphTraits<BoUpSLP *>;

  /// Contains all scheduling data for a basic block.
  struct BlockScheduling {
    BlockScheduling(BasicBlock *BB)
        : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize),
          ScheduleStart(nullptr), ScheduleEnd(nullptr),
          FirstLoadStoreInRegion(nullptr), LastLoadStoreInRegion(nullptr),
          ScheduleRegionSize(0),
          ScheduleRegionSizeLimit(ScheduleRegionSizeBudget),
          // Make sure that the initial SchedulingRegionID is greater than the
          // initial SchedulingRegionID in ScheduleData (which is 0).
          SchedulingRegionID(1) {}

    void clear() {
      ReadyInsts.clear();
      ScheduleStart = nullptr;
      ScheduleEnd = nullptr;
      FirstLoadStoreInRegion = nullptr;
      LastLoadStoreInRegion = nullptr;

      // Reduce the maximum schedule region size by the size of the
      // previous scheduling run.
      ScheduleRegionSizeLimit -= ScheduleRegionSize;
      if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
        ScheduleRegionSizeLimit = MinScheduleRegionSize;
      ScheduleRegionSize = 0;

      // Make a new scheduling region, i.e. all existing ScheduleData is not
      // in the new region yet.
      ++SchedulingRegionID;
    }

    ScheduleData *getScheduleData(Value *V) {
      ScheduleData *SD = ScheduleDataMap[V];
      if (SD && SD->SchedulingRegionID == SchedulingRegionID)
        return SD;
      return nullptr;
    }

    bool isInSchedulingRegion(ScheduleData *SD) {
      return SD->SchedulingRegionID == SchedulingRegionID;
    }

    /// Marks an instruction as scheduled and puts all dependent ready
    /// instructions into the ready-list.
    template <typename ReadyListType>
    void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
      SD->IsScheduled = true;
      DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");

      ScheduleData *BundleMember = SD;
      while (BundleMember) {
        // Handle the def-use chain dependencies.
        for (Use &U : BundleMember->Inst->operands()) {
          ScheduleData *OpDef = getScheduleData(U.get());
          if (OpDef && OpDef->hasValidDependencies() &&
              OpDef->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = OpDef->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (def): " << *DepBundle << "\n");
          }
        }
        // Handle the memory dependencies.
        for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
          if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (mem): " << *DepBundle << "\n");
          }
        }
        BundleMember = BundleMember->NextInBundle;
      }
    }

    /// Put all instructions into the ReadyList which are ready for scheduling.
    template <typename ReadyListType>
    void initialFillReadyList(ReadyListType &ReadyList) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
        ScheduleData *SD = getScheduleData(I);
        if (SD->isSchedulingEntity() && SD->isReady()) {
          ReadyList.insert(SD);
          DEBUG(dbgs() << "SLP: initially in ready list: " << *I << "\n");
        }
      }
    }

    /// Checks if a bundle of instructions can be scheduled, i.e. has no
    /// cyclic dependencies. This is only a dry-run, no instructions are
    /// actually moved at this stage.
    bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, Value *OpValue);

    /// Un-bundles a group of instructions.
    void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);

    /// Extends the scheduling region so that V is inside the region.
    /// \returns true if the region size is within the limit.
    bool extendSchedulingRegion(Value *V);

    /// Initialize the ScheduleData structures for new instructions in the
    /// scheduling region.
    void initScheduleData(Instruction *FromI, Instruction *ToI,
                          ScheduleData *PrevLoadStore,
                          ScheduleData *NextLoadStore);

    /// Updates the dependency information of a bundle and of all
    /// instructions/bundles which depend on the original bundle.
    void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
                               BoUpSLP *SLP);

    /// Sets all instructions in the scheduling region to un-scheduled.
    void resetSchedule();

    BasicBlock *BB;

    /// Simple memory allocation for ScheduleData.
    std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;

    /// The size of a ScheduleData array in ScheduleDataChunks.
    int ChunkSize;

    /// The allocator position in the current chunk, which is the last entry
    /// of ScheduleDataChunks.
    int ChunkPos;

    /// Attaches ScheduleData to Instruction.
    /// Note that the mapping survives during all vectorization iterations,
    /// i.e. ScheduleData structures are recycled.
    DenseMap<Value *, ScheduleData *> ScheduleDataMap;

    struct ReadyList : SmallVector<ScheduleData *, 8> {
      void insert(ScheduleData *SD) { push_back(SD); }
    };

    /// The ready-list for scheduling (only used for the dry-run).
    ReadyList ReadyInsts;

    /// The first instruction of the scheduling region.
    Instruction *ScheduleStart;
    /// The first instruction _after_ the scheduling region.
    Instruction *ScheduleEnd;

    /// The first memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *FirstLoadStoreInRegion;

    /// The last memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *LastLoadStoreInRegion;

    /// The current size of the scheduling region.
    int ScheduleRegionSize;

    /// The maximum size allowed for the scheduling region.
    int ScheduleRegionSizeLimit;

    /// The ID of the scheduling region. For a new vectorization iteration this
    /// is incremented which "removes" all ScheduleData from the region.
    int SchedulingRegionID;
  };

  /// Attaches the BlockScheduling structures to basic blocks.
  MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;

  /// Performs the "real" scheduling. Done before vectorization is actually
  /// performed in a basic block.
  void scheduleBlock(BlockScheduling *BS);

  /// List of users to ignore during scheduling and that don't need extracting.
  ArrayRef<Value *> UserIgnoreList;

  // Number of load bundles that contain consecutive loads.
  int NumLoadsWantToKeepOrder;

  // Number of load bundles that contain consecutive loads in reversed order.
  int NumLoadsWantToChangeOrder;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  AssumptionCache *AC;
  DemandedBits *DB;
  const DataLayout *DL;
  OptimizationRemarkEmitter *ORE;

  unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
  unsigned MinVecRegSize; // Set by cl::opt or by TTI (default: 128).

  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;

  /// A map of scalar integer values to the smallest bit width with which they
  /// can legally be represented. The values map to (width, signed) pairs,
  /// where "width" indicates the minimum bit width and "signed" is True if the
  /// value must be signed-extended, rather than zero-extended, back to its
  /// original width.
  MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
};

} // end namespace slpvectorizer

template <> struct GraphTraits<BoUpSLP *> {
  typedef BoUpSLP::TreeEntry TreeEntry;

  /// NodeRef has to be a pointer per the GraphWriter.
  typedef TreeEntry *NodeRef;

  /// \brief Add the VectorizableTree to the index iterator to be able to
  /// return TreeEntry pointers.
  struct ChildIteratorType
      : public iterator_adaptor_base<ChildIteratorType,
                                     SmallVector<int, 1>::iterator> {
    std::vector<TreeEntry> &VectorizableTree;

    ChildIteratorType(SmallVector<int, 1>::iterator W,
                      std::vector<TreeEntry> &VT)
        : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}

    NodeRef operator*() { return &VectorizableTree[*I]; }
  };

  static NodeRef getEntryNode(BoUpSLP &R) { return &R.VectorizableTree[0]; }

  static ChildIteratorType child_begin(NodeRef N) {
    return {N->UserTreeIndices.begin(), N->Container};
  }

  static ChildIteratorType child_end(NodeRef N) {
    return {N->UserTreeIndices.end(), N->Container};
  }
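
  // Note (sketch): together with the DOTGraphTraits specialization below,
  // this lets the SLP tree be rendered with Graphviz; the -view-slp-tree
  // flag declared above is intended to trigger such a view of the graph.
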
  /// For the node iterator we just need to turn the TreeEntry iterator into a
  /// TreeEntry* iterator so that it dereferences to NodeRef.
  typedef pointer_iterator<std::vector<TreeEntry>::iterator> nodes_iterator;

  static nodes_iterator nodes_begin(BoUpSLP *R) {
    return nodes_iterator(R->VectorizableTree.begin());
  }

  static nodes_iterator nodes_end(BoUpSLP *R) {
    return nodes_iterator(R->VectorizableTree.end());
  }

  static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
};

template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
  typedef BoUpSLP::TreeEntry TreeEntry;

  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
    std::string Str;
    raw_string_ostream OS(Str);
    if (isSplat(Entry->Scalars)) {
      OS << "<splat> " << *Entry->Scalars[0];
      return Str;
    }
    for (auto V : Entry->Scalars) {
      OS << *V;
      if (std::any_of(
              R->ExternalUses.begin(), R->ExternalUses.end(),
              [&](const BoUpSLP::ExternalUser &EU) { return EU.Scalar == V; }))
        OS << " <extract>";
      OS << "\n";
    }
    return Str;
  }

  static std::string getNodeAttributes(const TreeEntry *Entry,
                                       const BoUpSLP *) {
    if (Entry->NeedToGather)
      return "color=red";
    return "";
  }
};

} // end namespace llvm

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ArrayRef<Value *> UserIgnoreLst) {
  ExtraValueToDebugLocsMap ExternallyUsedValues;
  buildTree(Roots, ExternallyUsedValues, UserIgnoreLst);
}

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ExtraValueToDebugLocsMap &ExternallyUsedValues,
                        ArrayRef<Value *> UserIgnoreLst) {
  deleteTree();
  UserIgnoreList = UserIgnoreLst;
  if (!allSameType(Roots))
    return;
  buildTree_rec(Roots, 0, -1);

  // Collect the values that we need to extract from the tree.
  for (TreeEntry &EIdx : VectorizableTree) {
    TreeEntry *Entry = &EIdx;

    // No need to handle users of gathered values.
    if (Entry->NeedToGather)
      continue;

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // Check if the scalar is externally used as an extra arg.
      auto ExtI = ExternallyUsedValues.find(Scalar);
      if (ExtI != ExternallyUsedValues.end()) {
        DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.emplace_back(Scalar, nullptr, Lane);
        continue;
      }
      for (User *U : Scalar->users()) {
        DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        Instruction *UserInst = dyn_cast<Instruction>(U);
        if (!UserInst)
          continue;

        // Skip in-tree scalars that become vectors.
        if (TreeEntry *UseEntry = getTreeEntry(U)) {
          Value *UseScalar = UseEntry->Scalars[0];
          // Some in-tree scalars will remain as scalar in vectorized
          // instructions. If that is the case, the one in Lane 0 will
          // be used.
          if (UseScalar != U ||
              !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
            DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
                         << ".\n");
            assert(!UseEntry->NeedToGather && "Bad state");
            continue;
          }
        }
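
        // (Illustrative) The ignore list typically holds, e.g., the reduction
        // operations of a horizontal reduction: they use the vectorized
        // scalars but are rewritten by the caller itself, so no extract needs
        // to be generated for them.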
        // Ignore users in the user ignore list.
        if (is_contained(UserIgnoreList, UserInst))
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
      }
    }
  }
}

void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
                            int UserTreeIdx) {
  bool isAltShuffle = false;
  assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }

  unsigned Opcode = getSameOpcode(VL);

  // Check that this shuffle vector refers to the alternate
  // sequence of opcodes.
  if (Opcode == Instruction::ShuffleVector) {
    Instruction *I0 = dyn_cast<Instruction>(VL[0]);
    unsigned Op = I0->getOpcode();
    if (Op != Instruction::ShuffleVector)
      isAltShuffle = true;
  }

  // If all of the operands are identical or constant we have a simple
  // solution.
  if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !Opcode) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Don't vectorize ephemeral values.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (EphValues.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is ephemeral.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }

  // Check if this is a duplicate of another entry.
  if (TreeEntry *E = getTreeEntry(VL[0])) {
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }
    }
    // Record the reuse of the tree node. FIXME, currently this is only used
    // to properly draw the graph rather than for the actual vectorization.
    E->UserTreeIndices.push_back(UserTreeIdx);
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is already in tree.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }
  // If any of the scalars is marked as a value that needs to stay scalar, then
  // we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  BasicBlock *BB = VL0->getParent();

  if (!DT->isReachableFromEntry(BB)) {
    // Don't go into unreachable blocks. They may contain instructions with
    // dependency cycles which confuse the final scheduling.
    DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  // Check that every instruction appears once in this bundle.
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    for (unsigned j = i + 1; j < e; ++j)
      if (VL[i] == VL[j]) {
        DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }

  auto &BSRef = BlocksSchedules[BB];
  if (!BSRef) {
    BSRef = llvm::make_unique<BlockScheduling>(BB);
  }
  BlockScheduling &BS = *BSRef.get();

  if (!BS.tryScheduleBundle(VL, this, VL0)) {
    DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
    assert((!BS.getScheduleData(VL[0]) ||
            !BS.getScheduleData(VL[0])->isPartOfBundle()) &&
           "tryScheduleBundle should cancelScheduling on failure");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }
  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = dyn_cast<PHINode>(VL0);

    // Check for terminator values (e.g. invoke).
    for (unsigned j = 0; j < VL.size(); ++j)
      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        TerminatorInst *Term = dyn_cast<TerminatorInst>(
            cast<PHINode>(VL[j])->getIncomingValueForBlock(
                PH->getIncomingBlock(i)));
        if (Term) {
          DEBUG(dbgs()
                << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx);
          return;
        }
      }

    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (Value *j : VL)
        Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock(
            PH->getIncomingBlock(i)));

      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    }
    return;
  }
  case Instruction::ExtractValue:
  case Instruction::ExtractElement: {
    bool Reuse = canReuseExtract(VL, Opcode);
    if (Reuse) {
      DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
    } else {
      BS.cancelScheduling(VL, VL0);
    }
    newTreeEntry(VL, Reuse, UserTreeIdx);
    return;
  }
  case Instruction::Load: {
    // Check that a vectorized load would load the same memory as a scalar
    // load. For example, we don't want to vectorize loads that are smaller
    // than 8 bits. Even though we have a packed struct {<i2, i2, i2, i2>},
    // LLVM treats loading/storing it as an i8 struct. If we vectorize
    // loads/stores from such a struct, we read/write packed bits disagreeing
    // with the unvectorized version.
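    // Illustrative instance of the problem: for "%v = load i2, i2* %p" the
    // type size is 2 bits but the alloc size is 8 bits, so four scalar loads
    // touch four separate bytes while a <4 x i2> load would read a single
    // packed byte.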
    Type *ScalarTy = VL[0]->getType();

    if (DL->getTypeSizeInBits(ScalarTy) !=
        DL->getTypeAllocSizeInBits(ScalarTy)) {
      BS.cancelScheduling(VL, VL0);
      newTreeEntry(VL, false, UserTreeIdx);
      DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
      return;
    }

    // Make sure all loads in the bundle are simple - we can't vectorize
    // atomic or volatile loads.
    for (unsigned i = 0, e = VL.size(); i < e; ++i) {
      LoadInst *L = cast<LoadInst>(VL[i]);
      if (!L->isSimple()) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
        return;
      }
    }

    // Check if the loads are consecutive, reversed, or neither.
    // TODO: What we really want is to sort the loads, but for now, check
    // the two likely directions.
    bool Consecutive = true;
    bool ReverseConsecutive = true;
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
      if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
        Consecutive = false;
        break;
      } else {
        ReverseConsecutive = false;
      }
    }

    if (Consecutive) {
      ++NumLoadsWantToKeepOrder;
      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a vector of loads.\n");
      return;
    }

    // If none of the load pairs were consecutive when checked in order,
    // check the reverse order.
    if (ReverseConsecutive)
      for (unsigned i = VL.size() - 1; i > 0; --i)
        if (!isConsecutiveAccess(VL[i], VL[i - 1], *DL, *SE)) {
          ReverseConsecutive = false;
          break;
        }

    BS.cancelScheduling(VL, VL0);
    newTreeEntry(VL, false, UserTreeIdx);

    if (ReverseConsecutive) {
      ++NumLoadsWantToChangeOrder;
      DEBUG(dbgs() << "SLP: Gathering reversed loads.\n");
    } else {
      DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
    }
    return;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();
    for (unsigned i = 0; i < VL.size(); ++i) {
      Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
      if (Ty != SrcTy || !isValidElementType(Ty)) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
        return;
      }
    }
    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a vector of casts.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (Value *j : VL)
        Operands.push_back(cast<Instruction>(j)->getOperand(i));

      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    }
    return;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Check that all of the compares have the same predicate.
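    // (Illustrative) A bundle such as
    //   %c0 = icmp slt i32 %a0, %b0
    //   %c1 = icmp slt i32 %a1, %b1
    // vectorizes to a single icmp slt on <2 x i32>, which is only valid when
    // every lane uses the same predicate and compared type.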
    CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
    Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
    for (unsigned i = 1, e = VL.size(); i < e; ++i) {
      CmpInst *Cmp = cast<CmpInst>(VL[i]);
      if (Cmp->getPredicate() != P0 ||
          Cmp->getOperand(0)->getType() != ComparedTy) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
        return;
      }
    }

    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a vector of compares.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (Value *j : VL)
        Operands.push_back(cast<Instruction>(j)->getOperand(i));

      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    }
    return;
  }
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a vector of bin op.\n");

    // Sort operands of the instructions so that each side is more likely to
    // have the same opcode.
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
      ValueList Left, Right;
      reorderInputsAccordingToOpcode(VL, Left, Right);
      buildTree_rec(Left, Depth + 1, UserTreeIdx);
      buildTree_rec(Right, Depth + 1, UserTreeIdx);
      return;
    }

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (Value *j : VL)
        Operands.push_back(cast<Instruction>(j)->getOperand(i));

      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    }
    return;
  }
  case Instruction::GetElementPtr: {
    // We don't combine GEPs with complicated (nested) indexing.
    for (unsigned j = 0; j < VL.size(); ++j) {
      if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
        DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }
    }

    // We can't combine several GEPs into one vector if they operate on
    // different types.
    Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
    for (unsigned j = 0; j < VL.size(); ++j) {
      Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
      if (Ty0 != CurTy) {
        DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }
    }

    // We don't combine GEPs with non-constant indexes.
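    // (Illustrative) After the checks above and below, a vectorizable bundle
    // looks like
    //   %g0 = getelementptr inbounds i32, i32* %b0, i64 1
    //   %g1 = getelementptr inbounds i32, i32* %b1, i64 1
    // i.e. two-operand GEPs over a single element type with constant indices.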
    for (unsigned j = 0; j < VL.size(); ++j) {
      auto Op = cast<Instruction>(VL[j])->getOperand(1);
      if (!isa<ConstantInt>(Op)) {
        DEBUG(
            dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }
    }

    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
    for (unsigned i = 0, e = 2; i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (Value *j : VL)
        Operands.push_back(cast<Instruction>(j)->getOperand(i));

      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    }
    return;
  }
  case Instruction::Store: {
    // Check if the stores are consecutive or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
      if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
        return;
      }

    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a vector of stores.\n");

    ValueList Operands;
    for (Value *j : VL)
      Operands.push_back(cast<Instruction>(j)->getOperand(0));

    buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    return;
  }
  case Instruction::Call: {
    // Check if the calls are all to the same vectorizable intrinsic.
    CallInst *CI = cast<CallInst>(VL[0]);
    // Check if this is an Intrinsic call or something that can be
    // represented by an intrinsic call.
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    if (!isTriviallyVectorizable(ID)) {
      BS.cancelScheduling(VL, VL0);
      newTreeEntry(VL, false, UserTreeIdx);
      DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
      return;
    }
    Function *Int = CI->getCalledFunction();
    Value *A1I = nullptr;
    if (hasVectorInstrinsicScalarOpd(ID, 1))
      A1I = CI->getArgOperand(1);
    for (unsigned i = 1, e = VL.size(); i != e; ++i) {
      CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
      if (!CI2 || CI2->getCalledFunction() != Int ||
          getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
          !CI->hasIdenticalOperandBundleSchema(*CI2)) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
                     << "\n");
        return;
      }
      // ctlz, cttz and powi are special intrinsics whose second argument
      // should be the same in order for them to be vectorized.
      if (hasVectorInstrinsicScalarOpd(ID, 1)) {
        Value *A1J = CI2->getArgOperand(1);
        if (A1I != A1J) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx);
          DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
                       << " argument " << A1I << "!=" << A1J << "\n");
          return;
        }
      }
      // Verify that the bundle operands are identical between the two calls.
      if (CI->hasOperandBundles() &&
          !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
                      CI->op_begin() + CI->getBundleOperandsEndIndex(),
                      CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI
                     << "!=" << *VL[i] << '\n');
        return;
      }
    }

    newTreeEntry(VL, true, UserTreeIdx);
    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (Value *j : VL) {
        CallInst *CI2 = cast<CallInst>(j);
        Operands.push_back(CI2->getArgOperand(i));
      }
      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    }
    return;
  }
  case Instruction::ShuffleVector: {
    // If this is not an alternate sequence of opcodes like add-sub
    // then do not vectorize this instruction.
    if (!isAltShuffle) {
      BS.cancelScheduling(VL, VL0);
      newTreeEntry(VL, false, UserTreeIdx);
      DEBUG(dbgs() << "SLP: ShuffleVector is not vectorized.\n");
      return;
    }
    newTreeEntry(VL, true, UserTreeIdx);
    DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");

    // Reorder operands if reordering would enable vectorization.
    if (isa<BinaryOperator>(VL0)) {
      ValueList Left, Right;
      reorderAltShuffleOperands(VL, Left, Right);
      buildTree_rec(Left, Depth + 1, UserTreeIdx);
      buildTree_rec(Right, Depth + 1, UserTreeIdx);
      return;
    }

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (Value *j : VL)
        Operands.push_back(cast<Instruction>(j)->getOperand(i));

      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
    }
    return;
  }
  default:
    BS.cancelScheduling(VL, VL0);
    newTreeEntry(VL, false, UserTreeIdx);
    DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
    return;
  }
}

unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
  unsigned N;
  Type *EltTy;
  auto *ST = dyn_cast<StructType>(T);
  if (ST) {
    N = ST->getNumElements();
    EltTy = *ST->element_begin();
  } else {
    N = cast<ArrayType>(T)->getNumElements();
    EltTy = cast<ArrayType>(T)->getElementType();
  }
  if (!isValidElementType(EltTy))
    return 0;
  uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N));
  if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize ||
      VTSize != DL.getTypeStoreSizeInBits(T))
    return 0;
  if (ST) {
    // Check that the struct is homogeneous.
    for (const auto *Ty : ST->elements())
      if (Ty != EltTy)
        return 0;
  }
  return N;
}

bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const {
  assert(Opcode == Instruction::ExtractElement ||
         Opcode == Instruction::ExtractValue);
  assert(Opcode == getSameOpcode(VL) && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *VL0 = VL[0];
  Instruction *E0 = cast<Instruction>(VL0);
  Value *Vec = E0->getOperand(0);

  // We have to extract from a vector/aggregate with the same number of
  // elements.
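  // For ExtractValue, canMapToVector() below determines whether the source
  // aggregate is layout-compatible with a vector of its element type, i.e.
  // whether the scalar load feeding the extracts could be rewritten as a
  // single vector load.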
  unsigned NElts;
  if (Opcode == Instruction::ExtractValue) {
    const DataLayout &DL = E0->getModule()->getDataLayout();
    NElts = canMapToVector(Vec->getType(), DL);
    if (!NElts)
      return false;
    // Check if the load can be rewritten as a load of a vector.
    LoadInst *LI = dyn_cast<LoadInst>(Vec);
    if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
      return false;
  } else {
    NElts = Vec->getType()->getVectorNumElements();
  }

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
  if (!matchExtractIndex(E0, 0, Opcode))
    return false;

  for (unsigned i = 1, e = VL.size(); i < e; ++i) {
    Instruction *E = cast<Instruction>(VL[i]);
    if (!matchExtractIndex(E, i, Opcode))
      return false;
    if (E->getOperand(0) != Vec)
      return false;
  }

  return true;
}

int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value *> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0]))
    ScalarTy = CI->getOperand(0)->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  // If we have computed a smaller type for the expression, update VecTy so
  // that the costs will be accurate.
  if (MinBWs.count(VL[0]))
    VecTy = VectorType::get(
        IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size());

  if (E->NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isSplat(VL)) {
      return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    }
    return getGatherCost(E->Scalars);
  }
  unsigned Opcode = getSameOpcode(VL);
  assert(Opcode && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
  Instruction *VL0 = cast<Instruction>(VL[0]);
  switch (Opcode) {
  case Instruction::PHI: {
    return 0;
  }
  case Instruction::ExtractValue:
  case Instruction::ExtractElement: {
    if (canReuseExtract(VL, Opcode)) {
      int DeadCost = 0;
      for (unsigned i = 0, e = VL.size(); i < e; ++i) {
        Instruction *E = cast<Instruction>(VL[i]);
        // If all users are going to be vectorized, the instruction can be
        // considered dead. The same holds if it has only one user, which
        // will then be vectorized for sure.
        if (E->hasOneUse() ||
            std::all_of(E->user_begin(), E->user_end(), [this](User *U) {
              return ScalarToTreeEntry.count(U) > 0;
            }))
          // Take credit for the instruction that will become dead.
          DeadCost +=
              TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
      }
      return -DeadCost;
    }
    return getGatherCost(VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();

    // Calculate the cost of this instruction.
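    // (As everywhere in getEntryCost(), the returned value is the vector
    // cost minus the scalar cost, so a negative result means the vector
    // form is expected to be cheaper.)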
    int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
                                                       VL0->getType(), SrcTy,
                                                       VL0);

    VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
    int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy, VL0);
    return VecCost - ScalarCost;
  }
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select: {
    // Calculate the cost of this instruction.
    VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
    int ScalarCost =
        VecTy->getNumElements() *
        TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty(), VL0);
    int VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy, VL0);
    return VecCost - ScalarCost;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Certain instructions can be cheaper to vectorize if they have a
    // constant second vector operand.
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_UniformConstantValue;
    TargetTransformInfo::OperandValueProperties Op1VP =
        TargetTransformInfo::OP_None;
    TargetTransformInfo::OperandValueProperties Op2VP =
        TargetTransformInfo::OP_None;

    // If all operands are exactly the same ConstantInt then set the
    // operand kind to OK_UniformConstantValue.
    // If instead not all operands are constants, then set the operand kind
    // to OK_AnyValue. If all operands are constants but not the same,
    // then set the operand kind to OK_NonUniformConstantValue.
    ConstantInt *CInt = nullptr;
    for (unsigned i = 0; i < VL.size(); ++i) {
      const Instruction *I = cast<Instruction>(VL[i]);
      if (!isa<ConstantInt>(I->getOperand(1))) {
        Op2VK = TargetTransformInfo::OK_AnyValue;
        break;
      }
      if (i == 0) {
        CInt = cast<ConstantInt>(I->getOperand(1));
        continue;
      }
      if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
          CInt != cast<ConstantInt>(I->getOperand(1)))
        Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
    }
    // FIXME: Currently the cost model modification for division by a power
    // of 2 is handled only for X86 and AArch64. Add support for other
    // targets.
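    // For example, with a uniform divisor of 8, a target can lower an
    // unsigned divide as a single shift (and a signed divide as a short
    // shift-based sequence); OP_PowerOf2 lets getArithmeticInstrCost
    // account for that.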
    if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt &&
        CInt->getValue().isPowerOf2())
      Op2VP = TargetTransformInfo::OP_PowerOf2;

    SmallVector<const Value *, 4> Operands(VL0->operand_values());
    int ScalarCost =
        VecTy->getNumElements() *
        TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK, Op1VP,
                                    Op2VP, Operands);
    int VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK,
                                              Op1VP, Op2VP, Operands);
    return VecCost - ScalarCost;
  }
  case Instruction::GetElementPtr: {
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_UniformConstantValue;

    int ScalarCost =
        VecTy->getNumElements() *
        TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
    int VecCost =
        TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);

    return VecCost - ScalarCost;
  }
  case Instruction::Load: {
    // Cost of wide load - cost of scalar loads.
    unsigned alignment = cast<LoadInst>(VL0)->getAlignment();
    int ScalarLdCost =
        VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0, VL0);
    int VecLdCost =
        TTI->getMemoryOpCost(Instruction::Load, VecTy, alignment, 0, VL0);
    return VecLdCost - ScalarLdCost;
  }
  case Instruction::Store: {
    // We know that we can merge the stores. Calculate the cost.
    unsigned alignment = cast<StoreInst>(VL0)->getAlignment();
    int ScalarStCost =
        VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Store, ScalarTy, alignment, 0, VL0);
    int VecStCost =
        TTI->getMemoryOpCost(Instruction::Store, VecTy, alignment, 0, VL0);
    return VecStCost - ScalarStCost;
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

    // Calculate the cost of the scalar and vector calls.
    SmallVector<Type *, 4> ScalarTys;
    for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op)
      ScalarTys.push_back(CI->getArgOperand(op)->getType());

    FastMathFlags FMF;
    if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
      FMF = FPMO->getFastMathFlags();

    int ScalarCallCost =
        VecTy->getNumElements() *
        TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF);

    SmallVector<Value *, 4> Args(CI->arg_operands());
    int VecCallCost = TTI->getIntrinsicInstrCost(ID, CI->getType(), Args, FMF,
                                                 VecTy->getNumElements());

    DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
                 << " (" << VecCallCost << "-" << ScalarCallCost << ")"
                 << " for " << *CI << "\n");

    return VecCallCost - ScalarCallCost;
  }
  case Instruction::ShuffleVector: {
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_AnyValue;
    int ScalarCost = 0;
    int VecCost = 0;
    for (Value *i : VL) {
      Instruction *I = cast<Instruction>(i);
      ScalarCost +=
          TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
    }
    // VecCost is equal to the sum of the cost of creating 2 vectors
    // and the cost of creating the shuffle.
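    // For example, for <a0 - b0, a1 + b1, a2 - b2, a3 + b3> this is the
    // cost of one vector subtract, one vector add, and one SK_Alternate
    // shuffle that picks the even lanes from the first result and the odd
    // lanes from the second.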
    Instruction *I0 = cast<Instruction>(VL[0]);
    VecCost =
        TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK);
    Instruction *I1 = cast<Instruction>(VL[1]);
    VecCost +=
        TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK);
    VecCost +=
        TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0);
    return VecCost - ScalarCost;
  }
  default:
    llvm_unreachable("Unknown instruction");
  }
}

bool BoUpSLP::isFullyVectorizableTinyTree() {
  DEBUG(dbgs() << "SLP: Check whether the tree with height "
               << VectorizableTree.size() << " is fully vectorizable.\n");

  // We only handle trees of heights 1 and 2.
  if (VectorizableTree.size() == 1 && !VectorizableTree[0].NeedToGather)
    return true;

  if (VectorizableTree.size() != 2)
    return false;

  // Handle splat and all-constants stores.
  if (!VectorizableTree[0].NeedToGather &&
      (allConstant(VectorizableTree[1].Scalars) ||
       isSplat(VectorizableTree[1].Scalars)))
    return true;

  // The gathering cost would be too high for tiny trees.
  if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
    return false;

  return true;
}

bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() {
  // We can vectorize the tree if its size is greater than or equal to the
  // minimum size specified by the MinTreeSize command line option.
  if (VectorizableTree.size() >= MinTreeSize)
    return false;

  // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
  // can vectorize it if we can prove it fully vectorizable.
  if (isFullyVectorizableTinyTree())
    return false;

  assert((!VectorizableTree.empty() || ExternalUses.empty()) &&
         "We shouldn't have any external users");

  // Otherwise, we can't vectorize the tree. It is both tiny and not fully
  // vectorizable.
  return true;
}

int BoUpSLP::getSpillCost() {
  // Walk from the bottom of the tree to the top, tracking which values are
  // live. When we see a call instruction that is not part of our tree,
  // query TTI to see if there is a cost to keeping values live over it
  // (for example, if spills and fills are required).
  unsigned BundleWidth = VectorizableTree.front().Scalars.size();
  int Cost = 0;

  SmallPtrSet<Instruction *, 4> LiveValues;
  Instruction *PrevInst = nullptr;

  for (const auto &N : VectorizableTree) {
    Instruction *Inst = dyn_cast<Instruction>(N.Scalars[0]);
    if (!Inst)
      continue;

    if (!PrevInst) {
      PrevInst = Inst;
      continue;
    }

    // Update LiveValues.
    LiveValues.erase(PrevInst);
    for (auto &J : PrevInst->operands()) {
      if (isa<Instruction>(&*J) && getTreeEntry(&*J))
        LiveValues.insert(cast<Instruction>(&*J));
    }

    DEBUG(dbgs() << "SLP: #LV: " << LiveValues.size();
          for (auto *X : LiveValues)
            dbgs() << " " << X->getName();
          dbgs() << ", Looking at ";
          Inst->dump(););

    // Now find the sequence of instructions between PrevInst and Inst.
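    // The walk below proceeds backwards from Inst towards PrevInst; any call
    // found strictly between the two bundles may clobber vector registers,
    // and TTI->getCostOfKeepingLiveOverCall() reports the spill/fill cost of
    // keeping the currently live vectors across it.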
    BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
                                 PrevInstIt =
                                     PrevInst->getIterator().getReverse();
    while (InstIt != PrevInstIt) {
      if (PrevInstIt == PrevInst->getParent()->rend()) {
        PrevInstIt = Inst->getParent()->rbegin();
        continue;
      }

      if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) {
        SmallVector<Type *, 4> V;
        for (auto *II : LiveValues)
          V.push_back(VectorType::get(II->getType(), BundleWidth));
        Cost += TTI->getCostOfKeepingLiveOverCall(V);
      }

      ++PrevInstIt;
    }

    PrevInst = Inst;
  }

  return Cost;
}

int BoUpSLP::getTreeCost() {
  int Cost = 0;
  DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
               << VectorizableTree.size() << ".\n");

  unsigned BundleWidth = VectorizableTree[0].Scalars.size();

  for (TreeEntry &TE : VectorizableTree) {
    int C = getEntryCost(&TE);
    DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
                 << *TE.Scalars[0] << ".\n");
    Cost += C;
  }

  SmallSet<Value *, 16> ExtractCostCalculated;
  int ExtractCost = 0;
  for (ExternalUser &EU : ExternalUses) {
    // We only add extract cost once for the same scalar.
    if (!ExtractCostCalculated.insert(EU.Scalar).second)
      continue;

    // Uses by ephemeral values are free (because the ephemeral value will be
    // removed prior to code generation, and so the extraction will be
    // removed as well).
    if (EphValues.count(EU.User))
      continue;

    // If we plan to rewrite the tree in a smaller type, we will need to sign
    // extend the extracted value back to the original type. Here, we account
    // for the extract and the added cost of the sign extend if needed.
    auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth);
    auto *ScalarRoot = VectorizableTree[0].Scalars[0];
    if (MinBWs.count(ScalarRoot)) {
      auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
      auto Extend =
          MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
      VecTy = VectorType::get(MinTy, BundleWidth);
      ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
                                                   VecTy, EU.Lane);
    } else {
      ExtractCost +=
          TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
    }
  }

  int SpillCost = getSpillCost();
  Cost += SpillCost + ExtractCost;

  std::string Str;
  {
    raw_string_ostream OS(Str);
    OS << "SLP: Spill Cost = " << SpillCost << ".\n"
       << "SLP: Extract Cost = " << ExtractCost << ".\n"
       << "SLP: Total Cost = " << Cost << ".\n";
  }
  DEBUG(dbgs() << Str);

  if (ViewSLPTree)
    ViewGraph(this, "SLP" + F->getName(), false, Str);

  return Cost;
}

int BoUpSLP::getGatherCost(Type *Ty) {
  int Cost = 0;
  for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
    Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
  return Cost;
}

int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
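  // A gather is modeled as one InsertElement per lane; see the Type-based
  // overload of getGatherCost() above.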
  return getGatherCost(VecTy);
}

// Reorder commutative operations in an alternate shuffle if the resulting
// vectors are consecutive loads. This would allow us to vectorize the tree.
// If we have something like:
//   load a[0] - load b[0]
//   load b[1] + load a[1]
//   load a[2] - load b[2]
//   load a[3] + load b[3]
// then reordering the second pair (load b[1], load a[1]) allows us to
// vectorize this code.
void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL,
                                        SmallVectorImpl<Value *> &Left,
                                        SmallVectorImpl<Value *> &Right) {
  // Push the left and right operands of the binary operations into Left and
  // Right.
  for (Value *i : VL) {
    Left.push_back(cast<Instruction>(i)->getOperand(0));
    Right.push_back(cast<Instruction>(i)->getOperand(1));
  }

  // Reorder if we have a commutative operation and consecutive accesses
  // are on either side of the alternate instructions.
  for (unsigned j = 0; j < VL.size() - 1; ++j) {
    if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
        Instruction *VL1 = cast<Instruction>(VL[j]);
        Instruction *VL2 = cast<Instruction>(VL[j + 1]);
        if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j], Right[j]);
          continue;
        } else if (VL2->isCommutative() &&
                   isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
        // else unchanged
      }
    }
    if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
        Instruction *VL1 = cast<Instruction>(VL[j]);
        Instruction *VL2 = cast<Instruction>(VL[j + 1]);
        if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j], Right[j]);
          continue;
        } else if (VL2->isCommutative() &&
                   isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
        // else unchanged
      }
    }
  }
}

// Return true if I should be commuted before adding its left and right
// operands to the arrays Left and Right.
//
// The vectorizer is trying either to have all elements on one side be
// instructions with the same opcode (to enable further vectorization), or to
// have a splat (to lower the cost of vectorizing).
static bool shouldReorderOperands(int i, Instruction &I,
                                  SmallVectorImpl<Value *> &Left,
                                  SmallVectorImpl<Value *> &Right,
                                  bool AllSameOpcodeLeft,
                                  bool AllSameOpcodeRight, bool SplatLeft,
                                  bool SplatRight) {
  Value *VLeft = I.getOperand(0);
  Value *VRight = I.getOperand(1);
  // If we have "SplatRight", try to see if commuting is needed to preserve it.
  if (SplatRight) {
    if (VRight == Right[i - 1])
      // Preserve SplatRight.
      return false;
    if (VLeft == Right[i - 1]) {
      // Commuting would preserve SplatRight, but we don't want to break
      // SplatLeft either, i.e. preserve the original order if possible.
      // (FIXME: why do we care?)
      if (SplatLeft && VLeft == Left[i - 1])
        return false;
      return true;
    }
  }
  // Symmetrically handle SplatLeft.
  if (SplatLeft) {
    if (VLeft == Left[i - 1])
      // Preserve SplatLeft.
      return false;
    if (VRight == Left[i - 1])
      return true;
  }

  Instruction *ILeft = dyn_cast<Instruction>(VLeft);
  Instruction *IRight = dyn_cast<Instruction>(VRight);

  // If we have "AllSameOpcodeRight", try to see if the left operand preserves
  // it while the right does not; in that case we want to commute.
  if (AllSameOpcodeRight) {
    unsigned RightPrevOpcode = cast<Instruction>(Right[i - 1])->getOpcode();
    if (IRight && RightPrevOpcode == IRight->getOpcode())
      // Do not commute; a match on the right preserves AllSameOpcodeRight.
      return false;
    if (ILeft && RightPrevOpcode == ILeft->getOpcode()) {
      // We have a match and may want to commute, but first check if there is
      // not also a match on the existing operands on the Left to preserve
      // AllSameOpcodeLeft, i.e. preserve the original order if possible.
      // (FIXME: why do we care?)
      if (AllSameOpcodeLeft && ILeft &&
          cast<Instruction>(Left[i - 1])->getOpcode() == ILeft->getOpcode())
        return false;
      return true;
    }
  }
  // Symmetrically handle AllSameOpcodeLeft.
  if (AllSameOpcodeLeft) {
    unsigned LeftPrevOpcode = cast<Instruction>(Left[i - 1])->getOpcode();
    if (ILeft && LeftPrevOpcode == ILeft->getOpcode())
      return false;
    if (IRight && LeftPrevOpcode == IRight->getOpcode())
      return true;
  }
  return false;
}

void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                             SmallVectorImpl<Value *> &Left,
                                             SmallVectorImpl<Value *> &Right) {
  if (VL.size()) {
    // Peel the first iteration out of the loop since there's nothing
    // interesting to do anyway and it simplifies the checks in the loop.
    auto VLeft = cast<Instruction>(VL[0])->getOperand(0);
    auto VRight = cast<Instruction>(VL[0])->getOperand(1);
    if (!isa<Instruction>(VRight) && isa<Instruction>(VLeft))
      // Favor having an instruction to the right. FIXME: why?
      std::swap(VLeft, VRight);
    Left.push_back(VLeft);
    Right.push_back(VRight);
  }

  // Keep track of whether we have instructions with all the same opcode on
  // one side.
  bool AllSameOpcodeLeft = isa<Instruction>(Left[0]);
  bool AllSameOpcodeRight = isa<Instruction>(Right[0]);
  // Keep track of whether we have one side with all the same value
  // (broadcast).
  bool SplatLeft = true;
  bool SplatRight = true;

  for (unsigned i = 1, e = VL.size(); i != e; ++i) {
    Instruction *I = cast<Instruction>(VL[i]);
    assert(I->isCommutative() && "Can only process commutative instruction");
    // Commute to favor either a splat or maximizing having the same opcodes
    // on one side.
    if (shouldReorderOperands(i, *I, Left, Right, AllSameOpcodeLeft,
                              AllSameOpcodeRight, SplatLeft, SplatRight)) {
      Left.push_back(I->getOperand(1));
      Right.push_back(I->getOperand(0));
    } else {
      Left.push_back(I->getOperand(0));
      Right.push_back(I->getOperand(1));
    }
    // Update Splat* and AllSameOpcode* after the insertion.
    SplatRight = SplatRight && (Right[i - 1] == Right[i]);
    SplatLeft = SplatLeft && (Left[i - 1] == Left[i]);
    AllSameOpcodeLeft = AllSameOpcodeLeft && isa<Instruction>(Left[i]) &&
                        (cast<Instruction>(Left[i - 1])->getOpcode() ==
                         cast<Instruction>(Left[i])->getOpcode());
    AllSameOpcodeRight = AllSameOpcodeRight && isa<Instruction>(Right[i]) &&
                         (cast<Instruction>(Right[i - 1])->getOpcode() ==
                          cast<Instruction>(Right[i])->getOpcode());
  }

  // If one operand ends up being a broadcast, return this operand order.
  if (SplatRight || SplatLeft)
    return;

  // Finally, check if we can get a longer vectorizable chain by reordering
  // without breaking the good operand order detected above.
  // E.g. if we have something like:
  //   load a[0]  load b[0]
  //   load b[1]  load a[1]
  //   load a[2]  load b[2]
  //   load a[3]  load b[3]
  // then reordering the second pair (load b[1], load a[1]) allows us to
  // vectorize this code, and we still retain the AllSameOpcode property.
  // FIXME: This load reordering might break AllSameOpcode in some rare cases
  // such as:
  //   add a[0],c[0]  load b[0]
  //   add a[1],c[2]  load b[1]
  //   b[2]           load b[2]
  //   add a[3],c[3]  load b[3]
  for (unsigned j = 0; j < VL.size() - 1; ++j) {
    if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
        if (isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
      }
    }
    if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
        if (isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
      }
    }
    // else unchanged
  }
}

void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
  // Get the basic block this bundle is in. All instructions in the bundle
  // should be in this block.
  auto *Front = cast<Instruction>(VL.front());
  auto *BB = Front->getParent();
  assert(all_of(make_range(VL.begin(), VL.end()), [&](Value *V) -> bool {
    return cast<Instruction>(V)->getParent() == BB;
  }));

  // The last instruction in the bundle in program order.
  Instruction *LastInst = nullptr;

  // Find the last instruction. The common case should be that BB has been
  // scheduled, and the last instruction is VL.back(). So we start with
  // VL.back() and iterate over schedule data until we reach the end of the
  // bundle. The end of the bundle is marked by null ScheduleData.
  if (BlocksSchedules.count(BB)) {
    auto *Bundle = BlocksSchedules[BB]->getScheduleData(VL.back());
    if (Bundle && Bundle->isPartOfBundle())
      for (; Bundle; Bundle = Bundle->NextInBundle)
        LastInst = Bundle->Inst;
  }

  // LastInst can still be null at this point if there's either not an entry
  // for BB in BlocksSchedules or there's no ScheduleData available for
  // VL.back(). This can be the case if buildTree_rec aborts for various
  // reasons (e.g., the maximum recursion depth is reached, the maximum region
  // size is reached, etc.). ScheduleData is initialized in the scheduling
  // "dry-run".
  //
  // If this happens, we can still find the last instruction by brute force. We
  // iterate forwards from Front (inclusive) until we either see all
  // instructions in the bundle or reach the end of the block.
  // If Front is the last instruction in program order, LastInst will be set
  // to Front, and we will visit all the remaining instructions in the block.
  //
  // One of the reasons we exit early from buildTree_rec is to place an upper
  // bound on compile-time. Thus, taking an additional compile-time hit here is
  // not ideal. However, this should be exceedingly rare since it requires that
  // we both exit early from buildTree_rec and that the bundle be out-of-order
  // (causing us to iterate all the way to the end of the block).
  if (!LastInst) {
    SmallPtrSet<Value *, 16> Bundle(VL.begin(), VL.end());
    for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
      if (Bundle.erase(&I))
        LastInst = &I;
      if (Bundle.empty())
        break;
    }
  }

  // Set the insertion point after the last instruction in the bundle. Set the
  // debug location to Front.
  Builder.SetInsertPoint(BB, ++LastInst->getIterator());
  Builder.SetCurrentDebugLocation(Front->getDebugLoc());
}

Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
  Value *Vec = UndefValue::get(Ty);
  // Generate the 'InsertElement' instructions.
  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
    if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(Insrt);
      CSEBlocks.insert(Insrt->getParent());

      // Add to our 'need-to-extract' list.
      if (TreeEntry *E = getTreeEntry(VL[i])) {
        // Find which lane we need to extract.
        int FoundLane = -1;
        for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for?
          if (E->Scalars[Lane] == VL[i]) {
            FoundLane = Lane;
            break;
          }
        }
        assert(FoundLane >= 0 && "Could not find the correct lane");
        ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
      }
    }
  }

  return Vec;
}

Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL, Value *OpValue) const {
  if (const TreeEntry *En = getTreeEntry(OpValue)) {
    if (En->isSame(VL) && En->VectorizedValue)
      return En->VectorizedValue;
  }
  return nullptr;
}

Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  if (TreeEntry *E = getTreeEntry(VL[0]))
    if (E->isSame(VL))
      return vectorizeTree(E);

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  return Gather(VL, VecTy);
}

Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
  Type *ScalarTy = VL0->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());

  if (E->NeedToGather) {
    setInsertPointAfterBundle(E->Scalars);
    auto *V = Gather(E->Scalars, VecTy);
    E->VectorizedValue = V;
    return V;
  }

  unsigned Opcode = getSameOpcode(E->Scalars);

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = cast<PHINode>(VL0);
    Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
    Builder.SetCurrentDebugLocation(PH->getDebugLoc());
    PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
    E->VectorizedValue = NewPhi;

    // PHINodes may have multiple entries from the same block. We want to
    // visit every block once.
    SmallSet<BasicBlock *, 4> VisitedBBs;

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      BasicBlock *IBB = PH->getIncomingBlock(i);

      if (!VisitedBBs.insert(IBB).second) {
        NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
        continue;
      }

      // Prepare the operand vector.
      for (Value *V : E->Scalars)
        Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(IBB));

      Builder.SetInsertPoint(IBB->getTerminator());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      Value *Vec = vectorizeTree(Operands);
      NewPhi->addIncoming(Vec, IBB);
    }

    assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
           "Invalid number of incoming values");
    return NewPhi;
  }

  case Instruction::ExtractElement: {
    if (canReuseExtract(E->Scalars, Instruction::ExtractElement)) {
      Value *V = VL0->getOperand(0);
      E->VectorizedValue = V;
      return V;
    }
    setInsertPointAfterBundle(E->Scalars);
    auto *V = Gather(E->Scalars, VecTy);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::ExtractValue: {
    if (canReuseExtract(E->Scalars, Instruction::ExtractValue)) {
      LoadInst *LI = cast<LoadInst>(VL0->getOperand(0));
      Builder.SetInsertPoint(LI);
      PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
      Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
      LoadInst *V = Builder.CreateAlignedLoad(Ptr, LI->getAlignment());
      E->VectorizedValue = V;
      return propagateMetadata(V, E->Scalars);
    }
    setInsertPointAfterBundle(E->Scalars);
    auto *V = Gather(E->Scalars, VecTy);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    ValueList INVL;
    for (Value *V : E->Scalars)
      INVL.push_back(cast<Instruction>(V)->getOperand(0));

    setInsertPointAfterBundle(E->Scalars);

    Value *InVec = vectorizeTree(INVL);

    if (Value *V = alreadyVectorized(E->Scalars, VL0))
      return V;

    CastInst *CI = cast<CastInst>(VL0);
    Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::FCmp:
  case Instruction::ICmp: {
    ValueList LHSV, RHSV;
    for (Value *V : E->Scalars) {
      LHSV.push_back(cast<Instruction>(V)->getOperand(0));
      RHSV.push_back(cast<Instruction>(V)->getOperand(1));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *L = vectorizeTree(LHSV);
    Value *R = vectorizeTree(RHSV);

    if (Value *V = alreadyVectorized(E->Scalars, VL0))
      return V;

    CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
    Value *V;
    if (Opcode == Instruction::FCmp)
      V = Builder.CreateFCmp(P0, L, R);
    else
      V = Builder.CreateICmp(P0, L, R);

    E->VectorizedValue = V;
    propagateIRFlags(E->VectorizedValue, E->Scalars);
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::Select: {
    ValueList TrueVec, FalseVec, CondVec;
    for (Value *V : E->Scalars) {
      CondVec.push_back(cast<Instruction>(V)->getOperand(0));
      TrueVec.push_back(cast<Instruction>(V)->getOperand(1));
      FalseVec.push_back(cast<Instruction>(V)->getOperand(2));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *Cond = vectorizeTree(CondVec);
    Value *True = vectorizeTree(TrueVec);
    Value *False = vectorizeTree(FalseVec);

    if (Value *V = alreadyVectorized(E->Scalars, VL0))
      return V;

    Value *V = Builder.CreateSelect(Cond, True, False);
    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    ValueList LHSVL, RHSVL;
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative())
      reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL);
    else
      for (Value *V : E->Scalars) {
        LHSVL.push_back(cast<Instruction>(V)->getOperand(0));
        RHSVL.push_back(cast<Instruction>(V)->getOperand(1));
      }

    setInsertPointAfterBundle(E->Scalars);

    Value *LHS = vectorizeTree(LHSVL);
    Value *RHS = vectorizeTree(RHSVL);

    if (Value *V = alreadyVectorized(E->Scalars, VL0))
      return V;

    BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
    Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
    E->VectorizedValue = V;
    propagateIRFlags(E->VectorizedValue, E->Scalars);
    ++NumVectorInstructions;

    if (Instruction *I = dyn_cast<Instruction>(V))
      return propagateMetadata(I, E->Scalars);

    return V;
  }
  case Instruction::Load: {
    // Loads are inserted at the head of the tree because we don't want to
    // sink them all the way down past store instructions.
    setInsertPointAfterBundle(E->Scalars);

    LoadInst *LI = cast<LoadInst>(VL0);
    Type *ScalarLoadTy = LI->getType();
    unsigned AS = LI->getPointerAddressSpace();

    Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));

    // The pointer operand uses an in-tree scalar, so we add the new BitCast
    // to the ExternalUses list to make sure that an extract will be
    // generated in the future.
    Value *PO = LI->getPointerOperand();
    if (getTreeEntry(PO))
      ExternalUses.push_back(ExternalUser(PO, cast<User>(VecPtr), 0));

    unsigned Alignment = LI->getAlignment();
    LI = Builder.CreateLoad(VecPtr);
    if (!Alignment) {
      Alignment = DL->getABITypeAlignment(ScalarLoadTy);
    }
    LI->setAlignment(Alignment);
    E->VectorizedValue = LI;
    ++NumVectorInstructions;
    return propagateMetadata(LI, E->Scalars);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(VL0);
    unsigned Alignment = SI->getAlignment();
    unsigned AS = SI->getPointerAddressSpace();

    ValueList ValueOp;
    for (Value *V : E->Scalars)
      ValueOp.push_back(cast<StoreInst>(V)->getValueOperand());

    setInsertPointAfterBundle(E->Scalars);

    Value *VecValue = vectorizeTree(ValueOp);
    Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));
    StoreInst *S = Builder.CreateStore(VecValue, VecPtr);

    // The pointer operand uses an in-tree scalar, so we add the new BitCast
    // to the ExternalUses list to make sure that an extract will be
    // generated in the future.
    Value *PO = SI->getPointerOperand();
    if (getTreeEntry(PO))
      ExternalUses.push_back(ExternalUser(PO, cast<User>(VecPtr), 0));

    if (!Alignment) {
      Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType());
    }
    S->setAlignment(Alignment);
    E->VectorizedValue = S;
    ++NumVectorInstructions;
    return propagateMetadata(S, E->Scalars);
  }
  case Instruction::GetElementPtr: {
    setInsertPointAfterBundle(E->Scalars);

    ValueList Op0VL;
    for (Value *V : E->Scalars)
      Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0));

    Value *Op0 = vectorizeTree(Op0VL);

    std::vector<Value *> OpVecs;
    for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
         ++j) {
      ValueList OpVL;
      for (Value *V : E->Scalars)
        OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j));

      Value *OpVec = vectorizeTree(OpVL);
      OpVecs.push_back(OpVec);
    }

    Value *V = Builder.CreateGEP(
        cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs);
    E->VectorizedValue = V;
    ++NumVectorInstructions;

    if (Instruction *I = dyn_cast<Instruction>(V))
      return propagateMetadata(I, E->Scalars);

    return V;
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    setInsertPointAfterBundle(E->Scalars);
    Function *FI;
    Intrinsic::ID IID = Intrinsic::not_intrinsic;
    Value *ScalarArg = nullptr;
    if ((FI = CI->getCalledFunction())) {
      IID = FI->getIntrinsicID();
    }
    std::vector<Value *> OpVecs;
    for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
      ValueList OpVL;
      // ctlz, cttz and powi are special intrinsics whose second argument is
      // a scalar. This argument should not be vectorized.
      if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) {
        CallInst *CEI = cast<CallInst>(E->Scalars[0]);
        ScalarArg = CEI->getArgOperand(j);
        OpVecs.push_back(CEI->getArgOperand(j));
        continue;
      }
      for (Value *V : E->Scalars) {
        CallInst *CEI = cast<CallInst>(V);
        OpVL.push_back(CEI->getArgOperand(j));
      }

      Value *OpVec = vectorizeTree(OpVL);
      DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
      OpVecs.push_back(OpVec);
    }

    Module *M = F->getParent();
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    Type *Tys[] = {VectorType::get(CI->getType(), E->Scalars.size())};
    Function *CF = Intrinsic::getDeclaration(M, ID, Tys);
    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);

    // The scalar argument uses an in-tree scalar, so we add the new
    // vectorized call to the ExternalUses list to make sure that an extract
    // will be generated in the future.
    if (ScalarArg && getTreeEntry(ScalarArg))
      ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));

    E->VectorizedValue = V;
    propagateIRFlags(E->VectorizedValue, E->Scalars);
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::ShuffleVector: {
    ValueList LHSVL, RHSVL;
    assert(isa<BinaryOperator>(VL0) && "Invalid Shuffle Vector Operand");
    reorderAltShuffleOperands(E->Scalars, LHSVL, RHSVL);
    setInsertPointAfterBundle(E->Scalars);

    Value *LHS = vectorizeTree(LHSVL);
    Value *RHS = vectorizeTree(RHSVL);

    if (Value *V = alreadyVectorized(E->Scalars, VL0))
      return V;

    // Create a vector of LHS op1 RHS.
    BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0);
    Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS);

    // Create a vector of LHS op2 RHS.
    Instruction *VL1 = cast<Instruction>(E->Scalars[1]);
    BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1);
    Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS);

    // Create a shuffle to take the alternate operations from the vectors.
    // Also, gather up the odd and even scalar ops to propagate IR flags to
    // each vector operation.
    ValueList OddScalars, EvenScalars;
    unsigned e = E->Scalars.size();
    SmallVector<Constant *, 8> Mask(e);
    for (unsigned i = 0; i < e; ++i) {
      if (isOdd(i)) {
        Mask[i] = Builder.getInt32(e + i);
        OddScalars.push_back(E->Scalars[i]);
      } else {
        Mask[i] = Builder.getInt32(i);
        EvenScalars.push_back(E->Scalars[i]);
      }
    }

    Value *ShuffleMask = ConstantVector::get(Mask);
    propagateIRFlags(V0, EvenScalars);
    propagateIRFlags(V1, OddScalars);

    Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
    E->VectorizedValue = V;
    ++NumVectorInstructions;
    if (Instruction *I = dyn_cast<Instruction>(V))
      return propagateMetadata(I, E->Scalars);

    return V;
  }
  default:
    llvm_unreachable("unknown inst");
  }
  return nullptr;
}

Value *BoUpSLP::vectorizeTree() {
  ExtraValueToDebugLocsMap ExternallyUsedValues;
  return vectorizeTree(ExternallyUsedValues);
}

Value *
BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
  // All blocks must be scheduled before any instructions are inserted.
  for (auto &BSIter : BlocksSchedules) {
    scheduleBlock(BSIter.second.get());
  }

  Builder.SetInsertPoint(&F->getEntryBlock().front());
  auto *VectorRoot = vectorizeTree(&VectorizableTree[0]);

  // If the vectorized tree can be rewritten in a smaller type, we truncate the
  // vectorized root. InstCombine will then rewrite the entire expression. We
  // sign extend the extracted values below.
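  //
  // MinBWs maps the root scalar to the demanded bit width and to whether the
  // value must be sign- or zero-extended back to its original type; the
  // truncation happens just below, and each externally used lane is
  // re-extended at its extraction site.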
  auto *ScalarRoot = VectorizableTree[0].Scalars[0];
  if (MinBWs.count(ScalarRoot)) {
    if (auto *I = dyn_cast<Instruction>(VectorRoot))
      Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
    auto BundleWidth = VectorizableTree[0].Scalars.size();
    auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
    auto *VecTy = VectorType::get(MinTy, BundleWidth);
    auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
    VectorizableTree[0].VectorizedValue = Trunc;
  }

  DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values.\n");

  // If necessary, sign-extend or zero-extend ScalarRoot to the larger type
  // specified by ScalarType.
  auto extend = [&](Value *ScalarRoot, Value *Ex, Type *ScalarType) {
    if (!MinBWs.count(ScalarRoot))
      return Ex;
    if (MinBWs[ScalarRoot].second)
      return Builder.CreateSExt(Ex, ScalarType);
    return Builder.CreateZExt(Ex, ScalarType);
  };

  // Extract all of the elements with the external uses.
  for (const auto &ExternalUse : ExternalUses) {
    Value *Scalar = ExternalUse.Scalar;
    llvm::User *User = ExternalUse.User;

    // Skip users that we already RAUWed. This happens when one instruction
    // has multiple uses of the same value.
    if (User && !is_contained(Scalar->users(), User))
      continue;
    TreeEntry *E = getTreeEntry(Scalar);
    assert(E && "Invalid scalar");
    assert(!E->NeedToGather && "Extracting from a gather list");

    Value *Vec = E->VectorizedValue;
    assert(Vec && "Can't find vectorizable value");

    Value *Lane = Builder.getInt32(ExternalUse.Lane);
    // If User == nullptr, the Scalar is used as an extra arg. Generate an
    // ExtractElement instruction and update the record for this scalar in
    // ExternallyUsedValues.
    if (!User) {
      assert(ExternallyUsedValues.count(Scalar) &&
             "Scalar with nullptr as an external user must be registered in "
             "ExternallyUsedValues map");
      if (auto *VecI = dyn_cast<Instruction>(Vec)) {
        Builder.SetInsertPoint(VecI->getParent(),
                               std::next(VecI->getIterator()));
      } else {
        Builder.SetInsertPoint(&F->getEntryBlock().front());
      }
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      Ex = extend(ScalarRoot, Ex, Scalar->getType());
      CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
      auto &Locs = ExternallyUsedValues[Scalar];
      ExternallyUsedValues.insert({Ex, Locs});
      ExternallyUsedValues.erase(Scalar);
      continue;
    }

    // Generate extracts for out-of-tree users.
    // Find the insertion point for the extractelement lane.
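    // For a PHI user the extract cannot simply be placed next to the PHI;
    // it has to be materialized in the corresponding incoming block, right
    // before that block's terminator (with a fall-back for catchswitch
    // terminators, which cannot be preceded by non-PHI instructions).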
    if (auto *VecI = dyn_cast<Instruction>(Vec)) {
      if (PHINode *PH = dyn_cast<PHINode>(User)) {
        for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
          if (PH->getIncomingValue(i) == Scalar) {
            TerminatorInst *IncomingTerminator =
                PH->getIncomingBlock(i)->getTerminator();
            if (isa<CatchSwitchInst>(IncomingTerminator)) {
              Builder.SetInsertPoint(VecI->getParent(),
                                     std::next(VecI->getIterator()));
            } else {
              Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
            }
            Value *Ex = Builder.CreateExtractElement(Vec, Lane);
            Ex = extend(ScalarRoot, Ex, Scalar->getType());
            CSEBlocks.insert(PH->getIncomingBlock(i));
            PH->setOperand(i, Ex);
          }
        }
      } else {
        Builder.SetInsertPoint(cast<Instruction>(User));
        Value *Ex = Builder.CreateExtractElement(Vec, Lane);
        Ex = extend(ScalarRoot, Ex, Scalar->getType());
        CSEBlocks.insert(cast<Instruction>(User)->getParent());
        User->replaceUsesOfWith(Scalar, Ex);
      }
    } else {
      Builder.SetInsertPoint(&F->getEntryBlock().front());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      Ex = extend(ScalarRoot, Ex, Scalar->getType());
      CSEBlocks.insert(&F->getEntryBlock());
      User->replaceUsesOfWith(Scalar, Ex);
    }

    DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
  }

  // For each vectorized value:
  for (TreeEntry &EIdx : VectorizableTree) {
    TreeEntry *Entry = &EIdx;

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];
      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      assert(Entry->VectorizedValue && "Can't find vectorizable value");

      Type *Ty = Scalar->getType();
      if (!Ty->isVoidTy()) {
#ifndef NDEBUG
        for (User *U : Scalar->users()) {
          DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");

          assert((getTreeEntry(U) ||
                  // It is legal to replace users in the ignorelist by undef.
                  is_contained(UserIgnoreList, U)) &&
                 "Replacing out-of-tree value with undef");
        }
#endif
        Value *Undef = UndefValue::get(Ty);
        Scalar->replaceAllUsesWith(Undef);
      }
      DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
      eraseInstruction(cast<Instruction>(Scalar));
    }
  }

  Builder.ClearInsertionPoint();

  return VectorizableTree[0].VectorizedValue;
}

void BoUpSLP::optimizeGatherSequence() {
  DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
               << " gather sequence instructions.\n");
  // LICM the InsertElementInst sequences.
  for (Instruction *it : GatherSeq) {
    InsertElementInst *Insert = dyn_cast<InsertElementInst>(it);
    if (!Insert)
      continue;

    // Check if this block is inside a loop.
    Loop *L = LI->getLoopFor(Insert->getParent());
    if (!L)
      continue;

    // Check if it has a preheader.
    BasicBlock *PreHeader = L->getLoopPreheader();
    if (!PreHeader)
      continue;

    // If the vector or the element that we insert into it are instructions
    // that are defined inside the loop, then we can't hoist the insert past
    // them.
    Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
    Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
    if (CurrVec && L->contains(CurrVec))
      continue;
    if (NewElem && L->contains(NewElem))
      continue;

    // We can hoist this instruction. Move it to the pre-header.
    Insert->moveBefore(PreHeader->getTerminator());
  }

  // Make a list of all reachable blocks in our CSE queue.
  SmallVector<const DomTreeNode *, 8> CSEWorkList;
  CSEWorkList.reserve(CSEBlocks.size());
  for (BasicBlock *BB : CSEBlocks)
    if (DomTreeNode *N = DT->getNode(BB)) {
      assert(DT->isReachableFromEntry(N));
      CSEWorkList.push_back(N);
    }

  // Sort blocks by domination. This ensures we visit a block after all blocks
  // dominating it are visited.
  std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(),
                   [this](const DomTreeNode *A, const DomTreeNode *B) {
                     return DT->properlyDominates(A, B);
                   });

  // Perform an O(N^2) search over the gather sequences and merge identical
  // instructions. TODO: We can further optimize this scan if we split the
  // instructions into different buckets based on the insert lane.
  SmallVector<Instruction *, 16> Visited;
  for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
    assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
           "Worklist not sorted properly!");
    BasicBlock *BB = (*I)->getBlock();
    // For all instructions in blocks containing gather sequences:
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
      Instruction *In = &*it++;
      if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
        continue;

      // Check if we can replace this instruction with any of the
      // visited instructions.
      for (Instruction *v : Visited) {
        if (In->isIdenticalTo(v) &&
            DT->dominates(v->getParent(), In->getParent())) {
          In->replaceAllUsesWith(v);
          eraseInstruction(In);
          In = nullptr;
          break;
        }
      }
      if (In) {
        assert(!is_contained(Visited, In));
        Visited.push_back(In);
      }
    }
  }
  CSEBlocks.clear();
  GatherSeq.clear();
}

// Groups the instructions into a bundle (which is then a single scheduling
// entity) and schedules instructions until the bundle gets ready.
bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL,
                                                 BoUpSLP *SLP,
                                                 Value *OpValue) {
  if (isa<PHINode>(OpValue))
    return true;

  // Initialize the instruction bundle.
  Instruction *OldScheduleEnd = ScheduleEnd;
  ScheduleData *PrevInBundle = nullptr;
  ScheduleData *Bundle = nullptr;
  bool ReSchedule = false;
  DEBUG(dbgs() << "SLP: bundle: " << *OpValue << "\n");

  // Make sure that the scheduling region contains all
  // instructions of the bundle.
  for (Value *V : VL) {
    if (!extendSchedulingRegion(V))
      return false;
  }

  for (Value *V : VL) {
    ScheduleData *BundleMember = getScheduleData(V);
    assert(BundleMember &&
           "no ScheduleData for bundle member (maybe not in same basic block)");
    if (BundleMember->IsScheduled) {
      // A bundle member was scheduled as a single instruction before and now
      // needs to be scheduled as part of the bundle. We just get rid of the
      // existing schedule.
      DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
                   << " was already scheduled\n");
      ReSchedule = true;
    }
    assert(BundleMember->isSchedulingEntity() &&
           "bundle member already part of other bundle");
    if (PrevInBundle) {
      PrevInBundle->NextInBundle = BundleMember;
    } else {
      Bundle = BundleMember;
    }
    BundleMember->UnscheduledDepsInBundle = 0;
    Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;

    // Group the instructions into a bundle.
    BundleMember->FirstInBundle = Bundle;
    PrevInBundle = BundleMember;
  }
  if (ScheduleEnd != OldScheduleEnd) {
    // The scheduling region got new instructions at the lower end (or it is a
    // new region for the first bundle). This makes it necessary to
    // recalculate all dependencies.
    // It is seldom that this needs to be done a second time after adding the
    // initial bundle to the region.
    for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
      ScheduleData *SD = getScheduleData(I);
      SD->clearDependencies();
    }
    ReSchedule = true;
  }
  if (ReSchedule) {
    resetSchedule();
    initialFillReadyList(ReadyInsts);
  }

  DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
               << BB->getName() << "\n");

  calculateDependencies(Bundle, true, SLP);

  // Now try to schedule the new bundle. As soon as the bundle is "ready" it
  // means that there are no cyclic dependencies and we can schedule it.
  // Note that it's important that we don't "schedule" the bundle yet (see
  // cancelScheduling).
  while (!Bundle->isReady() && !ReadyInsts.empty()) {
    ScheduleData *pickedSD = ReadyInsts.back();
    ReadyInsts.pop_back();

    if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) {
      schedule(pickedSD, ReadyInsts);
    }
  }
  if (!Bundle->isReady()) {
    cancelScheduling(VL, OpValue);
    return false;
  }
  return true;
}

void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
                                                Value *OpValue) {
  if (isa<PHINode>(OpValue))
    return;

  ScheduleData *Bundle = getScheduleData(OpValue);
  DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
  assert(!Bundle->IsScheduled &&
         "Can't cancel bundle which is already scheduled");
  assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
         "tried to unbundle something which is not a bundle");

  // Un-bundle: make single instructions out of the bundle.
  ScheduleData *BundleMember = Bundle;
  while (BundleMember) {
    assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
    BundleMember->FirstInBundle = BundleMember;
    ScheduleData *Next = BundleMember->NextInBundle;
    BundleMember->NextInBundle = nullptr;
    BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
    if (BundleMember->UnscheduledDepsInBundle == 0) {
      ReadyInsts.insert(BundleMember);
    }
    BundleMember = Next;
  }
}

bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V) {
  if (getScheduleData(V))
    return true;
  Instruction *I = dyn_cast<Instruction>(V);
  assert(I && "bundle member must be an instruction");
  assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
  if (!ScheduleStart) {
    // It's the first instruction in the new region.
3221 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 3222 ScheduleStart = I; 3223 ScheduleEnd = I->getNextNode(); 3224 assert(ScheduleEnd && "tried to vectorize a TerminatorInst?"); 3225 DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 3226 return true; 3227 } 3228 // Search up and down at the same time, because we don't know if the new 3229 // instruction is above or below the existing scheduling region. 3230 BasicBlock::reverse_iterator UpIter = 3231 ++ScheduleStart->getIterator().getReverse(); 3232 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 3233 BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); 3234 BasicBlock::iterator LowerEnd = BB->end(); 3235 for (;;) { 3236 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 3237 DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 3238 return false; 3239 } 3240 3241 if (UpIter != UpperEnd) { 3242 if (&*UpIter == I) { 3243 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 3244 ScheduleStart = I; 3245 DEBUG(dbgs() << "SLP: extend schedule region start to " << *I << "\n"); 3246 return true; 3247 } 3248 UpIter++; 3249 } 3250 if (DownIter != LowerEnd) { 3251 if (&*DownIter == I) { 3252 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 3253 nullptr); 3254 ScheduleEnd = I->getNextNode(); 3255 assert(ScheduleEnd && "tried to vectorize a TerminatorInst?"); 3256 DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); 3257 return true; 3258 } 3259 DownIter++; 3260 } 3261 assert((UpIter != UpperEnd || DownIter != LowerEnd) && 3262 "instruction not found in block"); 3263 } 3264 return true; 3265 } 3266 3267 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 3268 Instruction *ToI, 3269 ScheduleData *PrevLoadStore, 3270 ScheduleData *NextLoadStore) { 3271 ScheduleData *CurrentLoadStore = PrevLoadStore; 3272 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 3273 ScheduleData *SD = ScheduleDataMap[I]; 3274 if (!SD) { 3275 // Allocate a new ScheduleData for the instruction. 3276 if (ChunkPos >= ChunkSize) { 3277 ScheduleDataChunks.push_back( 3278 llvm::make_unique<ScheduleData[]>(ChunkSize)); 3279 ChunkPos = 0; 3280 } 3281 SD = &(ScheduleDataChunks.back()[ChunkPos++]); 3282 ScheduleDataMap[I] = SD; 3283 SD->Inst = I; 3284 } 3285 assert(!isInSchedulingRegion(SD) && 3286 "new ScheduleData already in scheduling region"); 3287 SD->init(SchedulingRegionID); 3288 3289 if (I->mayReadOrWriteMemory()) { 3290 // Update the linked list of memory accessing instructions. 
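    // A hedged example of the list maintained here (illustrative IR, not
    // taken from this file): for a region containing
    //
    //   %x = load i32, i32* %p
    //   %y = add i32 %x, 1
    //   store i32 %y, i32* %q
    //
    // FirstLoadStoreInRegion refers to the ScheduleData of %x, whose
    // NextLoadStore refers to the ScheduleData of the store; the add never
    // appears in the list because it does not read or write memory.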
3291       if (CurrentLoadStore) {
3292         CurrentLoadStore->NextLoadStore = SD;
3293       } else {
3294         FirstLoadStoreInRegion = SD;
3295       }
3296       CurrentLoadStore = SD;
3297     }
3298   }
3299   if (NextLoadStore) {
3300     if (CurrentLoadStore)
3301       CurrentLoadStore->NextLoadStore = NextLoadStore;
3302   } else {
3303     LastLoadStoreInRegion = CurrentLoadStore;
3304   }
3305 }
3306
3307 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
3308                                                      bool InsertInReadyList,
3309                                                      BoUpSLP *SLP) {
3310   assert(SD->isSchedulingEntity());
3311
3312   SmallVector<ScheduleData *, 10> WorkList;
3313   WorkList.push_back(SD);
3314
3315   while (!WorkList.empty()) {
3316     ScheduleData *SD = WorkList.back();
3317     WorkList.pop_back();
3318
3319     ScheduleData *BundleMember = SD;
3320     while (BundleMember) {
3321       assert(isInSchedulingRegion(BundleMember));
3322       if (!BundleMember->hasValidDependencies()) {
3323
3324         DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n");
3325         BundleMember->Dependencies = 0;
3326         BundleMember->resetUnscheduledDeps();
3327
3328         // Handle def-use chain dependencies.
3329         for (User *U : BundleMember->Inst->users()) {
3330           if (isa<Instruction>(U)) {
3331             ScheduleData *UseSD = getScheduleData(U);
3332             if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
3333               BundleMember->Dependencies++;
3334               ScheduleData *DestBundle = UseSD->FirstInBundle;
3335               if (!DestBundle->IsScheduled)
3336                 BundleMember->incrementUnscheduledDeps(1);
3337               if (!DestBundle->hasValidDependencies())
3338                 WorkList.push_back(DestBundle);
3339             }
3340           } else {
3341             // I'm not sure if this can ever happen. But we need to be safe.
3342             // This prevents the instruction/bundle from ever being scheduled,
3343             // which eventually disables vectorization.
3344             BundleMember->Dependencies++;
3345             BundleMember->incrementUnscheduledDeps(1);
3346           }
3347         }
3348
3349         // Handle the memory dependencies.
3350         ScheduleData *DepDest = BundleMember->NextLoadStore;
3351         if (DepDest) {
3352           Instruction *SrcInst = BundleMember->Inst;
3353           MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
3354           bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
3355           unsigned numAliased = 0;
3356           unsigned DistToSrc = 1;
3357
3358           while (DepDest) {
3359             assert(isInSchedulingRegion(DepDest));
3360
3361             // We have two limits to reduce the complexity:
3362             // 1) AliasedCheckLimit: It's a small limit to reduce calls to
3363             //    SLP->isAliased (which is the expensive part in this loop).
3364             // 2) MaxMemDepDistance: It's for very large blocks and it aborts
3365             //    the whole loop (even if the loop is fast, it's quadratic).
3366             //    It's important for the loop break condition (see below) to
3367             //    check this limit even between two read-only instructions.
3368             if (DistToSrc >= MaxMemDepDistance ||
3369                 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
3370                  (numAliased >= AliasedCheckLimit ||
3371                   SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
3372
3373               // We increment the counter only if the locations are aliased
3374               // (instead of counting all alias checks). This gives a better
3375               // balance between reduced runtime and accurate dependencies.
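              // Illustrative reading of the limits (an interpretation, not
              // source text): with AliasedCheckLimit == 10, once ten aliasing
              // hits have been counted for SrcInst, the short-circuit above
              // stops calling isAliased and conservatively records a
              // dependency for every remaining may-write access within
              // MaxMemDepDistance, trading scheduling freedom for compile
              // time.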
3376 numAliased++; 3377 3378 DepDest->MemoryDependencies.push_back(BundleMember); 3379 BundleMember->Dependencies++; 3380 ScheduleData *DestBundle = DepDest->FirstInBundle; 3381 if (!DestBundle->IsScheduled) { 3382 BundleMember->incrementUnscheduledDeps(1); 3383 } 3384 if (!DestBundle->hasValidDependencies()) { 3385 WorkList.push_back(DestBundle); 3386 } 3387 } 3388 DepDest = DepDest->NextLoadStore; 3389 3390 // Example, explaining the loop break condition: Let's assume our 3391 // starting instruction is i0 and MaxMemDepDistance = 3. 3392 // 3393 // +--------v--v--v 3394 // i0,i1,i2,i3,i4,i5,i6,i7,i8 3395 // +--------^--^--^ 3396 // 3397 // MaxMemDepDistance let us stop alias-checking at i3 and we add 3398 // dependencies from i0 to i3,i4,.. (even if they are not aliased). 3399 // Previously we already added dependencies from i3 to i6,i7,i8 3400 // (because of MaxMemDepDistance). As we added a dependency from 3401 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 3402 // and we can abort this loop at i6. 3403 if (DistToSrc >= 2 * MaxMemDepDistance) 3404 break; 3405 DistToSrc++; 3406 } 3407 } 3408 } 3409 BundleMember = BundleMember->NextInBundle; 3410 } 3411 if (InsertInReadyList && SD->isReady()) { 3412 ReadyInsts.push_back(SD); 3413 DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n"); 3414 } 3415 } 3416 } 3417 3418 void BoUpSLP::BlockScheduling::resetSchedule() { 3419 assert(ScheduleStart && 3420 "tried to reset schedule on block which has not been scheduled"); 3421 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3422 ScheduleData *SD = getScheduleData(I); 3423 assert(isInSchedulingRegion(SD)); 3424 SD->IsScheduled = false; 3425 SD->resetUnscheduledDeps(); 3426 } 3427 ReadyInsts.clear(); 3428 } 3429 3430 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 3431 3432 if (!BS->ScheduleStart) 3433 return; 3434 3435 DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 3436 3437 BS->resetSchedule(); 3438 3439 // For the real scheduling we use a more sophisticated ready-list: it is 3440 // sorted by the original instruction location. This lets the final schedule 3441 // be as close as possible to the original instruction order. 3442 struct ScheduleDataCompare { 3443 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const { 3444 return SD2->SchedulingPriority < SD1->SchedulingPriority; 3445 } 3446 }; 3447 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 3448 3449 // Ensure that all dependency data is updated and fill the ready-list with 3450 // initial instructions. 3451 int Idx = 0; 3452 int NumToSchedule = 0; 3453 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 3454 I = I->getNextNode()) { 3455 ScheduleData *SD = BS->getScheduleData(I); 3456 assert( 3457 SD->isPartOfBundle() == (getTreeEntry(SD->Inst) != nullptr) && 3458 "scheduler and vectorizer have different opinion on what is a bundle"); 3459 SD->FirstInBundle->SchedulingPriority = Idx++; 3460 if (SD->isSchedulingEntity()) { 3461 BS->calculateDependencies(SD, false, this); 3462 NumToSchedule++; 3463 } 3464 } 3465 BS->initialFillReadyList(ReadyInsts); 3466 3467 Instruction *LastScheduledInst = BS->ScheduleEnd; 3468 3469 // Do the "real" scheduling. 3470 while (!ReadyInsts.empty()) { 3471 ScheduleData *picked = *ReadyInsts.begin(); 3472 ReadyInsts.erase(ReadyInsts.begin()); 3473 3474 // Move the scheduled instruction(s) to their dedicated places, if not 3475 // there yet. 
3476     ScheduleData *BundleMember = picked;
3477     while (BundleMember) {
3478       Instruction *pickedInst = BundleMember->Inst;
3479       if (LastScheduledInst->getNextNode() != pickedInst) {
3480         BS->BB->getInstList().remove(pickedInst);
3481         BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
3482                                      pickedInst);
3483       }
3484       LastScheduledInst = pickedInst;
3485       BundleMember = BundleMember->NextInBundle;
3486     }
3487
3488     BS->schedule(picked, ReadyInsts);
3489     NumToSchedule--;
3490   }
3491   assert(NumToSchedule == 0 && "could not schedule all instructions");
3492
3493   // Avoid duplicate scheduling of the block.
3494   BS->ScheduleStart = nullptr;
3495 }
3496
3497 unsigned BoUpSLP::getVectorElementSize(Value *V) {
3498   // If V is a store, just return the width of the stored value without
3499   // traversing the expression tree. This is the common case.
3500   if (auto *Store = dyn_cast<StoreInst>(V))
3501     return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
3502
3503   // If V is not a store, we can traverse the expression tree to find loads
3504   // that feed it. The type of the loaded value may indicate a more suitable
3505   // width than V's type. We want to base the vector element size on the width
3506   // of memory operations where possible.
3507   SmallVector<Instruction *, 16> Worklist;
3508   SmallPtrSet<Instruction *, 16> Visited;
3509   if (auto *I = dyn_cast<Instruction>(V))
3510     Worklist.push_back(I);
3511
3512   // Traverse the expression tree in bottom-up order looking for loads. If we
3513   // encounter an instruction we don't yet handle, we give up.
3514   auto MaxWidth = 0u;
3515   auto FoundUnknownInst = false;
3516   while (!Worklist.empty() && !FoundUnknownInst) {
3517     auto *I = Worklist.pop_back_val();
3518     Visited.insert(I);
3519
3520     // We should only be looking at scalar instructions here. If the current
3521     // instruction has a vector type, give up.
3522     auto *Ty = I->getType();
3523     if (isa<VectorType>(Ty))
3524       FoundUnknownInst = true;
3525
3526     // If the current instruction is a load, update MaxWidth to reflect the
3527     // width of the loaded value.
3528     else if (isa<LoadInst>(I))
3529       MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty));
3530
3531     // Otherwise, we need to visit the operands of the instruction. We only
3532     // handle the interesting cases from buildTree here. If an operand is an
3533     // instruction we haven't yet visited, we add it to the worklist.
3534     else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
3535              isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) {
3536       for (Use &U : I->operands())
3537         if (auto *J = dyn_cast<Instruction>(U.get()))
3538           if (!Visited.count(J))
3539             Worklist.push_back(J);
3540     }
3541
3542     // If we don't yet handle the instruction, give up.
3543     else
3544       FoundUnknownInst = true;
3545   }
3546
3547   // If we didn't encounter a memory access in the expression tree, or if we
3548   // gave up for some reason, just return the width of V.
3549   if (!MaxWidth || FoundUnknownInst)
3550     return DL->getTypeSizeInBits(V->getType());
3551
3552   // Otherwise, return the maximum width we found.
3553   return MaxWidth;
3554 }
3555
3556 // Determine if a value V in a vectorizable expression Expr can be demoted to a
3557 // smaller type with a truncation. We collect the values that will be demoted
3558 // in ToDemote and additional roots that require investigating in Roots.
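// For example (an illustrative sketch, not taken from this file): in the
// chain
//
//   %a  = load i8, i8* %p
//   %az = zext i8 %a to i32
//   %b  = load i8, i8* %q
//   %bz = zext i8 %b to i32
//   %s  = add i32 %az, %bz
//   %t  = trunc i32 %s to i8
//
// the add and both zexts each have a single use and only feed the final
// trunc, so they can be demoted and the whole expression computed in i8.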
3559 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr, 3560 SmallVectorImpl<Value *> &ToDemote, 3561 SmallVectorImpl<Value *> &Roots) { 3562 3563 // We can always demote constants. 3564 if (isa<Constant>(V)) { 3565 ToDemote.push_back(V); 3566 return true; 3567 } 3568 3569 // If the value is not an instruction in the expression with only one use, it 3570 // cannot be demoted. 3571 auto *I = dyn_cast<Instruction>(V); 3572 if (!I || !I->hasOneUse() || !Expr.count(I)) 3573 return false; 3574 3575 switch (I->getOpcode()) { 3576 3577 // We can always demote truncations and extensions. Since truncations can 3578 // seed additional demotion, we save the truncated value. 3579 case Instruction::Trunc: 3580 Roots.push_back(I->getOperand(0)); 3581 case Instruction::ZExt: 3582 case Instruction::SExt: 3583 break; 3584 3585 // We can demote certain binary operations if we can demote both of their 3586 // operands. 3587 case Instruction::Add: 3588 case Instruction::Sub: 3589 case Instruction::Mul: 3590 case Instruction::And: 3591 case Instruction::Or: 3592 case Instruction::Xor: 3593 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) || 3594 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots)) 3595 return false; 3596 break; 3597 3598 // We can demote selects if we can demote their true and false values. 3599 case Instruction::Select: { 3600 SelectInst *SI = cast<SelectInst>(I); 3601 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) || 3602 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots)) 3603 return false; 3604 break; 3605 } 3606 3607 // We can demote phis if we can demote all their incoming operands. Note that 3608 // we don't need to worry about cycles since we ensure single use above. 3609 case Instruction::PHI: { 3610 PHINode *PN = cast<PHINode>(I); 3611 for (Value *IncValue : PN->incoming_values()) 3612 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots)) 3613 return false; 3614 break; 3615 } 3616 3617 // Otherwise, conservatively give up. 3618 default: 3619 return false; 3620 } 3621 3622 // Record the value that we can demote. 3623 ToDemote.push_back(V); 3624 return true; 3625 } 3626 3627 void BoUpSLP::computeMinimumValueSizes() { 3628 // If there are no external uses, the expression tree must be rooted by a 3629 // store. We can't demote in-memory values, so there is nothing to do here. 3630 if (ExternalUses.empty()) 3631 return; 3632 3633 // We only attempt to truncate integer expressions. 3634 auto &TreeRoot = VectorizableTree[0].Scalars; 3635 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 3636 if (!TreeRootIT) 3637 return; 3638 3639 // If the expression is not rooted by a store, these roots should have 3640 // external uses. We will rely on InstCombine to rewrite the expression in 3641 // the narrower type. However, InstCombine only rewrites single-use values. 3642 // This means that if a tree entry other than a root is used externally, it 3643 // must have multiple uses and InstCombine will not rewrite it. The code 3644 // below ensures that only the roots are used externally. 3645 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end()); 3646 for (auto &EU : ExternalUses) 3647 if (!Expr.erase(EU.Scalar)) 3648 return; 3649 if (!Expr.empty()) 3650 return; 3651 3652 // Collect the scalar values of the vectorizable expression. We will use this 3653 // context to determine which values can be demoted. If we see a truncation, 3654 // we mark it as seeding another demotion. 
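  // Illustrative note (an assumption about typical input): a truncation
  // inside the expression, e.g.
  //
  //   %t = trunc i32 %wide to i16
  //
  // records %wide as an additional root in Roots, and %wide's own operand
  // tree is revisited for further demotion once the initial roots have been
  // proven demotable.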
3655 for (auto &Entry : VectorizableTree) 3656 Expr.insert(Entry.Scalars.begin(), Entry.Scalars.end()); 3657 3658 // Ensure the roots of the vectorizable tree don't form a cycle. They must 3659 // have a single external user that is not in the vectorizable tree. 3660 for (auto *Root : TreeRoot) 3661 if (!Root->hasOneUse() || Expr.count(*Root->user_begin())) 3662 return; 3663 3664 // Conservatively determine if we can actually truncate the roots of the 3665 // expression. Collect the values that can be demoted in ToDemote and 3666 // additional roots that require investigating in Roots. 3667 SmallVector<Value *, 32> ToDemote; 3668 SmallVector<Value *, 4> Roots; 3669 for (auto *Root : TreeRoot) 3670 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots)) 3671 return; 3672 3673 // The maximum bit width required to represent all the values that can be 3674 // demoted without loss of precision. It would be safe to truncate the roots 3675 // of the expression to this width. 3676 auto MaxBitWidth = 8u; 3677 3678 // We first check if all the bits of the roots are demanded. If they're not, 3679 // we can truncate the roots to this narrower type. 3680 for (auto *Root : TreeRoot) { 3681 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 3682 MaxBitWidth = std::max<unsigned>( 3683 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth); 3684 } 3685 3686 // True if the roots can be zero-extended back to their original type, rather 3687 // than sign-extended. We know that if the leading bits are not demanded, we 3688 // can safely zero-extend. So we initialize IsKnownPositive to True. 3689 bool IsKnownPositive = true; 3690 3691 // If all the bits of the roots are demanded, we can try a little harder to 3692 // compute a narrower type. This can happen, for example, if the roots are 3693 // getelementptr indices. InstCombine promotes these indices to the pointer 3694 // width. Thus, all their bits are technically demanded even though the 3695 // address computation might be vectorized in a smaller type. 3696 // 3697 // We start by looking at each entry that can be demoted. We compute the 3698 // maximum bit width required to store the scalar by using ValueTracking to 3699 // compute the number of high-order bits we can truncate. 3700 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType())) { 3701 MaxBitWidth = 8u; 3702 3703 // Determine if the sign bit of all the roots is known to be zero. If not, 3704 // IsKnownPositive is set to False. 3705 IsKnownPositive = all_of(TreeRoot, [&](Value *R) { 3706 KnownBits Known = computeKnownBits(R, *DL); 3707 return Known.isNonNegative(); 3708 }); 3709 3710 // Determine the maximum number of bits required to store the scalar 3711 // values. 3712 for (auto *Scalar : ToDemote) { 3713 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, 0, DT); 3714 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType()); 3715 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth); 3716 } 3717 3718 // If we can't prove that the sign bit is zero, we must add one to the 3719 // maximum bit width to account for the unknown sign bit. This preserves 3720 // the existing sign bit so we can safely sign-extend the root back to the 3721 // original type. Otherwise, if we know the sign bit is zero, we will 3722 // zero-extend the root instead. 3723 // 3724 // FIXME: This is somewhat suboptimal, as there will be cases where adding 3725 // one to the maximum bit width will yield a larger-than-necessary 3726 // type. 
In general, we need to add an extra bit only if we can't
3727 //        prove that the upper bit of the original type is equal to the
3728 //        upper bit of the proposed smaller type. If these two bits are the
3729 //        same (either zero or one) we know that sign-extending from the
3730 //        smaller type will result in the same value. Here, since we can't
3731 //        yet prove this, we are just making the proposed smaller type
3732 //        larger to ensure correctness.
3733     if (!IsKnownPositive)
3734       ++MaxBitWidth;
3735   }
3736
3737   // Round MaxBitWidth up to the next power-of-two.
3738   if (!isPowerOf2_64(MaxBitWidth))
3739     MaxBitWidth = NextPowerOf2(MaxBitWidth);
3740
3741   // If the maximum bit width we compute is less than the width of the roots'
3742   // type, we can proceed with the narrowing. Otherwise, do nothing.
3743   if (MaxBitWidth >= TreeRootIT->getBitWidth())
3744     return;
3745
3746   // If we can truncate the root, we must collect additional values that might
3747   // be demoted as a result. That is, those seeded by truncations we will
3748   // modify.
3749   while (!Roots.empty())
3750     collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
3751
3752   // Finally, map the values we can demote to the maximum bit width we computed.
3753   for (auto *Scalar : ToDemote)
3754     MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
3755 }
3756
3757 namespace {
3758 /// The SLPVectorizer Pass.
3759 struct SLPVectorizer : public FunctionPass {
3760   SLPVectorizerPass Impl;
3761
3762   /// Pass identification, replacement for typeid
3763   static char ID;
3764
3765   explicit SLPVectorizer() : FunctionPass(ID) {
3766     initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
3767   }
3768
3769
3770   bool doInitialization(Module &M) override {
3771     return false;
3772   }
3773
3774   bool runOnFunction(Function &F) override {
3775     if (skipFunction(F))
3776       return false;
3777
3778     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
3779     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
3780     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
3781     auto *TLI = TLIP ?
&TLIP->getTLI() : nullptr; 3782 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 3783 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 3784 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 3785 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 3786 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 3787 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 3788 3789 return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 3790 } 3791 3792 void getAnalysisUsage(AnalysisUsage &AU) const override { 3793 FunctionPass::getAnalysisUsage(AU); 3794 AU.addRequired<AssumptionCacheTracker>(); 3795 AU.addRequired<ScalarEvolutionWrapperPass>(); 3796 AU.addRequired<AAResultsWrapperPass>(); 3797 AU.addRequired<TargetTransformInfoWrapperPass>(); 3798 AU.addRequired<LoopInfoWrapperPass>(); 3799 AU.addRequired<DominatorTreeWrapperPass>(); 3800 AU.addRequired<DemandedBitsWrapperPass>(); 3801 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 3802 AU.addPreserved<LoopInfoWrapperPass>(); 3803 AU.addPreserved<DominatorTreeWrapperPass>(); 3804 AU.addPreserved<AAResultsWrapperPass>(); 3805 AU.addPreserved<GlobalsAAWrapperPass>(); 3806 AU.setPreservesCFG(); 3807 } 3808 }; 3809 } // end anonymous namespace 3810 3811 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) { 3812 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); 3813 auto *TTI = &AM.getResult<TargetIRAnalysis>(F); 3814 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); 3815 auto *AA = &AM.getResult<AAManager>(F); 3816 auto *LI = &AM.getResult<LoopAnalysis>(F); 3817 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 3818 auto *AC = &AM.getResult<AssumptionAnalysis>(F); 3819 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F); 3820 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 3821 3822 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 3823 if (!Changed) 3824 return PreservedAnalyses::all(); 3825 3826 PreservedAnalyses PA; 3827 PA.preserveSet<CFGAnalyses>(); 3828 PA.preserve<AAManager>(); 3829 PA.preserve<GlobalsAA>(); 3830 return PA; 3831 } 3832 3833 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, 3834 TargetTransformInfo *TTI_, 3835 TargetLibraryInfo *TLI_, AliasAnalysis *AA_, 3836 LoopInfo *LI_, DominatorTree *DT_, 3837 AssumptionCache *AC_, DemandedBits *DB_, 3838 OptimizationRemarkEmitter *ORE_) { 3839 SE = SE_; 3840 TTI = TTI_; 3841 TLI = TLI_; 3842 AA = AA_; 3843 LI = LI_; 3844 DT = DT_; 3845 AC = AC_; 3846 DB = DB_; 3847 DL = &F.getParent()->getDataLayout(); 3848 3849 Stores.clear(); 3850 GEPs.clear(); 3851 bool Changed = false; 3852 3853 // If the target claims to have no vector registers don't attempt 3854 // vectorization. 3855 if (!TTI->getNumberOfRegisters(true)) 3856 return false; 3857 3858 // Don't vectorize when the attribute NoImplicitFloat is used. 3859 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 3860 return false; 3861 3862 DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 3863 3864 // Use the bottom up slp vectorizer to construct chains that start with 3865 // store instructions. 3866 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_); 3867 3868 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 3869 // delete instructions. 3870 3871 // Scan the blocks in the function in post order. 
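  // For a simple CFG (an illustrative example, not from the source):
  //
  //   entry -> loop -> exit
  //
  // post_order visits exit first, then loop, then entry, so blocks at the
  // bottom of the function are seeded and vectorized before the blocks that
  // feed them.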
3872 for (auto BB : post_order(&F.getEntryBlock())) { 3873 collectSeedInstructions(BB); 3874 3875 // Vectorize trees that end at stores. 3876 if (!Stores.empty()) { 3877 DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() 3878 << " underlying objects.\n"); 3879 Changed |= vectorizeStoreChains(R); 3880 } 3881 3882 // Vectorize trees that end at reductions. 3883 Changed |= vectorizeChainsInBlock(BB, R); 3884 3885 // Vectorize the index computations of getelementptr instructions. This 3886 // is primarily intended to catch gather-like idioms ending at 3887 // non-consecutive loads. 3888 if (!GEPs.empty()) { 3889 DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() 3890 << " underlying objects.\n"); 3891 Changed |= vectorizeGEPIndices(BB, R); 3892 } 3893 } 3894 3895 if (Changed) { 3896 R.optimizeGatherSequence(); 3897 DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 3898 DEBUG(verifyFunction(F)); 3899 } 3900 return Changed; 3901 } 3902 3903 /// \brief Check that the Values in the slice in VL array are still existent in 3904 /// the WeakTrackingVH array. 3905 /// Vectorization of part of the VL array may cause later values in the VL array 3906 /// to become invalid. We track when this has happened in the WeakTrackingVH 3907 /// array. 3908 static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, 3909 ArrayRef<WeakTrackingVH> VH, unsigned SliceBegin, 3910 unsigned SliceSize) { 3911 VL = VL.slice(SliceBegin, SliceSize); 3912 VH = VH.slice(SliceBegin, SliceSize); 3913 return !std::equal(VL.begin(), VL.end(), VH.begin()); 3914 } 3915 3916 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, 3917 unsigned VecRegSize) { 3918 unsigned ChainLen = Chain.size(); 3919 DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen 3920 << "\n"); 3921 unsigned Sz = R.getVectorElementSize(Chain[0]); 3922 unsigned VF = VecRegSize / Sz; 3923 3924 if (!isPowerOf2_32(Sz) || VF < 2) 3925 return false; 3926 3927 // Keep track of values that were deleted by vectorizing in the loop below. 3928 SmallVector<WeakTrackingVH, 8> TrackValues(Chain.begin(), Chain.end()); 3929 3930 bool Changed = false; 3931 // Look for profitable vectorizable trees at all offsets, starting at zero. 3932 for (unsigned i = 0, e = ChainLen; i < e; ++i) { 3933 if (i + VF > e) 3934 break; 3935 3936 // Check that a previous iteration of this loop did not delete the Value. 3937 if (hasValueBeenRAUWed(Chain, TrackValues, i, VF)) 3938 continue; 3939 3940 DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i 3941 << "\n"); 3942 ArrayRef<Value *> Operands = Chain.slice(i, VF); 3943 3944 R.buildTree(Operands); 3945 if (R.isTreeTinyAndNotFullyVectorizable()) 3946 continue; 3947 3948 R.computeMinimumValueSizes(); 3949 3950 int Cost = R.getTreeCost(); 3951 3952 DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n"); 3953 if (Cost < -SLPCostThreshold) { 3954 DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n"); 3955 using namespace ore; 3956 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized", 3957 cast<StoreInst>(Chain[i])) 3958 << "Stores SLP vectorized with cost " << NV("Cost", Cost) 3959 << " and with tree size " 3960 << NV("TreeSize", R.getTreeSize())); 3961 3962 R.vectorizeTree(); 3963 3964 // Move to the next bundle. 
3965       i += VF - 1;
3966       Changed = true;
3967     }
3968   }
3969
3970   return Changed;
3971 }
3972
3973 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
3974                                         BoUpSLP &R) {
3975   SetVector<StoreInst *> Heads, Tails;
3976   SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
3977
3978   // We may run into multiple chains that merge into a single chain. We mark the
3979   // stores that we vectorized so that we don't visit the same store twice.
3980   BoUpSLP::ValueSet VectorizedStores;
3981   bool Changed = false;
3982
3983   // Do a quadratic search on all of the given stores and find
3984   // all of the pairs of stores that follow each other.
3985   SmallVector<unsigned, 16> IndexQueue;
3986   for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
3987     IndexQueue.clear();
3988     // If a store has multiple consecutive store candidates, search Stores
3989     // array according to the sequence: from i+1 to e, then from i-1 to 0.
3990     // This is because pairing with the immediately succeeding or preceding
3991     // candidate usually creates the best chance for SLP vectorization.
3992     unsigned j = 0;
3993     for (j = i + 1; j < e; ++j)
3994       IndexQueue.push_back(j);
3995     for (j = i; j > 0; --j)
3996       IndexQueue.push_back(j - 1);
3997
3998     for (auto &k : IndexQueue) {
3999       if (isConsecutiveAccess(Stores[i], Stores[k], *DL, *SE)) {
4000         Tails.insert(Stores[k]);
4001         Heads.insert(Stores[i]);
4002         ConsecutiveChain[Stores[i]] = Stores[k];
4003         break;
4004       }
4005     }
4006   }
4007
4008   // For stores that start but don't end a link in the chain:
4009   for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
4010        it != e; ++it) {
4011     if (Tails.count(*it))
4012       continue;
4013
4014     // We found a store instr that starts a chain. Now follow the chain and try
4015     // to vectorize it.
4016     BoUpSLP::ValueList Operands;
4017     StoreInst *I = *it;
4018     // Collect the chain into a list.
4019     while (Tails.count(I) || Heads.count(I)) {
4020       if (VectorizedStores.count(I))
4021         break;
4022       Operands.push_back(I);
4023       // Move to the next value in the chain.
4024       I = ConsecutiveChain[I];
4025     }
4026
4027     // FIXME: Is division-by-2 the correct step? Should we assert that the
4028     // register size is a power-of-2?
4029     for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize();
4030          Size /= 2) {
4031       if (vectorizeStoreChain(Operands, R, Size)) {
4032         // Mark the vectorized stores so that we don't vectorize them again.
4033         VectorizedStores.insert(Operands.begin(), Operands.end());
4034         Changed = true;
4035         break;
4036       }
4037     }
4038   }
4039
4040   return Changed;
4041 }
4042
4043 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
4044
4045   // Initialize the collections. We will make a single pass over the block.
4046   Stores.clear();
4047   GEPs.clear();
4048
4049   // Visit the store and getelementptr instructions in BB and organize them in
4050   // Stores and GEPs according to the underlying objects of their pointer
4051   // operands.
4052   for (Instruction &I : *BB) {
4053
4054     // Ignore store instructions that are volatile or have a pointer operand
4055     // that doesn't point to a scalar type.
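    // A hedged example of the grouping (illustrative IR): the two stores
    //
    //   %p0 = getelementptr i32, i32* %A, i64 0
    //   %p1 = getelementptr i32, i32* %A, i64 1
    //   store i32 %v0, i32* %p0
    //   store i32 %v1, i32* %p1
    //
    // share the underlying object %A, so both land in the same Stores bucket
    // and can later be recognized as a consecutive chain by vectorizeStores.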
4056     if (auto *SI = dyn_cast<StoreInst>(&I)) {
4057       if (!SI->isSimple())
4058         continue;
4059       if (!isValidElementType(SI->getValueOperand()->getType()))
4060         continue;
4061       Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
4062     }
4063
4064     // Ignore getelementptr instructions that have more than one index, a
4065     // constant index, or a pointer operand that doesn't point to a scalar
4066     // type.
4067     else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
4068       auto Idx = GEP->idx_begin()->get();
4069       if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
4070         continue;
4071       if (!isValidElementType(Idx->getType()))
4072         continue;
4073       if (GEP->getType()->isVectorTy())
4074         continue;
4075       GEPs[GetUnderlyingObject(GEP->getPointerOperand(), *DL)].push_back(GEP);
4076     }
4077   }
4078 }
4079
4080 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
4081   if (!A || !B)
4082     return false;
4083   Value *VL[] = { A, B };
4084   return tryToVectorizeList(VL, R, None, true);
4085 }
4086
4087 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
4088                                            ArrayRef<Value *> BuildVector,
4089                                            bool AllowReorder) {
4090   if (VL.size() < 2)
4091     return false;
4092
4093   DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " << VL.size()
4094                << ".\n");
4095
4096   // Check that all of the parts are scalar instructions of the same type.
4097   Instruction *I0 = dyn_cast<Instruction>(VL[0]);
4098   if (!I0)
4099     return false;
4100
4101   unsigned Opcode0 = I0->getOpcode();
4102
4103   unsigned Sz = R.getVectorElementSize(I0);
4104   unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz);
4105   unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
4106   if (MaxVF < 2)
4107     return false;
4108
4109   for (Value *V : VL) {
4110     Type *Ty = V->getType();
4111     if (!isValidElementType(Ty))
4112       return false;
4113     Instruction *Inst = dyn_cast<Instruction>(V);
4114     if (!Inst || Inst->getOpcode() != Opcode0)
4115       return false;
4116   }
4117
4118   bool Changed = false;
4119
4120   // Keep track of values that were deleted by vectorizing in the loop below.
4121   SmallVector<WeakTrackingVH, 8> TrackValues(VL.begin(), VL.end());
4122
4123   unsigned NextInst = 0, MaxInst = VL.size();
4124   for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF;
4125        VF /= 2) {
4126     // No actual vectorization should happen if the number of parts is the
4127     // same as the provided vectorization factor (i.e. the scalar type is
4128     // used for vector code during codegen).
4129     auto *VecTy = VectorType::get(VL[0]->getType(), VF);
4130     if (TTI->getNumberOfParts(VecTy) == VF)
4131       continue;
4132     for (unsigned I = NextInst; I < MaxInst; ++I) {
4133       unsigned OpsWidth = 0;
4134
4135       if (I + VF > MaxInst)
4136         OpsWidth = MaxInst - I;
4137       else
4138         OpsWidth = VF;
4139
4140       if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
4141         break;
4142
4143       // Check that a previous iteration of this loop did not delete the Value.
4144       if (hasValueBeenRAUWed(VL, TrackValues, I, OpsWidth))
4145         continue;
4146
4147       DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
4148                    << "\n");
4149       ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
4150
4151       ArrayRef<Value *> BuildVectorSlice;
4152       if (!BuildVector.empty())
4153         BuildVectorSlice = BuildVector.slice(I, OpsWidth);
4154
4155       R.buildTree(Ops, BuildVectorSlice);
4156       // TODO: check if we can allow reordering for more cases.
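      // Illustrative sketch of one reordering trigger (an assumption, not
      // stated in this file): if the two scalars are fed by loads at
      // descending addresses, e.g.
      //
      //   %x = load i32, i32* %p1
      //   %y = load i32, i32* %p0      ; %p0 and %p1 are consecutive
      //
      // shouldReorder() can report that the reversed list {Ops[1], Ops[0]}
      // forms a consecutive access pattern, and the tree is rebuilt below
      // with the two operands swapped.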
4157       if (AllowReorder && R.shouldReorder()) {
4158         // Conceptually, there is nothing actually preventing us from trying to
4159         // reorder a larger list. In fact, we do exactly this when vectorizing
4160         // reductions. However, at this point, we only expect to get here when
4161         // there are exactly two operations.
4162         assert(Ops.size() == 2);
4163         assert(BuildVectorSlice.empty());
4164         Value *ReorderedOps[] = {Ops[1], Ops[0]};
4165         R.buildTree(ReorderedOps, None);
4166       }
4167       if (R.isTreeTinyAndNotFullyVectorizable())
4168         continue;
4169
4170       R.computeMinimumValueSizes();
4171       int Cost = R.getTreeCost();
4172
4173       if (Cost < -SLPCostThreshold) {
4174         DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
4175         R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
4176                                             cast<Instruction>(Ops[0]))
4177                          << "SLP vectorized with cost " << ore::NV("Cost", Cost)
4178                          << " and with tree size "
4179                          << ore::NV("TreeSize", R.getTreeSize()));
4180
4181         Value *VectorizedRoot = R.vectorizeTree();
4182
4183         // Reconstruct the build vector by extracting the vectorized root. This
4184         // way we handle the case where some elements of the vector are
4185         // undefined.
4186         // (return (insertelement <4 x i32> (insertelement undef (opd0) 0) (opd1) 2))
4187         if (!BuildVectorSlice.empty()) {
4188           // The insert point is the last build vector instruction. The
4189           // vectorized root will precede it. This guarantees that we get an
4190           // instruction. The vectorized tree could have been constant folded.
4191           Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back());
4192           unsigned VecIdx = 0;
4193           for (auto &V : BuildVectorSlice) {
4194             IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
4195                                         ++BasicBlock::iterator(InsertAfter));
4196             Instruction *I = cast<Instruction>(V);
4197             assert(isa<InsertElementInst>(I) || isa<InsertValueInst>(I));
4198             Instruction *Extract =
4199                 cast<Instruction>(Builder.CreateExtractElement(
4200                     VectorizedRoot, Builder.getInt32(VecIdx++)));
4201             I->setOperand(1, Extract);
4202             I->removeFromParent();
4203             I->insertAfter(Extract);
4204             InsertAfter = I;
4205           }
4206         }
4207         // Move to the next bundle.
4208         I += VF - 1;
4209         NextInst = I + 1;
4210         Changed = true;
4211       }
4212     }
4213   }
4214
4215   return Changed;
4216 }
4217
4218 bool SLPVectorizerPass::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
4219   if (!V)
4220     return false;
4221
4222   Value *P = V->getParent();
4223
4224   // Vectorize in current basic block only.
4225   auto *Op0 = dyn_cast<Instruction>(V->getOperand(0));
4226   auto *Op1 = dyn_cast<Instruction>(V->getOperand(1));
4227   if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
4228     return false;
4229
4230   // Try to vectorize V.
4231   if (tryToVectorizePair(Op0, Op1, R))
4232     return true;
4233
4234   auto *A = dyn_cast<BinaryOperator>(Op0);
4235   auto *B = dyn_cast<BinaryOperator>(Op1);
4236   // Try to skip B.
4237   if (B && B->hasOneUse()) {
4238     auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
4239     auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
4240     if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R))
4241       return true;
4242     if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R))
4243       return true;
4244   }
4245
4246   // Try to skip A.
4247 if (A && A->hasOneUse()) { 4248 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 4249 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 4250 if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R)) 4251 return true; 4252 if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R)) 4253 return true; 4254 } 4255 return false; 4256 } 4257 4258 /// \brief Generate a shuffle mask to be used in a reduction tree. 4259 /// 4260 /// \param VecLen The length of the vector to be reduced. 4261 /// \param NumEltsToRdx The number of elements that should be reduced in the 4262 /// vector. 4263 /// \param IsPairwise Whether the reduction is a pairwise or splitting 4264 /// reduction. A pairwise reduction will generate a mask of 4265 /// <0,2,...> or <1,3,..> while a splitting reduction will generate 4266 /// <2,3, undef,undef> for a vector of 4 and NumElts = 2. 4267 /// \param IsLeft True will generate a mask of even elements, odd otherwise. 4268 static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx, 4269 bool IsPairwise, bool IsLeft, 4270 IRBuilder<> &Builder) { 4271 assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask"); 4272 4273 SmallVector<Constant *, 32> ShuffleMask( 4274 VecLen, UndefValue::get(Builder.getInt32Ty())); 4275 4276 if (IsPairwise) 4277 // Build a mask of 0, 2, ... (left) or 1, 3, ... (right). 4278 for (unsigned i = 0; i != NumEltsToRdx; ++i) 4279 ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft); 4280 else 4281 // Move the upper half of the vector to the lower half. 4282 for (unsigned i = 0; i != NumEltsToRdx; ++i) 4283 ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i); 4284 4285 return ConstantVector::get(ShuffleMask); 4286 } 4287 4288 namespace { 4289 /// Model horizontal reductions. 4290 /// 4291 /// A horizontal reduction is a tree of reduction operations (currently add and 4292 /// fadd) that has operations that can be put into a vector as its leaf. 4293 /// For example, this tree: 4294 /// 4295 /// mul mul mul mul 4296 /// \ / \ / 4297 /// + + 4298 /// \ / 4299 /// + 4300 /// This tree has "mul" as its reduced values and "+" as its reduction 4301 /// operations. A reduction might be feeding into a store or a binary operation 4302 /// feeding a phi. 4303 /// ... 4304 /// \ / 4305 /// + 4306 /// | 4307 /// phi += 4308 /// 4309 /// Or: 4310 /// ... 4311 /// \ / 4312 /// + 4313 /// | 4314 /// *p = 4315 /// 4316 class HorizontalReduction { 4317 SmallVector<Value *, 16> ReductionOps; 4318 SmallVector<Value *, 32> ReducedVals; 4319 // Use map vector to make stable output. 4320 MapVector<Instruction *, Value *> ExtraArgs; 4321 4322 BinaryOperator *ReductionRoot = nullptr; 4323 4324 /// The opcode of the reduction. 4325 Instruction::BinaryOps ReductionOpcode = Instruction::BinaryOpsEnd; 4326 /// The opcode of the values we perform a reduction on. 4327 unsigned ReducedValueOpcode = 0; 4328 /// Should we model this reduction as a pairwise reduction tree or a tree that 4329 /// splits the vector in halves and adds those halves. 4330 bool IsPairwiseReduction = false; 4331 4332 /// Checks if the ParentStackElem.first should be marked as a reduction 4333 /// operation with an extra argument or as extra argument itself. 4334 void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem, 4335 Value *ExtraArg) { 4336 if (ExtraArgs.count(ParentStackElem.first)) { 4337 ExtraArgs[ParentStackElem.first] = nullptr; 4338 // We ran into something like: 4339 // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg. 
4340       // The whole ParentStackElem.first should be considered as an extra value
4341       // in this case.
4342       // Do not perform analysis of remaining operands of ParentStackElem.first
4343       // instruction, this whole instruction is an extra argument.
4344       ParentStackElem.second = ParentStackElem.first->getNumOperands();
4345     } else {
4346       // We ran into something like:
4347       // ParentStackElem.first += ... + ExtraArg + ...
4348       ExtraArgs[ParentStackElem.first] = ExtraArg;
4349     }
4350   }
4351
4352 public:
4353   HorizontalReduction() = default;
4354
4355   /// \brief Try to find a reduction tree.
4356   bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B) {
4357     assert((!Phi || is_contained(Phi->operands(), B)) &&
4358            "The phi needs to use the binary operator");
4359
4360     // We could have an initial reduction that is not an add.
4361     //  r *= v1 + v2 + v3 + v4
4362     // In such a case start looking for a tree rooted in the first '+'.
4363     if (Phi) {
4364       if (B->getOperand(0) == Phi) {
4365         Phi = nullptr;
4366         B = dyn_cast<BinaryOperator>(B->getOperand(1));
4367       } else if (B->getOperand(1) == Phi) {
4368         Phi = nullptr;
4369         B = dyn_cast<BinaryOperator>(B->getOperand(0));
4370       }
4371     }
4372
4373     if (!B)
4374       return false;
4375
4376     Type *Ty = B->getType();
4377     if (!isValidElementType(Ty))
4378       return false;
4379
4380     ReductionOpcode = B->getOpcode();
4381     ReducedValueOpcode = 0;
4382     ReductionRoot = B;
4383
4384     // We currently only support adds.
4385     if ((ReductionOpcode != Instruction::Add &&
4386          ReductionOpcode != Instruction::FAdd) ||
4387         !B->isAssociative())
4388       return false;
4389
4390     // Post-order traverse the reduction tree starting at B. We only handle true
4391     // trees containing only binary operators or selects.
4392     SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
4393     Stack.push_back(std::make_pair(B, 0));
4394     while (!Stack.empty()) {
4395       Instruction *TreeN = Stack.back().first;
4396       unsigned EdgeToVisit = Stack.back().second++;
4397       bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;
4398
4399       // Postorder visit.
4400       if (EdgeToVisit == 2 || IsReducedValue) {
4401         if (IsReducedValue)
4402           ReducedVals.push_back(TreeN);
4403         else {
4404           auto I = ExtraArgs.find(TreeN);
4405           if (I != ExtraArgs.end() && !I->second) {
4406             // Check if TreeN is an extra argument of its parent operation.
4407             if (Stack.size() <= 1) {
4408               // TreeN can't be an extra argument as it is a root reduction
4409               // operation.
4410               return false;
4411             }
4412             // Yes, TreeN is an extra argument, do not add it to a list of
4413             // reduction operations.
4414             // Stack[Stack.size() - 2] always points to the parent operation.
4415             markExtraArg(Stack[Stack.size() - 2], TreeN);
4416             ExtraArgs.erase(TreeN);
4417           } else
4418             ReductionOps.push_back(TreeN);
4419         }
4420         // Retract.
4421         Stack.pop_back();
4422         continue;
4423       }
4424
4425       // Visit left or right.
4426       Value *NextV = TreeN->getOperand(EdgeToVisit);
4427       if (NextV != Phi) {
4428         auto *I = dyn_cast<Instruction>(NextV);
4429         // Continue analysis if the next operand is a reduction operation or
4430         // (possibly) a reduced value. If the reduced value opcode is not set,
4431         // the first operation met that is not the reduction operation
4432         // determines the reduced value class.
4433         if (I && (!ReducedValueOpcode || I->getOpcode() == ReducedValueOpcode ||
4434                   I->getOpcode() == ReductionOpcode)) {
4435           // Only handle trees in the current basic block.
4436           if (I->getParent() != B->getParent()) {
4437             // I is an extra argument for TreeN (its parent operation).
4438             markExtraArg(Stack.back(), I);
4439             continue;
4440           }
4441
4442           // Each tree node needs to have one user except for the ultimate
4443           // reduction.
4444           if (!I->hasOneUse() && I != B) {
4445             // I is an extra argument for TreeN (its parent operation).
4446             markExtraArg(Stack.back(), I);
4447             continue;
4448           }
4449
4450           if (I->getOpcode() == ReductionOpcode) {
4451             // We need to be able to reassociate the reduction operations.
4452             if (!I->isAssociative()) {
4453               // I is an extra argument for TreeN (its parent operation).
4454               markExtraArg(Stack.back(), I);
4455               continue;
4456             }
4457           } else if (ReducedValueOpcode &&
4458                      ReducedValueOpcode != I->getOpcode()) {
4459             // Make sure that the opcodes of the operations that we are going to
4460             // reduce match.
4461             // I is an extra argument for TreeN (its parent operation).
4462             markExtraArg(Stack.back(), I);
4463             continue;
4464           } else if (!ReducedValueOpcode)
4465             ReducedValueOpcode = I->getOpcode();
4466
4467           Stack.push_back(std::make_pair(I, 0));
4468           continue;
4469         }
4470       }
4471       // NextV is an extra argument for TreeN (its parent operation).
4472       markExtraArg(Stack.back(), NextV);
4473     }
4474     return true;
4475   }
4476
4477   /// \brief Attempt to vectorize the tree found by
4478   /// matchAssociativeReduction.
4479   bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
4480     if (ReducedVals.empty())
4481       return false;
4482
4483     // If there is a sufficient number of reduction values, reduce
4484     // to a nearby power-of-2. Can safely generate oversized
4485     // vectors and rely on the backend to split them to legal sizes.
4486     unsigned NumReducedVals = ReducedVals.size();
4487     if (NumReducedVals < 4)
4488       return false;
4489
4490     unsigned ReduxWidth = PowerOf2Floor(NumReducedVals);
4491
4492     Value *VectorizedTree = nullptr;
4493     IRBuilder<> Builder(ReductionRoot);
4494     FastMathFlags Unsafe;
4495     Unsafe.setUnsafeAlgebra();
4496     Builder.setFastMathFlags(Unsafe);
4497     unsigned i = 0;
4498
4499     BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
4500     // The same extra argument may be used several times, so log each attempt
4501     // to use it.
4502     for (auto &Pair : ExtraArgs)
4503       ExternallyUsedValues[Pair.second].push_back(Pair.first);
4504     while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) {
4505       auto VL = makeArrayRef(&ReducedVals[i], ReduxWidth);
4506       V.buildTree(VL, ExternallyUsedValues, ReductionOps);
4507       if (V.shouldReorder()) {
4508         SmallVector<Value *, 8> Reversed(VL.rbegin(), VL.rend());
4509         V.buildTree(Reversed, ExternallyUsedValues, ReductionOps);
4510       }
4511       if (V.isTreeTinyAndNotFullyVectorizable())
4512         break;
4513
4514       V.computeMinimumValueSizes();
4515
4516       // Estimate cost.
4517       int Cost =
4518           V.getTreeCost() + getReductionCost(TTI, ReducedVals[i], ReduxWidth);
4519       if (Cost >= -SLPCostThreshold)
4520         break;
4521
4522       DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
4523                    << ". (HorRdx)\n");
4524       auto *I0 = cast<Instruction>(VL[0]);
4525       V.getORE()->emit(
4526           OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction", I0)
4527           << "Vectorized horizontal reduction with cost "
4528           << ore::NV("Cost", Cost) << " and with tree size "
4529           << ore::NV("TreeSize", V.getTreeSize()));
4530
4531       // Vectorize a tree.
4532       DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
4533       Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);
4534
4535       // Emit a reduction.
4536 Value *ReducedSubTree = 4537 emitReduction(VectorizedRoot, Builder, ReduxWidth, ReductionOps, TTI); 4538 if (VectorizedTree) { 4539 Builder.SetCurrentDebugLocation(Loc); 4540 VectorizedTree = Builder.CreateBinOp(ReductionOpcode, VectorizedTree, 4541 ReducedSubTree, "bin.rdx"); 4542 propagateIRFlags(VectorizedTree, ReductionOps); 4543 } else 4544 VectorizedTree = ReducedSubTree; 4545 i += ReduxWidth; 4546 ReduxWidth = PowerOf2Floor(NumReducedVals - i); 4547 } 4548 4549 if (VectorizedTree) { 4550 // Finish the reduction. 4551 for (; i < NumReducedVals; ++i) { 4552 auto *I = cast<Instruction>(ReducedVals[i]); 4553 Builder.SetCurrentDebugLocation(I->getDebugLoc()); 4554 VectorizedTree = 4555 Builder.CreateBinOp(ReductionOpcode, VectorizedTree, I); 4556 propagateIRFlags(VectorizedTree, ReductionOps); 4557 } 4558 for (auto &Pair : ExternallyUsedValues) { 4559 assert(!Pair.second.empty() && 4560 "At least one DebugLoc must be inserted"); 4561 // Add each externally used value to the final reduction. 4562 for (auto *I : Pair.second) { 4563 Builder.SetCurrentDebugLocation(I->getDebugLoc()); 4564 VectorizedTree = Builder.CreateBinOp(ReductionOpcode, VectorizedTree, 4565 Pair.first, "bin.extra"); 4566 propagateIRFlags(VectorizedTree, I); 4567 } 4568 } 4569 // Update users. 4570 ReductionRoot->replaceAllUsesWith(VectorizedTree); 4571 } 4572 return VectorizedTree != nullptr; 4573 } 4574 4575 unsigned numReductionValues() const { 4576 return ReducedVals.size(); 4577 } 4578 4579 private: 4580 /// \brief Calculate the cost of a reduction. 4581 int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal, 4582 unsigned ReduxWidth) { 4583 Type *ScalarTy = FirstReducedVal->getType(); 4584 Type *VecTy = VectorType::get(ScalarTy, ReduxWidth); 4585 4586 int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true); 4587 int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false); 4588 4589 IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost; 4590 int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost; 4591 4592 int ScalarReduxCost = 4593 (ReduxWidth - 1) * 4594 TTI->getArithmeticInstrCost(ReductionOpcode, ScalarTy); 4595 4596 DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost 4597 << " for reduction that starts with " << *FirstReducedVal 4598 << " (It is a " 4599 << (IsPairwiseReduction ? "pairwise" : "splitting") 4600 << " reduction)\n"); 4601 4602 return VecReduxCost - ScalarReduxCost; 4603 } 4604 4605 /// \brief Emit a horizontal reduction of the vectorized value. 
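  /// For example (an illustrative walk-through): a pairwise reduction of a
  /// <4 x float> value %v = <a, b, c, d> proceeds as
  ///
  ///   step 1: shuffle %v by <0, 2, undef, undef> -> <a, c, _, _>
  ///           shuffle %v by <1, 3, undef, undef> -> <b, d, _, _>
  ///           fadd the two shuffles              -> <a+b, c+d, _, _>
  ///   step 2: shuffle by <0, undef, ...> and <1, undef, ...>, then fadd
  ///                                              -> <(a+b)+(c+d), _, _, _>
  ///
  /// and the scalar result is read out of lane 0.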
4606 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 4607 unsigned ReduxWidth, ArrayRef<Value *> RedOps, 4608 const TargetTransformInfo *TTI) { 4609 assert(VectorizedValue && "Need to have a vectorized tree node"); 4610 assert(isPowerOf2_32(ReduxWidth) && 4611 "We only handle power-of-two reductions for now"); 4612 4613 if (!IsPairwiseReduction) 4614 return createSimpleTargetReduction( 4615 Builder, TTI, ReductionOpcode, VectorizedValue, 4616 TargetTransformInfo::ReductionFlags(), RedOps); 4617 4618 Value *TmpVec = VectorizedValue; 4619 for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) { 4620 Value *LeftMask = 4621 createRdxShuffleMask(ReduxWidth, i, true, true, Builder); 4622 Value *RightMask = 4623 createRdxShuffleMask(ReduxWidth, i, true, false, Builder); 4624 4625 Value *LeftShuf = Builder.CreateShuffleVector( 4626 TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l"); 4627 Value *RightShuf = Builder.CreateShuffleVector( 4628 TmpVec, UndefValue::get(TmpVec->getType()), (RightMask), 4629 "rdx.shuf.r"); 4630 TmpVec = 4631 Builder.CreateBinOp(ReductionOpcode, LeftShuf, RightShuf, "bin.rdx"); 4632 propagateIRFlags(TmpVec, RedOps); 4633 } 4634 4635 // The result is in the first element of the vector. 4636 return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 4637 } 4638 }; 4639 } // end anonymous namespace 4640 4641 /// \brief Recognize construction of vectors like 4642 /// %ra = insertelement <4 x float> undef, float %s0, i32 0 4643 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 4644 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 4645 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 4646 /// 4647 /// Returns true if it matches 4648 /// 4649 static bool findBuildVector(InsertElementInst *FirstInsertElem, 4650 SmallVectorImpl<Value *> &BuildVector, 4651 SmallVectorImpl<Value *> &BuildVectorOpds) { 4652 if (!isa<UndefValue>(FirstInsertElem->getOperand(0))) 4653 return false; 4654 4655 InsertElementInst *IE = FirstInsertElem; 4656 while (true) { 4657 BuildVector.push_back(IE); 4658 BuildVectorOpds.push_back(IE->getOperand(1)); 4659 4660 if (IE->use_empty()) 4661 return false; 4662 4663 InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back()); 4664 if (!NextUse) 4665 return true; 4666 4667 // If this isn't the final use, make sure the next insertelement is the only 4668 // use. It's OK if the final constructed vector is used multiple times 4669 if (!IE->hasOneUse()) 4670 return false; 4671 4672 IE = NextUse; 4673 } 4674 4675 return false; 4676 } 4677 4678 /// \brief Like findBuildVector, but looks backwards for construction of aggregate. 4679 /// 4680 /// \return true if it matches. 4681 static bool findBuildAggregate(InsertValueInst *IV, 4682 SmallVectorImpl<Value *> &BuildVector, 4683 SmallVectorImpl<Value *> &BuildVectorOpds) { 4684 Value *V; 4685 do { 4686 BuildVector.push_back(IV); 4687 BuildVectorOpds.push_back(IV->getInsertedValueOperand()); 4688 V = IV->getAggregateOperand(); 4689 if (isa<UndefValue>(V)) 4690 break; 4691 IV = dyn_cast<InsertValueInst>(V); 4692 if (!IV || !IV->hasOneUse()) 4693 return false; 4694 } while (true); 4695 std::reverse(BuildVector.begin(), BuildVector.end()); 4696 std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end()); 4697 return true; 4698 } 4699 4700 static bool PhiTypeSorterFunc(Value *V, Value *V2) { 4701 return V->getType() < V2->getType(); 4702 } 4703 4704 /// \brief Try and get a reduction value from a phi node. 
4705 /// 4706 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 4707 /// if they come from either \p ParentBB or a containing loop latch. 4708 /// 4709 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 4710 /// if not possible. 4711 static Value *getReductionValue(const DominatorTree *DT, PHINode *P, 4712 BasicBlock *ParentBB, LoopInfo *LI) { 4713 // There are situations where the reduction value is not dominated by the 4714 // reduction phi. Vectorizing such cases has been reported to cause 4715 // miscompiles. See PR25787. 4716 auto DominatedReduxValue = [&](Value *R) { 4717 return ( 4718 dyn_cast<Instruction>(R) && 4719 DT->dominates(P->getParent(), dyn_cast<Instruction>(R)->getParent())); 4720 }; 4721 4722 Value *Rdx = nullptr; 4723 4724 // Return the incoming value if it comes from the same BB as the phi node. 4725 if (P->getIncomingBlock(0) == ParentBB) { 4726 Rdx = P->getIncomingValue(0); 4727 } else if (P->getIncomingBlock(1) == ParentBB) { 4728 Rdx = P->getIncomingValue(1); 4729 } 4730 4731 if (Rdx && DominatedReduxValue(Rdx)) 4732 return Rdx; 4733 4734 // Otherwise, check whether we have a loop latch to look at. 4735 Loop *BBL = LI->getLoopFor(ParentBB); 4736 if (!BBL) 4737 return nullptr; 4738 BasicBlock *BBLatch = BBL->getLoopLatch(); 4739 if (!BBLatch) 4740 return nullptr; 4741 4742 // There is a loop latch, return the incoming value if it comes from 4743 // that. This reduction pattern occasionally turns up. 4744 if (P->getIncomingBlock(0) == BBLatch) { 4745 Rdx = P->getIncomingValue(0); 4746 } else if (P->getIncomingBlock(1) == BBLatch) { 4747 Rdx = P->getIncomingValue(1); 4748 } 4749 4750 if (Rdx && DominatedReduxValue(Rdx)) 4751 return Rdx; 4752 4753 return nullptr; 4754 } 4755 4756 /// Attempt to reduce a horizontal reduction. 4757 /// If it is legal to match a horizontal reduction feeding the phi node \a P 4758 /// with reduction operators \a Root (or one of its operands) in a basic block 4759 /// \a BB, then check if it can be done. If horizontal reduction is not found 4760 /// and root instruction is a binary operation, vectorization of the operands is 4761 /// attempted. 4762 /// \returns true if a horizontal reduction was matched and reduced or operands 4763 /// of one of the binary instruction were vectorized. 4764 /// \returns false if a horizontal reduction was not matched (or not possible) 4765 /// or no vectorization of any binary operation feeding \a Root instruction was 4766 /// performed. 4767 static bool tryToVectorizeHorReductionOrInstOperands( 4768 PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R, 4769 TargetTransformInfo *TTI, 4770 const function_ref<bool(BinaryOperator *, BoUpSLP &)> Vectorize) { 4771 if (!ShouldVectorizeHor) 4772 return false; 4773 4774 if (!Root) 4775 return false; 4776 4777 if (Root->getParent() != BB) 4778 return false; 4779 // Start analysis starting from Root instruction. If horizontal reduction is 4780 // found, try to vectorize it. If it is not a horizontal reduction or 4781 // vectorization is not possible or not effective, and currently analyzed 4782 // instruction is a binary operation, try to vectorize the operands, using 4783 // pre-order DFS traversal order. If the operands were not vectorized, repeat 4784 // the same procedure considering each operand as a possible root of the 4785 // horizontal reduction. 

/// Attempt to reduce a horizontal reduction.
/// If it is legal to match a horizontal reduction feeding the phi node \a P
/// with reduction operators \a Root (or one of its operands) in a basic block
/// \a BB, then check if it can be done. If a horizontal reduction is not
/// found and the root instruction is a binary operation, vectorization of its
/// operands is attempted.
/// \returns true if a horizontal reduction was matched and reduced, or if the
/// operands of one of the binary instructions were vectorized.
/// \returns false if a horizontal reduction was not matched (or not possible)
/// and no vectorization of any binary operation feeding \a Root was
/// performed.
static bool tryToVectorizeHorReductionOrInstOperands(
    PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
    TargetTransformInfo *TTI,
    const function_ref<bool(BinaryOperator *, BoUpSLP &)> Vectorize) {
  if (!ShouldVectorizeHor)
    return false;

  if (!Root)
    return false;

  if (Root->getParent() != BB)
    return false;
  // Start the analysis from the Root instruction. If a horizontal reduction
  // is found, try to vectorize it. If it is not a horizontal reduction, or
  // vectorization is not possible or not effective, and the currently
  // analyzed instruction is a binary operation, try to vectorize the
  // operands, using pre-order DFS traversal order. If the operands were not
  // vectorized, repeat the same procedure considering each operand as a
  // possible root of the horizontal reduction.
  // Interrupt the process once the Root instruction itself has been
  // vectorized or all sub-trees not deeper than RecursionMaxDepth have been
  // analyzed/vectorized.
  SmallVector<std::pair<WeakTrackingVH, unsigned>, 8> Stack(1, {Root, 0});
  SmallSet<Value *, 8> VisitedInstrs;
  bool Res = false;
  while (!Stack.empty()) {
    Value *V;
    unsigned Level;
    std::tie(V, Level) = Stack.pop_back_val();
    if (!V)
      continue;
    auto *Inst = dyn_cast<Instruction>(V);
    if (!Inst || isa<PHINode>(Inst))
      continue;
    if (auto *BI = dyn_cast<BinaryOperator>(Inst)) {
      HorizontalReduction HorRdx;
      if (HorRdx.matchAssociativeReduction(P, BI)) {
        if (HorRdx.tryToReduce(R, TTI)) {
          Res = true;
          // Set P to nullptr to avoid re-analysis of the phi node in
          // matchAssociativeReduction unless this is the root node.
          P = nullptr;
          continue;
        }
      }
      if (P) {
        Inst = dyn_cast<Instruction>(BI->getOperand(0));
        if (Inst == P)
          Inst = dyn_cast<Instruction>(BI->getOperand(1));
        if (!Inst) {
          // Set P to nullptr to avoid re-analysis of the phi node in
          // matchAssociativeReduction unless this is the root node.
          P = nullptr;
          continue;
        }
      }
    }
    // Set P to nullptr to avoid re-analysis of the phi node in
    // matchAssociativeReduction unless this is the root node.
    P = nullptr;
    if (Vectorize(dyn_cast<BinaryOperator>(Inst), R)) {
      Res = true;
      continue;
    }

    // Try to vectorize the operands.
    if (++Level < RecursionMaxDepth)
      for (auto *Op : Inst->operand_values())
        Stack.emplace_back(Op, Level);
  }
  return Res;
}

bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
                                                 BasicBlock *BB, BoUpSLP &R,
                                                 TargetTransformInfo *TTI) {
  if (!V)
    return false;
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  if (!isa<BinaryOperator>(I))
    P = nullptr;
  // Try to match and vectorize a horizontal reduction.
  return tryToVectorizeHorReductionOrInstOperands(
      P, I, BB, R, TTI, [this](BinaryOperator *BI, BoUpSLP &R) -> bool {
        return tryToVectorize(BI, R);
      });
}
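
// Illustrative (hypothetical) horizontal reduction rooted at a return, the
// kind of tree vectorizeRootInstruction is handed from vectorizeChainsInBlock
// below:
//
//   %add0 = fadd fast float %a, %b
//   %add1 = fadd fast float %add0, %c
//   %add2 = fadd fast float %add1, %d
//   ret float %add2
//
// If the reduction does not match (or is not profitable), the traversal above
// falls back to trying the Vectorize callback on each fadd and then on its
// operands.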

bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallSet<Value *, 16> VisitedInstrs;

  bool HaveVectorizedPhiNodes = true;
  while (HaveVectorizedPhiNodes) {
    HaveVectorizedPhiNodes = false;

    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (Instruction &I : *BB) {
      PHINode *P = dyn_cast<PHINode>(&I);
      if (!P)
        break;

      if (!VisitedInstrs.count(P))
        Incoming.push_back(P);
    }

    // Sort by type.
    std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);

    // Try to vectorize elements based on their type.
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
                                           E = Incoming.end();
         IncIt != E;) {

      // Look for the next elements with the same type.
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts
                   << ")\n");
      // The order in which the phi nodes appear in the program does not
      // matter, so allow tryToVectorizeList to reorder them if it is
      // beneficial. This is done only when there are exactly two elements,
      // since tryToVectorizeList asserts that there are only two values when
      // AllowReorder is true.
      bool AllowReorder = NumElts == 2;
      if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R,
                                            None, AllowReorder)) {
        // Success: start over, because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
    // We may go through BB multiple times, so skip instructions we have
    // already checked.
    if (!VisitedInstrs.insert(&*it).second)
      continue;

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;

      // Try to match and vectorize a horizontal reduction.
      if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
                                   TTI)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
      continue;
    }

    if (ShouldStartVectorizeHorAtStore) {
      if (StoreInst *SI = dyn_cast<StoreInst>(it)) {
        // Try to match and vectorize a horizontal reduction.
        if (vectorizeRootInstruction(nullptr, SI->getValueOperand(), BB, R,
                                     TTI)) {
          Changed = true;
          it = BB->begin();
          e = BB->end();
          continue;
        }
      }
    }

    // Try to vectorize horizontal reductions feeding into a return.
    if (ReturnInst *RI = dyn_cast<ReturnInst>(it)) {
      if (RI->getNumOperands() != 0) {
        // Try to match and vectorize a horizontal reduction.
        if (vectorizeRootInstruction(nullptr, RI->getOperand(0), BB, R, TTI)) {
          Changed = true;
          it = BB->begin();
          e = BB->end();
          continue;
        }
      }
    }

    // Try to vectorize trees that start at compare instructions.
    if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
      if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
        Changed = true;
        // We would like to start over, since some instructions are deleted
        // and the iterator may become invalid.
        it = BB->begin();
        e = BB->end();
        continue;
      }

      for (int I = 0; I < 2; ++I) {
        if (vectorizeRootInstruction(nullptr, CI->getOperand(I), BB, R, TTI)) {
          Changed = true;
          // We would like to start over, since some instructions are deleted
          // and the iterator may become invalid.
          it = BB->begin();
          e = BB->end();
          break;
        }
      }
      continue;
    }
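
    // Illustrative (hypothetical) compare whose operands form a vectorizable
    // pair, covered by the tryToVectorizePair call above:
    //
    //   %x = fadd float %a0, %b0
    //   %y = fadd float %a1, %b1
    //   %c = fcmp olt float %x, %y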

    // Try to vectorize trees that start at insertelement instructions.
    if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) {
      SmallVector<Value *, 16> BuildVector;
      SmallVector<Value *, 16> BuildVectorOpds;
      if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds))
        continue;

      // Vectorize starting with the build vector operands, ignoring the
      // BuildVector instructions for the purpose of scheduling and user
      // extraction.
      if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }

      continue;
    }

    // Try to vectorize trees that start at insertvalue instructions feeding
    // into a store.
    if (StoreInst *SI = dyn_cast<StoreInst>(it)) {
      if (InsertValueInst *LastInsertValue =
              dyn_cast<InsertValueInst>(SI->getValueOperand())) {
        const DataLayout &DL = BB->getModule()->getDataLayout();
        if (R.canMapToVector(SI->getValueOperand()->getType(), DL)) {
          SmallVector<Value *, 16> BuildVector;
          SmallVector<Value *, 16> BuildVectorOpds;
          if (!findBuildAggregate(LastInsertValue, BuildVector,
                                  BuildVectorOpds))
            continue;

          DEBUG(dbgs() << "SLP: store of array mappable to vector: " << *SI
                       << "\n");
          if (tryToVectorizeList(BuildVectorOpds, R, BuildVector, false)) {
            Changed = true;
            it = BB->begin();
            e = BB->end();
          }
          continue;
        }
      }
    }
  }

  return Changed;
}
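
// Illustrative (hypothetical) aggregate store handled at the end of
// vectorizeChainsInBlock above, where the struct maps to <2 x float> via
// canMapToVector:
//
//   %s0 = insertvalue { float, float } undef, float %x, 0
//   %s1 = insertvalue { float, float } %s0, float %y, 1
//   store { float, float } %s1, { float, float }* %p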

bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
  auto Changed = false;
  for (auto &Entry : GEPs) {

    // If the getelementptr list has fewer than two elements, there's nothing
    // to do.
    if (Entry.second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
                 << Entry.second.size() << ".\n");

    // We process the getelementptr list in chunks of 16 (like we do for
    // stores) to minimize compile time.
    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += 16) {
      auto Len = std::min<unsigned>(BE - BI, 16);
      auto GEPList = makeArrayRef(&Entry.second[BI], Len);

      // Initialize a set of candidate getelementptrs. Note that we use a
      // SetVector here to preserve program order. If the index computations
      // are vectorizable and begin with loads, we want to minimize the chance
      // of having to reorder them later.
      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

      // Some of the candidates may have already been vectorized after we
      // initially collected them. If so, the WeakTrackingVHs will have
      // nullified the values, so remove them from the set of candidates.
      Candidates.remove(nullptr);

      // Remove from the set of candidates all pairs of getelementptrs with
      // constant differences. Such getelementptrs are likely not good
      // candidates for vectorization in a bottom-up phase since one can be
      // computed from the other. We also ensure all candidate getelementptr
      // indices are unique.
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1;
           ++I) {
        auto *GEPI = cast<GetElementPtrInst>(GEPList[I]);
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = cast<GetElementPtrInst>(GEPList[J]);
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPList[I]);
            Candidates.remove(GEPList[J]);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPList[J]);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;

      // Add the single, non-constant index of each candidate to the bundle.
      // We ensured the indices met these constraints when we originally
      // collected the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try and vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      //   ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e;
       ++it) {
    if (it->second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                 << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
    // TODO: The limit of 16 inhibits greater vectorization factors.
    //       For example, AVX2 supports v32i8. Increasing this limit, however,
    //       may cause a significant compile-time increase.
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len), R);
    }
  }
  return Changed;
}

char SLPVectorizer::ID = 0;
static const char lv_name[] = "SLP Vectorizer";
INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

namespace llvm {
Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
} // end namespace llvm
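
// Usage sketch (illustrative): running the pass over a Module M from a host
// tool with the legacy pass manager. Only createSLPVectorizerPass is provided
// by this file; the surrounding setup is assumed.
//
//   legacy::PassManager PM;
//   PM.add(createSLPVectorizerPass());
//   PM.run(M);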