//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <map>
#include <memory>

using namespace llvm;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(false), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

namespace {

// FIXME: Set this via cl::opt to allow overriding.
static const unsigned MinVecRegSize = 128;

static const unsigned RecursionMaxDepth = 12;

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// \brief Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_fp128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns the parent basic block if all of the instructions in \p VL
/// are in the same block or null otherwise.
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return nullptr;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return nullptr;

    if (BB != I->getParent())
      return nullptr;
  }
  return BB;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    if (!isa<Constant>(VL[i]))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

///\returns the opcode that can be paired with \p Op to create an alternate
/// sequence which can later be merged as a ShuffleVector instruction.
static unsigned getAltOpcode(unsigned Op) {
  switch (Op) {
  case Instruction::FAdd:
    return Instruction::FSub;
  case Instruction::FSub:
    return Instruction::FAdd;
  case Instruction::Add:
    return Instruction::Sub;
  case Instruction::Sub:
    return Instruction::Add;
  default:
    return 0;
  }
}

///\returns bool representing if Opcode \p Op can be part
/// of an alternate sequence which can later be merged as
/// a ShuffleVector instruction.
static bool canCombineAsAltInst(unsigned Op) {
  if (Op == Instruction::FAdd || Op == Instruction::FSub ||
      Op == Instruction::Sub || Op == Instruction::Add)
    return true;
  return false;
}

/// \returns the ShuffleVector opcode if the instructions in \p VL form an
/// alternating fadd/fsub, fsub/fadd, add/sub, or sub/add sequence
/// (e.g. the opcode sequence fadd, fsub, fadd, fsub, ...), or zero otherwise.
static unsigned isAltInst(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  unsigned Opcode = I0->getOpcode();
  unsigned AltOpcode = getAltOpcode(Opcode);
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || I->getOpcode() != ((i & 1) ? AltOpcode : Opcode))
      return 0;
  }
  return Instruction::ShuffleVector;
}

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
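/// For example, the bundle {fadd, fsub, fadd, fsub} has no single common
/// opcode, but it does match the alternating pattern checked by isAltInst(),
/// so Instruction::ShuffleVector is returned for it instead of zero.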
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode()) {
      if (canCombineAsAltInst(Opcode) && i == 1)
        return isAltInst(VL);
      return 0;
    }
  }
  return Opcode;
}

/// Get the intersection (logical and) of all of the potential IR flags
/// of each scalar operation (VL) that will be converted into a vector (I).
/// Flag set: NSW, NUW, exact, and all of fast-math.
static void propagateIRFlags(Value *I, ArrayRef<Value *> VL) {
  if (auto *VecOp = dyn_cast<BinaryOperator>(I)) {
    if (auto *Intersection = dyn_cast<BinaryOperator>(VL[0])) {
      // Intersection is initialized to the 0th scalar,
      // so start counting from index '1'.
      for (int i = 1, e = VL.size(); i < e; ++i) {
        if (auto *Scalar = dyn_cast<BinaryOperator>(VL[i]))
          Intersection->andIRFlags(Scalar);
      }
      VecOp->copyIRFlags(Intersection);
    }
  }
}

/// \returns \p I after propagating metadata from \p VL.
static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (unsigned i = 0, n = Metadata.size(); i != n; ++i) {
    unsigned Kind = Metadata[i].first;
    MDNode *MD = Metadata[i].second;

    for (int i = 1, e = VL.size(); MD && i != e; i++) {
      Instruction *I = cast<Instruction>(VL[i]);
      MDNode *IMD = I->getMetadata(Kind);

      switch (Kind) {
      default:
        MD = nullptr; // Remove unknown metadata
        break;
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_nontemporal:
        MD = MDNode::intersect(MD, IMD);
        break;
      }
    }
    I->setMetadata(Kind, MD);
  }
  return I;
}

/// \returns The type that all of the values in \p VL have or null if there
/// are different types.
static Type* getSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return nullptr;

  return Ty;
}

/// \returns True if the ExtractElement instructions in VL can be vectorized
/// to use the original vector.
static bool CanReuseExtract(ArrayRef<Value *> VL) {
  assert(Instruction::ExtractElement == getSameOpcode(VL) && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *VL0 = VL[0];
  ExtractElementInst *E0 = cast<ExtractElementInst>(VL0);
  Value *Vec = E0->getOperand(0);

  // We have to extract from the same vector type.
  unsigned NElts = Vec->getType()->getVectorNumElements();

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
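  // I.e. for a bundle of width 4 we expect, e.g.:
  //   %e0 = extractelement <4 x i32> %Vec, i32 0
  //   %e1 = extractelement <4 x i32> %Vec, i32 1
  //   ...
  // with indices 0, 1, 2, 3 all reading from the same source vector %Vec.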
  ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1));
  if (!CI || CI->getZExtValue())
    return false;

  for (unsigned i = 1, e = VL.size(); i < e; ++i) {
    ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));

    if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec)
      return false;
  }

  return true;
}

/// \returns True if the in-tree use also needs an extract. This refers to
/// a possible scalar operand in a vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {

  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
    if (hasVectorInstrinsicScalarOpd(ID, 1)) {
      return (CI->getArgOperand(1) == Scalar);
    }
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC)
      : NumLoadsWantToKeepOrder(0), NumLoadsWantToChangeOrder(0), F(Func),
        SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt),
        Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
  }

  /// \brief Vectorize the tree that was built by buildTree().
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  int getSpillCost();

  /// \returns the vectorization cost of the tree that was built by buildTree().
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumLoadsWantToKeepOrder = 0;
    NumLoadsWantToChangeOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
  }

  /// \returns true if the memory operations A and B are consecutive.
  bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL);

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// \returns true if it is beneficial to reverse the vector order.
  bool shouldReorder() const {
    return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
  }

private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This can happen because of cycles in the graph.
  Value *alreadyVectorized(ArrayRef<Value *> VL) const;

  /// \brief Take the pointer operand from the Load/Store instruction.
  /// \returns NULL if this is not a valid Load/Store instruction.
  static Value *getPointerOperand(Value *I);

  /// \brief Take the address space operand from the Load/Store instruction.
  /// \returns -1 if this is not a valid Load/Store instruction.
  static unsigned getAddressSpaceOperand(Value *I);

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even if the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  /// \brief Reorder commutative operands in an alt shuffle if doing so results
  /// in vectorized code.
  void reorderAltShuffleOperands(ArrayRef<Value *> VL,
                                 SmallVectorImpl<Value *> &Left,
                                 SmallVectorImpl<Value *> &Right);
  /// \brief Reorder commutative operands to get a better probability of
  /// generating vectorized code.
  void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                      SmallVectorImpl<Value *> &Left,
                                      SmallVectorImpl<Value *> &Right);
  struct TreeEntry {
    TreeEntry() : Scalars(), VectorizedValue(nullptr), NeedToGather(0) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// Do we need to gather this sequence?
    bool NeedToGather;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) {
    VectorizableTree.emplace_back();
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      MustGather.insert(VL.begin(), VL.end());
    }
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value*, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser (Value *S, llvm::User *U, int L) :
      Scalar(S), User(U), Lane(L){}
    // Which scalar in our function.
    Value *Scalar;
    // Which user uses the scalar.
    llvm::User *User;
    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// Checks if two instructions may access the same memory.
  ///
  /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
  /// is invariant in the calling loop.
  bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
                 Instruction *Inst2) {

    // First check if the result is already in the cache.
    AliasCacheKey key = std::make_pair(Inst1, Inst2);
    Optional<bool> &result = AliasCache[key];
    if (result.hasValue()) {
      return result.getValue();
    }
    MemoryLocation Loc2 = getLocation(Inst2, AA);
    bool aliased = true;
    if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
      // Do the alias check.
      aliased = AA->alias(Loc1, Loc2);
    }
    // Store the result in the cache.
    result = aliased;
    return aliased;
  }

  typedef std::pair<Instruction *, Instruction *> AliasCacheKey;

  /// Cache for alias results.
  /// TODO: consider moving this to the AliasAnalysis itself.
  DenseMap<AliasCacheKey, Optional<bool>> AliasCache;

  /// Removes an instruction from its block and eventually deletes it.
  /// It's like Instruction::eraseFromParent() except that the actual deletion
  /// is delayed until BoUpSLP is destructed.
  /// This is required to ensure that there are no incorrect collisions in the
  /// AliasCache, which can happen if a new instruction is allocated at the
  /// same address as a previously deleted instruction.
  void eraseInstruction(Instruction *I) {
    I->removeFromParent();
    I->dropAllReferences();
    DeletedInstructions.push_back(std::unique_ptr<Instruction>(I));
  }

  /// Temporary store for deleted instructions. Instructions will be deleted
  /// eventually when the BoUpSLP is destructed.
  SmallVector<std::unique_ptr<Instruction>, 8> DeletedInstructions;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User).
  UserList ExternalUses;

  /// Values used only by @llvm.assume calls.
  SmallPtrSet<const Value *, 32> EphValues;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;
  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;

  /// Contains all scheduling relevant data for an instruction.
  /// A ScheduleData either represents a single instruction or a member of an
  /// instruction bundle (= a group of instructions which is combined into a
  /// vector instruction).
  struct ScheduleData {

    // The initial value for the dependency counters. It means that the
    // dependencies are not calculated yet.
    enum { InvalidDeps = -1 };

    ScheduleData()
        : Inst(nullptr), FirstInBundle(nullptr), NextInBundle(nullptr),
          NextLoadStore(nullptr), SchedulingRegionID(0), SchedulingPriority(0),
          Dependencies(InvalidDeps), UnscheduledDeps(InvalidDeps),
          UnscheduledDepsInBundle(InvalidDeps), IsScheduled(false) {}

    void init(int BlockSchedulingRegionID) {
      FirstInBundle = this;
      NextInBundle = nullptr;
      NextLoadStore = nullptr;
      IsScheduled = false;
      SchedulingRegionID = BlockSchedulingRegionID;
      UnscheduledDepsInBundle = UnscheduledDeps;
      clearDependencies();
    }

    /// Returns true if the dependency information has been calculated.
    bool hasValidDependencies() const { return Dependencies != InvalidDeps; }

    /// Returns true for single instructions and for bundle representatives
    /// (= the head of a bundle).
    bool isSchedulingEntity() const { return FirstInBundle == this; }

    /// Returns true if it represents an instruction bundle and not only a
    /// single instruction.
    bool isPartOfBundle() const {
      return NextInBundle != nullptr || FirstInBundle != this;
    }

    /// Returns true if it is ready for scheduling, i.e. it has no more
    /// unscheduled depending instructions/bundles.
    bool isReady() const {
      assert(isSchedulingEntity() &&
             "can't consider non-scheduling entity for ready list");
      return UnscheduledDepsInBundle == 0 && !IsScheduled;
    }

    /// Modifies the number of unscheduled dependencies, also updating it for
    /// the whole bundle.
    int incrementUnscheduledDeps(int Incr) {
      UnscheduledDeps += Incr;
      return FirstInBundle->UnscheduledDepsInBundle += Incr;
    }

    /// Sets the number of unscheduled dependencies to the number of
    /// dependencies.
    void resetUnscheduledDeps() {
      incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
    }

    /// Clears all dependency information.
    void clearDependencies() {
      Dependencies = InvalidDeps;
      resetUnscheduledDeps();
      MemoryDependencies.clear();
    }

    void dump(raw_ostream &os) const {
      if (!isSchedulingEntity()) {
        os << "/ " << *Inst;
      } else if (NextInBundle) {
        os << '[' << *Inst;
        ScheduleData *SD = NextInBundle;
        while (SD) {
          os << ';' << *SD->Inst;
          SD = SD->NextInBundle;
        }
        os << ']';
      } else {
        os << *Inst;
      }
    }

    Instruction *Inst;

    /// Points to the head in an instruction bundle (and always to this for
    /// single instructions).
    ScheduleData *FirstInBundle;

    /// Singly linked list of all instructions in a bundle. Null if it is a
    /// single instruction.
    ScheduleData *NextInBundle;

    /// Singly linked list of all memory instructions (e.g. load, store, call)
    /// in the block - until the end of the scheduling region.
    ScheduleData *NextLoadStore;

    /// The dependent memory instructions.
    /// This list is derived on demand in calculateDependencies().
    SmallVector<ScheduleData *, 4> MemoryDependencies;

    /// This ScheduleData is in the current scheduling region if this matches
    /// the current SchedulingRegionID of BlockScheduling.
    int SchedulingRegionID;

    /// Used for getting a "good" final ordering of instructions.
    int SchedulingPriority;

    /// The number of dependencies. Consists of the number of users of the
    /// instruction plus the number of dependent memory instructions (if any).
    /// This value is calculated on demand.
    /// If InvalidDeps, the number of dependencies is not calculated yet.
    int Dependencies;

    /// The number of dependencies minus the number of dependencies of scheduled
    /// instructions. As soon as this is zero, the instruction/bundle gets ready
    /// for scheduling.
    /// Note that this is negative as long as Dependencies is not calculated.
    int UnscheduledDeps;

    /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for
    /// single instructions.
    int UnscheduledDepsInBundle;

    /// True if this instruction is scheduled (or considered as scheduled in the
    /// dry-run).
    bool IsScheduled;
  };

#ifndef NDEBUG
  friend raw_ostream &operator<<(raw_ostream &os,
                                 const BoUpSLP::ScheduleData &SD);
#endif

  /// Contains all scheduling data for a basic block.
  ///
  struct BlockScheduling {

    BlockScheduling(BasicBlock *BB)
        : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize),
          ScheduleStart(nullptr), ScheduleEnd(nullptr),
          FirstLoadStoreInRegion(nullptr), LastLoadStoreInRegion(nullptr),
          // Make sure that the initial SchedulingRegionID is greater than the
          // initial SchedulingRegionID in ScheduleData (which is 0).
          SchedulingRegionID(1) {}

    void clear() {
      ReadyInsts.clear();
      ScheduleStart = nullptr;
      ScheduleEnd = nullptr;
      FirstLoadStoreInRegion = nullptr;
      LastLoadStoreInRegion = nullptr;

      // Make a new scheduling region, i.e. all existing ScheduleData is not
      // in the new region yet.
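      // Bumping the region ID is all that is needed: getScheduleData() treats
      // any ScheduleData whose SchedulingRegionID does not match as absent,
      // so no walk over ScheduleDataMap is required here.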
      ++SchedulingRegionID;
    }

    ScheduleData *getScheduleData(Value *V) {
      ScheduleData *SD = ScheduleDataMap[V];
      if (SD && SD->SchedulingRegionID == SchedulingRegionID)
        return SD;
      return nullptr;
    }

    bool isInSchedulingRegion(ScheduleData *SD) {
      return SD->SchedulingRegionID == SchedulingRegionID;
    }

    /// Marks an instruction as scheduled and puts all dependent ready
    /// instructions into the ready-list.
    template <typename ReadyListType>
    void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
      SD->IsScheduled = true;
      DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");

      ScheduleData *BundleMember = SD;
      while (BundleMember) {
        // Handle the def-use chain dependencies.
        for (Use &U : BundleMember->Inst->operands()) {
          ScheduleData *OpDef = getScheduleData(U.get());
          if (OpDef && OpDef->hasValidDependencies() &&
              OpDef->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = OpDef->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (def): " << *DepBundle << "\n");
          }
        }
        // Handle the memory dependencies.
        for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
          if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (mem): " << *DepBundle << "\n");
          }
        }
        BundleMember = BundleMember->NextInBundle;
      }
    }

    /// Put all instructions into the ReadyList which are ready for scheduling.
    template <typename ReadyListType>
    void initialFillReadyList(ReadyListType &ReadyList) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
        ScheduleData *SD = getScheduleData(I);
        if (SD->isSchedulingEntity() && SD->isReady()) {
          ReadyList.insert(SD);
          DEBUG(dbgs() << "SLP: initially in ready list: " << *I << "\n");
        }
      }
    }

    /// Checks if a bundle of instructions can be scheduled, i.e. has no
    /// cyclic dependencies. This is only a dry-run, no instructions are
    /// actually moved at this stage.
    bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP);

    /// Un-bundles a group of instructions.
    void cancelScheduling(ArrayRef<Value *> VL);

    /// Extends the scheduling region so that V is inside the region.
    void extendSchedulingRegion(Value *V);

    /// Initialize the ScheduleData structures for new instructions in the
    /// scheduling region.
    void initScheduleData(Instruction *FromI, Instruction *ToI,
                          ScheduleData *PrevLoadStore,
                          ScheduleData *NextLoadStore);

    /// Updates the dependency information of a bundle and of all instructions/
    /// bundles which depend on the original bundle.
    void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
                               BoUpSLP *SLP);

    /// Sets all instructions in the scheduling region to un-scheduled.
    void resetSchedule();

    BasicBlock *BB;

    /// Simple memory allocation for ScheduleData.
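    /// ScheduleData objects are allocated in chunks (sized to the basic
    /// block), and the chunks are kept alive here so that the pointers stored
    /// in ScheduleDataMap stay valid across vectorization iterations.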
    std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;

    /// The size of a ScheduleData array in ScheduleDataChunks.
    int ChunkSize;

    /// The allocator position in the current chunk, which is the last entry
    /// of ScheduleDataChunks.
    int ChunkPos;

    /// Attaches ScheduleData to Instruction.
    /// Note that the mapping survives during all vectorization iterations, i.e.
    /// ScheduleData structures are recycled.
    DenseMap<Value *, ScheduleData *> ScheduleDataMap;

    struct ReadyList : SmallVector<ScheduleData *, 8> {
      void insert(ScheduleData *SD) { push_back(SD); }
    };

    /// The ready-list for scheduling (only used for the dry-run).
    ReadyList ReadyInsts;

    /// The first instruction of the scheduling region.
    Instruction *ScheduleStart;

    /// The first instruction _after_ the scheduling region.
    Instruction *ScheduleEnd;

    /// The first memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *FirstLoadStoreInRegion;

    /// The last memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *LastLoadStoreInRegion;

    /// The ID of the scheduling region. For a new vectorization iteration this
    /// is incremented which "removes" all ScheduleData from the region.
    int SchedulingRegionID;
  };

  /// Attaches the BlockScheduling structures to basic blocks.
  MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;

  /// Performs the "real" scheduling. Done before vectorization is actually
  /// performed in a basic block.
  void scheduleBlock(BlockScheduling *BS);

  /// List of users to ignore during scheduling and that don't need extracting.
  ArrayRef<Value *> UserIgnoreList;

  // Number of load-bundles, which contain consecutive loads.
  int NumLoadsWantToKeepOrder;

  // Number of load-bundles of size 2, which are consecutive loads if reversed.
  int NumLoadsWantToChangeOrder;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;
};

#ifndef NDEBUG
raw_ostream &operator<<(raw_ostream &os, const BoUpSLP::ScheduleData &SD) {
  SD.dump(os);
  return os;
}
#endif

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ArrayRef<Value *> UserIgnoreLst) {
  deleteTree();
  UserIgnoreList = UserIgnoreLst;
  if (!getSameType(Roots))
    return;
  buildTree_rec(Roots, 0);

  // Collect the values that we need to extract from the tree.
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
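      // (Gathered values stay as scalars in the emitted IR, so their users
      // keep using the original scalar and no extractelement is needed.)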
      if (Entry->NeedToGather)
        continue;

      for (User *U : Scalar->users()) {
        DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        Instruction *UserInst = dyn_cast<Instruction>(U);
        if (!UserInst)
          continue;

        // Skip in-tree scalars that become vectors.
        if (ScalarToTreeEntry.count(U)) {
          int Idx = ScalarToTreeEntry[U];
          TreeEntry *UseEntry = &VectorizableTree[Idx];
          Value *UseScalar = UseEntry->Scalars[0];
          // Some in-tree scalars will remain as scalar in vectorized
          // instructions. If that is the case, the one in Lane 0 will
          // be used.
          if (UseScalar != U ||
              !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
            DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
                         << ".\n");
            assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
            continue;
          }
        }

        // Ignore users in the user ignore list.
        if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UserInst) !=
            UserIgnoreList.end())
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
      }
    }
  }
}

void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
  bool SameTy = getSameType(VL); (void)SameTy;
  bool isAltShuffle = false;
  assert(SameTy && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false);
    return;
  }

  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false);
      return;
    }
  unsigned Opcode = getSameOpcode(VL);

  // Check that this shuffle vector refers to the alternate
  // sequence of opcodes.
  if (Opcode == Instruction::ShuffleVector) {
    Instruction *I0 = dyn_cast<Instruction>(VL[0]);
    unsigned Op = I0->getOpcode();
    if (Op != Instruction::ShuffleVector)
      isAltShuffle = true;
  }

  // If all of the operands are identical or constant we have a simple solution.
  if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) || !Opcode) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Don't vectorize ephemeral values.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (EphValues.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is ephemeral.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // Check if this is a duplicate of another entry.
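  // (A duplicate bundle is one reached through a second use-def path, forming
  // a "diamond" in the graph; it can share the existing tree entry as long as
  // every lane matches, which is what the loop below verifies.)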
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is already in tree.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // If any of the scalars is marked as a value that needs to stay scalar then
  // we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  BasicBlock *BB = cast<Instruction>(VL0)->getParent();

  if (!DT->isReachableFromEntry(BB)) {
    // Don't go into unreachable blocks. They may contain instructions with
    // dependency cycles which confuse the final scheduling.
    DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
    newTreeEntry(VL, false);
    return;
  }

  // Check that every instruction appears once in this bundle.
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    for (unsigned j = i+1; j < e; ++j)
      if (VL[i] == VL[j]) {
        DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
        newTreeEntry(VL, false);
        return;
      }

  auto &BSRef = BlocksSchedules[BB];
  if (!BSRef) {
    BSRef = llvm::make_unique<BlockScheduling>(BB);
  }
  BlockScheduling &BS = *BSRef.get();

  if (!BS.tryScheduleBundle(VL, this)) {
    DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
    BS.cancelScheduling(VL);
    newTreeEntry(VL, false);
    return;
  }
  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = dyn_cast<PHINode>(VL0);

    // Check for terminator values (e.g. invoke).
    for (unsigned j = 0; j < VL.size(); ++j)
      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        TerminatorInst *Term = dyn_cast<TerminatorInst>(
            cast<PHINode>(VL[j])->getIncomingValueForBlock(PH->getIncomingBlock(i)));
        if (Term) {
          DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          return;
        }
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
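      // (For PHIs, the i-th operand bundle is formed per incoming block:
      // lane j contributes the value VL[j] receives from that block.)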
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<PHINode>(VL[j])->getIncomingValueForBlock(
            PH->getIncomingBlock(i)));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ExtractElement: {
    bool Reuse = CanReuseExtract(VL);
    if (Reuse) {
      DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
    } else {
      BS.cancelScheduling(VL);
    }
    newTreeEntry(VL, Reuse);
    return;
  }
  case Instruction::Load: {
    // Check if the loads are consecutive or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
      LoadInst *L = cast<LoadInst>(VL[i]);
      if (!L->isSimple()) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
        return;
      }
      const DataLayout &DL = F->getParent()->getDataLayout();
      if (!isConsecutiveAccess(VL[i], VL[i + 1], DL)) {
        if (VL.size() == 2 && isConsecutiveAccess(VL[1], VL[0], DL)) {
          ++NumLoadsWantToChangeOrder;
        }
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
        return;
      }
    }
    ++NumLoadsWantToKeepOrder;
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of loads.\n");
    return;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();
    for (unsigned i = 0; i < VL.size(); ++i) {
      Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
      if (Ty != SrcTy || !isValidElementType(Ty)) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of casts.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Check that all of the compares have the same predicate.
    CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
    Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
    for (unsigned i = 1, e = VL.size(); i < e; ++i) {
      CmpInst *Cmp = cast<CmpInst>(VL[i]);
      if (Cmp->getPredicate() != P0 ||
          Cmp->getOperand(0)->getType() != ComparedTy) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of compares.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of bin op.\n");

    // Sort operands of the instructions so that each side is more likely to
    // have the same opcode.
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
      ValueList Left, Right;
      reorderInputsAccordingToOpcode(VL, Left, Right);
      buildTree_rec(Left, Depth + 1);
      buildTree_rec(Right, Depth + 1);
      return;
    }

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::GetElementPtr: {
    // We don't combine GEPs with complicated (nested) indexing.
    for (unsigned j = 0; j < VL.size(); ++j) {
      if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
        DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        return;
      }
    }

    // We can't combine several GEPs into one vector if they operate on
    // different types.
    Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
    for (unsigned j = 0; j < VL.size(); ++j) {
      Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
      if (Ty0 != CurTy) {
        DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        return;
      }
    }

    // We don't combine GEPs with non-constant indexes.
    for (unsigned j = 0; j < VL.size(); ++j) {
      auto Op = cast<Instruction>(VL[j])->getOperand(1);
      if (!isa<ConstantInt>(Op)) {
        DEBUG(
            dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
    for (unsigned i = 0, e = 2; i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::Store: {
    const DataLayout &DL = F->getParent()->getDataLayout();
    // Check if the stores are consecutive or if we need to swizzle them.
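    // E.g. the bundle {A[0]=x0, A[1]=x1, A[2]=x2, A[3]=x3} forms a chain of
    // consecutive stores that can later become a single wide store.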
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
      if (!isConsecutiveAccess(VL[i], VL[i + 1], DL)) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
        return;
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of stores.\n");

    ValueList Operands;
    for (unsigned j = 0; j < VL.size(); ++j)
      Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));

    buildTree_rec(Operands, Depth + 1);
    return;
  }
  case Instruction::Call: {
    // Check if the calls are all to the same vectorizable intrinsic.
    CallInst *CI = cast<CallInst>(VL[0]);
    // Check if this is an Intrinsic call or something that can be
    // represented by an intrinsic call.
    Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
    if (!isTriviallyVectorizable(ID)) {
      BS.cancelScheduling(VL);
      newTreeEntry(VL, false);
      DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
      return;
    }
    Function *Int = CI->getCalledFunction();
    Value *A1I = nullptr;
    if (hasVectorInstrinsicScalarOpd(ID, 1))
      A1I = CI->getArgOperand(1);
    for (unsigned i = 1, e = VL.size(); i != e; ++i) {
      CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
      if (!CI2 || CI2->getCalledFunction() != Int ||
          getIntrinsicIDForCall(CI2, TLI) != ID) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
                     << "\n");
        return;
      }
      // ctlz, cttz and powi are special intrinsics whose second argument
      // must be the same for them to be vectorized.
      if (hasVectorInstrinsicScalarOpd(ID, 1)) {
        Value *A1J = CI2->getArgOperand(1);
        if (A1I != A1J) {
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
                       << " argument " << A1I << "!=" << A1J
                       << "\n");
          return;
        }
      }
    }

    newTreeEntry(VL, true);
    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j) {
        CallInst *CI2 = dyn_cast<CallInst>(VL[j]);
        Operands.push_back(CI2->getArgOperand(i));
      }
      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ShuffleVector: {
    // If this is not an alternate sequence of opcodes like add-sub
    // then do not vectorize this instruction.
    if (!isAltShuffle) {
      BS.cancelScheduling(VL);
      newTreeEntry(VL, false);
      DEBUG(dbgs() << "SLP: ShuffleVector is not vectorized.\n");
      return;
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");

    // Reorder operands if reordering would enable vectorization.
    if (isa<BinaryOperator>(VL0)) {
      ValueList Left, Right;
      reorderAltShuffleOperands(VL, Left, Right);
      buildTree_rec(Left, Depth + 1);
      buildTree_rec(Right, Depth + 1);
      return;
    }

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  default:
    BS.cancelScheduling(VL);
    newTreeEntry(VL, false);
    DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
    return;
  }
}

int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value*> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  if (E->NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isSplat(VL)) {
      return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    }
    return getGatherCost(E->Scalars);
  }
  unsigned Opcode = getSameOpcode(VL);
  assert(Opcode && getSameType(VL) && getSameBlock(VL) && "Invalid VL");
  Instruction *VL0 = cast<Instruction>(VL[0]);
  switch (Opcode) {
  case Instruction::PHI: {
    return 0;
  }
  case Instruction::ExtractElement: {
    if (CanReuseExtract(VL)) {
      int DeadCost = 0;
      for (unsigned i = 0, e = VL.size(); i < e; ++i) {
        ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
        if (E->hasOneUse())
          // Take credit for instruction that will become dead.
          DeadCost +=
              TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
      }
      return -DeadCost;
    }
    return getGatherCost(VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();

    // Calculate the cost of this instruction.
    int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
                                                       VL0->getType(), SrcTy);

    VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
    int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
    return VecCost - ScalarCost;
  }
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Calculate the cost of this instruction.
    int ScalarCost = 0;
    int VecCost = 0;
    if (Opcode == Instruction::FCmp || Opcode == Instruction::ICmp ||
        Opcode == Instruction::Select) {
      VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
      ScalarCost = VecTy->getNumElements() *
          TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
      VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
    } else {
      // Certain instructions can be cheaper to vectorize if they have a
      // constant second vector operand.
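      // E.g. in a bundle of '%r = udiv i32 %a, 4' instructions the divisor is
      // a uniform power-of-2 constant; Op2VK and Op2VP below let TTI price
      // that case more cheaply.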
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;
      TargetTransformInfo::OperandValueProperties Op1VP =
          TargetTransformInfo::OP_None;
      TargetTransformInfo::OperandValueProperties Op2VP =
          TargetTransformInfo::OP_None;

      // If all operands are exactly the same ConstantInt then set the
      // operand kind to OK_UniformConstantValue.
      // If instead not all operands are constants, then set the operand kind
      // to OK_AnyValue. If all operands are constants but not the same,
      // then set the operand kind to OK_NonUniformConstantValue.
      ConstantInt *CInt = nullptr;
      for (unsigned i = 0; i < VL.size(); ++i) {
        const Instruction *I = cast<Instruction>(VL[i]);
        if (!isa<ConstantInt>(I->getOperand(1))) {
          Op2VK = TargetTransformInfo::OK_AnyValue;
          break;
        }
        if (i == 0) {
          CInt = cast<ConstantInt>(I->getOperand(1));
          continue;
        }
        if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
            CInt != cast<ConstantInt>(I->getOperand(1)))
          Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      }
      // FIXME: Currently cost model modification for division by a power of 2
      // is handled only for X86. Add support for other targets.
      if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt &&
          CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;

      ScalarCost = VecTy->getNumElements() *
          TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK,
                                      Op1VP, Op2VP);
      VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK,
                                            Op1VP, Op2VP);
    }
    return VecCost - ScalarCost;
  }
  case Instruction::GetElementPtr: {
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_UniformConstantValue;

    int ScalarCost =
        VecTy->getNumElements() *
        TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
    int VecCost =
        TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);

    return VecCost - ScalarCost;
  }
  case Instruction::Load: {
    // Cost of wide load - cost of scalar loads.
    int ScalarLdCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
    int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
    return VecLdCost - ScalarLdCost;
  }
  case Instruction::Store: {
    // We know that we can merge the stores. Calculate the cost.
    int ScalarStCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
    int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
    return VecStCost - ScalarStCost;
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);

    // Calculate the cost of the scalar and vector calls.
    SmallVector<Type*, 4> ScalarTys, VecTys;
    for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op) {
      ScalarTys.push_back(CI->getArgOperand(op)->getType());
      VecTys.push_back(VectorType::get(CI->getArgOperand(op)->getType(),
                                       VecTy->getNumElements()));
    }

    int ScalarCallCost = VecTy->getNumElements() *
        TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys);

    int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys);

    DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
                 << " (" << VecCallCost << "-" << ScalarCallCost << ")"
                 << " for " << *CI << "\n");

    return VecCallCost - ScalarCallCost;
  }
  case Instruction::ShuffleVector: {
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_AnyValue;
    int ScalarCost = 0;
    int VecCost = 0;
    for (unsigned i = 0; i < VL.size(); ++i) {
      Instruction *I = cast<Instruction>(VL[i]);
      if (!I)
        break;
      ScalarCost +=
          TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
    }
    // VecCost is equal to the sum of the cost of creating 2 vectors
    // and the cost of creating a shuffle.
    Instruction *I0 = cast<Instruction>(VL[0]);
    VecCost =
        TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK);
    Instruction *I1 = cast<Instruction>(VL[1]);
    VecCost +=
        TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK);
    VecCost +=
        TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0);
    return VecCost - ScalarCost;
  }
  default:
    llvm_unreachable("Unknown instruction");
  }
}

bool BoUpSLP::isFullyVectorizableTinyTree() {
  DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
        VectorizableTree.size() << " is fully vectorizable.\n");

  // We only handle trees of height 2.
  if (VectorizableTree.size() != 2)
    return false;

  // Handle splat and all-constants stores.
  if (!VectorizableTree[0].NeedToGather &&
      (allConstant(VectorizableTree[1].Scalars) ||
       isSplat(VectorizableTree[1].Scalars)))
    return true;

  // Gathering cost would be too much for tiny trees.
  if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
    return false;

  return true;
}

int BoUpSLP::getSpillCost() {
  // Walk from the bottom of the tree to the top, tracking which values are
  // live. When we see a call instruction that is not part of our tree,
  // query TTI to see if there is a cost to keeping values live over it
  // (for example, if spills and fills are required).
  unsigned BundleWidth = VectorizableTree.front().Scalars.size();
  int Cost = 0;

  SmallPtrSet<Instruction*, 4> LiveValues;
  Instruction *PrevInst = nullptr;

  for (unsigned N = 0; N < VectorizableTree.size(); ++N) {
    Instruction *Inst = dyn_cast<Instruction>(VectorizableTree[N].Scalars[0]);
    if (!Inst)
      continue;

    if (!PrevInst) {
      PrevInst = Inst;
      continue;
    }

    DEBUG(
      dbgs() << "SLP: #LV: " << LiveValues.size();
      for (auto *X : LiveValues)
        dbgs() << " " << X->getName();
      dbgs() << ", Looking at ";
      Inst->dump();
      );

    // Update LiveValues.
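    // PrevInst's own definition has now been reached (we walk bottom-up), so
    // it leaves the live set, while its in-tree instruction operands become
    // live.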
    LiveValues.erase(PrevInst);
    for (auto &J : PrevInst->operands()) {
      if (isa<Instruction>(&*J) && ScalarToTreeEntry.count(&*J))
        LiveValues.insert(cast<Instruction>(&*J));
    }

    // Now find the sequence of instructions between PrevInst and Inst.
    BasicBlock::reverse_iterator InstIt(Inst), PrevInstIt(PrevInst);
    --PrevInstIt;
    while (InstIt != PrevInstIt) {
      if (PrevInstIt == PrevInst->getParent()->rend()) {
        PrevInstIt = Inst->getParent()->rbegin();
        continue;
      }

      if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) {
        SmallVector<Type*, 4> V;
        for (auto *II : LiveValues)
          V.push_back(VectorType::get(II->getType(), BundleWidth));
        Cost += TTI->getCostOfKeepingLiveOverCall(V);
      }

      ++PrevInstIt;
    }

    PrevInst = Inst;
  }

  DEBUG(dbgs() << "SLP: SpillCost=" << Cost << "\n");
  return Cost;
}

int BoUpSLP::getTreeCost() {
  int Cost = 0;
  DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
        VectorizableTree.size() << ".\n");

  // We only vectorize tiny trees if they are fully vectorizable.
  if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) {
    if (VectorizableTree.empty()) {
      assert(!ExternalUses.size() && "We should not have any external users");
    }
    return INT_MAX;
  }

  unsigned BundleWidth = VectorizableTree[0].Scalars.size();

  for (unsigned i = 0, e = VectorizableTree.size(); i != e; ++i) {
    int C = getEntryCost(&VectorizableTree[i]);
    DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
          << *VectorizableTree[i].Scalars[0] << ".\n");
    Cost += C;
  }

  SmallSet<Value *, 16> ExtractCostCalculated;
  int ExtractCost = 0;
  for (UserList::iterator I = ExternalUses.begin(), E = ExternalUses.end();
       I != E; ++I) {
    // We only add extract cost once for the same scalar.
    if (!ExtractCostCalculated.insert(I->Scalar).second)
      continue;

    // Uses by ephemeral values are free (because the ephemeral value will be
    // removed prior to code generation, and so the extraction will be
    // removed as well).
    if (EphValues.count(I->User))
      continue;

    VectorType *VecTy = VectorType::get(I->Scalar->getType(), BundleWidth);
    ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                           I->Lane);
  }

  Cost += getSpillCost();

  DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost << ".\n");
  return Cost + ExtractCost;
}

int BoUpSLP::getGatherCost(Type *Ty) {
  int Cost = 0;
  for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
    Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
  return Cost;
}

int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
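  // E.g. gathering four scalars into a <4 x i32> is charged as four
  // insertelement operations, one per lane.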
  return getGatherCost(VecTy);
}

Value *BoUpSLP::getPointerOperand(Value *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

unsigned BoUpSLP::getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers of the same type.
  if (PtrA == PtrB || PtrA->getType() != PtrB->getType())
    return false;

  unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(PtrBitWidth, DL.getTypeStoreSize(Ty));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final
  // delta equal to the size.
  APInt BaseDelta = Size - OffsetDelta;

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE->getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE->getSCEV(PtrB);
  const SCEV *C = SE->getConstant(BaseDelta);
  const SCEV *X = SE->getAddExpr(PtrSCEVA, C);
  return X == PtrSCEVB;
}

// Reorder commutative operations in an alternate shuffle if the resulting
// vectors are consecutive loads. This would allow us to vectorize the tree.
// If we have something like:
//   load a[0] - load b[0]
//   load b[1] + load a[1]
//   load a[2] - load b[2]
//   load a[3] + load b[3]
// Reordering the second line (load b[1], load a[1]) would allow us to
// vectorize this code.
void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL,
                                        SmallVectorImpl<Value *> &Left,
                                        SmallVectorImpl<Value *> &Right) {
  const DataLayout &DL = F->getParent()->getDataLayout();

  // Push the left and right operands of each binary operation into Left and
  // Right.
  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    Left.push_back(cast<Instruction>(VL[i])->getOperand(0));
    Right.push_back(cast<Instruction>(VL[i])->getOperand(1));
  }

  // Reorder if we have a commutative operation and consecutive accesses
  // are on either side of the alternate instructions.
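  // E.g. in the pattern above, swapping the operands of the second row
  // turns (load b[1], load a[1]) into (load a[1], load b[1]), so Left
  // becomes the consecutive loads a[0..3] and Right the loads b[0..3].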
  for (unsigned j = 0; j < VL.size() - 1; ++j) {
    if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
        Instruction *VL1 = cast<Instruction>(VL[j]);
        Instruction *VL2 = cast<Instruction>(VL[j + 1]);
        if (isConsecutiveAccess(L, L1, DL) && VL1->isCommutative()) {
          std::swap(Left[j], Right[j]);
          continue;
        } else if (isConsecutiveAccess(L, L1, DL) && VL2->isCommutative()) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
        // else unchanged
      }
    }
    if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
        Instruction *VL1 = cast<Instruction>(VL[j]);
        Instruction *VL2 = cast<Instruction>(VL[j + 1]);
        if (isConsecutiveAccess(L, L1, DL) && VL1->isCommutative()) {
          std::swap(Left[j], Right[j]);
          continue;
        } else if (isConsecutiveAccess(L, L1, DL) && VL2->isCommutative()) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
        // else unchanged
      }
    }
  }
}

void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                             SmallVectorImpl<Value *> &Left,
                                             SmallVectorImpl<Value *> &Right) {

  SmallVector<Value *, 16> OrigLeft, OrigRight;

  bool AllSameOpcodeLeft = true;
  bool AllSameOpcodeRight = true;
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *I = cast<Instruction>(VL[i]);
    Value *VLeft = I->getOperand(0);
    Value *VRight = I->getOperand(1);

    OrigLeft.push_back(VLeft);
    OrigRight.push_back(VRight);

    Instruction *ILeft = dyn_cast<Instruction>(VLeft);
    Instruction *IRight = dyn_cast<Instruction>(VRight);

    // Check whether all operands on one side have the same opcode. In this
    // case we want to preserve the original order and not make things worse
    // by reordering.
    if (i && AllSameOpcodeLeft && ILeft) {
      if (Instruction *PLeft = dyn_cast<Instruction>(OrigLeft[i - 1])) {
        if (PLeft->getOpcode() != ILeft->getOpcode())
          AllSameOpcodeLeft = false;
      } else
        AllSameOpcodeLeft = false;
    }
    if (i && AllSameOpcodeRight && IRight) {
      if (Instruction *PRight = dyn_cast<Instruction>(OrigRight[i - 1])) {
        if (PRight->getOpcode() != IRight->getOpcode())
          AllSameOpcodeRight = false;
      } else
        AllSameOpcodeRight = false;
    }

    // Sort two opcodes. In the code below we try to preserve the ability to
    // use a broadcast of values instead of individual inserts.
    //   vl1 = load
    //   vl2 = phi
    //   vr1 = load
    //   vr2 = vr1
    //       = vl1 x vr1
    //       = vl2 x vr2
    // If we just sorted according to opcode we would leave the first line
    // intact but we would swap vl2 with vr2 because opcode(phi) >
    // opcode(load).
    //       = vl1 x vr1
    //       = vr2 x vl2
    // Because vr2 and vr1 are from the same load we lose the opportunity of
    // a broadcast for the packed right side in the backend: we have
    // [vr1, vl2] instead of [vr1, vr2=vr1].
    if (ILeft && IRight) {
      if (!i && ILeft->getOpcode() > IRight->getOpcode()) {
        Left.push_back(IRight);
        Right.push_back(ILeft);
      } else if (i && ILeft->getOpcode() > IRight->getOpcode() &&
                 Right[i - 1] != IRight) {
        // Try not to destroy a broadcast for no apparent benefit.
        Left.push_back(IRight);
        Right.push_back(ILeft);
      } else if (i && ILeft->getOpcode() == IRight->getOpcode() &&
                 Right[i - 1] == ILeft) {
        // Try to preserve broadcasts.
        Left.push_back(IRight);
        Right.push_back(ILeft);
      } else if (i && ILeft->getOpcode() == IRight->getOpcode() &&
                 Left[i - 1] == IRight) {
        // Try to preserve broadcasts.
        Left.push_back(IRight);
        Right.push_back(ILeft);
      } else {
        Left.push_back(ILeft);
        Right.push_back(IRight);
      }
      continue;
    }
    // At most one operand is an instruction; put that instruction on the
    // right.
    if (ILeft) {
      Left.push_back(VRight);
      Right.push_back(ILeft);
      continue;
    }
    Left.push_back(VLeft);
    Right.push_back(VRight);
  }

  bool LeftBroadcast = isSplat(Left);
  bool RightBroadcast = isSplat(Right);

  // If one of the operand orders ends up forming a broadcast, return this
  // operand order.
  if (LeftBroadcast || RightBroadcast)
    return;

  // Don't reorder if the operands were good to begin with.
  if (AllSameOpcodeRight || AllSameOpcodeLeft) {
    Left = OrigLeft;
    Right = OrigRight;
  }

  const DataLayout &DL = F->getParent()->getDataLayout();

  // Finally check if we can get a longer vectorizable chain by reordering
  // without breaking the good operand order detected above.
  // E.g. if we have something like:
  //   load a[0]  load b[0]
  //   load b[1]  load a[1]
  //   load a[2]  load b[2]
  //   load a[3]  load b[3]
  // Reordering the second line (load b[1], load a[1]) would allow us to
  // vectorize this code and we still retain the AllSameOpcode property.
  // FIXME: This load reordering might break AllSameOpcode in some rare cases
  // such as:
  //   add a[0],c[0]  load b[0]
  //   add a[1],c[2]  load b[1]
  //   b[2]           load b[2]
  //   add a[3],c[3]  load b[3]
  for (unsigned j = 0; j < VL.size() - 1; ++j) {
    if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
        if (isConsecutiveAccess(L, L1, DL)) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
      }
    }
    if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
        if (isConsecutiveAccess(L, L1, DL)) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
      }
    }
    // else unchanged
  }
}

void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
  Instruction *VL0 = cast<Instruction>(VL[0]);
  BasicBlock::iterator NextInst = VL0;
  ++NextInst;
  Builder.SetInsertPoint(VL0->getParent(), NextInst);
  Builder.SetCurrentDebugLocation(VL0->getDebugLoc());
}

Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
  Value *Vec = UndefValue::get(Ty);
  // Generate the 'InsertElement' instructions.
  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
    if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(Insrt);
      CSEBlocks.insert(Insrt->getParent());

      // Add to our 'need-to-extract' list.
      if (ScalarToTreeEntry.count(VL[i])) {
        int Idx = ScalarToTreeEntry[VL[i]];
        TreeEntry *E = &VectorizableTree[Idx];
        // Find which lane we need to extract.
        int FoundLane = -1;
        for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for?
2061 if (E->Scalars[Lane] == VL[i]) { 2062 FoundLane = Lane; 2063 break; 2064 } 2065 } 2066 assert(FoundLane >= 0 && "Could not find the correct lane"); 2067 ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane)); 2068 } 2069 } 2070 } 2071 2072 return Vec; 2073 } 2074 2075 Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const { 2076 SmallDenseMap<Value*, int>::const_iterator Entry 2077 = ScalarToTreeEntry.find(VL[0]); 2078 if (Entry != ScalarToTreeEntry.end()) { 2079 int Idx = Entry->second; 2080 const TreeEntry *En = &VectorizableTree[Idx]; 2081 if (En->isSame(VL) && En->VectorizedValue) 2082 return En->VectorizedValue; 2083 } 2084 return nullptr; 2085 } 2086 2087 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) { 2088 if (ScalarToTreeEntry.count(VL[0])) { 2089 int Idx = ScalarToTreeEntry[VL[0]]; 2090 TreeEntry *E = &VectorizableTree[Idx]; 2091 if (E->isSame(VL)) 2092 return vectorizeTree(E); 2093 } 2094 2095 Type *ScalarTy = VL[0]->getType(); 2096 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 2097 ScalarTy = SI->getValueOperand()->getType(); 2098 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 2099 2100 return Gather(VL, VecTy); 2101 } 2102 2103 Value *BoUpSLP::vectorizeTree(TreeEntry *E) { 2104 IRBuilder<>::InsertPointGuard Guard(Builder); 2105 2106 if (E->VectorizedValue) { 2107 DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); 2108 return E->VectorizedValue; 2109 } 2110 2111 Instruction *VL0 = cast<Instruction>(E->Scalars[0]); 2112 Type *ScalarTy = VL0->getType(); 2113 if (StoreInst *SI = dyn_cast<StoreInst>(VL0)) 2114 ScalarTy = SI->getValueOperand()->getType(); 2115 VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size()); 2116 2117 if (E->NeedToGather) { 2118 setInsertPointAfterBundle(E->Scalars); 2119 return Gather(E->Scalars, VecTy); 2120 } 2121 2122 const DataLayout &DL = F->getParent()->getDataLayout(); 2123 unsigned Opcode = getSameOpcode(E->Scalars); 2124 2125 switch (Opcode) { 2126 case Instruction::PHI: { 2127 PHINode *PH = dyn_cast<PHINode>(VL0); 2128 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI()); 2129 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 2130 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 2131 E->VectorizedValue = NewPhi; 2132 2133 // PHINodes may have multiple entries from the same block. We want to 2134 // visit every block once. 2135 SmallSet<BasicBlock*, 4> VisitedBBs; 2136 2137 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 2138 ValueList Operands; 2139 BasicBlock *IBB = PH->getIncomingBlock(i); 2140 2141 if (!VisitedBBs.insert(IBB).second) { 2142 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 2143 continue; 2144 } 2145 2146 // Prepare the operand vector. 
2147 for (Value *V : E->Scalars) 2148 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(IBB)); 2149 2150 Builder.SetInsertPoint(IBB->getTerminator()); 2151 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 2152 Value *Vec = vectorizeTree(Operands); 2153 NewPhi->addIncoming(Vec, IBB); 2154 } 2155 2156 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 2157 "Invalid number of incoming values"); 2158 return NewPhi; 2159 } 2160 2161 case Instruction::ExtractElement: { 2162 if (CanReuseExtract(E->Scalars)) { 2163 Value *V = VL0->getOperand(0); 2164 E->VectorizedValue = V; 2165 return V; 2166 } 2167 return Gather(E->Scalars, VecTy); 2168 } 2169 case Instruction::ZExt: 2170 case Instruction::SExt: 2171 case Instruction::FPToUI: 2172 case Instruction::FPToSI: 2173 case Instruction::FPExt: 2174 case Instruction::PtrToInt: 2175 case Instruction::IntToPtr: 2176 case Instruction::SIToFP: 2177 case Instruction::UIToFP: 2178 case Instruction::Trunc: 2179 case Instruction::FPTrunc: 2180 case Instruction::BitCast: { 2181 ValueList INVL; 2182 for (Value *V : E->Scalars) 2183 INVL.push_back(cast<Instruction>(V)->getOperand(0)); 2184 2185 setInsertPointAfterBundle(E->Scalars); 2186 2187 Value *InVec = vectorizeTree(INVL); 2188 2189 if (Value *V = alreadyVectorized(E->Scalars)) 2190 return V; 2191 2192 CastInst *CI = dyn_cast<CastInst>(VL0); 2193 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy); 2194 E->VectorizedValue = V; 2195 ++NumVectorInstructions; 2196 return V; 2197 } 2198 case Instruction::FCmp: 2199 case Instruction::ICmp: { 2200 ValueList LHSV, RHSV; 2201 for (Value *V : E->Scalars) { 2202 LHSV.push_back(cast<Instruction>(V)->getOperand(0)); 2203 RHSV.push_back(cast<Instruction>(V)->getOperand(1)); 2204 } 2205 2206 setInsertPointAfterBundle(E->Scalars); 2207 2208 Value *L = vectorizeTree(LHSV); 2209 Value *R = vectorizeTree(RHSV); 2210 2211 if (Value *V = alreadyVectorized(E->Scalars)) 2212 return V; 2213 2214 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 2215 Value *V; 2216 if (Opcode == Instruction::FCmp) 2217 V = Builder.CreateFCmp(P0, L, R); 2218 else 2219 V = Builder.CreateICmp(P0, L, R); 2220 2221 E->VectorizedValue = V; 2222 ++NumVectorInstructions; 2223 return V; 2224 } 2225 case Instruction::Select: { 2226 ValueList TrueVec, FalseVec, CondVec; 2227 for (Value *V : E->Scalars) { 2228 CondVec.push_back(cast<Instruction>(V)->getOperand(0)); 2229 TrueVec.push_back(cast<Instruction>(V)->getOperand(1)); 2230 FalseVec.push_back(cast<Instruction>(V)->getOperand(2)); 2231 } 2232 2233 setInsertPointAfterBundle(E->Scalars); 2234 2235 Value *Cond = vectorizeTree(CondVec); 2236 Value *True = vectorizeTree(TrueVec); 2237 Value *False = vectorizeTree(FalseVec); 2238 2239 if (Value *V = alreadyVectorized(E->Scalars)) 2240 return V; 2241 2242 Value *V = Builder.CreateSelect(Cond, True, False); 2243 E->VectorizedValue = V; 2244 ++NumVectorInstructions; 2245 return V; 2246 } 2247 case Instruction::Add: 2248 case Instruction::FAdd: 2249 case Instruction::Sub: 2250 case Instruction::FSub: 2251 case Instruction::Mul: 2252 case Instruction::FMul: 2253 case Instruction::UDiv: 2254 case Instruction::SDiv: 2255 case Instruction::FDiv: 2256 case Instruction::URem: 2257 case Instruction::SRem: 2258 case Instruction::FRem: 2259 case Instruction::Shl: 2260 case Instruction::LShr: 2261 case Instruction::AShr: 2262 case Instruction::And: 2263 case Instruction::Or: 2264 case Instruction::Xor: { 2265 ValueList LHSVL, RHSVL; 2266 if (isa<BinaryOperator>(VL0) && 
VL0->isCommutative()) 2267 reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL); 2268 else 2269 for (Value *V : E->Scalars) { 2270 LHSVL.push_back(cast<Instruction>(V)->getOperand(0)); 2271 RHSVL.push_back(cast<Instruction>(V)->getOperand(1)); 2272 } 2273 2274 setInsertPointAfterBundle(E->Scalars); 2275 2276 Value *LHS = vectorizeTree(LHSVL); 2277 Value *RHS = vectorizeTree(RHSVL); 2278 2279 if (LHS == RHS && isa<Instruction>(LHS)) { 2280 assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order"); 2281 } 2282 2283 if (Value *V = alreadyVectorized(E->Scalars)) 2284 return V; 2285 2286 BinaryOperator *BinOp = cast<BinaryOperator>(VL0); 2287 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS); 2288 E->VectorizedValue = V; 2289 propagateIRFlags(E->VectorizedValue, E->Scalars); 2290 ++NumVectorInstructions; 2291 2292 if (Instruction *I = dyn_cast<Instruction>(V)) 2293 return propagateMetadata(I, E->Scalars); 2294 2295 return V; 2296 } 2297 case Instruction::Load: { 2298 // Loads are inserted at the head of the tree because we don't want to 2299 // sink them all the way down past store instructions. 2300 setInsertPointAfterBundle(E->Scalars); 2301 2302 LoadInst *LI = cast<LoadInst>(VL0); 2303 Type *ScalarLoadTy = LI->getType(); 2304 unsigned AS = LI->getPointerAddressSpace(); 2305 2306 Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(), 2307 VecTy->getPointerTo(AS)); 2308 2309 // The pointer operand uses an in-tree scalar so we add the new BitCast to 2310 // ExternalUses list to make sure that an extract will be generated in the 2311 // future. 2312 if (ScalarToTreeEntry.count(LI->getPointerOperand())) 2313 ExternalUses.push_back( 2314 ExternalUser(LI->getPointerOperand(), cast<User>(VecPtr), 0)); 2315 2316 unsigned Alignment = LI->getAlignment(); 2317 LI = Builder.CreateLoad(VecPtr); 2318 if (!Alignment) { 2319 Alignment = DL.getABITypeAlignment(ScalarLoadTy); 2320 } 2321 LI->setAlignment(Alignment); 2322 E->VectorizedValue = LI; 2323 ++NumVectorInstructions; 2324 return propagateMetadata(LI, E->Scalars); 2325 } 2326 case Instruction::Store: { 2327 StoreInst *SI = cast<StoreInst>(VL0); 2328 unsigned Alignment = SI->getAlignment(); 2329 unsigned AS = SI->getPointerAddressSpace(); 2330 2331 ValueList ValueOp; 2332 for (Value *V : E->Scalars) 2333 ValueOp.push_back(cast<StoreInst>(V)->getValueOperand()); 2334 2335 setInsertPointAfterBundle(E->Scalars); 2336 2337 Value *VecValue = vectorizeTree(ValueOp); 2338 Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(), 2339 VecTy->getPointerTo(AS)); 2340 StoreInst *S = Builder.CreateStore(VecValue, VecPtr); 2341 2342 // The pointer operand uses an in-tree scalar so we add the new BitCast to 2343 // ExternalUses list to make sure that an extract will be generated in the 2344 // future. 
2345 if (ScalarToTreeEntry.count(SI->getPointerOperand())) 2346 ExternalUses.push_back( 2347 ExternalUser(SI->getPointerOperand(), cast<User>(VecPtr), 0)); 2348 2349 if (!Alignment) { 2350 Alignment = DL.getABITypeAlignment(SI->getValueOperand()->getType()); 2351 } 2352 S->setAlignment(Alignment); 2353 E->VectorizedValue = S; 2354 ++NumVectorInstructions; 2355 return propagateMetadata(S, E->Scalars); 2356 } 2357 case Instruction::GetElementPtr: { 2358 setInsertPointAfterBundle(E->Scalars); 2359 2360 ValueList Op0VL; 2361 for (Value *V : E->Scalars) 2362 Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0)); 2363 2364 Value *Op0 = vectorizeTree(Op0VL); 2365 2366 std::vector<Value *> OpVecs; 2367 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e; 2368 ++j) { 2369 ValueList OpVL; 2370 for (Value *V : E->Scalars) 2371 OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j)); 2372 2373 Value *OpVec = vectorizeTree(OpVL); 2374 OpVecs.push_back(OpVec); 2375 } 2376 2377 Value *V = Builder.CreateGEP( 2378 cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs); 2379 E->VectorizedValue = V; 2380 ++NumVectorInstructions; 2381 2382 if (Instruction *I = dyn_cast<Instruction>(V)) 2383 return propagateMetadata(I, E->Scalars); 2384 2385 return V; 2386 } 2387 case Instruction::Call: { 2388 CallInst *CI = cast<CallInst>(VL0); 2389 setInsertPointAfterBundle(E->Scalars); 2390 Function *FI; 2391 Intrinsic::ID IID = Intrinsic::not_intrinsic; 2392 Value *ScalarArg = nullptr; 2393 if (CI && (FI = CI->getCalledFunction())) { 2394 IID = FI->getIntrinsicID(); 2395 } 2396 std::vector<Value *> OpVecs; 2397 for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) { 2398 ValueList OpVL; 2399 // ctlz,cttz and powi are special intrinsics whose second argument is 2400 // a scalar. This argument should not be vectorized. 2401 if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) { 2402 CallInst *CEI = cast<CallInst>(E->Scalars[0]); 2403 ScalarArg = CEI->getArgOperand(j); 2404 OpVecs.push_back(CEI->getArgOperand(j)); 2405 continue; 2406 } 2407 for (Value *V : E->Scalars) { 2408 CallInst *CEI = cast<CallInst>(V); 2409 OpVL.push_back(CEI->getArgOperand(j)); 2410 } 2411 2412 Value *OpVec = vectorizeTree(OpVL); 2413 DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); 2414 OpVecs.push_back(OpVec); 2415 } 2416 2417 Module *M = F->getParent(); 2418 Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI); 2419 Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) }; 2420 Function *CF = Intrinsic::getDeclaration(M, ID, Tys); 2421 Value *V = Builder.CreateCall(CF, OpVecs); 2422 2423 // The scalar argument uses an in-tree scalar so we add the new vectorized 2424 // call to ExternalUses list to make sure that an extract will be 2425 // generated in the future. 
    if (ScalarArg && ScalarToTreeEntry.count(ScalarArg))
      ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));

    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::ShuffleVector: {
    ValueList LHSVL, RHSVL;
    assert(isa<BinaryOperator>(VL0) && "Invalid Shuffle Vector Operand");
    reorderAltShuffleOperands(E->Scalars, LHSVL, RHSVL);
    setInsertPointAfterBundle(E->Scalars);

    Value *LHS = vectorizeTree(LHSVL);
    Value *RHS = vectorizeTree(RHSVL);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    // Create a vector of LHS op1 RHS.
    BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0);
    Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS);

    // Create a vector of LHS op2 RHS.
    Instruction *VL1 = cast<Instruction>(E->Scalars[1]);
    BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1);
    Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS);

    // Create a shuffle to take the alternate operations from the vectors.
    // Also, gather up odd and even scalar ops to propagate IR flags to
    // each vector operation.
    ValueList OddScalars, EvenScalars;
    unsigned e = E->Scalars.size();
    SmallVector<Constant *, 8> Mask(e);
    for (unsigned i = 0; i < e; ++i) {
      if (i & 1) {
        Mask[i] = Builder.getInt32(e + i);
        OddScalars.push_back(E->Scalars[i]);
      } else {
        Mask[i] = Builder.getInt32(i);
        EvenScalars.push_back(E->Scalars[i]);
      }
    }

    Value *ShuffleMask = ConstantVector::get(Mask);
    propagateIRFlags(V0, EvenScalars);
    propagateIRFlags(V1, OddScalars);

    Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
    E->VectorizedValue = V;
    ++NumVectorInstructions;
    if (Instruction *I = dyn_cast<Instruction>(V))
      return propagateMetadata(I, E->Scalars);

    return V;
  }
  default:
    llvm_unreachable("unknown inst");
  }
  return nullptr;
}

Value *BoUpSLP::vectorizeTree() {

  // All blocks must be scheduled before any instructions are inserted.
  for (auto &BSIter : BlocksSchedules) {
    scheduleBlock(BSIter.second.get());
  }

  Builder.SetInsertPoint(F->getEntryBlock().begin());
  vectorizeTree(&VectorizableTree[0]);

  DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values.\n");

  // Extract all of the elements with the external uses.
  for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end();
       it != e; ++it) {
    Value *Scalar = it->Scalar;
    llvm::User *User = it->User;

    // Skip users that we already RAUWed. This happens when one instruction
    // has multiple uses of the same value.
    if (std::find(Scalar->user_begin(), Scalar->user_end(), User) ==
        Scalar->user_end())
      continue;
    assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");

    int Idx = ScalarToTreeEntry[Scalar];
    TreeEntry *E = &VectorizableTree[Idx];
    assert(!E->NeedToGather && "Extracting from a gather list");

    Value *Vec = E->VectorizedValue;
    assert(Vec && "Can't find vectorizable value");

    Value *Lane = Builder.getInt32(it->Lane);
    // Generate extracts for out-of-tree users.
    // Find the insertion point for the extractelement lane.
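    // The extract must be available where the user needs it: for a PHI user
    // we extract in the corresponding incoming block, for an ordinary
    // in-function user right before that user, and if the vectorized value
    // was folded to a non-instruction we fall back to the entry block.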
2523 if (isa<Instruction>(Vec)){ 2524 if (PHINode *PH = dyn_cast<PHINode>(User)) { 2525 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { 2526 if (PH->getIncomingValue(i) == Scalar) { 2527 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); 2528 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 2529 CSEBlocks.insert(PH->getIncomingBlock(i)); 2530 PH->setOperand(i, Ex); 2531 } 2532 } 2533 } else { 2534 Builder.SetInsertPoint(cast<Instruction>(User)); 2535 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 2536 CSEBlocks.insert(cast<Instruction>(User)->getParent()); 2537 User->replaceUsesOfWith(Scalar, Ex); 2538 } 2539 } else { 2540 Builder.SetInsertPoint(F->getEntryBlock().begin()); 2541 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 2542 CSEBlocks.insert(&F->getEntryBlock()); 2543 User->replaceUsesOfWith(Scalar, Ex); 2544 } 2545 2546 DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 2547 } 2548 2549 // For each vectorized value: 2550 for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) { 2551 TreeEntry *Entry = &VectorizableTree[EIdx]; 2552 2553 // For each lane: 2554 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 2555 Value *Scalar = Entry->Scalars[Lane]; 2556 // No need to handle users of gathered values. 2557 if (Entry->NeedToGather) 2558 continue; 2559 2560 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 2561 2562 Type *Ty = Scalar->getType(); 2563 if (!Ty->isVoidTy()) { 2564 #ifndef NDEBUG 2565 for (User *U : Scalar->users()) { 2566 DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 2567 2568 assert((ScalarToTreeEntry.count(U) || 2569 // It is legal to replace users in the ignorelist by undef. 2570 (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), U) != 2571 UserIgnoreList.end())) && 2572 "Replacing out-of-tree value with undef"); 2573 } 2574 #endif 2575 Value *Undef = UndefValue::get(Ty); 2576 Scalar->replaceAllUsesWith(Undef); 2577 } 2578 DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 2579 eraseInstruction(cast<Instruction>(Scalar)); 2580 } 2581 } 2582 2583 Builder.ClearInsertionPoint(); 2584 2585 return VectorizableTree[0].VectorizedValue; 2586 } 2587 2588 void BoUpSLP::optimizeGatherSequence() { 2589 DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size() 2590 << " gather sequences instructions.\n"); 2591 // LICM InsertElementInst sequences. 2592 for (SetVector<Instruction *>::iterator it = GatherSeq.begin(), 2593 e = GatherSeq.end(); it != e; ++it) { 2594 InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it); 2595 2596 if (!Insert) 2597 continue; 2598 2599 // Check if this block is inside a loop. 2600 Loop *L = LI->getLoopFor(Insert->getParent()); 2601 if (!L) 2602 continue; 2603 2604 // Check if it has a preheader. 2605 BasicBlock *PreHeader = L->getLoopPreheader(); 2606 if (!PreHeader) 2607 continue; 2608 2609 // If the vector or the element that we insert into it are 2610 // instructions that are defined in this basic block then we can't 2611 // hoist this instruction. 2612 Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0)); 2613 Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1)); 2614 if (CurrVec && L->contains(CurrVec)) 2615 continue; 2616 if (NewElem && L->contains(NewElem)) 2617 continue; 2618 2619 // We can hoist this instruction. Move it to the pre-header. 2620 Insert->moveBefore(PreHeader->getTerminator()); 2621 } 2622 2623 // Make a list of all reachable blocks in our CSE queue. 
2624 SmallVector<const DomTreeNode *, 8> CSEWorkList; 2625 CSEWorkList.reserve(CSEBlocks.size()); 2626 for (BasicBlock *BB : CSEBlocks) 2627 if (DomTreeNode *N = DT->getNode(BB)) { 2628 assert(DT->isReachableFromEntry(N)); 2629 CSEWorkList.push_back(N); 2630 } 2631 2632 // Sort blocks by domination. This ensures we visit a block after all blocks 2633 // dominating it are visited. 2634 std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(), 2635 [this](const DomTreeNode *A, const DomTreeNode *B) { 2636 return DT->properlyDominates(A, B); 2637 }); 2638 2639 // Perform O(N^2) search over the gather sequences and merge identical 2640 // instructions. TODO: We can further optimize this scan if we split the 2641 // instructions into different buckets based on the insert lane. 2642 SmallVector<Instruction *, 16> Visited; 2643 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 2644 assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 2645 "Worklist not sorted properly!"); 2646 BasicBlock *BB = (*I)->getBlock(); 2647 // For all instructions in blocks containing gather sequences: 2648 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) { 2649 Instruction *In = it++; 2650 if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In)) 2651 continue; 2652 2653 // Check if we can replace this instruction with any of the 2654 // visited instructions. 2655 for (SmallVectorImpl<Instruction *>::iterator v = Visited.begin(), 2656 ve = Visited.end(); 2657 v != ve; ++v) { 2658 if (In->isIdenticalTo(*v) && 2659 DT->dominates((*v)->getParent(), In->getParent())) { 2660 In->replaceAllUsesWith(*v); 2661 eraseInstruction(In); 2662 In = nullptr; 2663 break; 2664 } 2665 } 2666 if (In) { 2667 assert(std::find(Visited.begin(), Visited.end(), In) == Visited.end()); 2668 Visited.push_back(In); 2669 } 2670 } 2671 } 2672 CSEBlocks.clear(); 2673 GatherSeq.clear(); 2674 } 2675 2676 // Groups the instructions to a bundle (which is then a single scheduling entity) 2677 // and schedules instructions until the bundle gets ready. 2678 bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, 2679 BoUpSLP *SLP) { 2680 if (isa<PHINode>(VL[0])) 2681 return true; 2682 2683 // Initialize the instruction bundle. 2684 Instruction *OldScheduleEnd = ScheduleEnd; 2685 ScheduleData *PrevInBundle = nullptr; 2686 ScheduleData *Bundle = nullptr; 2687 bool ReSchedule = false; 2688 DEBUG(dbgs() << "SLP: bundle: " << *VL[0] << "\n"); 2689 for (Value *V : VL) { 2690 extendSchedulingRegion(V); 2691 ScheduleData *BundleMember = getScheduleData(V); 2692 assert(BundleMember && 2693 "no ScheduleData for bundle member (maybe not in same basic block)"); 2694 if (BundleMember->IsScheduled) { 2695 // A bundle member was scheduled as single instruction before and now 2696 // needs to be scheduled as part of the bundle. We just get rid of the 2697 // existing schedule. 2698 DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember 2699 << " was already scheduled\n"); 2700 ReSchedule = true; 2701 } 2702 assert(BundleMember->isSchedulingEntity() && 2703 "bundle member already part of other bundle"); 2704 if (PrevInBundle) { 2705 PrevInBundle->NextInBundle = BundleMember; 2706 } else { 2707 Bundle = BundleMember; 2708 } 2709 BundleMember->UnscheduledDepsInBundle = 0; 2710 Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps; 2711 2712 // Group the instructions to a bundle. 
    BundleMember->FirstInBundle = Bundle;
    PrevInBundle = BundleMember;
  }
  if (ScheduleEnd != OldScheduleEnd) {
    // The scheduling region got new instructions at the lower end (or it is
    // a new region for the first bundle). This makes it necessary to
    // recalculate all dependencies.
    // It is seldom that this needs to be done a second time after adding the
    // initial bundle to the region.
    for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
      ScheduleData *SD = getScheduleData(I);
      SD->clearDependencies();
    }
    ReSchedule = true;
  }
  if (ReSchedule) {
    resetSchedule();
    initialFillReadyList(ReadyInsts);
  }

  DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
               << BB->getName() << "\n");

  calculateDependencies(Bundle, true, SLP);

  // Now try to schedule the new bundle. As soon as the bundle is "ready" it
  // means that there are no cyclic dependencies and we can schedule it.
  // Note that it's important that we don't "schedule" the bundle yet (see
  // cancelScheduling).
  while (!Bundle->isReady() && !ReadyInsts.empty()) {

    ScheduleData *pickedSD = ReadyInsts.back();
    ReadyInsts.pop_back();

    if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) {
      schedule(pickedSD, ReadyInsts);
    }
  }
  return Bundle->isReady();
}

void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL) {
  if (isa<PHINode>(VL[0]))
    return;

  ScheduleData *Bundle = getScheduleData(VL[0]);
  DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
  assert(!Bundle->IsScheduled &&
         "Can't cancel bundle which is already scheduled");
  assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
         "tried to unbundle something which is not a bundle");

  // Un-bundle: make single instructions out of the bundle.
  ScheduleData *BundleMember = Bundle;
  while (BundleMember) {
    assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
    BundleMember->FirstInBundle = BundleMember;
    ScheduleData *Next = BundleMember->NextInBundle;
    BundleMember->NextInBundle = nullptr;
    BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
    if (BundleMember->UnscheduledDepsInBundle == 0) {
      ReadyInsts.insert(BundleMember);
    }
    BundleMember = Next;
  }
}

void BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V) {
  if (getScheduleData(V))
    return;
  Instruction *I = dyn_cast<Instruction>(V);
  assert(I && "bundle member must be an instruction");
  assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
  if (!ScheduleStart) {
    // It's the first instruction in the new region.
    initScheduleData(I, I->getNextNode(), nullptr, nullptr);
    ScheduleStart = I;
    ScheduleEnd = I->getNextNode();
    assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
    DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
    return;
  }
  // Search up and down at the same time, because we don't know if the new
  // instruction is above or below the existing scheduling region.
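  // E.g. if I sits a few instructions above ScheduleStart, the upward scan
  // reaches I first and the region start is moved up to I; symmetrically,
  // the downward scan extends ScheduleEnd when I lies below the region.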
2797 BasicBlock::reverse_iterator UpIter(ScheduleStart); 2798 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 2799 BasicBlock::iterator DownIter(ScheduleEnd); 2800 BasicBlock::iterator LowerEnd = BB->end(); 2801 for (;;) { 2802 if (UpIter != UpperEnd) { 2803 if (&*UpIter == I) { 2804 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 2805 ScheduleStart = I; 2806 DEBUG(dbgs() << "SLP: extend schedule region start to " << *I << "\n"); 2807 return; 2808 } 2809 UpIter++; 2810 } 2811 if (DownIter != LowerEnd) { 2812 if (&*DownIter == I) { 2813 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 2814 nullptr); 2815 ScheduleEnd = I->getNextNode(); 2816 assert(ScheduleEnd && "tried to vectorize a TerminatorInst?"); 2817 DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); 2818 return; 2819 } 2820 DownIter++; 2821 } 2822 assert((UpIter != UpperEnd || DownIter != LowerEnd) && 2823 "instruction not found in block"); 2824 } 2825 } 2826 2827 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 2828 Instruction *ToI, 2829 ScheduleData *PrevLoadStore, 2830 ScheduleData *NextLoadStore) { 2831 ScheduleData *CurrentLoadStore = PrevLoadStore; 2832 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 2833 ScheduleData *SD = ScheduleDataMap[I]; 2834 if (!SD) { 2835 // Allocate a new ScheduleData for the instruction. 2836 if (ChunkPos >= ChunkSize) { 2837 ScheduleDataChunks.push_back( 2838 llvm::make_unique<ScheduleData[]>(ChunkSize)); 2839 ChunkPos = 0; 2840 } 2841 SD = &(ScheduleDataChunks.back()[ChunkPos++]); 2842 ScheduleDataMap[I] = SD; 2843 SD->Inst = I; 2844 } 2845 assert(!isInSchedulingRegion(SD) && 2846 "new ScheduleData already in scheduling region"); 2847 SD->init(SchedulingRegionID); 2848 2849 if (I->mayReadOrWriteMemory()) { 2850 // Update the linked list of memory accessing instructions. 2851 if (CurrentLoadStore) { 2852 CurrentLoadStore->NextLoadStore = SD; 2853 } else { 2854 FirstLoadStoreInRegion = SD; 2855 } 2856 CurrentLoadStore = SD; 2857 } 2858 } 2859 if (NextLoadStore) { 2860 if (CurrentLoadStore) 2861 CurrentLoadStore->NextLoadStore = NextLoadStore; 2862 } else { 2863 LastLoadStoreInRegion = CurrentLoadStore; 2864 } 2865 } 2866 2867 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, 2868 bool InsertInReadyList, 2869 BoUpSLP *SLP) { 2870 assert(SD->isSchedulingEntity()); 2871 2872 SmallVector<ScheduleData *, 10> WorkList; 2873 WorkList.push_back(SD); 2874 2875 while (!WorkList.empty()) { 2876 ScheduleData *SD = WorkList.back(); 2877 WorkList.pop_back(); 2878 2879 ScheduleData *BundleMember = SD; 2880 while (BundleMember) { 2881 assert(isInSchedulingRegion(BundleMember)); 2882 if (!BundleMember->hasValidDependencies()) { 2883 2884 DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n"); 2885 BundleMember->Dependencies = 0; 2886 BundleMember->resetUnscheduledDeps(); 2887 2888 // Handle def-use chain dependencies. 2889 for (User *U : BundleMember->Inst->users()) { 2890 if (isa<Instruction>(U)) { 2891 ScheduleData *UseSD = getScheduleData(U); 2892 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 2893 BundleMember->Dependencies++; 2894 ScheduleData *DestBundle = UseSD->FirstInBundle; 2895 if (!DestBundle->IsScheduled) { 2896 BundleMember->incrementUnscheduledDeps(1); 2897 } 2898 if (!DestBundle->hasValidDependencies()) { 2899 WorkList.push_back(DestBundle); 2900 } 2901 } 2902 } else { 2903 // I'm not sure if this can ever happen. But we need to be safe. 
2904 // This lets the instruction/bundle never be scheduled and 2905 // eventually disable vectorization. 2906 BundleMember->Dependencies++; 2907 BundleMember->incrementUnscheduledDeps(1); 2908 } 2909 } 2910 2911 // Handle the memory dependencies. 2912 ScheduleData *DepDest = BundleMember->NextLoadStore; 2913 if (DepDest) { 2914 Instruction *SrcInst = BundleMember->Inst; 2915 MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA); 2916 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory(); 2917 unsigned numAliased = 0; 2918 unsigned DistToSrc = 1; 2919 2920 while (DepDest) { 2921 assert(isInSchedulingRegion(DepDest)); 2922 2923 // We have two limits to reduce the complexity: 2924 // 1) AliasedCheckLimit: It's a small limit to reduce calls to 2925 // SLP->isAliased (which is the expensive part in this loop). 2926 // 2) MaxMemDepDistance: It's for very large blocks and it aborts 2927 // the whole loop (even if the loop is fast, it's quadratic). 2928 // It's important for the loop break condition (see below) to 2929 // check this limit even between two read-only instructions. 2930 if (DistToSrc >= MaxMemDepDistance || 2931 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) && 2932 (numAliased >= AliasedCheckLimit || 2933 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) { 2934 2935 // We increment the counter only if the locations are aliased 2936 // (instead of counting all alias checks). This gives a better 2937 // balance between reduced runtime and accurate dependencies. 2938 numAliased++; 2939 2940 DepDest->MemoryDependencies.push_back(BundleMember); 2941 BundleMember->Dependencies++; 2942 ScheduleData *DestBundle = DepDest->FirstInBundle; 2943 if (!DestBundle->IsScheduled) { 2944 BundleMember->incrementUnscheduledDeps(1); 2945 } 2946 if (!DestBundle->hasValidDependencies()) { 2947 WorkList.push_back(DestBundle); 2948 } 2949 } 2950 DepDest = DepDest->NextLoadStore; 2951 2952 // Example, explaining the loop break condition: Let's assume our 2953 // starting instruction is i0 and MaxMemDepDistance = 3. 2954 // 2955 // +--------v--v--v 2956 // i0,i1,i2,i3,i4,i5,i6,i7,i8 2957 // +--------^--^--^ 2958 // 2959 // MaxMemDepDistance let us stop alias-checking at i3 and we add 2960 // dependencies from i0 to i3,i4,.. (even if they are not aliased). 2961 // Previously we already added dependencies from i3 to i6,i7,i8 2962 // (because of MaxMemDepDistance). As we added a dependency from 2963 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 2964 // and we can abort this loop at i6. 
2965 if (DistToSrc >= 2 * MaxMemDepDistance) 2966 break; 2967 DistToSrc++; 2968 } 2969 } 2970 } 2971 BundleMember = BundleMember->NextInBundle; 2972 } 2973 if (InsertInReadyList && SD->isReady()) { 2974 ReadyInsts.push_back(SD); 2975 DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n"); 2976 } 2977 } 2978 } 2979 2980 void BoUpSLP::BlockScheduling::resetSchedule() { 2981 assert(ScheduleStart && 2982 "tried to reset schedule on block which has not been scheduled"); 2983 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 2984 ScheduleData *SD = getScheduleData(I); 2985 assert(isInSchedulingRegion(SD)); 2986 SD->IsScheduled = false; 2987 SD->resetUnscheduledDeps(); 2988 } 2989 ReadyInsts.clear(); 2990 } 2991 2992 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 2993 2994 if (!BS->ScheduleStart) 2995 return; 2996 2997 DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 2998 2999 BS->resetSchedule(); 3000 3001 // For the real scheduling we use a more sophisticated ready-list: it is 3002 // sorted by the original instruction location. This lets the final schedule 3003 // be as close as possible to the original instruction order. 3004 struct ScheduleDataCompare { 3005 bool operator()(ScheduleData *SD1, ScheduleData *SD2) { 3006 return SD2->SchedulingPriority < SD1->SchedulingPriority; 3007 } 3008 }; 3009 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 3010 3011 // Ensure that all dependency data is updated and fill the ready-list with 3012 // initial instructions. 3013 int Idx = 0; 3014 int NumToSchedule = 0; 3015 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 3016 I = I->getNextNode()) { 3017 ScheduleData *SD = BS->getScheduleData(I); 3018 assert( 3019 SD->isPartOfBundle() == (ScalarToTreeEntry.count(SD->Inst) != 0) && 3020 "scheduler and vectorizer have different opinion on what is a bundle"); 3021 SD->FirstInBundle->SchedulingPriority = Idx++; 3022 if (SD->isSchedulingEntity()) { 3023 BS->calculateDependencies(SD, false, this); 3024 NumToSchedule++; 3025 } 3026 } 3027 BS->initialFillReadyList(ReadyInsts); 3028 3029 Instruction *LastScheduledInst = BS->ScheduleEnd; 3030 3031 // Do the "real" scheduling. 3032 while (!ReadyInsts.empty()) { 3033 ScheduleData *picked = *ReadyInsts.begin(); 3034 ReadyInsts.erase(ReadyInsts.begin()); 3035 3036 // Move the scheduled instruction(s) to their dedicated places, if not 3037 // there yet. 3038 ScheduleData *BundleMember = picked; 3039 while (BundleMember) { 3040 Instruction *pickedInst = BundleMember->Inst; 3041 if (LastScheduledInst->getNextNode() != pickedInst) { 3042 BS->BB->getInstList().remove(pickedInst); 3043 BS->BB->getInstList().insert(LastScheduledInst, pickedInst); 3044 } 3045 LastScheduledInst = pickedInst; 3046 BundleMember = BundleMember->NextInBundle; 3047 } 3048 3049 BS->schedule(picked, ReadyInsts); 3050 NumToSchedule--; 3051 } 3052 assert(NumToSchedule == 0 && "could not schedule all instructions"); 3053 3054 // Avoid duplicate scheduling of the block. 3055 BS->ScheduleStart = nullptr; 3056 } 3057 3058 /// The SLPVectorizer Pass. 
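/// Drives the bottom-up SLP builder over each function: blocks are scanned
/// in post order, store chains are collected per underlying base object,
/// and trees that end at stores or at other block-local seeds are
/// vectorized when the cost model deems it profitable.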
3059 struct SLPVectorizer : public FunctionPass { 3060 typedef SmallVector<StoreInst *, 8> StoreList; 3061 typedef MapVector<Value *, StoreList> StoreListMap; 3062 3063 /// Pass identification, replacement for typeid 3064 static char ID; 3065 3066 explicit SLPVectorizer() : FunctionPass(ID) { 3067 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry()); 3068 } 3069 3070 ScalarEvolution *SE; 3071 TargetTransformInfo *TTI; 3072 TargetLibraryInfo *TLI; 3073 AliasAnalysis *AA; 3074 LoopInfo *LI; 3075 DominatorTree *DT; 3076 AssumptionCache *AC; 3077 3078 bool runOnFunction(Function &F) override { 3079 if (skipOptnoneFunction(F)) 3080 return false; 3081 3082 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 3083 TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 3084 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 3085 TLI = TLIP ? &TLIP->getTLI() : nullptr; 3086 AA = &getAnalysis<AliasAnalysis>(); 3087 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 3088 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 3089 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 3090 3091 StoreRefs.clear(); 3092 bool Changed = false; 3093 3094 // If the target claims to have no vector registers don't attempt 3095 // vectorization. 3096 if (!TTI->getNumberOfRegisters(true)) 3097 return false; 3098 3099 // Use the vector register size specified by the target unless overridden 3100 // by a command-line option. 3101 // TODO: It would be better to limit the vectorization factor based on 3102 // data type rather than just register size. For example, x86 AVX has 3103 // 256-bit registers, but it does not support integer operations 3104 // at that width (that requires AVX2). 3105 if (MaxVectorRegSizeOption.getNumOccurrences()) 3106 MaxVecRegSize = MaxVectorRegSizeOption; 3107 else 3108 MaxVecRegSize = TTI->getRegisterBitWidth(true); 3109 3110 // Don't vectorize when the attribute NoImplicitFloat is used. 3111 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 3112 return false; 3113 3114 DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 3115 3116 // Use the bottom up slp vectorizer to construct chains that start with 3117 // store instructions. 3118 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC); 3119 3120 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 3121 // delete instructions. 3122 3123 // Scan the blocks in the function in post order. 3124 for (auto BB : post_order(&F.getEntryBlock())) { 3125 // Vectorize trees that end at stores. 3126 if (unsigned count = collectStores(BB, R)) { 3127 (void)count; 3128 DEBUG(dbgs() << "SLP: Found " << count << " stores to vectorize.\n"); 3129 Changed |= vectorizeStoreChains(R); 3130 } 3131 3132 // Vectorize trees that end at reductions. 
      Changed |= vectorizeChainsInBlock(BB, R);
    }

    if (Changed) {
      R.optimizeGatherSequence();
      DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
      DEBUG(verifyFunction(F));
    }
    return Changed;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    FunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.setPreservesCFG();
  }

private:

  /// \brief Collect memory references and sort them according to their base
  /// object. We sort the stores to their base objects to reduce the cost of
  /// the quadratic search on the stores. TODO: We can further reduce this
  /// cost if we flush the chain creation every time we run into a memory
  /// barrier.
  unsigned collectStores(BasicBlock *BB, BoUpSLP &R);

  /// \brief Try to vectorize a chain that starts at two arithmetic instrs.
  bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R);

  /// \brief Try to vectorize a list of operands.
  /// \param BuildVector A list of users to ignore for the purpose of
  ///                    scheduling and that don't need extracting.
  /// \returns true if a value was vectorized.
  bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
                          ArrayRef<Value *> BuildVector = None,
                          bool allowReorder = false);

  /// \brief Try to vectorize a chain that may start at the operands of \p V.
  bool tryToVectorize(BinaryOperator *V, BoUpSLP &R);

  /// \brief Vectorize the stores that were collected in StoreRefs.
  bool vectorizeStoreChains(BoUpSLP &R);

  /// \brief Scan the basic block and look for patterns that are likely to
  /// start a vectorization chain.
  bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R);

  bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold,
                           BoUpSLP &R, unsigned VecRegSize);

  bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold,
                       BoUpSLP &R);
private:
  StoreListMap StoreRefs;
  unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
};

/// \brief Check that the Values in the slice in the VL array are still
/// existent in the WeakVH array.
/// Vectorization of part of the VL array may cause later values in the VL
/// array to become invalid. We track when this has happened in the WeakVH
/// array.
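/// In other words, this returns true iff some value in the slice
/// VL[SliceBegin, SliceBegin + SliceSize) no longer matches its tracked
/// WeakVH, i.e. it has been RAUWed or deleted in the meantime.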
static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, ArrayRef<WeakVH> VH,
                               unsigned SliceBegin, unsigned SliceSize) {
  VL = VL.slice(SliceBegin, SliceSize);
  VH = VH.slice(SliceBegin, SliceSize);
  return !std::equal(VL.begin(), VL.end(), VH.begin());
}

bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
                                        int CostThreshold, BoUpSLP &R,
                                        unsigned VecRegSize) {
  unsigned ChainLen = Chain.size();
  DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
               << "\n");
  Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType();
  auto &DL = cast<StoreInst>(Chain[0])->getModule()->getDataLayout();
  unsigned Sz = DL.getTypeSizeInBits(StoreTy);
  unsigned VF = VecRegSize / Sz;

  if (!isPowerOf2_32(Sz) || VF < 2)
    return false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());

  bool Changed = false;
  // Look for profitable vectorizable trees at all offsets, starting at zero.
  for (unsigned i = 0, e = ChainLen; i < e; ++i) {
    if (i + VF > e)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
                 << "\n");
    ArrayRef<Value *> Operands = Chain.slice(i, VF);

    R.buildTree(Operands);

    int Cost = R.getTreeCost();

    DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
    if (Cost < CostThreshold) {
      DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
                                    int costThreshold, BoUpSLP &R) {
  SetVector<StoreInst *> Heads, Tails;
  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;

  // We may run into multiple chains that merge into a single chain. We mark
  // the stores that we vectorized so that we don't visit the same store
  // twice.
  BoUpSLP::ValueSet VectorizedStores;
  bool Changed = false;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  SmallVector<unsigned, 16> IndexQueue;
  for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
    const DataLayout &DL = Stores[i]->getModule()->getDataLayout();
    IndexQueue.clear();
    // If a store has multiple consecutive store candidates, search the
    // Stores array according to the sequence: from i+1 to e, then from i-1
    // down to 0. This is because pairing with the immediately succeeding or
    // preceding candidate usually creates the best chance of finding an SLP
    // vectorization opportunity.
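    // E.g. for i == 2 and e == 5 the candidates are visited in the order
    // 3, 4, 1, 0.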
3276 unsigned j = 0; 3277 for (j = i + 1; j < e; ++j) 3278 IndexQueue.push_back(j); 3279 for (j = i; j > 0; --j) 3280 IndexQueue.push_back(j - 1); 3281 3282 for (auto &k : IndexQueue) { 3283 if (R.isConsecutiveAccess(Stores[i], Stores[k], DL)) { 3284 Tails.insert(Stores[k]); 3285 Heads.insert(Stores[i]); 3286 ConsecutiveChain[Stores[i]] = Stores[k]; 3287 break; 3288 } 3289 } 3290 } 3291 3292 // For stores that start but don't end a link in the chain: 3293 for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end(); 3294 it != e; ++it) { 3295 if (Tails.count(*it)) 3296 continue; 3297 3298 // We found a store instr that starts a chain. Now follow the chain and try 3299 // to vectorize it. 3300 BoUpSLP::ValueList Operands; 3301 StoreInst *I = *it; 3302 // Collect the chain into a list. 3303 while (Tails.count(I) || Heads.count(I)) { 3304 if (VectorizedStores.count(I)) 3305 break; 3306 Operands.push_back(I); 3307 // Move to the next value in the chain. 3308 I = ConsecutiveChain[I]; 3309 } 3310 3311 // FIXME: Is division-by-2 the correct step? Should we assert that the 3312 // register size is a power-of-2? 3313 for (unsigned Size = MaxVecRegSize; Size >= MinVecRegSize; Size /= 2) { 3314 if (vectorizeStoreChain(Operands, costThreshold, R, Size)) { 3315 // Mark the vectorized stores so that we don't vectorize them again. 3316 VectorizedStores.insert(Operands.begin(), Operands.end()); 3317 Changed = true; 3318 break; 3319 } 3320 } 3321 } 3322 3323 return Changed; 3324 } 3325 3326 3327 unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) { 3328 unsigned count = 0; 3329 StoreRefs.clear(); 3330 const DataLayout &DL = BB->getModule()->getDataLayout(); 3331 for (Instruction &I : *BB) { 3332 StoreInst *SI = dyn_cast<StoreInst>(&I); 3333 if (!SI) 3334 continue; 3335 3336 // Don't touch volatile stores. 3337 if (!SI->isSimple()) 3338 continue; 3339 3340 // Check that the pointer points to scalars. 3341 Type *Ty = SI->getValueOperand()->getType(); 3342 if (!isValidElementType(Ty)) 3343 continue; 3344 3345 // Find the base pointer. 3346 Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL); 3347 3348 // Save the store locations. 3349 StoreRefs[Ptr].push_back(SI); 3350 count++; 3351 } 3352 return count; 3353 } 3354 3355 bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) { 3356 if (!A || !B) 3357 return false; 3358 Value *VL[] = { A, B }; 3359 return tryToVectorizeList(VL, R, None, true); 3360 } 3361 3362 bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 3363 ArrayRef<Value *> BuildVector, 3364 bool allowReorder) { 3365 if (VL.size() < 2) 3366 return false; 3367 3368 DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size() << ".\n"); 3369 3370 // Check that all of the parts are scalar instructions of the same type. 3371 Instruction *I0 = dyn_cast<Instruction>(VL[0]); 3372 if (!I0) 3373 return false; 3374 3375 unsigned Opcode0 = I0->getOpcode(); 3376 const DataLayout &DL = I0->getModule()->getDataLayout(); 3377 3378 Type *Ty0 = I0->getType(); 3379 unsigned Sz = DL.getTypeSizeInBits(Ty0); 3380 // FIXME: Register size should be a parameter to this function, so we can 3381 // try different vectorization factors. 
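  // E.g. a 128-bit minimum register size with 32-bit scalars gives VF == 4.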
  unsigned VF = MinVecRegSize / Sz;

  for (Value *V : VL) {
    Type *Ty = V->getType();
    if (!isValidElementType(Ty))
      return false;
    Instruction *Inst = dyn_cast<Instruction>(V);
    if (!Inst || Inst->getOpcode() != Opcode0)
      return false;
  }

  bool Changed = false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());

  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    unsigned OpsWidth = 0;

    if (i + VF > e)
      OpsWidth = e - i;
    else
      OpsWidth = VF;

    if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(VL, TrackValues, i, OpsWidth))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
                 << "\n");
    ArrayRef<Value *> Ops = VL.slice(i, OpsWidth);

    ArrayRef<Value *> BuildVectorSlice;
    if (!BuildVector.empty())
      BuildVectorSlice = BuildVector.slice(i, OpsWidth);

    R.buildTree(Ops, BuildVectorSlice);
    // TODO: Check if we can allow reordering for cases other than
    // tryToVectorizePair().
    if (allowReorder && R.shouldReorder()) {
      assert(Ops.size() == 2);
      assert(BuildVectorSlice.empty());
      Value *ReorderedOps[] = { Ops[1], Ops[0] };
      R.buildTree(ReorderedOps, None);
    }
    int Cost = R.getTreeCost();

    if (Cost < -SLPCostThreshold) {
      DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
      Value *VectorizedRoot = R.vectorizeTree();

      // Reconstruct the build vector by extracting the vectorized root. This
      // way we handle the case where some elements of the vector are
      // undefined.
      //  (return (insertelement <4 x i32> (insertelement undef (opd0) 0)
      //                         (opd1) 2))
      if (!BuildVectorSlice.empty()) {
        // The insert point is the last build vector instruction. The
        // vectorized root will precede it. This guarantees that we get an
        // instruction. The vectorized tree could have been constant folded.
        Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back());
        unsigned VecIdx = 0;
        for (auto &V : BuildVectorSlice) {
          IRBuilder<true, NoFolder> Builder(
              ++BasicBlock::iterator(InsertAfter));
          InsertElementInst *IE = cast<InsertElementInst>(V);
          Instruction *Extract = cast<Instruction>(
              Builder.CreateExtractElement(VectorizedRoot,
                                           Builder.getInt32(VecIdx++)));
          IE->setOperand(1, Extract);
          IE->removeFromParent();
          IE->insertAfter(Extract);
          InsertAfter = IE;
        }
      }
      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
  if (!V)
    return false;

  // Try to vectorize V.
  if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R))
    return true;

  BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0));
  BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1));
  // Try to skip B.
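  // For illustration (an assumed example, not from the original source):
  // given V = (a*b) + ((c*d) + (e*f)), the direct pair
  // (a*b, (c*d)+(e*f)) is not isomorphic, but skipping B = (c*d)+(e*f)
  // exposes the vectorizable mul pairs (a*b, c*d) and (a*b, e*f).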
  if (B && B->hasOneUse()) {
    BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
    BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
    if (tryToVectorizePair(A, B0, R)) {
      return true;
    }
    if (tryToVectorizePair(A, B1, R)) {
      return true;
    }
  }

  // Try to skip A.
  if (A && A->hasOneUse()) {
    BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
    BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
    if (tryToVectorizePair(A0, B, R)) {
      return true;
    }
    if (tryToVectorizePair(A1, B, R)) {
      return true;
    }
  }
  return false;
}

/// \brief Generate a shuffle mask to be used in a reduction tree.
///
/// \param VecLen The length of the vector to be reduced.
/// \param NumEltsToRdx The number of elements that should be reduced in the
///        vector.
/// \param IsPairwise Whether the reduction is a pairwise or splitting
///        reduction. A pairwise reduction will generate a mask of
///        <0,2,...> or <1,3,..> while a splitting reduction will generate
///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
/// \param IsLeft If true, generate a mask of even elements; odd otherwise.
static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
                                   bool IsPairwise, bool IsLeft,
                                   IRBuilder<> &Builder) {
  assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");

  SmallVector<Constant *, 32> ShuffleMask(
      VecLen, UndefValue::get(Builder.getInt32Ty()));

  if (IsPairwise)
    // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
  else
    // Move the upper half of the vector to the lower half.
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);

  return ConstantVector::get(ShuffleMask);
}

/// Model horizontal reductions.
///
/// A horizontal reduction is a tree of reduction operations (currently add
/// and fadd) that has operations that can be put into a vector as its leaves.
/// For example, this tree:
///
/// mul mul mul mul
///  \  /    \  /
///   +       +
///    \     /
///     +
/// This tree has "mul" as its reduced values and "+" as its reduction
/// operations. A reduction might feed into a store or a binary operation
/// that feeds a phi.
///    ...
///    \  /
///     +
///     |
///  phi +=
///
///  Or:
///    ...
///    \  /
///     +
///     |
///   *p =
///
class HorizontalReduction {
  SmallVector<Value *, 16> ReductionOps;
  SmallVector<Value *, 32> ReducedVals;

  BinaryOperator *ReductionRoot;
  PHINode *ReductionPHI;

  /// The opcode of the reduction.
  unsigned ReductionOpcode;
  /// The opcode of the values we perform a reduction on.
  unsigned ReducedValueOpcode;
  /// The width of one full horizontal reduction operation.
  unsigned ReduxWidth;
  /// Should we model this reduction as a pairwise reduction tree or a tree
  /// that splits the vector in halves and adds those halves.
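  /// For example (illustrative), reducing <a,b,c,d>: a pairwise tree computes
  /// (a+b) and (c+d) in its first step, while a splitting tree computes
  /// (a+c) and (b+d) by adding the upper half <c,d> to the lower half <a,b>.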
  bool IsPairwiseReduction;

public:
  HorizontalReduction()
      : ReductionRoot(nullptr), ReductionPHI(nullptr), ReductionOpcode(0),
        ReducedValueOpcode(0), ReduxWidth(0), IsPairwiseReduction(false) {}

  /// \brief Try to find a reduction tree.
  bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B) {
    assert((!Phi ||
            std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) &&
           "The phi needs to use the binary operator");

    // We could have an initial reduction that is not an add.
    //  r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted in the first '+'.
    if (Phi) {
      if (B->getOperand(0) == Phi) {
        Phi = nullptr;
        B = dyn_cast<BinaryOperator>(B->getOperand(1));
      } else if (B->getOperand(1) == Phi) {
        Phi = nullptr;
        B = dyn_cast<BinaryOperator>(B->getOperand(0));
      }
    }

    if (!B)
      return false;

    Type *Ty = B->getType();
    if (!isValidElementType(Ty))
      return false;

    const DataLayout &DL = B->getModule()->getDataLayout();
    ReductionOpcode = B->getOpcode();
    ReducedValueOpcode = 0;
    // FIXME: Register size should be a parameter to this function, so we can
    // try different vectorization factors.
    ReduxWidth = MinVecRegSize / DL.getTypeSizeInBits(Ty);
    ReductionRoot = B;
    ReductionPHI = Phi;

    if (ReduxWidth < 4)
      return false;

    // We currently only support adds.
    if (ReductionOpcode != Instruction::Add &&
        ReductionOpcode != Instruction::FAdd)
      return false;

    // Post-order traverse the reduction tree starting at B. We only handle
    // true trees containing only binary operators.
    SmallVector<std::pair<BinaryOperator *, unsigned>, 32> Stack;
    Stack.push_back(std::make_pair(B, 0));
    while (!Stack.empty()) {
      BinaryOperator *TreeN = Stack.back().first;
      unsigned EdgeToVisit = Stack.back().second++;
      bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;

      // Only handle trees in the current basic block.
      if (TreeN->getParent() != B->getParent())
        return false;

      // Each tree node needs to have one user except for the ultimate
      // reduction.
      if (!TreeN->hasOneUse() && TreeN != B)
        return false;

      // Postorder visit.
      if (EdgeToVisit == 2 || IsReducedValue) {
        if (IsReducedValue) {
          // Make sure that the opcodes of the operations that we are going to
          // reduce match.
          if (!ReducedValueOpcode)
            ReducedValueOpcode = TreeN->getOpcode();
          else if (ReducedValueOpcode != TreeN->getOpcode())
            return false;
          ReducedVals.push_back(TreeN);
        } else {
          // We need to be able to reassociate the adds.
          if (!TreeN->isAssociative())
            return false;
          ReductionOps.push_back(TreeN);
        }
        // Retract.
        Stack.pop_back();
        continue;
      }

      // Visit left or right.
      Value *NextV = TreeN->getOperand(EdgeToVisit);
      BinaryOperator *Next = dyn_cast<BinaryOperator>(NextV);
      if (Next)
        Stack.push_back(std::make_pair(Next, 0));
      else if (NextV != Phi)
        return false;
    }
    return true;
  }

  /// \brief Attempt to vectorize the tree found by
  /// matchAssociativeReduction.
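  ///
  /// Illustrative behavior (assumed example, not from the original source):
  /// for a matched tree of eight i32 adds with ReduxWidth = 4, each 4-wide
  /// bundle of reduced values is vectorized and collapsed to a scalar by
  /// emitReduction(), and the per-bundle scalars (plus any leftover scalar
  /// values) are chained together with "bin.rdx" operations.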
  bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
    if (ReducedVals.empty())
      return false;

    unsigned NumReducedVals = ReducedVals.size();
    if (NumReducedVals < ReduxWidth)
      return false;

    Value *VectorizedTree = nullptr;
    IRBuilder<> Builder(ReductionRoot);
    FastMathFlags Unsafe;
    Unsafe.setUnsafeAlgebra();
    Builder.SetFastMathFlags(Unsafe);
    unsigned i = 0;

    for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
      V.buildTree(makeArrayRef(&ReducedVals[i], ReduxWidth), ReductionOps);

      // Estimate cost.
      int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]);
      if (Cost >= -SLPCostThreshold)
        break;

      DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
                   << ". (HorRdx)\n");

      // Vectorize a tree.
      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
      Value *VectorizedRoot = V.vectorizeTree();

      // Emit a reduction.
      Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder);
      if (VectorizedTree) {
        Builder.SetCurrentDebugLocation(Loc);
        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
                                     ReducedSubTree, "bin.rdx");
      } else
        VectorizedTree = ReducedSubTree;
    }

    if (VectorizedTree) {
      // Finish the reduction.
      for (; i < NumReducedVals; ++i) {
        Builder.SetCurrentDebugLocation(
            cast<Instruction>(ReducedVals[i])->getDebugLoc());
        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
                                     ReducedVals[i]);
      }
      // Update users.
      if (ReductionPHI) {
        assert(ReductionRoot && "Need a reduction operation");
        ReductionRoot->setOperand(0, VectorizedTree);
        ReductionRoot->setOperand(1, ReductionPHI);
      } else
        ReductionRoot->replaceAllUsesWith(VectorizedTree);
    }
    return VectorizedTree != nullptr;
  }

private:

  /// \brief Calculate the cost of a reduction.
  int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) {
    Type *ScalarTy = FirstReducedVal->getType();
    Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);

    int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true);
    int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false);

    IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
    int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;

    int ScalarReduxCost =
        ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, VecTy);

    DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
                 << " for reduction that starts with " << *FirstReducedVal
                 << " (It is a "
                 << (IsPairwiseReduction ? "pairwise" : "splitting")
                 << " reduction)\n");

    return VecReduxCost - ScalarReduxCost;
  }

  static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L,
                            Value *R, const Twine &Name = "") {
    if (Opcode == Instruction::FAdd)
      return Builder.CreateFAdd(L, R, Name);
    return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name);
  }

  /// \brief Emit a horizontal reduction of the vectorized value.
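  ///
  /// Illustrative IR for a splitting reduction of <4 x float> (assumed
  /// example; names follow the "rdx.shuf"/"bin.rdx" scheme used below):
  ///   %rdx.shuf  = shufflevector <4 x float> %v, <4 x float> undef,
  ///                  <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  ///   %bin.rdx   = fadd <4 x float> %v, %rdx.shuf
  ///   %rdx.shuf1 = shufflevector <4 x float> %bin.rdx, <4 x float> undef,
  ///                  <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  ///   %bin.rdx1  = fadd <4 x float> %bin.rdx, %rdx.shuf1
  ///   %r         = extractelement <4 x float> %bin.rdx1, i32 0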
  Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) {
    assert(VectorizedValue && "Need to have a vectorized tree node");
    assert(isPowerOf2_32(ReduxWidth) &&
           "We only handle power-of-two reductions for now");

    Value *TmpVec = VectorizedValue;
    for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
      if (IsPairwiseReduction) {
        Value *LeftMask =
            createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
        Value *RightMask =
            createRdxShuffleMask(ReduxWidth, i, true, false, Builder);

        Value *LeftShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l");
        Value *RightShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), RightMask,
            "rdx.shuf.r");
        TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf,
                             "bin.rdx");
      } else {
        Value *UpperHalf =
            createRdxShuffleMask(ReduxWidth, i, false, false, Builder);
        Value *Shuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf");
        TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf, "bin.rdx");
      }
    }

    // The result is in the first element of the vector.
    return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
  }
};

/// \brief Recognize construction of vectors like
///  %ra = insertelement <4 x float> undef, float %s0, i32 0
///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
///
/// \returns true if it matches.
///
static bool findBuildVector(InsertElementInst *FirstInsertElem,
                            SmallVectorImpl<Value *> &BuildVector,
                            SmallVectorImpl<Value *> &BuildVectorOpds) {
  if (!isa<UndefValue>(FirstInsertElem->getOperand(0)))
    return false;

  InsertElementInst *IE = FirstInsertElem;
  while (true) {
    BuildVector.push_back(IE);
    BuildVectorOpds.push_back(IE->getOperand(1));

    if (IE->use_empty())
      return false;

    InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back());
    if (!NextUse)
      return true;

    // If this isn't the final use, make sure the next insertelement is the
    // only use. It's OK if the final constructed vector is used multiple
    // times.
    if (!IE->hasOneUse())
      return false;

    IE = NextUse;
  }

  return false;
}

static bool PhiTypeSorterFunc(Value *V, Value *V2) {
  return V->getType() < V2->getType();
}

bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallSet<Value *, 16> VisitedInstrs;

  bool HaveVectorizedPhiNodes = true;
  while (HaveVectorizedPhiNodes) {
    HaveVectorizedPhiNodes = false;

    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (BasicBlock::iterator instr = BB->begin(), ie = BB->end(); instr != ie;
         ++instr) {
      PHINode *P = dyn_cast<PHINode>(instr);
      if (!P)
        break;

      if (!VisitedInstrs.count(P))
        Incoming.push_back(P);
    }

    // Sort by type.
    std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);

    // Try to vectorize elements based on their type.
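    // For illustration (an assumed example, not from the original source):
    // with incoming PHIs of types {i32, i32, float, float}, the sorted list
    // forms two same-type runs, and each run of length >= 2 is handed to
    // tryToVectorizeList() below.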
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
                                           E = Incoming.end();
         IncIt != E;) {

      // Look for the next elements with the same type.
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts
                   << ")\n");
      if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R)) {
        // Success. Start over because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
    // We may go through BB multiple times, so skip the instructions we have
    // already checked.
    if (!VisitedInstrs.insert(it).second)
      continue;

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;
      Value *Rdx =
          (P->getIncomingBlock(0) == BB
               ? (P->getIncomingValue(0))
               : (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1)
                                               : nullptr));
      // Check if this is a Binary Operator.
      BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx);
      if (!BI)
        continue;

      // Try to match and vectorize a horizontal reduction.
      HorizontalReduction HorRdx;
      if (ShouldVectorizeHor && HorRdx.matchAssociativeReduction(P, BI) &&
          HorRdx.tryToReduce(R, TTI)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      Value *Inst = BI->getOperand(0);
      if (Inst == P)
        Inst = BI->getOperand(1);

      if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      continue;
    }

    // Try to vectorize horizontal reductions feeding into a store.
    if (ShouldStartVectorizeHorAtStore)
      if (StoreInst *SI = dyn_cast<StoreInst>(it))
        if (BinaryOperator *BinOp =
                dyn_cast<BinaryOperator>(SI->getValueOperand())) {
          HorizontalReduction HorRdx;
          if (((HorRdx.matchAssociativeReduction(nullptr, BinOp) &&
                HorRdx.tryToReduce(R, TTI)) ||
               tryToVectorize(BinOp, R))) {
            Changed = true;
            it = BB->begin();
            e = BB->end();
            continue;
          }
        }

    // Try to vectorize horizontal reductions feeding into a return.
    if (ReturnInst *RI = dyn_cast<ReturnInst>(it))
      if (RI->getNumOperands() != 0)
        if (BinaryOperator *BinOp =
                dyn_cast<BinaryOperator>(RI->getOperand(0))) {
          DEBUG(dbgs() << "SLP: Found a return to vectorize.\n");
          if (tryToVectorizePair(BinOp->getOperand(0),
                                 BinOp->getOperand(1), R)) {
            Changed = true;
            it = BB->begin();
            e = BB->end();
            continue;
          }
        }

    // Try to vectorize trees that start at compare instructions.
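    // Illustrative example (assumption, not from the original source): for
    //   %c = fcmp olt float %a, %b
    // the pair (%a, %b) is tried first; if that fails, the operands of any
    // binary operator feeding the compare are tried as pairs.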
    if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
      if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
        Changed = true;
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        it = BB->begin();
        e = BB->end();
        continue;
      }

      for (int i = 0; i < 2; ++i) {
        if (BinaryOperator *BI = dyn_cast<BinaryOperator>(CI->getOperand(i))) {
          if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) {
            Changed = true;
            // We would like to start over since some instructions are deleted
            // and the iterator may become invalid.
            it = BB->begin();
            e = BB->end();
            break;
          }
        }
      }
      continue;
    }

    // Try to vectorize trees that start at insertelement instructions.
    if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) {
      SmallVector<Value *, 16> BuildVector;
      SmallVector<Value *, 16> BuildVectorOpds;
      if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds))
        continue;

      // Vectorize starting with the build vector operands, ignoring the
      // BuildVector instructions for the purpose of scheduling and user
      // extraction.
      if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }

      continue;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = StoreRefs.begin(), e = StoreRefs.end();
       it != e; ++it) {
    if (it->second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                 << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
    // TODO: The limit of 16 inhibits greater vectorization factors.
    //       For example, AVX2 supports v32i8. Increasing this limit, however,
    //       may cause a significant compile-time increase.
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len),
                                 -SLPCostThreshold, R);
    }
  }
  return Changed;
}

} // end anonymous namespace

char SLPVectorizer::ID = 0;
static const char lv_name[] = "SLP Vectorizer";
INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

namespace llvm {
Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
}
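
// Usage sketch (illustrative, not part of this file): with the legacy pass
// manager, a client schedules the pass via the factory above, e.g.:
//
//   legacy::PassManager PM;
//   PM.add(llvm::createSLPVectorizerPass());
//   PM.run(M);  // M is the llvm::Module to optimize.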