1 //===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // This pass implements the Bottom Up SLP vectorizer. It detects consecutive 10 // stores that can be put together into vector-stores. Next, it attempts to 11 // construct vectorizable tree using the use-def chains. If a profitable tree 12 // was found, the SLP vectorizer performs vectorization on the tree. 13 // 14 // The pass is inspired by the work described in the paper: 15 // "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks. 16 // 17 //===----------------------------------------------------------------------===// 18 #include "llvm/Transforms/Vectorize.h" 19 #include "llvm/ADT/MapVector.h" 20 #include "llvm/ADT/PostOrderIterator.h" 21 #include "llvm/ADT/SetVector.h" 22 #include "llvm/ADT/Optional.h" 23 #include "llvm/ADT/Statistic.h" 24 #include "llvm/Analysis/AliasAnalysis.h" 25 #include "llvm/Analysis/AssumptionCache.h" 26 #include "llvm/Analysis/CodeMetrics.h" 27 #include "llvm/Analysis/LoopInfo.h" 28 #include "llvm/Analysis/ScalarEvolution.h" 29 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 30 #include "llvm/Analysis/TargetTransformInfo.h" 31 #include "llvm/Analysis/ValueTracking.h" 32 #include "llvm/IR/DataLayout.h" 33 #include "llvm/IR/Dominators.h" 34 #include "llvm/IR/IRBuilder.h" 35 #include "llvm/IR/Instructions.h" 36 #include "llvm/IR/IntrinsicInst.h" 37 #include "llvm/IR/Module.h" 38 #include "llvm/IR/NoFolder.h" 39 #include "llvm/IR/Type.h" 40 #include "llvm/IR/Value.h" 41 #include "llvm/IR/Verifier.h" 42 #include "llvm/Pass.h" 43 #include "llvm/Support/CommandLine.h" 44 #include "llvm/Support/Debug.h" 45 #include "llvm/Support/raw_ostream.h" 46 #include "llvm/Transforms/Utils/VectorUtils.h" 47 #include <algorithm> 48 #include <map> 49 #include <memory> 50 51 using namespace llvm; 52 53 #define SV_NAME "slp-vectorizer" 54 #define DEBUG_TYPE "SLP" 55 56 STATISTIC(NumVectorInstructions, "Number of vector instructions generated"); 57 58 static cl::opt<int> 59 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden, 60 cl::desc("Only vectorize if you gain more than this " 61 "number ")); 62 63 static cl::opt<bool> 64 ShouldVectorizeHor("slp-vectorize-hor", cl::init(false), cl::Hidden, 65 cl::desc("Attempt to vectorize horizontal reductions")); 66 67 static cl::opt<bool> ShouldStartVectorizeHorAtStore( 68 "slp-vectorize-hor-store", cl::init(false), cl::Hidden, 69 cl::desc( 70 "Attempt to vectorize horizontal reductions feeding into a store")); 71 72 namespace { 73 74 static const unsigned MinVecRegSize = 128; 75 76 static const unsigned RecursionMaxDepth = 12; 77 78 // Limit the number of alias checks. The limit is chosen so that 79 // it has no negative effect on the llvm benchmarks. 80 static const unsigned AliasedCheckLimit = 10; 81 82 /// \returns the parent basic block if all of the instructions in \p VL 83 /// are in the same block or null otherwise. 84 static BasicBlock *getSameBlock(ArrayRef<Value *> VL) { 85 Instruction *I0 = dyn_cast<Instruction>(VL[0]); 86 if (!I0) 87 return nullptr; 88 BasicBlock *BB = I0->getParent(); 89 for (int i = 1, e = VL.size(); i < e; i++) { 90 Instruction *I = dyn_cast<Instruction>(VL[i]); 91 if (!I) 92 return nullptr; 93 94 if (BB != I->getParent()) 95 return nullptr; 96 } 97 return BB; 98 } 99 100 /// \returns True if all of the values in \p VL are constants. 101 static bool allConstant(ArrayRef<Value *> VL) { 102 for (unsigned i = 0, e = VL.size(); i < e; ++i) 103 if (!isa<Constant>(VL[i])) 104 return false; 105 return true; 106 } 107 108 /// \returns True if all of the values in \p VL are identical. 109 static bool isSplat(ArrayRef<Value *> VL) { 110 for (unsigned i = 1, e = VL.size(); i < e; ++i) 111 if (VL[i] != VL[0]) 112 return false; 113 return true; 114 } 115 116 ///\returns Opcode that can be clubbed with \p Op to create an alternate 117 /// sequence which can later be merged as a ShuffleVector instruction. 118 static unsigned getAltOpcode(unsigned Op) { 119 switch (Op) { 120 case Instruction::FAdd: 121 return Instruction::FSub; 122 case Instruction::FSub: 123 return Instruction::FAdd; 124 case Instruction::Add: 125 return Instruction::Sub; 126 case Instruction::Sub: 127 return Instruction::Add; 128 default: 129 return 0; 130 } 131 } 132 133 ///\returns bool representing if Opcode \p Op can be part 134 /// of an alternate sequence which can later be merged as 135 /// a ShuffleVector instruction. 136 static bool canCombineAsAltInst(unsigned Op) { 137 if (Op == Instruction::FAdd || Op == Instruction::FSub || 138 Op == Instruction::Sub || Op == Instruction::Add) 139 return true; 140 return false; 141 } 142 143 /// \returns ShuffleVector instruction if intructions in \p VL have 144 /// alternate fadd,fsub / fsub,fadd/add,sub/sub,add sequence. 145 /// (i.e. e.g. opcodes of fadd,fsub,fadd,fsub...) 146 static unsigned isAltInst(ArrayRef<Value *> VL) { 147 Instruction *I0 = dyn_cast<Instruction>(VL[0]); 148 unsigned Opcode = I0->getOpcode(); 149 unsigned AltOpcode = getAltOpcode(Opcode); 150 for (int i = 1, e = VL.size(); i < e; i++) { 151 Instruction *I = dyn_cast<Instruction>(VL[i]); 152 if (!I || I->getOpcode() != ((i & 1) ? AltOpcode : Opcode)) 153 return 0; 154 } 155 return Instruction::ShuffleVector; 156 } 157 158 /// \returns The opcode if all of the Instructions in \p VL have the same 159 /// opcode, or zero. 160 static unsigned getSameOpcode(ArrayRef<Value *> VL) { 161 Instruction *I0 = dyn_cast<Instruction>(VL[0]); 162 if (!I0) 163 return 0; 164 unsigned Opcode = I0->getOpcode(); 165 for (int i = 1, e = VL.size(); i < e; i++) { 166 Instruction *I = dyn_cast<Instruction>(VL[i]); 167 if (!I || Opcode != I->getOpcode()) { 168 if (canCombineAsAltInst(Opcode) && i == 1) 169 return isAltInst(VL); 170 return 0; 171 } 172 } 173 return Opcode; 174 } 175 176 /// Get the intersection (logical and) of all of the potential IR flags 177 /// of each scalar operation (VL) that will be converted into a vector (I). 178 /// Flag set: NSW, NUW, exact, and all of fast-math. 179 static void propagateIRFlags(Value *I, ArrayRef<Value *> VL) { 180 if (auto *VecOp = dyn_cast<BinaryOperator>(I)) { 181 if (auto *Intersection = dyn_cast<BinaryOperator>(VL[0])) { 182 // Intersection is initialized to the 0th scalar, 183 // so start counting from index '1'. 184 for (int i = 1, e = VL.size(); i < e; ++i) { 185 if (auto *Scalar = dyn_cast<BinaryOperator>(VL[i])) 186 Intersection->andIRFlags(Scalar); 187 } 188 VecOp->copyIRFlags(Intersection); 189 } 190 } 191 } 192 193 /// \returns \p I after propagating metadata from \p VL. 194 static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) { 195 Instruction *I0 = cast<Instruction>(VL[0]); 196 SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata; 197 I0->getAllMetadataOtherThanDebugLoc(Metadata); 198 199 for (unsigned i = 0, n = Metadata.size(); i != n; ++i) { 200 unsigned Kind = Metadata[i].first; 201 MDNode *MD = Metadata[i].second; 202 203 for (int i = 1, e = VL.size(); MD && i != e; i++) { 204 Instruction *I = cast<Instruction>(VL[i]); 205 MDNode *IMD = I->getMetadata(Kind); 206 207 switch (Kind) { 208 default: 209 MD = nullptr; // Remove unknown metadata 210 break; 211 case LLVMContext::MD_tbaa: 212 MD = MDNode::getMostGenericTBAA(MD, IMD); 213 break; 214 case LLVMContext::MD_alias_scope: 215 case LLVMContext::MD_noalias: 216 MD = MDNode::intersect(MD, IMD); 217 break; 218 case LLVMContext::MD_fpmath: 219 MD = MDNode::getMostGenericFPMath(MD, IMD); 220 break; 221 } 222 } 223 I->setMetadata(Kind, MD); 224 } 225 return I; 226 } 227 228 /// \returns The type that all of the values in \p VL have or null if there 229 /// are different types. 230 static Type* getSameType(ArrayRef<Value *> VL) { 231 Type *Ty = VL[0]->getType(); 232 for (int i = 1, e = VL.size(); i < e; i++) 233 if (VL[i]->getType() != Ty) 234 return nullptr; 235 236 return Ty; 237 } 238 239 /// \returns True if the ExtractElement instructions in VL can be vectorized 240 /// to use the original vector. 241 static bool CanReuseExtract(ArrayRef<Value *> VL) { 242 assert(Instruction::ExtractElement == getSameOpcode(VL) && "Invalid opcode"); 243 // Check if all of the extracts come from the same vector and from the 244 // correct offset. 245 Value *VL0 = VL[0]; 246 ExtractElementInst *E0 = cast<ExtractElementInst>(VL0); 247 Value *Vec = E0->getOperand(0); 248 249 // We have to extract from the same vector type. 250 unsigned NElts = Vec->getType()->getVectorNumElements(); 251 252 if (NElts != VL.size()) 253 return false; 254 255 // Check that all of the indices extract from the correct offset. 256 ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1)); 257 if (!CI || CI->getZExtValue()) 258 return false; 259 260 for (unsigned i = 1, e = VL.size(); i < e; ++i) { 261 ExtractElementInst *E = cast<ExtractElementInst>(VL[i]); 262 ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1)); 263 264 if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec) 265 return false; 266 } 267 268 return true; 269 } 270 271 /// \returns True if in-tree use also needs extract. This refers to 272 /// possible scalar operand in vectorized instruction. 273 static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst, 274 TargetLibraryInfo *TLI) { 275 276 unsigned Opcode = UserInst->getOpcode(); 277 switch (Opcode) { 278 case Instruction::Load: { 279 LoadInst *LI = cast<LoadInst>(UserInst); 280 return (LI->getPointerOperand() == Scalar); 281 } 282 case Instruction::Store: { 283 StoreInst *SI = cast<StoreInst>(UserInst); 284 return (SI->getPointerOperand() == Scalar); 285 } 286 case Instruction::Call: { 287 CallInst *CI = cast<CallInst>(UserInst); 288 Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI); 289 if (hasVectorInstrinsicScalarOpd(ID, 1)) { 290 return (CI->getArgOperand(1) == Scalar); 291 } 292 } 293 default: 294 return false; 295 } 296 } 297 298 /// \returns the AA location that is being access by the instruction. 299 static AliasAnalysis::Location getLocation(Instruction *I, AliasAnalysis *AA) { 300 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 301 return AA->getLocation(SI); 302 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 303 return AA->getLocation(LI); 304 return AliasAnalysis::Location(); 305 } 306 307 /// Bottom Up SLP Vectorizer. 308 class BoUpSLP { 309 public: 310 typedef SmallVector<Value *, 8> ValueList; 311 typedef SmallVector<Instruction *, 16> InstrList; 312 typedef SmallPtrSet<Value *, 16> ValueSet; 313 typedef SmallVector<StoreInst *, 8> StoreList; 314 315 BoUpSLP(Function *Func, ScalarEvolution *Se, const DataLayout *Dl, 316 TargetTransformInfo *Tti, TargetLibraryInfo *TLi, AliasAnalysis *Aa, 317 LoopInfo *Li, DominatorTree *Dt, AssumptionCache *AC) 318 : NumLoadsWantToKeepOrder(0), NumLoadsWantToChangeOrder(0), F(Func), 319 SE(Se), DL(Dl), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), 320 Builder(Se->getContext()) { 321 CodeMetrics::collectEphemeralValues(F, AC, EphValues); 322 } 323 324 /// \brief Vectorize the tree that starts with the elements in \p VL. 325 /// Returns the vectorized root. 326 Value *vectorizeTree(); 327 328 /// \returns the cost incurred by unwanted spills and fills, caused by 329 /// holding live values over call sites. 330 int getSpillCost(); 331 332 /// \returns the vectorization cost of the subtree that starts at \p VL. 333 /// A negative number means that this is profitable. 334 int getTreeCost(); 335 336 /// Construct a vectorizable tree that starts at \p Roots, ignoring users for 337 /// the purpose of scheduling and extraction in the \p UserIgnoreLst. 338 void buildTree(ArrayRef<Value *> Roots, 339 ArrayRef<Value *> UserIgnoreLst = None); 340 341 /// Clear the internal data structures that are created by 'buildTree'. 342 void deleteTree() { 343 VectorizableTree.clear(); 344 ScalarToTreeEntry.clear(); 345 MustGather.clear(); 346 ExternalUses.clear(); 347 NumLoadsWantToKeepOrder = 0; 348 NumLoadsWantToChangeOrder = 0; 349 for (auto &Iter : BlocksSchedules) { 350 BlockScheduling *BS = Iter.second.get(); 351 BS->clear(); 352 } 353 } 354 355 /// \returns true if the memory operations A and B are consecutive. 356 bool isConsecutiveAccess(Value *A, Value *B); 357 358 /// \brief Perform LICM and CSE on the newly generated gather sequences. 359 void optimizeGatherSequence(); 360 361 /// \returns true if it is benefitial to reverse the vector order. 362 bool shouldReorder() const { 363 return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder; 364 } 365 366 private: 367 struct TreeEntry; 368 369 /// \returns the cost of the vectorizable entry. 370 int getEntryCost(TreeEntry *E); 371 372 /// This is the recursive part of buildTree. 373 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth); 374 375 /// Vectorize a single entry in the tree. 376 Value *vectorizeTree(TreeEntry *E); 377 378 /// Vectorize a single entry in the tree, starting in \p VL. 379 Value *vectorizeTree(ArrayRef<Value *> VL); 380 381 /// \returns the pointer to the vectorized value if \p VL is already 382 /// vectorized, or NULL. They may happen in cycles. 383 Value *alreadyVectorized(ArrayRef<Value *> VL) const; 384 385 /// \brief Take the pointer operand from the Load/Store instruction. 386 /// \returns NULL if this is not a valid Load/Store instruction. 387 static Value *getPointerOperand(Value *I); 388 389 /// \brief Take the address space operand from the Load/Store instruction. 390 /// \returns -1 if this is not a valid Load/Store instruction. 391 static unsigned getAddressSpaceOperand(Value *I); 392 393 /// \returns the scalarization cost for this type. Scalarization in this 394 /// context means the creation of vectors from a group of scalars. 395 int getGatherCost(Type *Ty); 396 397 /// \returns the scalarization cost for this list of values. Assuming that 398 /// this subtree gets vectorized, we may need to extract the values from the 399 /// roots. This method calculates the cost of extracting the values. 400 int getGatherCost(ArrayRef<Value *> VL); 401 402 /// \brief Set the Builder insert point to one after the last instruction in 403 /// the bundle 404 void setInsertPointAfterBundle(ArrayRef<Value *> VL); 405 406 /// \returns a vector from a collection of scalars in \p VL. 407 Value *Gather(ArrayRef<Value *> VL, VectorType *Ty); 408 409 /// \returns whether the VectorizableTree is fully vectoriable and will 410 /// be beneficial even the tree height is tiny. 411 bool isFullyVectorizableTinyTree(); 412 413 /// \reorder commutative operands in alt shuffle if they result in 414 /// vectorized code. 415 void reorderAltShuffleOperands(ArrayRef<Value *> VL, 416 SmallVectorImpl<Value *> &Left, 417 SmallVectorImpl<Value *> &Right); 418 /// \reorder commutative operands to get better probability of 419 /// generating vectorized code. 420 void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, 421 SmallVectorImpl<Value *> &Left, 422 SmallVectorImpl<Value *> &Right); 423 struct TreeEntry { 424 TreeEntry() : Scalars(), VectorizedValue(nullptr), 425 NeedToGather(0) {} 426 427 /// \returns true if the scalars in VL are equal to this entry. 428 bool isSame(ArrayRef<Value *> VL) const { 429 assert(VL.size() == Scalars.size() && "Invalid size"); 430 return std::equal(VL.begin(), VL.end(), Scalars.begin()); 431 } 432 433 /// A vector of scalars. 434 ValueList Scalars; 435 436 /// The Scalars are vectorized into this value. It is initialized to Null. 437 Value *VectorizedValue; 438 439 /// Do we need to gather this sequence ? 440 bool NeedToGather; 441 }; 442 443 /// Create a new VectorizableTree entry. 444 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) { 445 VectorizableTree.push_back(TreeEntry()); 446 int idx = VectorizableTree.size() - 1; 447 TreeEntry *Last = &VectorizableTree[idx]; 448 Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end()); 449 Last->NeedToGather = !Vectorized; 450 if (Vectorized) { 451 for (int i = 0, e = VL.size(); i != e; ++i) { 452 assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!"); 453 ScalarToTreeEntry[VL[i]] = idx; 454 } 455 } else { 456 MustGather.insert(VL.begin(), VL.end()); 457 } 458 return Last; 459 } 460 461 /// -- Vectorization State -- 462 /// Holds all of the tree entries. 463 std::vector<TreeEntry> VectorizableTree; 464 465 /// Maps a specific scalar to its tree entry. 466 SmallDenseMap<Value*, int> ScalarToTreeEntry; 467 468 /// A list of scalars that we found that we need to keep as scalars. 469 ValueSet MustGather; 470 471 /// This POD struct describes one external user in the vectorized tree. 472 struct ExternalUser { 473 ExternalUser (Value *S, llvm::User *U, int L) : 474 Scalar(S), User(U), Lane(L){}; 475 // Which scalar in our function. 476 Value *Scalar; 477 // Which user that uses the scalar. 478 llvm::User *User; 479 // Which lane does the scalar belong to. 480 int Lane; 481 }; 482 typedef SmallVector<ExternalUser, 16> UserList; 483 484 /// Checks if two instructions may access the same memory. 485 /// 486 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it 487 /// is invariant in the calling loop. 488 bool isAliased(const AliasAnalysis::Location &Loc1, Instruction *Inst1, 489 Instruction *Inst2) { 490 491 // First check if the result is already in the cache. 492 AliasCacheKey key = std::make_pair(Inst1, Inst2); 493 Optional<bool> &result = AliasCache[key]; 494 if (result.hasValue()) { 495 return result.getValue(); 496 } 497 AliasAnalysis::Location Loc2 = getLocation(Inst2, AA); 498 bool aliased = true; 499 if (Loc1.Ptr && Loc2.Ptr) { 500 // Do the alias check. 501 aliased = AA->alias(Loc1, Loc2); 502 } 503 // Store the result in the cache. 504 result = aliased; 505 return aliased; 506 } 507 508 typedef std::pair<Instruction *, Instruction *> AliasCacheKey; 509 510 /// Cache for alias results. 511 /// TODO: consider moving this to the AliasAnalysis itself. 512 DenseMap<AliasCacheKey, Optional<bool>> AliasCache; 513 514 /// Removes an instruction from its block and eventually deletes it. 515 /// It's like Instruction::eraseFromParent() except that the actual deletion 516 /// is delayed until BoUpSLP is destructed. 517 /// This is required to ensure that there are no incorrect collisions in the 518 /// AliasCache, which can happen if a new instruction is allocated at the 519 /// same address as a previously deleted instruction. 520 void eraseInstruction(Instruction *I) { 521 I->removeFromParent(); 522 I->dropAllReferences(); 523 DeletedInstructions.push_back(std::unique_ptr<Instruction>(I)); 524 } 525 526 /// Temporary store for deleted instructions. Instructions will be deleted 527 /// eventually when the BoUpSLP is destructed. 528 SmallVector<std::unique_ptr<Instruction>, 8> DeletedInstructions; 529 530 /// A list of values that need to extracted out of the tree. 531 /// This list holds pairs of (Internal Scalar : External User). 532 UserList ExternalUses; 533 534 /// Values used only by @llvm.assume calls. 535 SmallPtrSet<const Value *, 32> EphValues; 536 537 /// Holds all of the instructions that we gathered. 538 SetVector<Instruction *> GatherSeq; 539 /// A list of blocks that we are going to CSE. 540 SetVector<BasicBlock *> CSEBlocks; 541 542 /// Contains all scheduling relevant data for an instruction. 543 /// A ScheduleData either represents a single instruction or a member of an 544 /// instruction bundle (= a group of instructions which is combined into a 545 /// vector instruction). 546 struct ScheduleData { 547 548 // The initial value for the dependency counters. It means that the 549 // dependencies are not calculated yet. 550 enum { InvalidDeps = -1 }; 551 552 ScheduleData() 553 : Inst(nullptr), FirstInBundle(nullptr), NextInBundle(nullptr), 554 NextLoadStore(nullptr), SchedulingRegionID(0), SchedulingPriority(0), 555 Dependencies(InvalidDeps), UnscheduledDeps(InvalidDeps), 556 UnscheduledDepsInBundle(InvalidDeps), IsScheduled(false) {} 557 558 void init(int BlockSchedulingRegionID) { 559 FirstInBundle = this; 560 NextInBundle = nullptr; 561 NextLoadStore = nullptr; 562 IsScheduled = false; 563 SchedulingRegionID = BlockSchedulingRegionID; 564 UnscheduledDepsInBundle = UnscheduledDeps; 565 clearDependencies(); 566 } 567 568 /// Returns true if the dependency information has been calculated. 569 bool hasValidDependencies() const { return Dependencies != InvalidDeps; } 570 571 /// Returns true for single instructions and for bundle representatives 572 /// (= the head of a bundle). 573 bool isSchedulingEntity() const { return FirstInBundle == this; } 574 575 /// Returns true if it represents an instruction bundle and not only a 576 /// single instruction. 577 bool isPartOfBundle() const { 578 return NextInBundle != nullptr || FirstInBundle != this; 579 } 580 581 /// Returns true if it is ready for scheduling, i.e. it has no more 582 /// unscheduled depending instructions/bundles. 583 bool isReady() const { 584 assert(isSchedulingEntity() && 585 "can't consider non-scheduling entity for ready list"); 586 return UnscheduledDepsInBundle == 0 && !IsScheduled; 587 } 588 589 /// Modifies the number of unscheduled dependencies, also updating it for 590 /// the whole bundle. 591 int incrementUnscheduledDeps(int Incr) { 592 UnscheduledDeps += Incr; 593 return FirstInBundle->UnscheduledDepsInBundle += Incr; 594 } 595 596 /// Sets the number of unscheduled dependencies to the number of 597 /// dependencies. 598 void resetUnscheduledDeps() { 599 incrementUnscheduledDeps(Dependencies - UnscheduledDeps); 600 } 601 602 /// Clears all dependency information. 603 void clearDependencies() { 604 Dependencies = InvalidDeps; 605 resetUnscheduledDeps(); 606 MemoryDependencies.clear(); 607 } 608 609 void dump(raw_ostream &os) const { 610 if (!isSchedulingEntity()) { 611 os << "/ " << *Inst; 612 } else if (NextInBundle) { 613 os << '[' << *Inst; 614 ScheduleData *SD = NextInBundle; 615 while (SD) { 616 os << ';' << *SD->Inst; 617 SD = SD->NextInBundle; 618 } 619 os << ']'; 620 } else { 621 os << *Inst; 622 } 623 } 624 625 Instruction *Inst; 626 627 /// Points to the head in an instruction bundle (and always to this for 628 /// single instructions). 629 ScheduleData *FirstInBundle; 630 631 /// Single linked list of all instructions in a bundle. Null if it is a 632 /// single instruction. 633 ScheduleData *NextInBundle; 634 635 /// Single linked list of all memory instructions (e.g. load, store, call) 636 /// in the block - until the end of the scheduling region. 637 ScheduleData *NextLoadStore; 638 639 /// The dependent memory instructions. 640 /// This list is derived on demand in calculateDependencies(). 641 SmallVector<ScheduleData *, 4> MemoryDependencies; 642 643 /// This ScheduleData is in the current scheduling region if this matches 644 /// the current SchedulingRegionID of BlockScheduling. 645 int SchedulingRegionID; 646 647 /// Used for getting a "good" final ordering of instructions. 648 int SchedulingPriority; 649 650 /// The number of dependencies. Constitutes of the number of users of the 651 /// instruction plus the number of dependent memory instructions (if any). 652 /// This value is calculated on demand. 653 /// If InvalidDeps, the number of dependencies is not calculated yet. 654 /// 655 int Dependencies; 656 657 /// The number of dependencies minus the number of dependencies of scheduled 658 /// instructions. As soon as this is zero, the instruction/bundle gets ready 659 /// for scheduling. 660 /// Note that this is negative as long as Dependencies is not calculated. 661 int UnscheduledDeps; 662 663 /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for 664 /// single instructions. 665 int UnscheduledDepsInBundle; 666 667 /// True if this instruction is scheduled (or considered as scheduled in the 668 /// dry-run). 669 bool IsScheduled; 670 }; 671 672 #ifndef NDEBUG 673 friend raw_ostream &operator<<(raw_ostream &os, 674 const BoUpSLP::ScheduleData &SD); 675 #endif 676 677 /// Contains all scheduling data for a basic block. 678 /// 679 struct BlockScheduling { 680 681 BlockScheduling(BasicBlock *BB) 682 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize), 683 ScheduleStart(nullptr), ScheduleEnd(nullptr), 684 FirstLoadStoreInRegion(nullptr), LastLoadStoreInRegion(nullptr), 685 // Make sure that the initial SchedulingRegionID is greater than the 686 // initial SchedulingRegionID in ScheduleData (which is 0). 687 SchedulingRegionID(1) {} 688 689 void clear() { 690 ReadyInsts.clear(); 691 ScheduleStart = nullptr; 692 ScheduleEnd = nullptr; 693 FirstLoadStoreInRegion = nullptr; 694 LastLoadStoreInRegion = nullptr; 695 696 // Make a new scheduling region, i.e. all existing ScheduleData is not 697 // in the new region yet. 698 ++SchedulingRegionID; 699 } 700 701 ScheduleData *getScheduleData(Value *V) { 702 ScheduleData *SD = ScheduleDataMap[V]; 703 if (SD && SD->SchedulingRegionID == SchedulingRegionID) 704 return SD; 705 return nullptr; 706 } 707 708 bool isInSchedulingRegion(ScheduleData *SD) { 709 return SD->SchedulingRegionID == SchedulingRegionID; 710 } 711 712 /// Marks an instruction as scheduled and puts all dependent ready 713 /// instructions into the ready-list. 714 template <typename ReadyListType> 715 void schedule(ScheduleData *SD, ReadyListType &ReadyList) { 716 SD->IsScheduled = true; 717 DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); 718 719 ScheduleData *BundleMember = SD; 720 while (BundleMember) { 721 // Handle the def-use chain dependencies. 722 for (Use &U : BundleMember->Inst->operands()) { 723 ScheduleData *OpDef = getScheduleData(U.get()); 724 if (OpDef && OpDef->hasValidDependencies() && 725 OpDef->incrementUnscheduledDeps(-1) == 0) { 726 // There are no more unscheduled dependencies after decrementing, 727 // so we can put the dependent instruction into the ready list. 728 ScheduleData *DepBundle = OpDef->FirstInBundle; 729 assert(!DepBundle->IsScheduled && 730 "already scheduled bundle gets ready"); 731 ReadyList.insert(DepBundle); 732 DEBUG(dbgs() << "SLP: gets ready (def): " << *DepBundle << "\n"); 733 } 734 } 735 // Handle the memory dependencies. 736 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) { 737 if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) { 738 // There are no more unscheduled dependencies after decrementing, 739 // so we can put the dependent instruction into the ready list. 740 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle; 741 assert(!DepBundle->IsScheduled && 742 "already scheduled bundle gets ready"); 743 ReadyList.insert(DepBundle); 744 DEBUG(dbgs() << "SLP: gets ready (mem): " << *DepBundle << "\n"); 745 } 746 } 747 BundleMember = BundleMember->NextInBundle; 748 } 749 } 750 751 /// Put all instructions into the ReadyList which are ready for scheduling. 752 template <typename ReadyListType> 753 void initialFillReadyList(ReadyListType &ReadyList) { 754 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 755 ScheduleData *SD = getScheduleData(I); 756 if (SD->isSchedulingEntity() && SD->isReady()) { 757 ReadyList.insert(SD); 758 DEBUG(dbgs() << "SLP: initially in ready list: " << *I << "\n"); 759 } 760 } 761 } 762 763 /// Checks if a bundle of instructions can be scheduled, i.e. has no 764 /// cyclic dependencies. This is only a dry-run, no instructions are 765 /// actually moved at this stage. 766 bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP); 767 768 /// Un-bundles a group of instructions. 769 void cancelScheduling(ArrayRef<Value *> VL); 770 771 /// Extends the scheduling region so that V is inside the region. 772 void extendSchedulingRegion(Value *V); 773 774 /// Initialize the ScheduleData structures for new instructions in the 775 /// scheduling region. 776 void initScheduleData(Instruction *FromI, Instruction *ToI, 777 ScheduleData *PrevLoadStore, 778 ScheduleData *NextLoadStore); 779 780 /// Updates the dependency information of a bundle and of all instructions/ 781 /// bundles which depend on the original bundle. 782 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList, 783 BoUpSLP *SLP); 784 785 /// Sets all instruction in the scheduling region to un-scheduled. 786 void resetSchedule(); 787 788 BasicBlock *BB; 789 790 /// Simple memory allocation for ScheduleData. 791 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks; 792 793 /// The size of a ScheduleData array in ScheduleDataChunks. 794 int ChunkSize; 795 796 /// The allocator position in the current chunk, which is the last entry 797 /// of ScheduleDataChunks. 798 int ChunkPos; 799 800 /// Attaches ScheduleData to Instruction. 801 /// Note that the mapping survives during all vectorization iterations, i.e. 802 /// ScheduleData structures are recycled. 803 DenseMap<Value *, ScheduleData *> ScheduleDataMap; 804 805 struct ReadyList : SmallVector<ScheduleData *, 8> { 806 void insert(ScheduleData *SD) { push_back(SD); } 807 }; 808 809 /// The ready-list for scheduling (only used for the dry-run). 810 ReadyList ReadyInsts; 811 812 /// The first instruction of the scheduling region. 813 Instruction *ScheduleStart; 814 815 /// The first instruction _after_ the scheduling region. 816 Instruction *ScheduleEnd; 817 818 /// The first memory accessing instruction in the scheduling region 819 /// (can be null). 820 ScheduleData *FirstLoadStoreInRegion; 821 822 /// The last memory accessing instruction in the scheduling region 823 /// (can be null). 824 ScheduleData *LastLoadStoreInRegion; 825 826 /// The ID of the scheduling region. For a new vectorization iteration this 827 /// is incremented which "removes" all ScheduleData from the region. 828 int SchedulingRegionID; 829 }; 830 831 /// Attaches the BlockScheduling structures to basic blocks. 832 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules; 833 834 /// Performs the "real" scheduling. Done before vectorization is actually 835 /// performed in a basic block. 836 void scheduleBlock(BlockScheduling *BS); 837 838 /// List of users to ignore during scheduling and that don't need extracting. 839 ArrayRef<Value *> UserIgnoreList; 840 841 // Number of load-bundles, which contain consecutive loads. 842 int NumLoadsWantToKeepOrder; 843 844 // Number of load-bundles of size 2, which are consecutive loads if reversed. 845 int NumLoadsWantToChangeOrder; 846 847 // Analysis and block reference. 848 Function *F; 849 ScalarEvolution *SE; 850 const DataLayout *DL; 851 TargetTransformInfo *TTI; 852 TargetLibraryInfo *TLI; 853 AliasAnalysis *AA; 854 LoopInfo *LI; 855 DominatorTree *DT; 856 /// Instruction builder to construct the vectorized tree. 857 IRBuilder<> Builder; 858 }; 859 860 #ifndef NDEBUG 861 raw_ostream &operator<<(raw_ostream &os, const BoUpSLP::ScheduleData &SD) { 862 SD.dump(os); 863 return os; 864 } 865 #endif 866 867 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 868 ArrayRef<Value *> UserIgnoreLst) { 869 deleteTree(); 870 UserIgnoreList = UserIgnoreLst; 871 if (!getSameType(Roots)) 872 return; 873 buildTree_rec(Roots, 0); 874 875 // Collect the values that we need to extract from the tree. 876 for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) { 877 TreeEntry *Entry = &VectorizableTree[EIdx]; 878 879 // For each lane: 880 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 881 Value *Scalar = Entry->Scalars[Lane]; 882 883 // No need to handle users of gathered values. 884 if (Entry->NeedToGather) 885 continue; 886 887 for (User *U : Scalar->users()) { 888 DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); 889 890 Instruction *UserInst = dyn_cast<Instruction>(U); 891 if (!UserInst) 892 continue; 893 894 // Skip in-tree scalars that become vectors 895 if (ScalarToTreeEntry.count(U)) { 896 int Idx = ScalarToTreeEntry[U]; 897 TreeEntry *UseEntry = &VectorizableTree[Idx]; 898 Value *UseScalar = UseEntry->Scalars[0]; 899 // Some in-tree scalars will remain as scalar in vectorized 900 // instructions. If that is the case, the one in Lane 0 will 901 // be used. 902 if (UseScalar != U || 903 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) { 904 DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U 905 << ".\n"); 906 assert(!VectorizableTree[Idx].NeedToGather && "Bad state"); 907 continue; 908 } 909 } 910 911 // Ignore users in the user ignore list. 912 if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UserInst) != 913 UserIgnoreList.end()) 914 continue; 915 916 DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " << 917 Lane << " from " << *Scalar << ".\n"); 918 ExternalUses.push_back(ExternalUser(Scalar, U, Lane)); 919 } 920 } 921 } 922 } 923 924 925 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) { 926 bool SameTy = getSameType(VL); (void)SameTy; 927 bool isAltShuffle = false; 928 assert(SameTy && "Invalid types!"); 929 930 if (Depth == RecursionMaxDepth) { 931 DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); 932 newTreeEntry(VL, false); 933 return; 934 } 935 936 // Don't handle vectors. 937 if (VL[0]->getType()->isVectorTy()) { 938 DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); 939 newTreeEntry(VL, false); 940 return; 941 } 942 943 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 944 if (SI->getValueOperand()->getType()->isVectorTy()) { 945 DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); 946 newTreeEntry(VL, false); 947 return; 948 } 949 unsigned Opcode = getSameOpcode(VL); 950 951 // Check that this shuffle vector refers to the alternate 952 // sequence of opcodes. 953 if (Opcode == Instruction::ShuffleVector) { 954 Instruction *I0 = dyn_cast<Instruction>(VL[0]); 955 unsigned Op = I0->getOpcode(); 956 if (Op != Instruction::ShuffleVector) 957 isAltShuffle = true; 958 } 959 960 // If all of the operands are identical or constant we have a simple solution. 961 if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) || !Opcode) { 962 DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n"); 963 newTreeEntry(VL, false); 964 return; 965 } 966 967 // We now know that this is a vector of instructions of the same type from 968 // the same block. 969 970 // Don't vectorize ephemeral values. 971 for (unsigned i = 0, e = VL.size(); i != e; ++i) { 972 if (EphValues.count(VL[i])) { 973 DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] << 974 ") is ephemeral.\n"); 975 newTreeEntry(VL, false); 976 return; 977 } 978 } 979 980 // Check if this is a duplicate of another entry. 981 if (ScalarToTreeEntry.count(VL[0])) { 982 int Idx = ScalarToTreeEntry[VL[0]]; 983 TreeEntry *E = &VectorizableTree[Idx]; 984 for (unsigned i = 0, e = VL.size(); i != e; ++i) { 985 DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n"); 986 if (E->Scalars[i] != VL[i]) { 987 DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); 988 newTreeEntry(VL, false); 989 return; 990 } 991 } 992 DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n"); 993 return; 994 } 995 996 // Check that none of the instructions in the bundle are already in the tree. 997 for (unsigned i = 0, e = VL.size(); i != e; ++i) { 998 if (ScalarToTreeEntry.count(VL[i])) { 999 DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] << 1000 ") is already in tree.\n"); 1001 newTreeEntry(VL, false); 1002 return; 1003 } 1004 } 1005 1006 // If any of the scalars is marked as a value that needs to stay scalar then 1007 // we need to gather the scalars. 1008 for (unsigned i = 0, e = VL.size(); i != e; ++i) { 1009 if (MustGather.count(VL[i])) { 1010 DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); 1011 newTreeEntry(VL, false); 1012 return; 1013 } 1014 } 1015 1016 // Check that all of the users of the scalars that we want to vectorize are 1017 // schedulable. 1018 Instruction *VL0 = cast<Instruction>(VL[0]); 1019 BasicBlock *BB = cast<Instruction>(VL0)->getParent(); 1020 1021 if (!DT->isReachableFromEntry(BB)) { 1022 // Don't go into unreachable blocks. They may contain instructions with 1023 // dependency cycles which confuse the final scheduling. 1024 DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); 1025 newTreeEntry(VL, false); 1026 return; 1027 } 1028 1029 // Check that every instructions appears once in this bundle. 1030 for (unsigned i = 0, e = VL.size(); i < e; ++i) 1031 for (unsigned j = i+1; j < e; ++j) 1032 if (VL[i] == VL[j]) { 1033 DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 1034 newTreeEntry(VL, false); 1035 return; 1036 } 1037 1038 auto &BSRef = BlocksSchedules[BB]; 1039 if (!BSRef) { 1040 BSRef = llvm::make_unique<BlockScheduling>(BB); 1041 } 1042 BlockScheduling &BS = *BSRef.get(); 1043 1044 if (!BS.tryScheduleBundle(VL, this)) { 1045 DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); 1046 BS.cancelScheduling(VL); 1047 newTreeEntry(VL, false); 1048 return; 1049 } 1050 DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 1051 1052 switch (Opcode) { 1053 case Instruction::PHI: { 1054 PHINode *PH = dyn_cast<PHINode>(VL0); 1055 1056 // Check for terminator values (e.g. invoke). 1057 for (unsigned j = 0; j < VL.size(); ++j) 1058 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 1059 TerminatorInst *Term = dyn_cast<TerminatorInst>( 1060 cast<PHINode>(VL[j])->getIncomingValueForBlock(PH->getIncomingBlock(i))); 1061 if (Term) { 1062 DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n"); 1063 BS.cancelScheduling(VL); 1064 newTreeEntry(VL, false); 1065 return; 1066 } 1067 } 1068 1069 newTreeEntry(VL, true); 1070 DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 1071 1072 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 1073 ValueList Operands; 1074 // Prepare the operand vector. 1075 for (unsigned j = 0; j < VL.size(); ++j) 1076 Operands.push_back(cast<PHINode>(VL[j])->getIncomingValueForBlock( 1077 PH->getIncomingBlock(i))); 1078 1079 buildTree_rec(Operands, Depth + 1); 1080 } 1081 return; 1082 } 1083 case Instruction::ExtractElement: { 1084 bool Reuse = CanReuseExtract(VL); 1085 if (Reuse) { 1086 DEBUG(dbgs() << "SLP: Reusing extract sequence.\n"); 1087 } else { 1088 BS.cancelScheduling(VL); 1089 } 1090 newTreeEntry(VL, Reuse); 1091 return; 1092 } 1093 case Instruction::Load: { 1094 // Check if the loads are consecutive or of we need to swizzle them. 1095 for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) { 1096 LoadInst *L = cast<LoadInst>(VL[i]); 1097 if (!L->isSimple()) { 1098 BS.cancelScheduling(VL); 1099 newTreeEntry(VL, false); 1100 DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); 1101 return; 1102 } 1103 if (!isConsecutiveAccess(VL[i], VL[i + 1])) { 1104 if (VL.size() == 2 && isConsecutiveAccess(VL[1], VL[0])) { 1105 ++NumLoadsWantToChangeOrder; 1106 } 1107 BS.cancelScheduling(VL); 1108 newTreeEntry(VL, false); 1109 DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); 1110 return; 1111 } 1112 } 1113 ++NumLoadsWantToKeepOrder; 1114 newTreeEntry(VL, true); 1115 DEBUG(dbgs() << "SLP: added a vector of loads.\n"); 1116 return; 1117 } 1118 case Instruction::ZExt: 1119 case Instruction::SExt: 1120 case Instruction::FPToUI: 1121 case Instruction::FPToSI: 1122 case Instruction::FPExt: 1123 case Instruction::PtrToInt: 1124 case Instruction::IntToPtr: 1125 case Instruction::SIToFP: 1126 case Instruction::UIToFP: 1127 case Instruction::Trunc: 1128 case Instruction::FPTrunc: 1129 case Instruction::BitCast: { 1130 Type *SrcTy = VL0->getOperand(0)->getType(); 1131 for (unsigned i = 0; i < VL.size(); ++i) { 1132 Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType(); 1133 if (Ty != SrcTy || Ty->isAggregateType() || Ty->isVectorTy()) { 1134 BS.cancelScheduling(VL); 1135 newTreeEntry(VL, false); 1136 DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n"); 1137 return; 1138 } 1139 } 1140 newTreeEntry(VL, true); 1141 DEBUG(dbgs() << "SLP: added a vector of casts.\n"); 1142 1143 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 1144 ValueList Operands; 1145 // Prepare the operand vector. 1146 for (unsigned j = 0; j < VL.size(); ++j) 1147 Operands.push_back(cast<Instruction>(VL[j])->getOperand(i)); 1148 1149 buildTree_rec(Operands, Depth+1); 1150 } 1151 return; 1152 } 1153 case Instruction::ICmp: 1154 case Instruction::FCmp: { 1155 // Check that all of the compares have the same predicate. 1156 CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate(); 1157 Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType(); 1158 for (unsigned i = 1, e = VL.size(); i < e; ++i) { 1159 CmpInst *Cmp = cast<CmpInst>(VL[i]); 1160 if (Cmp->getPredicate() != P0 || 1161 Cmp->getOperand(0)->getType() != ComparedTy) { 1162 BS.cancelScheduling(VL); 1163 newTreeEntry(VL, false); 1164 DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n"); 1165 return; 1166 } 1167 } 1168 1169 newTreeEntry(VL, true); 1170 DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 1171 1172 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 1173 ValueList Operands; 1174 // Prepare the operand vector. 1175 for (unsigned j = 0; j < VL.size(); ++j) 1176 Operands.push_back(cast<Instruction>(VL[j])->getOperand(i)); 1177 1178 buildTree_rec(Operands, Depth+1); 1179 } 1180 return; 1181 } 1182 case Instruction::Select: 1183 case Instruction::Add: 1184 case Instruction::FAdd: 1185 case Instruction::Sub: 1186 case Instruction::FSub: 1187 case Instruction::Mul: 1188 case Instruction::FMul: 1189 case Instruction::UDiv: 1190 case Instruction::SDiv: 1191 case Instruction::FDiv: 1192 case Instruction::URem: 1193 case Instruction::SRem: 1194 case Instruction::FRem: 1195 case Instruction::Shl: 1196 case Instruction::LShr: 1197 case Instruction::AShr: 1198 case Instruction::And: 1199 case Instruction::Or: 1200 case Instruction::Xor: { 1201 newTreeEntry(VL, true); 1202 DEBUG(dbgs() << "SLP: added a vector of bin op.\n"); 1203 1204 // Sort operands of the instructions so that each side is more likely to 1205 // have the same opcode. 1206 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 1207 ValueList Left, Right; 1208 reorderInputsAccordingToOpcode(VL, Left, Right); 1209 buildTree_rec(Left, Depth + 1); 1210 buildTree_rec(Right, Depth + 1); 1211 return; 1212 } 1213 1214 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 1215 ValueList Operands; 1216 // Prepare the operand vector. 1217 for (unsigned j = 0; j < VL.size(); ++j) 1218 Operands.push_back(cast<Instruction>(VL[j])->getOperand(i)); 1219 1220 buildTree_rec(Operands, Depth+1); 1221 } 1222 return; 1223 } 1224 case Instruction::GetElementPtr: { 1225 // We don't combine GEPs with complicated (nested) indexing. 1226 for (unsigned j = 0; j < VL.size(); ++j) { 1227 if (cast<Instruction>(VL[j])->getNumOperands() != 2) { 1228 DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 1229 BS.cancelScheduling(VL); 1230 newTreeEntry(VL, false); 1231 return; 1232 } 1233 } 1234 1235 // We can't combine several GEPs into one vector if they operate on 1236 // different types. 1237 Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType(); 1238 for (unsigned j = 0; j < VL.size(); ++j) { 1239 Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType(); 1240 if (Ty0 != CurTy) { 1241 DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n"); 1242 BS.cancelScheduling(VL); 1243 newTreeEntry(VL, false); 1244 return; 1245 } 1246 } 1247 1248 // We don't combine GEPs with non-constant indexes. 1249 for (unsigned j = 0; j < VL.size(); ++j) { 1250 auto Op = cast<Instruction>(VL[j])->getOperand(1); 1251 if (!isa<ConstantInt>(Op)) { 1252 DEBUG( 1253 dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n"); 1254 BS.cancelScheduling(VL); 1255 newTreeEntry(VL, false); 1256 return; 1257 } 1258 } 1259 1260 newTreeEntry(VL, true); 1261 DEBUG(dbgs() << "SLP: added a vector of GEPs.\n"); 1262 for (unsigned i = 0, e = 2; i < e; ++i) { 1263 ValueList Operands; 1264 // Prepare the operand vector. 1265 for (unsigned j = 0; j < VL.size(); ++j) 1266 Operands.push_back(cast<Instruction>(VL[j])->getOperand(i)); 1267 1268 buildTree_rec(Operands, Depth + 1); 1269 } 1270 return; 1271 } 1272 case Instruction::Store: { 1273 // Check if the stores are consecutive or of we need to swizzle them. 1274 for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) 1275 if (!isConsecutiveAccess(VL[i], VL[i + 1])) { 1276 BS.cancelScheduling(VL); 1277 newTreeEntry(VL, false); 1278 DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); 1279 return; 1280 } 1281 1282 newTreeEntry(VL, true); 1283 DEBUG(dbgs() << "SLP: added a vector of stores.\n"); 1284 1285 ValueList Operands; 1286 for (unsigned j = 0; j < VL.size(); ++j) 1287 Operands.push_back(cast<Instruction>(VL[j])->getOperand(0)); 1288 1289 buildTree_rec(Operands, Depth + 1); 1290 return; 1291 } 1292 case Instruction::Call: { 1293 // Check if the calls are all to the same vectorizable intrinsic. 1294 CallInst *CI = cast<CallInst>(VL[0]); 1295 // Check if this is an Intrinsic call or something that can be 1296 // represented by an intrinsic call 1297 Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI); 1298 if (!isTriviallyVectorizable(ID)) { 1299 BS.cancelScheduling(VL); 1300 newTreeEntry(VL, false); 1301 DEBUG(dbgs() << "SLP: Non-vectorizable call.\n"); 1302 return; 1303 } 1304 Function *Int = CI->getCalledFunction(); 1305 Value *A1I = nullptr; 1306 if (hasVectorInstrinsicScalarOpd(ID, 1)) 1307 A1I = CI->getArgOperand(1); 1308 for (unsigned i = 1, e = VL.size(); i != e; ++i) { 1309 CallInst *CI2 = dyn_cast<CallInst>(VL[i]); 1310 if (!CI2 || CI2->getCalledFunction() != Int || 1311 getIntrinsicIDForCall(CI2, TLI) != ID) { 1312 BS.cancelScheduling(VL); 1313 newTreeEntry(VL, false); 1314 DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i] 1315 << "\n"); 1316 return; 1317 } 1318 // ctlz,cttz and powi are special intrinsics whose second argument 1319 // should be same in order for them to be vectorized. 1320 if (hasVectorInstrinsicScalarOpd(ID, 1)) { 1321 Value *A1J = CI2->getArgOperand(1); 1322 if (A1I != A1J) { 1323 BS.cancelScheduling(VL); 1324 newTreeEntry(VL, false); 1325 DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI 1326 << " argument "<< A1I<<"!=" << A1J 1327 << "\n"); 1328 return; 1329 } 1330 } 1331 } 1332 1333 newTreeEntry(VL, true); 1334 for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) { 1335 ValueList Operands; 1336 // Prepare the operand vector. 1337 for (unsigned j = 0; j < VL.size(); ++j) { 1338 CallInst *CI2 = dyn_cast<CallInst>(VL[j]); 1339 Operands.push_back(CI2->getArgOperand(i)); 1340 } 1341 buildTree_rec(Operands, Depth + 1); 1342 } 1343 return; 1344 } 1345 case Instruction::ShuffleVector: { 1346 // If this is not an alternate sequence of opcode like add-sub 1347 // then do not vectorize this instruction. 1348 if (!isAltShuffle) { 1349 BS.cancelScheduling(VL); 1350 newTreeEntry(VL, false); 1351 DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n"); 1352 return; 1353 } 1354 newTreeEntry(VL, true); 1355 DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n"); 1356 1357 // Reorder operands if reordering would enable vectorization. 1358 if (isa<BinaryOperator>(VL0)) { 1359 ValueList Left, Right; 1360 reorderAltShuffleOperands(VL, Left, Right); 1361 buildTree_rec(Left, Depth + 1); 1362 buildTree_rec(Right, Depth + 1); 1363 return; 1364 } 1365 1366 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 1367 ValueList Operands; 1368 // Prepare the operand vector. 1369 for (unsigned j = 0; j < VL.size(); ++j) 1370 Operands.push_back(cast<Instruction>(VL[j])->getOperand(i)); 1371 1372 buildTree_rec(Operands, Depth + 1); 1373 } 1374 return; 1375 } 1376 default: 1377 BS.cancelScheduling(VL); 1378 newTreeEntry(VL, false); 1379 DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); 1380 return; 1381 } 1382 } 1383 1384 int BoUpSLP::getEntryCost(TreeEntry *E) { 1385 ArrayRef<Value*> VL = E->Scalars; 1386 1387 Type *ScalarTy = VL[0]->getType(); 1388 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 1389 ScalarTy = SI->getValueOperand()->getType(); 1390 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 1391 1392 if (E->NeedToGather) { 1393 if (allConstant(VL)) 1394 return 0; 1395 if (isSplat(VL)) { 1396 return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0); 1397 } 1398 return getGatherCost(E->Scalars); 1399 } 1400 unsigned Opcode = getSameOpcode(VL); 1401 assert(Opcode && getSameType(VL) && getSameBlock(VL) && "Invalid VL"); 1402 Instruction *VL0 = cast<Instruction>(VL[0]); 1403 switch (Opcode) { 1404 case Instruction::PHI: { 1405 return 0; 1406 } 1407 case Instruction::ExtractElement: { 1408 if (CanReuseExtract(VL)) { 1409 int DeadCost = 0; 1410 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 1411 ExtractElementInst *E = cast<ExtractElementInst>(VL[i]); 1412 if (E->hasOneUse()) 1413 // Take credit for instruction that will become dead. 1414 DeadCost += 1415 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i); 1416 } 1417 return -DeadCost; 1418 } 1419 return getGatherCost(VecTy); 1420 } 1421 case Instruction::ZExt: 1422 case Instruction::SExt: 1423 case Instruction::FPToUI: 1424 case Instruction::FPToSI: 1425 case Instruction::FPExt: 1426 case Instruction::PtrToInt: 1427 case Instruction::IntToPtr: 1428 case Instruction::SIToFP: 1429 case Instruction::UIToFP: 1430 case Instruction::Trunc: 1431 case Instruction::FPTrunc: 1432 case Instruction::BitCast: { 1433 Type *SrcTy = VL0->getOperand(0)->getType(); 1434 1435 // Calculate the cost of this instruction. 1436 int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(), 1437 VL0->getType(), SrcTy); 1438 1439 VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size()); 1440 int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy); 1441 return VecCost - ScalarCost; 1442 } 1443 case Instruction::FCmp: 1444 case Instruction::ICmp: 1445 case Instruction::Select: 1446 case Instruction::Add: 1447 case Instruction::FAdd: 1448 case Instruction::Sub: 1449 case Instruction::FSub: 1450 case Instruction::Mul: 1451 case Instruction::FMul: 1452 case Instruction::UDiv: 1453 case Instruction::SDiv: 1454 case Instruction::FDiv: 1455 case Instruction::URem: 1456 case Instruction::SRem: 1457 case Instruction::FRem: 1458 case Instruction::Shl: 1459 case Instruction::LShr: 1460 case Instruction::AShr: 1461 case Instruction::And: 1462 case Instruction::Or: 1463 case Instruction::Xor: { 1464 // Calculate the cost of this instruction. 1465 int ScalarCost = 0; 1466 int VecCost = 0; 1467 if (Opcode == Instruction::FCmp || Opcode == Instruction::ICmp || 1468 Opcode == Instruction::Select) { 1469 VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size()); 1470 ScalarCost = VecTy->getNumElements() * 1471 TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty()); 1472 VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy); 1473 } else { 1474 // Certain instructions can be cheaper to vectorize if they have a 1475 // constant second vector operand. 1476 TargetTransformInfo::OperandValueKind Op1VK = 1477 TargetTransformInfo::OK_AnyValue; 1478 TargetTransformInfo::OperandValueKind Op2VK = 1479 TargetTransformInfo::OK_UniformConstantValue; 1480 TargetTransformInfo::OperandValueProperties Op1VP = 1481 TargetTransformInfo::OP_None; 1482 TargetTransformInfo::OperandValueProperties Op2VP = 1483 TargetTransformInfo::OP_None; 1484 1485 // If all operands are exactly the same ConstantInt then set the 1486 // operand kind to OK_UniformConstantValue. 1487 // If instead not all operands are constants, then set the operand kind 1488 // to OK_AnyValue. If all operands are constants but not the same, 1489 // then set the operand kind to OK_NonUniformConstantValue. 1490 ConstantInt *CInt = nullptr; 1491 for (unsigned i = 0; i < VL.size(); ++i) { 1492 const Instruction *I = cast<Instruction>(VL[i]); 1493 if (!isa<ConstantInt>(I->getOperand(1))) { 1494 Op2VK = TargetTransformInfo::OK_AnyValue; 1495 break; 1496 } 1497 if (i == 0) { 1498 CInt = cast<ConstantInt>(I->getOperand(1)); 1499 continue; 1500 } 1501 if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && 1502 CInt != cast<ConstantInt>(I->getOperand(1))) 1503 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 1504 } 1505 // FIXME: Currently cost of model modification for division by 1506 // power of 2 is handled only for X86. Add support for other targets. 1507 if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt && 1508 CInt->getValue().isPowerOf2()) 1509 Op2VP = TargetTransformInfo::OP_PowerOf2; 1510 1511 ScalarCost = VecTy->getNumElements() * 1512 TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK, 1513 Op1VP, Op2VP); 1514 VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK, 1515 Op1VP, Op2VP); 1516 } 1517 return VecCost - ScalarCost; 1518 } 1519 case Instruction::GetElementPtr: { 1520 TargetTransformInfo::OperandValueKind Op1VK = 1521 TargetTransformInfo::OK_AnyValue; 1522 TargetTransformInfo::OperandValueKind Op2VK = 1523 TargetTransformInfo::OK_UniformConstantValue; 1524 1525 int ScalarCost = 1526 VecTy->getNumElements() * 1527 TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK); 1528 int VecCost = 1529 TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK); 1530 1531 return VecCost - ScalarCost; 1532 } 1533 case Instruction::Load: { 1534 // Cost of wide load - cost of scalar loads. 1535 int ScalarLdCost = VecTy->getNumElements() * 1536 TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0); 1537 int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0); 1538 return VecLdCost - ScalarLdCost; 1539 } 1540 case Instruction::Store: { 1541 // We know that we can merge the stores. Calculate the cost. 1542 int ScalarStCost = VecTy->getNumElements() * 1543 TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0); 1544 int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0); 1545 return VecStCost - ScalarStCost; 1546 } 1547 case Instruction::Call: { 1548 CallInst *CI = cast<CallInst>(VL0); 1549 Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI); 1550 1551 // Calculate the cost of the scalar and vector calls. 1552 SmallVector<Type*, 4> ScalarTys, VecTys; 1553 for (unsigned op = 0, opc = CI->getNumArgOperands(); op!= opc; ++op) { 1554 ScalarTys.push_back(CI->getArgOperand(op)->getType()); 1555 VecTys.push_back(VectorType::get(CI->getArgOperand(op)->getType(), 1556 VecTy->getNumElements())); 1557 } 1558 1559 int ScalarCallCost = VecTy->getNumElements() * 1560 TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys); 1561 1562 int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys); 1563 1564 DEBUG(dbgs() << "SLP: Call cost "<< VecCallCost - ScalarCallCost 1565 << " (" << VecCallCost << "-" << ScalarCallCost << ")" 1566 << " for " << *CI << "\n"); 1567 1568 return VecCallCost - ScalarCallCost; 1569 } 1570 case Instruction::ShuffleVector: { 1571 TargetTransformInfo::OperandValueKind Op1VK = 1572 TargetTransformInfo::OK_AnyValue; 1573 TargetTransformInfo::OperandValueKind Op2VK = 1574 TargetTransformInfo::OK_AnyValue; 1575 int ScalarCost = 0; 1576 int VecCost = 0; 1577 for (unsigned i = 0; i < VL.size(); ++i) { 1578 Instruction *I = cast<Instruction>(VL[i]); 1579 if (!I) 1580 break; 1581 ScalarCost += 1582 TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK); 1583 } 1584 // VecCost is equal to sum of the cost of creating 2 vectors 1585 // and the cost of creating shuffle. 1586 Instruction *I0 = cast<Instruction>(VL[0]); 1587 VecCost = 1588 TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK); 1589 Instruction *I1 = cast<Instruction>(VL[1]); 1590 VecCost += 1591 TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK); 1592 VecCost += 1593 TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0); 1594 return VecCost - ScalarCost; 1595 } 1596 default: 1597 llvm_unreachable("Unknown instruction"); 1598 } 1599 } 1600 1601 bool BoUpSLP::isFullyVectorizableTinyTree() { 1602 DEBUG(dbgs() << "SLP: Check whether the tree with height " << 1603 VectorizableTree.size() << " is fully vectorizable .\n"); 1604 1605 // We only handle trees of height 2. 1606 if (VectorizableTree.size() != 2) 1607 return false; 1608 1609 // Handle splat stores. 1610 if (!VectorizableTree[0].NeedToGather && isSplat(VectorizableTree[1].Scalars)) 1611 return true; 1612 1613 // Gathering cost would be too much for tiny trees. 1614 if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather) 1615 return false; 1616 1617 return true; 1618 } 1619 1620 int BoUpSLP::getSpillCost() { 1621 // Walk from the bottom of the tree to the top, tracking which values are 1622 // live. When we see a call instruction that is not part of our tree, 1623 // query TTI to see if there is a cost to keeping values live over it 1624 // (for example, if spills and fills are required). 1625 unsigned BundleWidth = VectorizableTree.front().Scalars.size(); 1626 int Cost = 0; 1627 1628 SmallPtrSet<Instruction*, 4> LiveValues; 1629 Instruction *PrevInst = nullptr; 1630 1631 for (unsigned N = 0; N < VectorizableTree.size(); ++N) { 1632 Instruction *Inst = dyn_cast<Instruction>(VectorizableTree[N].Scalars[0]); 1633 if (!Inst) 1634 continue; 1635 1636 if (!PrevInst) { 1637 PrevInst = Inst; 1638 continue; 1639 } 1640 1641 DEBUG( 1642 dbgs() << "SLP: #LV: " << LiveValues.size(); 1643 for (auto *X : LiveValues) 1644 dbgs() << " " << X->getName(); 1645 dbgs() << ", Looking at "; 1646 Inst->dump(); 1647 ); 1648 1649 // Update LiveValues. 1650 LiveValues.erase(PrevInst); 1651 for (auto &J : PrevInst->operands()) { 1652 if (isa<Instruction>(&*J) && ScalarToTreeEntry.count(&*J)) 1653 LiveValues.insert(cast<Instruction>(&*J)); 1654 } 1655 1656 // Now find the sequence of instructions between PrevInst and Inst. 1657 BasicBlock::reverse_iterator InstIt(Inst), PrevInstIt(PrevInst); 1658 --PrevInstIt; 1659 while (InstIt != PrevInstIt) { 1660 if (PrevInstIt == PrevInst->getParent()->rend()) { 1661 PrevInstIt = Inst->getParent()->rbegin(); 1662 continue; 1663 } 1664 1665 if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) { 1666 SmallVector<Type*, 4> V; 1667 for (auto *II : LiveValues) 1668 V.push_back(VectorType::get(II->getType(), BundleWidth)); 1669 Cost += TTI->getCostOfKeepingLiveOverCall(V); 1670 } 1671 1672 ++PrevInstIt; 1673 } 1674 1675 PrevInst = Inst; 1676 } 1677 1678 DEBUG(dbgs() << "SLP: SpillCost=" << Cost << "\n"); 1679 return Cost; 1680 } 1681 1682 int BoUpSLP::getTreeCost() { 1683 int Cost = 0; 1684 DEBUG(dbgs() << "SLP: Calculating cost for tree of size " << 1685 VectorizableTree.size() << ".\n"); 1686 1687 // We only vectorize tiny trees if it is fully vectorizable. 1688 if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) { 1689 if (VectorizableTree.empty()) { 1690 assert(!ExternalUses.size() && "We should not have any external users"); 1691 } 1692 return INT_MAX; 1693 } 1694 1695 unsigned BundleWidth = VectorizableTree[0].Scalars.size(); 1696 1697 for (unsigned i = 0, e = VectorizableTree.size(); i != e; ++i) { 1698 int C = getEntryCost(&VectorizableTree[i]); 1699 DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with " 1700 << *VectorizableTree[i].Scalars[0] << " .\n"); 1701 Cost += C; 1702 } 1703 1704 SmallSet<Value *, 16> ExtractCostCalculated; 1705 int ExtractCost = 0; 1706 for (UserList::iterator I = ExternalUses.begin(), E = ExternalUses.end(); 1707 I != E; ++I) { 1708 // We only add extract cost once for the same scalar. 1709 if (!ExtractCostCalculated.insert(I->Scalar).second) 1710 continue; 1711 1712 // Uses by ephemeral values are free (because the ephemeral value will be 1713 // removed prior to code generation, and so the extraction will be 1714 // removed as well). 1715 if (EphValues.count(I->User)) 1716 continue; 1717 1718 VectorType *VecTy = VectorType::get(I->Scalar->getType(), BundleWidth); 1719 ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, 1720 I->Lane); 1721 } 1722 1723 Cost += getSpillCost(); 1724 1725 DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost<< ".\n"); 1726 return Cost + ExtractCost; 1727 } 1728 1729 int BoUpSLP::getGatherCost(Type *Ty) { 1730 int Cost = 0; 1731 for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i) 1732 Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i); 1733 return Cost; 1734 } 1735 1736 int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) { 1737 // Find the type of the operands in VL. 1738 Type *ScalarTy = VL[0]->getType(); 1739 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 1740 ScalarTy = SI->getValueOperand()->getType(); 1741 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 1742 // Find the cost of inserting/extracting values from the vector. 1743 return getGatherCost(VecTy); 1744 } 1745 1746 Value *BoUpSLP::getPointerOperand(Value *I) { 1747 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 1748 return LI->getPointerOperand(); 1749 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 1750 return SI->getPointerOperand(); 1751 return nullptr; 1752 } 1753 1754 unsigned BoUpSLP::getAddressSpaceOperand(Value *I) { 1755 if (LoadInst *L = dyn_cast<LoadInst>(I)) 1756 return L->getPointerAddressSpace(); 1757 if (StoreInst *S = dyn_cast<StoreInst>(I)) 1758 return S->getPointerAddressSpace(); 1759 return -1; 1760 } 1761 1762 bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B) { 1763 Value *PtrA = getPointerOperand(A); 1764 Value *PtrB = getPointerOperand(B); 1765 unsigned ASA = getAddressSpaceOperand(A); 1766 unsigned ASB = getAddressSpaceOperand(B); 1767 1768 // Check that the address spaces match and that the pointers are valid. 1769 if (!PtrA || !PtrB || (ASA != ASB)) 1770 return false; 1771 1772 // Make sure that A and B are different pointers of the same type. 1773 if (PtrA == PtrB || PtrA->getType() != PtrB->getType()) 1774 return false; 1775 1776 unsigned PtrBitWidth = DL->getPointerSizeInBits(ASA); 1777 Type *Ty = cast<PointerType>(PtrA->getType())->getElementType(); 1778 APInt Size(PtrBitWidth, DL->getTypeStoreSize(Ty)); 1779 1780 APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0); 1781 PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetA); 1782 PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetB); 1783 1784 APInt OffsetDelta = OffsetB - OffsetA; 1785 1786 // Check if they are based on the same pointer. That makes the offsets 1787 // sufficient. 1788 if (PtrA == PtrB) 1789 return OffsetDelta == Size; 1790 1791 // Compute the necessary base pointer delta to have the necessary final delta 1792 // equal to the size. 1793 APInt BaseDelta = Size - OffsetDelta; 1794 1795 // Otherwise compute the distance with SCEV between the base pointers. 1796 const SCEV *PtrSCEVA = SE->getSCEV(PtrA); 1797 const SCEV *PtrSCEVB = SE->getSCEV(PtrB); 1798 const SCEV *C = SE->getConstant(BaseDelta); 1799 const SCEV *X = SE->getAddExpr(PtrSCEVA, C); 1800 return X == PtrSCEVB; 1801 } 1802 1803 // Reorder commutative operations in alternate shuffle if the resulting vectors 1804 // are consecutive loads. This would allow us to vectorize the tree. 1805 // If we have something like- 1806 // load a[0] - load b[0] 1807 // load b[1] + load a[1] 1808 // load a[2] - load b[2] 1809 // load a[3] + load b[3] 1810 // Reordering the second load b[1] load a[1] would allow us to vectorize this 1811 // code. 1812 void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL, 1813 SmallVectorImpl<Value *> &Left, 1814 SmallVectorImpl<Value *> &Right) { 1815 1816 // Push left and right operands of binary operation into Left and Right 1817 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 1818 Left.push_back(cast<Instruction>(VL[i])->getOperand(0)); 1819 Right.push_back(cast<Instruction>(VL[i])->getOperand(1)); 1820 } 1821 1822 // Reorder if we have a commutative operation and consecutive access 1823 // are on either side of the alternate instructions. 1824 for (unsigned j = 0; j < VL.size() - 1; ++j) { 1825 if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) { 1826 if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) { 1827 Instruction *VL1 = cast<Instruction>(VL[j]); 1828 Instruction *VL2 = cast<Instruction>(VL[j + 1]); 1829 if (isConsecutiveAccess(L, L1) && VL1->isCommutative()) { 1830 std::swap(Left[j], Right[j]); 1831 continue; 1832 } else if (isConsecutiveAccess(L, L1) && VL2->isCommutative()) { 1833 std::swap(Left[j + 1], Right[j + 1]); 1834 continue; 1835 } 1836 // else unchanged 1837 } 1838 } 1839 if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) { 1840 if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) { 1841 Instruction *VL1 = cast<Instruction>(VL[j]); 1842 Instruction *VL2 = cast<Instruction>(VL[j + 1]); 1843 if (isConsecutiveAccess(L, L1) && VL1->isCommutative()) { 1844 std::swap(Left[j], Right[j]); 1845 continue; 1846 } else if (isConsecutiveAccess(L, L1) && VL2->isCommutative()) { 1847 std::swap(Left[j + 1], Right[j + 1]); 1848 continue; 1849 } 1850 // else unchanged 1851 } 1852 } 1853 } 1854 } 1855 1856 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, 1857 SmallVectorImpl<Value *> &Left, 1858 SmallVectorImpl<Value *> &Right) { 1859 1860 SmallVector<Value *, 16> OrigLeft, OrigRight; 1861 1862 bool AllSameOpcodeLeft = true; 1863 bool AllSameOpcodeRight = true; 1864 for (unsigned i = 0, e = VL.size(); i != e; ++i) { 1865 Instruction *I = cast<Instruction>(VL[i]); 1866 Value *VLeft = I->getOperand(0); 1867 Value *VRight = I->getOperand(1); 1868 1869 OrigLeft.push_back(VLeft); 1870 OrigRight.push_back(VRight); 1871 1872 Instruction *ILeft = dyn_cast<Instruction>(VLeft); 1873 Instruction *IRight = dyn_cast<Instruction>(VRight); 1874 1875 // Check whether all operands on one side have the same opcode. In this case 1876 // we want to preserve the original order and not make things worse by 1877 // reordering. 1878 if (i && AllSameOpcodeLeft && ILeft) { 1879 if (Instruction *PLeft = dyn_cast<Instruction>(OrigLeft[i - 1])) { 1880 if (PLeft->getOpcode() != ILeft->getOpcode()) 1881 AllSameOpcodeLeft = false; 1882 } else 1883 AllSameOpcodeLeft = false; 1884 } 1885 if (i && AllSameOpcodeRight && IRight) { 1886 if (Instruction *PRight = dyn_cast<Instruction>(OrigRight[i - 1])) { 1887 if (PRight->getOpcode() != IRight->getOpcode()) 1888 AllSameOpcodeRight = false; 1889 } else 1890 AllSameOpcodeRight = false; 1891 } 1892 1893 // Sort two opcodes. In the code below we try to preserve the ability to use 1894 // broadcast of values instead of individual inserts. 1895 // vl1 = load 1896 // vl2 = phi 1897 // vr1 = load 1898 // vr2 = vr2 1899 // = vl1 x vr1 1900 // = vl2 x vr2 1901 // If we just sorted according to opcode we would leave the first line in 1902 // tact but we would swap vl2 with vr2 because opcode(phi) > opcode(load). 1903 // = vl1 x vr1 1904 // = vr2 x vl2 1905 // Because vr2 and vr1 are from the same load we loose the opportunity of a 1906 // broadcast for the packed right side in the backend: we have [vr1, vl2] 1907 // instead of [vr1, vr2=vr1]. 1908 if (ILeft && IRight) { 1909 if (!i && ILeft->getOpcode() > IRight->getOpcode()) { 1910 Left.push_back(IRight); 1911 Right.push_back(ILeft); 1912 } else if (i && ILeft->getOpcode() > IRight->getOpcode() && 1913 Right[i - 1] != IRight) { 1914 // Try not to destroy a broad cast for no apparent benefit. 1915 Left.push_back(IRight); 1916 Right.push_back(ILeft); 1917 } else if (i && ILeft->getOpcode() == IRight->getOpcode() && 1918 Right[i - 1] == ILeft) { 1919 // Try preserve broadcasts. 1920 Left.push_back(IRight); 1921 Right.push_back(ILeft); 1922 } else if (i && ILeft->getOpcode() == IRight->getOpcode() && 1923 Left[i - 1] == IRight) { 1924 // Try preserve broadcasts. 1925 Left.push_back(IRight); 1926 Right.push_back(ILeft); 1927 } else { 1928 Left.push_back(ILeft); 1929 Right.push_back(IRight); 1930 } 1931 continue; 1932 } 1933 // One opcode, put the instruction on the right. 1934 if (ILeft) { 1935 Left.push_back(VRight); 1936 Right.push_back(ILeft); 1937 continue; 1938 } 1939 Left.push_back(VLeft); 1940 Right.push_back(VRight); 1941 } 1942 1943 bool LeftBroadcast = isSplat(Left); 1944 bool RightBroadcast = isSplat(Right); 1945 1946 // If operands end up being broadcast return this operand order. 1947 if (LeftBroadcast || RightBroadcast) 1948 return; 1949 1950 // Don't reorder if the operands where good to begin. 1951 if (AllSameOpcodeRight || AllSameOpcodeLeft) { 1952 Left = OrigLeft; 1953 Right = OrigRight; 1954 } 1955 1956 // Finally check if we can get longer vectorizable chain by reordering 1957 // without breaking the good operand order detected above. 1958 // E.g. If we have something like- 1959 // load a[0] load b[0] 1960 // load b[1] load a[1] 1961 // load a[2] load b[2] 1962 // load a[3] load b[3] 1963 // Reordering the second load b[1] load a[1] would allow us to vectorize 1964 // this code and we still retain AllSameOpcode property. 1965 // FIXME: This load reordering might break AllSameOpcode in some rare cases 1966 // such as- 1967 // add a[0],c[0] load b[0] 1968 // add a[1],c[2] load b[1] 1969 // b[2] load b[2] 1970 // add a[3],c[3] load b[3] 1971 for (unsigned j = 0; j < VL.size() - 1; ++j) { 1972 if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) { 1973 if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) { 1974 if (isConsecutiveAccess(L, L1)) { 1975 std::swap(Left[j + 1], Right[j + 1]); 1976 continue; 1977 } 1978 } 1979 } 1980 if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) { 1981 if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) { 1982 if (isConsecutiveAccess(L, L1)) { 1983 std::swap(Left[j + 1], Right[j + 1]); 1984 continue; 1985 } 1986 } 1987 } 1988 // else unchanged 1989 } 1990 } 1991 1992 void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) { 1993 Instruction *VL0 = cast<Instruction>(VL[0]); 1994 BasicBlock::iterator NextInst = VL0; 1995 ++NextInst; 1996 Builder.SetInsertPoint(VL0->getParent(), NextInst); 1997 Builder.SetCurrentDebugLocation(VL0->getDebugLoc()); 1998 } 1999 2000 Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) { 2001 Value *Vec = UndefValue::get(Ty); 2002 // Generate the 'InsertElement' instruction. 2003 for (unsigned i = 0; i < Ty->getNumElements(); ++i) { 2004 Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i)); 2005 if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) { 2006 GatherSeq.insert(Insrt); 2007 CSEBlocks.insert(Insrt->getParent()); 2008 2009 // Add to our 'need-to-extract' list. 2010 if (ScalarToTreeEntry.count(VL[i])) { 2011 int Idx = ScalarToTreeEntry[VL[i]]; 2012 TreeEntry *E = &VectorizableTree[Idx]; 2013 // Find which lane we need to extract. 2014 int FoundLane = -1; 2015 for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) { 2016 // Is this the lane of the scalar that we are looking for ? 2017 if (E->Scalars[Lane] == VL[i]) { 2018 FoundLane = Lane; 2019 break; 2020 } 2021 } 2022 assert(FoundLane >= 0 && "Could not find the correct lane"); 2023 ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane)); 2024 } 2025 } 2026 } 2027 2028 return Vec; 2029 } 2030 2031 Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const { 2032 SmallDenseMap<Value*, int>::const_iterator Entry 2033 = ScalarToTreeEntry.find(VL[0]); 2034 if (Entry != ScalarToTreeEntry.end()) { 2035 int Idx = Entry->second; 2036 const TreeEntry *En = &VectorizableTree[Idx]; 2037 if (En->isSame(VL) && En->VectorizedValue) 2038 return En->VectorizedValue; 2039 } 2040 return nullptr; 2041 } 2042 2043 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) { 2044 if (ScalarToTreeEntry.count(VL[0])) { 2045 int Idx = ScalarToTreeEntry[VL[0]]; 2046 TreeEntry *E = &VectorizableTree[Idx]; 2047 if (E->isSame(VL)) 2048 return vectorizeTree(E); 2049 } 2050 2051 Type *ScalarTy = VL[0]->getType(); 2052 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 2053 ScalarTy = SI->getValueOperand()->getType(); 2054 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 2055 2056 return Gather(VL, VecTy); 2057 } 2058 2059 Value *BoUpSLP::vectorizeTree(TreeEntry *E) { 2060 IRBuilder<>::InsertPointGuard Guard(Builder); 2061 2062 if (E->VectorizedValue) { 2063 DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); 2064 return E->VectorizedValue; 2065 } 2066 2067 Instruction *VL0 = cast<Instruction>(E->Scalars[0]); 2068 Type *ScalarTy = VL0->getType(); 2069 if (StoreInst *SI = dyn_cast<StoreInst>(VL0)) 2070 ScalarTy = SI->getValueOperand()->getType(); 2071 VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size()); 2072 2073 if (E->NeedToGather) { 2074 setInsertPointAfterBundle(E->Scalars); 2075 return Gather(E->Scalars, VecTy); 2076 } 2077 2078 unsigned Opcode = getSameOpcode(E->Scalars); 2079 2080 switch (Opcode) { 2081 case Instruction::PHI: { 2082 PHINode *PH = dyn_cast<PHINode>(VL0); 2083 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI()); 2084 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 2085 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 2086 E->VectorizedValue = NewPhi; 2087 2088 // PHINodes may have multiple entries from the same block. We want to 2089 // visit every block once. 2090 SmallSet<BasicBlock*, 4> VisitedBBs; 2091 2092 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 2093 ValueList Operands; 2094 BasicBlock *IBB = PH->getIncomingBlock(i); 2095 2096 if (!VisitedBBs.insert(IBB).second) { 2097 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 2098 continue; 2099 } 2100 2101 // Prepare the operand vector. 2102 for (unsigned j = 0; j < E->Scalars.size(); ++j) 2103 Operands.push_back(cast<PHINode>(E->Scalars[j])-> 2104 getIncomingValueForBlock(IBB)); 2105 2106 Builder.SetInsertPoint(IBB->getTerminator()); 2107 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 2108 Value *Vec = vectorizeTree(Operands); 2109 NewPhi->addIncoming(Vec, IBB); 2110 } 2111 2112 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 2113 "Invalid number of incoming values"); 2114 return NewPhi; 2115 } 2116 2117 case Instruction::ExtractElement: { 2118 if (CanReuseExtract(E->Scalars)) { 2119 Value *V = VL0->getOperand(0); 2120 E->VectorizedValue = V; 2121 return V; 2122 } 2123 return Gather(E->Scalars, VecTy); 2124 } 2125 case Instruction::ZExt: 2126 case Instruction::SExt: 2127 case Instruction::FPToUI: 2128 case Instruction::FPToSI: 2129 case Instruction::FPExt: 2130 case Instruction::PtrToInt: 2131 case Instruction::IntToPtr: 2132 case Instruction::SIToFP: 2133 case Instruction::UIToFP: 2134 case Instruction::Trunc: 2135 case Instruction::FPTrunc: 2136 case Instruction::BitCast: { 2137 ValueList INVL; 2138 for (int i = 0, e = E->Scalars.size(); i < e; ++i) 2139 INVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0)); 2140 2141 setInsertPointAfterBundle(E->Scalars); 2142 2143 Value *InVec = vectorizeTree(INVL); 2144 2145 if (Value *V = alreadyVectorized(E->Scalars)) 2146 return V; 2147 2148 CastInst *CI = dyn_cast<CastInst>(VL0); 2149 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy); 2150 E->VectorizedValue = V; 2151 ++NumVectorInstructions; 2152 return V; 2153 } 2154 case Instruction::FCmp: 2155 case Instruction::ICmp: { 2156 ValueList LHSV, RHSV; 2157 for (int i = 0, e = E->Scalars.size(); i < e; ++i) { 2158 LHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0)); 2159 RHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1)); 2160 } 2161 2162 setInsertPointAfterBundle(E->Scalars); 2163 2164 Value *L = vectorizeTree(LHSV); 2165 Value *R = vectorizeTree(RHSV); 2166 2167 if (Value *V = alreadyVectorized(E->Scalars)) 2168 return V; 2169 2170 CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate(); 2171 Value *V; 2172 if (Opcode == Instruction::FCmp) 2173 V = Builder.CreateFCmp(P0, L, R); 2174 else 2175 V = Builder.CreateICmp(P0, L, R); 2176 2177 E->VectorizedValue = V; 2178 ++NumVectorInstructions; 2179 return V; 2180 } 2181 case Instruction::Select: { 2182 ValueList TrueVec, FalseVec, CondVec; 2183 for (int i = 0, e = E->Scalars.size(); i < e; ++i) { 2184 CondVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0)); 2185 TrueVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1)); 2186 FalseVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(2)); 2187 } 2188 2189 setInsertPointAfterBundle(E->Scalars); 2190 2191 Value *Cond = vectorizeTree(CondVec); 2192 Value *True = vectorizeTree(TrueVec); 2193 Value *False = vectorizeTree(FalseVec); 2194 2195 if (Value *V = alreadyVectorized(E->Scalars)) 2196 return V; 2197 2198 Value *V = Builder.CreateSelect(Cond, True, False); 2199 E->VectorizedValue = V; 2200 ++NumVectorInstructions; 2201 return V; 2202 } 2203 case Instruction::Add: 2204 case Instruction::FAdd: 2205 case Instruction::Sub: 2206 case Instruction::FSub: 2207 case Instruction::Mul: 2208 case Instruction::FMul: 2209 case Instruction::UDiv: 2210 case Instruction::SDiv: 2211 case Instruction::FDiv: 2212 case Instruction::URem: 2213 case Instruction::SRem: 2214 case Instruction::FRem: 2215 case Instruction::Shl: 2216 case Instruction::LShr: 2217 case Instruction::AShr: 2218 case Instruction::And: 2219 case Instruction::Or: 2220 case Instruction::Xor: { 2221 ValueList LHSVL, RHSVL; 2222 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) 2223 reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL); 2224 else 2225 for (int i = 0, e = E->Scalars.size(); i < e; ++i) { 2226 LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0)); 2227 RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1)); 2228 } 2229 2230 setInsertPointAfterBundle(E->Scalars); 2231 2232 Value *LHS = vectorizeTree(LHSVL); 2233 Value *RHS = vectorizeTree(RHSVL); 2234 2235 if (LHS == RHS && isa<Instruction>(LHS)) { 2236 assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order"); 2237 } 2238 2239 if (Value *V = alreadyVectorized(E->Scalars)) 2240 return V; 2241 2242 BinaryOperator *BinOp = cast<BinaryOperator>(VL0); 2243 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS); 2244 E->VectorizedValue = V; 2245 propagateIRFlags(E->VectorizedValue, E->Scalars); 2246 ++NumVectorInstructions; 2247 2248 if (Instruction *I = dyn_cast<Instruction>(V)) 2249 return propagateMetadata(I, E->Scalars); 2250 2251 return V; 2252 } 2253 case Instruction::Load: { 2254 // Loads are inserted at the head of the tree because we don't want to 2255 // sink them all the way down past store instructions. 2256 setInsertPointAfterBundle(E->Scalars); 2257 2258 LoadInst *LI = cast<LoadInst>(VL0); 2259 Type *ScalarLoadTy = LI->getType(); 2260 unsigned AS = LI->getPointerAddressSpace(); 2261 2262 Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(), 2263 VecTy->getPointerTo(AS)); 2264 2265 // The pointer operand uses an in-tree scalar so we add the new BitCast to 2266 // ExternalUses list to make sure that an extract will be generated in the 2267 // future. 2268 if (ScalarToTreeEntry.count(LI->getPointerOperand())) 2269 ExternalUses.push_back( 2270 ExternalUser(LI->getPointerOperand(), cast<User>(VecPtr), 0)); 2271 2272 unsigned Alignment = LI->getAlignment(); 2273 LI = Builder.CreateLoad(VecPtr); 2274 if (!Alignment) 2275 Alignment = DL->getABITypeAlignment(ScalarLoadTy); 2276 LI->setAlignment(Alignment); 2277 E->VectorizedValue = LI; 2278 ++NumVectorInstructions; 2279 return propagateMetadata(LI, E->Scalars); 2280 } 2281 case Instruction::Store: { 2282 StoreInst *SI = cast<StoreInst>(VL0); 2283 unsigned Alignment = SI->getAlignment(); 2284 unsigned AS = SI->getPointerAddressSpace(); 2285 2286 ValueList ValueOp; 2287 for (int i = 0, e = E->Scalars.size(); i < e; ++i) 2288 ValueOp.push_back(cast<StoreInst>(E->Scalars[i])->getValueOperand()); 2289 2290 setInsertPointAfterBundle(E->Scalars); 2291 2292 Value *VecValue = vectorizeTree(ValueOp); 2293 Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(), 2294 VecTy->getPointerTo(AS)); 2295 StoreInst *S = Builder.CreateStore(VecValue, VecPtr); 2296 2297 // The pointer operand uses an in-tree scalar so we add the new BitCast to 2298 // ExternalUses list to make sure that an extract will be generated in the 2299 // future. 2300 if (ScalarToTreeEntry.count(SI->getPointerOperand())) 2301 ExternalUses.push_back( 2302 ExternalUser(SI->getPointerOperand(), cast<User>(VecPtr), 0)); 2303 2304 if (!Alignment) 2305 Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType()); 2306 S->setAlignment(Alignment); 2307 E->VectorizedValue = S; 2308 ++NumVectorInstructions; 2309 return propagateMetadata(S, E->Scalars); 2310 } 2311 case Instruction::GetElementPtr: { 2312 setInsertPointAfterBundle(E->Scalars); 2313 2314 ValueList Op0VL; 2315 for (int i = 0, e = E->Scalars.size(); i < e; ++i) 2316 Op0VL.push_back(cast<GetElementPtrInst>(E->Scalars[i])->getOperand(0)); 2317 2318 Value *Op0 = vectorizeTree(Op0VL); 2319 2320 std::vector<Value *> OpVecs; 2321 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e; 2322 ++j) { 2323 ValueList OpVL; 2324 for (int i = 0, e = E->Scalars.size(); i < e; ++i) 2325 OpVL.push_back(cast<GetElementPtrInst>(E->Scalars[i])->getOperand(j)); 2326 2327 Value *OpVec = vectorizeTree(OpVL); 2328 OpVecs.push_back(OpVec); 2329 } 2330 2331 Value *V = Builder.CreateGEP(Op0, OpVecs); 2332 E->VectorizedValue = V; 2333 ++NumVectorInstructions; 2334 2335 if (Instruction *I = dyn_cast<Instruction>(V)) 2336 return propagateMetadata(I, E->Scalars); 2337 2338 return V; 2339 } 2340 case Instruction::Call: { 2341 CallInst *CI = cast<CallInst>(VL0); 2342 setInsertPointAfterBundle(E->Scalars); 2343 Function *FI; 2344 Intrinsic::ID IID = Intrinsic::not_intrinsic; 2345 Value *ScalarArg = nullptr; 2346 if (CI && (FI = CI->getCalledFunction())) { 2347 IID = (Intrinsic::ID) FI->getIntrinsicID(); 2348 } 2349 std::vector<Value *> OpVecs; 2350 for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) { 2351 ValueList OpVL; 2352 // ctlz,cttz and powi are special intrinsics whose second argument is 2353 // a scalar. This argument should not be vectorized. 2354 if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) { 2355 CallInst *CEI = cast<CallInst>(E->Scalars[0]); 2356 ScalarArg = CEI->getArgOperand(j); 2357 OpVecs.push_back(CEI->getArgOperand(j)); 2358 continue; 2359 } 2360 for (int i = 0, e = E->Scalars.size(); i < e; ++i) { 2361 CallInst *CEI = cast<CallInst>(E->Scalars[i]); 2362 OpVL.push_back(CEI->getArgOperand(j)); 2363 } 2364 2365 Value *OpVec = vectorizeTree(OpVL); 2366 DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); 2367 OpVecs.push_back(OpVec); 2368 } 2369 2370 Module *M = F->getParent(); 2371 Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI); 2372 Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) }; 2373 Function *CF = Intrinsic::getDeclaration(M, ID, Tys); 2374 Value *V = Builder.CreateCall(CF, OpVecs); 2375 2376 // The scalar argument uses an in-tree scalar so we add the new vectorized 2377 // call to ExternalUses list to make sure that an extract will be 2378 // generated in the future. 2379 if (ScalarArg && ScalarToTreeEntry.count(ScalarArg)) 2380 ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0)); 2381 2382 E->VectorizedValue = V; 2383 ++NumVectorInstructions; 2384 return V; 2385 } 2386 case Instruction::ShuffleVector: { 2387 ValueList LHSVL, RHSVL; 2388 assert(isa<BinaryOperator>(VL0) && "Invalid Shuffle Vector Operand"); 2389 reorderAltShuffleOperands(E->Scalars, LHSVL, RHSVL); 2390 setInsertPointAfterBundle(E->Scalars); 2391 2392 Value *LHS = vectorizeTree(LHSVL); 2393 Value *RHS = vectorizeTree(RHSVL); 2394 2395 if (Value *V = alreadyVectorized(E->Scalars)) 2396 return V; 2397 2398 // Create a vector of LHS op1 RHS 2399 BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0); 2400 Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS); 2401 2402 // Create a vector of LHS op2 RHS 2403 Instruction *VL1 = cast<Instruction>(E->Scalars[1]); 2404 BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1); 2405 Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS); 2406 2407 // Create shuffle to take alternate operations from the vector. 2408 // Also, gather up odd and even scalar ops to propagate IR flags to 2409 // each vector operation. 2410 ValueList OddScalars, EvenScalars; 2411 unsigned e = E->Scalars.size(); 2412 SmallVector<Constant *, 8> Mask(e); 2413 for (unsigned i = 0; i < e; ++i) { 2414 if (i & 1) { 2415 Mask[i] = Builder.getInt32(e + i); 2416 OddScalars.push_back(E->Scalars[i]); 2417 } else { 2418 Mask[i] = Builder.getInt32(i); 2419 EvenScalars.push_back(E->Scalars[i]); 2420 } 2421 } 2422 2423 Value *ShuffleMask = ConstantVector::get(Mask); 2424 propagateIRFlags(V0, EvenScalars); 2425 propagateIRFlags(V1, OddScalars); 2426 2427 Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask); 2428 E->VectorizedValue = V; 2429 ++NumVectorInstructions; 2430 if (Instruction *I = dyn_cast<Instruction>(V)) 2431 return propagateMetadata(I, E->Scalars); 2432 2433 return V; 2434 } 2435 default: 2436 llvm_unreachable("unknown inst"); 2437 } 2438 return nullptr; 2439 } 2440 2441 Value *BoUpSLP::vectorizeTree() { 2442 2443 // All blocks must be scheduled before any instructions are inserted. 2444 for (auto &BSIter : BlocksSchedules) { 2445 scheduleBlock(BSIter.second.get()); 2446 } 2447 2448 Builder.SetInsertPoint(F->getEntryBlock().begin()); 2449 vectorizeTree(&VectorizableTree[0]); 2450 2451 DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values .\n"); 2452 2453 // Extract all of the elements with the external uses. 2454 for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end(); 2455 it != e; ++it) { 2456 Value *Scalar = it->Scalar; 2457 llvm::User *User = it->User; 2458 2459 // Skip users that we already RAUW. This happens when one instruction 2460 // has multiple uses of the same value. 2461 if (std::find(Scalar->user_begin(), Scalar->user_end(), User) == 2462 Scalar->user_end()) 2463 continue; 2464 assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar"); 2465 2466 int Idx = ScalarToTreeEntry[Scalar]; 2467 TreeEntry *E = &VectorizableTree[Idx]; 2468 assert(!E->NeedToGather && "Extracting from a gather list"); 2469 2470 Value *Vec = E->VectorizedValue; 2471 assert(Vec && "Can't find vectorizable value"); 2472 2473 Value *Lane = Builder.getInt32(it->Lane); 2474 // Generate extracts for out-of-tree users. 2475 // Find the insertion point for the extractelement lane. 2476 if (isa<Instruction>(Vec)){ 2477 if (PHINode *PH = dyn_cast<PHINode>(User)) { 2478 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { 2479 if (PH->getIncomingValue(i) == Scalar) { 2480 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); 2481 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 2482 CSEBlocks.insert(PH->getIncomingBlock(i)); 2483 PH->setOperand(i, Ex); 2484 } 2485 } 2486 } else { 2487 Builder.SetInsertPoint(cast<Instruction>(User)); 2488 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 2489 CSEBlocks.insert(cast<Instruction>(User)->getParent()); 2490 User->replaceUsesOfWith(Scalar, Ex); 2491 } 2492 } else { 2493 Builder.SetInsertPoint(F->getEntryBlock().begin()); 2494 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 2495 CSEBlocks.insert(&F->getEntryBlock()); 2496 User->replaceUsesOfWith(Scalar, Ex); 2497 } 2498 2499 DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 2500 } 2501 2502 // For each vectorized value: 2503 for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) { 2504 TreeEntry *Entry = &VectorizableTree[EIdx]; 2505 2506 // For each lane: 2507 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 2508 Value *Scalar = Entry->Scalars[Lane]; 2509 // No need to handle users of gathered values. 2510 if (Entry->NeedToGather) 2511 continue; 2512 2513 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 2514 2515 Type *Ty = Scalar->getType(); 2516 if (!Ty->isVoidTy()) { 2517 #ifndef NDEBUG 2518 for (User *U : Scalar->users()) { 2519 DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 2520 2521 assert((ScalarToTreeEntry.count(U) || 2522 // It is legal to replace users in the ignorelist by undef. 2523 (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), U) != 2524 UserIgnoreList.end())) && 2525 "Replacing out-of-tree value with undef"); 2526 } 2527 #endif 2528 Value *Undef = UndefValue::get(Ty); 2529 Scalar->replaceAllUsesWith(Undef); 2530 } 2531 DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 2532 eraseInstruction(cast<Instruction>(Scalar)); 2533 } 2534 } 2535 2536 Builder.ClearInsertionPoint(); 2537 2538 return VectorizableTree[0].VectorizedValue; 2539 } 2540 2541 void BoUpSLP::optimizeGatherSequence() { 2542 DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size() 2543 << " gather sequences instructions.\n"); 2544 // LICM InsertElementInst sequences. 2545 for (SetVector<Instruction *>::iterator it = GatherSeq.begin(), 2546 e = GatherSeq.end(); it != e; ++it) { 2547 InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it); 2548 2549 if (!Insert) 2550 continue; 2551 2552 // Check if this block is inside a loop. 2553 Loop *L = LI->getLoopFor(Insert->getParent()); 2554 if (!L) 2555 continue; 2556 2557 // Check if it has a preheader. 2558 BasicBlock *PreHeader = L->getLoopPreheader(); 2559 if (!PreHeader) 2560 continue; 2561 2562 // If the vector or the element that we insert into it are 2563 // instructions that are defined in this basic block then we can't 2564 // hoist this instruction. 2565 Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0)); 2566 Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1)); 2567 if (CurrVec && L->contains(CurrVec)) 2568 continue; 2569 if (NewElem && L->contains(NewElem)) 2570 continue; 2571 2572 // We can hoist this instruction. Move it to the pre-header. 2573 Insert->moveBefore(PreHeader->getTerminator()); 2574 } 2575 2576 // Make a list of all reachable blocks in our CSE queue. 2577 SmallVector<const DomTreeNode *, 8> CSEWorkList; 2578 CSEWorkList.reserve(CSEBlocks.size()); 2579 for (BasicBlock *BB : CSEBlocks) 2580 if (DomTreeNode *N = DT->getNode(BB)) { 2581 assert(DT->isReachableFromEntry(N)); 2582 CSEWorkList.push_back(N); 2583 } 2584 2585 // Sort blocks by domination. This ensures we visit a block after all blocks 2586 // dominating it are visited. 2587 std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(), 2588 [this](const DomTreeNode *A, const DomTreeNode *B) { 2589 return DT->properlyDominates(A, B); 2590 }); 2591 2592 // Perform O(N^2) search over the gather sequences and merge identical 2593 // instructions. TODO: We can further optimize this scan if we split the 2594 // instructions into different buckets based on the insert lane. 2595 SmallVector<Instruction *, 16> Visited; 2596 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 2597 assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 2598 "Worklist not sorted properly!"); 2599 BasicBlock *BB = (*I)->getBlock(); 2600 // For all instructions in blocks containing gather sequences: 2601 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) { 2602 Instruction *In = it++; 2603 if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In)) 2604 continue; 2605 2606 // Check if we can replace this instruction with any of the 2607 // visited instructions. 2608 for (SmallVectorImpl<Instruction *>::iterator v = Visited.begin(), 2609 ve = Visited.end(); 2610 v != ve; ++v) { 2611 if (In->isIdenticalTo(*v) && 2612 DT->dominates((*v)->getParent(), In->getParent())) { 2613 In->replaceAllUsesWith(*v); 2614 eraseInstruction(In); 2615 In = nullptr; 2616 break; 2617 } 2618 } 2619 if (In) { 2620 assert(std::find(Visited.begin(), Visited.end(), In) == Visited.end()); 2621 Visited.push_back(In); 2622 } 2623 } 2624 } 2625 CSEBlocks.clear(); 2626 GatherSeq.clear(); 2627 } 2628 2629 // Groups the instructions to a bundle (which is then a single scheduling entity) 2630 // and schedules instructions until the bundle gets ready. 2631 bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, 2632 BoUpSLP *SLP) { 2633 if (isa<PHINode>(VL[0])) 2634 return true; 2635 2636 // Initialize the instruction bundle. 2637 Instruction *OldScheduleEnd = ScheduleEnd; 2638 ScheduleData *PrevInBundle = nullptr; 2639 ScheduleData *Bundle = nullptr; 2640 bool ReSchedule = false; 2641 DEBUG(dbgs() << "SLP: bundle: " << *VL[0] << "\n"); 2642 for (Value *V : VL) { 2643 extendSchedulingRegion(V); 2644 ScheduleData *BundleMember = getScheduleData(V); 2645 assert(BundleMember && 2646 "no ScheduleData for bundle member (maybe not in same basic block)"); 2647 if (BundleMember->IsScheduled) { 2648 // A bundle member was scheduled as single instruction before and now 2649 // needs to be scheduled as part of the bundle. We just get rid of the 2650 // existing schedule. 2651 DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember 2652 << " was already scheduled\n"); 2653 ReSchedule = true; 2654 } 2655 assert(BundleMember->isSchedulingEntity() && 2656 "bundle member already part of other bundle"); 2657 if (PrevInBundle) { 2658 PrevInBundle->NextInBundle = BundleMember; 2659 } else { 2660 Bundle = BundleMember; 2661 } 2662 BundleMember->UnscheduledDepsInBundle = 0; 2663 Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps; 2664 2665 // Group the instructions to a bundle. 2666 BundleMember->FirstInBundle = Bundle; 2667 PrevInBundle = BundleMember; 2668 } 2669 if (ScheduleEnd != OldScheduleEnd) { 2670 // The scheduling region got new instructions at the lower end (or it is a 2671 // new region for the first bundle). This makes it necessary to 2672 // recalculate all dependencies. 2673 // It is seldom that this needs to be done a second time after adding the 2674 // initial bundle to the region. 2675 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 2676 ScheduleData *SD = getScheduleData(I); 2677 SD->clearDependencies(); 2678 } 2679 ReSchedule = true; 2680 } 2681 if (ReSchedule) { 2682 resetSchedule(); 2683 initialFillReadyList(ReadyInsts); 2684 } 2685 2686 DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block " 2687 << BB->getName() << "\n"); 2688 2689 calculateDependencies(Bundle, true, SLP); 2690 2691 // Now try to schedule the new bundle. As soon as the bundle is "ready" it 2692 // means that there are no cyclic dependencies and we can schedule it. 2693 // Note that's important that we don't "schedule" the bundle yet (see 2694 // cancelScheduling). 2695 while (!Bundle->isReady() && !ReadyInsts.empty()) { 2696 2697 ScheduleData *pickedSD = ReadyInsts.back(); 2698 ReadyInsts.pop_back(); 2699 2700 if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) { 2701 schedule(pickedSD, ReadyInsts); 2702 } 2703 } 2704 return Bundle->isReady(); 2705 } 2706 2707 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL) { 2708 if (isa<PHINode>(VL[0])) 2709 return; 2710 2711 ScheduleData *Bundle = getScheduleData(VL[0]); 2712 DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); 2713 assert(!Bundle->IsScheduled && 2714 "Can't cancel bundle which is already scheduled"); 2715 assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() && 2716 "tried to unbundle something which is not a bundle"); 2717 2718 // Un-bundle: make single instructions out of the bundle. 2719 ScheduleData *BundleMember = Bundle; 2720 while (BundleMember) { 2721 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); 2722 BundleMember->FirstInBundle = BundleMember; 2723 ScheduleData *Next = BundleMember->NextInBundle; 2724 BundleMember->NextInBundle = nullptr; 2725 BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps; 2726 if (BundleMember->UnscheduledDepsInBundle == 0) { 2727 ReadyInsts.insert(BundleMember); 2728 } 2729 BundleMember = Next; 2730 } 2731 } 2732 2733 void BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V) { 2734 if (getScheduleData(V)) 2735 return; 2736 Instruction *I = dyn_cast<Instruction>(V); 2737 assert(I && "bundle member must be an instruction"); 2738 assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled"); 2739 if (!ScheduleStart) { 2740 // It's the first instruction in the new region. 2741 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 2742 ScheduleStart = I; 2743 ScheduleEnd = I->getNextNode(); 2744 assert(ScheduleEnd && "tried to vectorize a TerminatorInst?"); 2745 DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 2746 return; 2747 } 2748 // Search up and down at the same time, because we don't know if the new 2749 // instruction is above or below the existing scheduling region. 2750 BasicBlock::reverse_iterator UpIter(ScheduleStart); 2751 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 2752 BasicBlock::iterator DownIter(ScheduleEnd); 2753 BasicBlock::iterator LowerEnd = BB->end(); 2754 for (;;) { 2755 if (UpIter != UpperEnd) { 2756 if (&*UpIter == I) { 2757 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 2758 ScheduleStart = I; 2759 DEBUG(dbgs() << "SLP: extend schedule region start to " << *I << "\n"); 2760 return; 2761 } 2762 UpIter++; 2763 } 2764 if (DownIter != LowerEnd) { 2765 if (&*DownIter == I) { 2766 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 2767 nullptr); 2768 ScheduleEnd = I->getNextNode(); 2769 assert(ScheduleEnd && "tried to vectorize a TerminatorInst?"); 2770 DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); 2771 return; 2772 } 2773 DownIter++; 2774 } 2775 assert((UpIter != UpperEnd || DownIter != LowerEnd) && 2776 "instruction not found in block"); 2777 } 2778 } 2779 2780 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 2781 Instruction *ToI, 2782 ScheduleData *PrevLoadStore, 2783 ScheduleData *NextLoadStore) { 2784 ScheduleData *CurrentLoadStore = PrevLoadStore; 2785 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 2786 ScheduleData *SD = ScheduleDataMap[I]; 2787 if (!SD) { 2788 // Allocate a new ScheduleData for the instruction. 2789 if (ChunkPos >= ChunkSize) { 2790 ScheduleDataChunks.push_back( 2791 llvm::make_unique<ScheduleData[]>(ChunkSize)); 2792 ChunkPos = 0; 2793 } 2794 SD = &(ScheduleDataChunks.back()[ChunkPos++]); 2795 ScheduleDataMap[I] = SD; 2796 SD->Inst = I; 2797 } 2798 assert(!isInSchedulingRegion(SD) && 2799 "new ScheduleData already in scheduling region"); 2800 SD->init(SchedulingRegionID); 2801 2802 if (I->mayReadOrWriteMemory()) { 2803 // Update the linked list of memory accessing instructions. 2804 if (CurrentLoadStore) { 2805 CurrentLoadStore->NextLoadStore = SD; 2806 } else { 2807 FirstLoadStoreInRegion = SD; 2808 } 2809 CurrentLoadStore = SD; 2810 } 2811 } 2812 if (NextLoadStore) { 2813 if (CurrentLoadStore) 2814 CurrentLoadStore->NextLoadStore = NextLoadStore; 2815 } else { 2816 LastLoadStoreInRegion = CurrentLoadStore; 2817 } 2818 } 2819 2820 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, 2821 bool InsertInReadyList, 2822 BoUpSLP *SLP) { 2823 assert(SD->isSchedulingEntity()); 2824 2825 SmallVector<ScheduleData *, 10> WorkList; 2826 WorkList.push_back(SD); 2827 2828 while (!WorkList.empty()) { 2829 ScheduleData *SD = WorkList.back(); 2830 WorkList.pop_back(); 2831 2832 ScheduleData *BundleMember = SD; 2833 while (BundleMember) { 2834 assert(isInSchedulingRegion(BundleMember)); 2835 if (!BundleMember->hasValidDependencies()) { 2836 2837 DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n"); 2838 BundleMember->Dependencies = 0; 2839 BundleMember->resetUnscheduledDeps(); 2840 2841 // Handle def-use chain dependencies. 2842 for (User *U : BundleMember->Inst->users()) { 2843 if (isa<Instruction>(U)) { 2844 ScheduleData *UseSD = getScheduleData(U); 2845 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 2846 BundleMember->Dependencies++; 2847 ScheduleData *DestBundle = UseSD->FirstInBundle; 2848 if (!DestBundle->IsScheduled) { 2849 BundleMember->incrementUnscheduledDeps(1); 2850 } 2851 if (!DestBundle->hasValidDependencies()) { 2852 WorkList.push_back(DestBundle); 2853 } 2854 } 2855 } else { 2856 // I'm not sure if this can ever happen. But we need to be safe. 2857 // This lets the instruction/bundle never be scheduled and eventally 2858 // disable vectorization. 2859 BundleMember->Dependencies++; 2860 BundleMember->incrementUnscheduledDeps(1); 2861 } 2862 } 2863 2864 // Handle the memory dependencies. 2865 ScheduleData *DepDest = BundleMember->NextLoadStore; 2866 if (DepDest) { 2867 Instruction *SrcInst = BundleMember->Inst; 2868 AliasAnalysis::Location SrcLoc = getLocation(SrcInst, SLP->AA); 2869 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory(); 2870 unsigned numAliased = 0; 2871 2872 while (DepDest) { 2873 assert(isInSchedulingRegion(DepDest)); 2874 if (SrcMayWrite || DepDest->Inst->mayWriteToMemory()) { 2875 2876 // Limit the number of alias checks, becaus SLP->isAliased() is 2877 // the expensive part in the following loop. 2878 if (numAliased >= AliasedCheckLimit 2879 || SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)) { 2880 2881 // We increment the counter only if the locations are aliased 2882 // (instead of counting all alias checks). This gives a better 2883 // balance between reduced runtime accurate dependencies. 2884 numAliased++; 2885 2886 DepDest->MemoryDependencies.push_back(BundleMember); 2887 BundleMember->Dependencies++; 2888 ScheduleData *DestBundle = DepDest->FirstInBundle; 2889 if (!DestBundle->IsScheduled) { 2890 BundleMember->incrementUnscheduledDeps(1); 2891 } 2892 if (!DestBundle->hasValidDependencies()) { 2893 WorkList.push_back(DestBundle); 2894 } 2895 } 2896 } 2897 DepDest = DepDest->NextLoadStore; 2898 } 2899 } 2900 } 2901 BundleMember = BundleMember->NextInBundle; 2902 } 2903 if (InsertInReadyList && SD->isReady()) { 2904 ReadyInsts.push_back(SD); 2905 DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n"); 2906 } 2907 } 2908 } 2909 2910 void BoUpSLP::BlockScheduling::resetSchedule() { 2911 assert(ScheduleStart && 2912 "tried to reset schedule on block which has not been scheduled"); 2913 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 2914 ScheduleData *SD = getScheduleData(I); 2915 assert(isInSchedulingRegion(SD)); 2916 SD->IsScheduled = false; 2917 SD->resetUnscheduledDeps(); 2918 } 2919 ReadyInsts.clear(); 2920 } 2921 2922 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 2923 2924 if (!BS->ScheduleStart) 2925 return; 2926 2927 DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 2928 2929 BS->resetSchedule(); 2930 2931 // For the real scheduling we use a more sophisticated ready-list: it is 2932 // sorted by the original instruction location. This lets the final schedule 2933 // be as close as possible to the original instruction order. 2934 struct ScheduleDataCompare { 2935 bool operator()(ScheduleData *SD1, ScheduleData *SD2) { 2936 return SD2->SchedulingPriority < SD1->SchedulingPriority; 2937 } 2938 }; 2939 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 2940 2941 // Ensure that all depencency data is updated and fill the ready-list with 2942 // initial instructions. 2943 int Idx = 0; 2944 int NumToSchedule = 0; 2945 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 2946 I = I->getNextNode()) { 2947 ScheduleData *SD = BS->getScheduleData(I); 2948 assert( 2949 SD->isPartOfBundle() == (ScalarToTreeEntry.count(SD->Inst) != 0) && 2950 "scheduler and vectorizer have different opinion on what is a bundle"); 2951 SD->FirstInBundle->SchedulingPriority = Idx++; 2952 if (SD->isSchedulingEntity()) { 2953 BS->calculateDependencies(SD, false, this); 2954 NumToSchedule++; 2955 } 2956 } 2957 BS->initialFillReadyList(ReadyInsts); 2958 2959 Instruction *LastScheduledInst = BS->ScheduleEnd; 2960 2961 // Do the "real" scheduling. 2962 while (!ReadyInsts.empty()) { 2963 ScheduleData *picked = *ReadyInsts.begin(); 2964 ReadyInsts.erase(ReadyInsts.begin()); 2965 2966 // Move the scheduled instruction(s) to their dedicated places, if not 2967 // there yet. 2968 ScheduleData *BundleMember = picked; 2969 while (BundleMember) { 2970 Instruction *pickedInst = BundleMember->Inst; 2971 if (LastScheduledInst->getNextNode() != pickedInst) { 2972 BS->BB->getInstList().remove(pickedInst); 2973 BS->BB->getInstList().insert(LastScheduledInst, pickedInst); 2974 } 2975 LastScheduledInst = pickedInst; 2976 BundleMember = BundleMember->NextInBundle; 2977 } 2978 2979 BS->schedule(picked, ReadyInsts); 2980 NumToSchedule--; 2981 } 2982 assert(NumToSchedule == 0 && "could not schedule all instructions"); 2983 2984 // Avoid duplicate scheduling of the block. 2985 BS->ScheduleStart = nullptr; 2986 } 2987 2988 /// The SLPVectorizer Pass. 2989 struct SLPVectorizer : public FunctionPass { 2990 typedef SmallVector<StoreInst *, 8> StoreList; 2991 typedef MapVector<Value *, StoreList> StoreListMap; 2992 2993 /// Pass identification, replacement for typeid 2994 static char ID; 2995 2996 explicit SLPVectorizer() : FunctionPass(ID) { 2997 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry()); 2998 } 2999 3000 ScalarEvolution *SE; 3001 const DataLayout *DL; 3002 TargetTransformInfo *TTI; 3003 TargetLibraryInfo *TLI; 3004 AliasAnalysis *AA; 3005 LoopInfo *LI; 3006 DominatorTree *DT; 3007 AssumptionCache *AC; 3008 3009 bool runOnFunction(Function &F) override { 3010 if (skipOptnoneFunction(F)) 3011 return false; 3012 3013 SE = &getAnalysis<ScalarEvolution>(); 3014 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>(); 3015 DL = DLP ? &DLP->getDataLayout() : nullptr; 3016 TTI = &getAnalysis<TargetTransformInfo>(); 3017 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 3018 TLI = TLIP ? &TLIP->getTLI() : nullptr; 3019 AA = &getAnalysis<AliasAnalysis>(); 3020 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 3021 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 3022 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 3023 3024 StoreRefs.clear(); 3025 bool Changed = false; 3026 3027 // If the target claims to have no vector registers don't attempt 3028 // vectorization. 3029 if (!TTI->getNumberOfRegisters(true)) 3030 return false; 3031 3032 // Must have DataLayout. We can't require it because some tests run w/o 3033 // triple. 3034 if (!DL) 3035 return false; 3036 3037 // Don't vectorize when the attribute NoImplicitFloat is used. 3038 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 3039 return false; 3040 3041 DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 3042 3043 // Use the bottom up slp vectorizer to construct chains that start with 3044 // store instructions. 3045 BoUpSLP R(&F, SE, DL, TTI, TLI, AA, LI, DT, AC); 3046 3047 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 3048 // delete instructions. 3049 3050 // Scan the blocks in the function in post order. 3051 for (po_iterator<BasicBlock*> it = po_begin(&F.getEntryBlock()), 3052 e = po_end(&F.getEntryBlock()); it != e; ++it) { 3053 BasicBlock *BB = *it; 3054 // Vectorize trees that end at stores. 3055 if (unsigned count = collectStores(BB, R)) { 3056 (void)count; 3057 DEBUG(dbgs() << "SLP: Found " << count << " stores to vectorize.\n"); 3058 Changed |= vectorizeStoreChains(R); 3059 } 3060 3061 // Vectorize trees that end at reductions. 3062 Changed |= vectorizeChainsInBlock(BB, R); 3063 } 3064 3065 if (Changed) { 3066 R.optimizeGatherSequence(); 3067 DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 3068 DEBUG(verifyFunction(F)); 3069 } 3070 return Changed; 3071 } 3072 3073 void getAnalysisUsage(AnalysisUsage &AU) const override { 3074 FunctionPass::getAnalysisUsage(AU); 3075 AU.addRequired<AssumptionCacheTracker>(); 3076 AU.addRequired<ScalarEvolution>(); 3077 AU.addRequired<AliasAnalysis>(); 3078 AU.addRequired<TargetTransformInfo>(); 3079 AU.addRequired<LoopInfoWrapperPass>(); 3080 AU.addRequired<DominatorTreeWrapperPass>(); 3081 AU.addPreserved<LoopInfoWrapperPass>(); 3082 AU.addPreserved<DominatorTreeWrapperPass>(); 3083 AU.setPreservesCFG(); 3084 } 3085 3086 private: 3087 3088 /// \brief Collect memory references and sort them according to their base 3089 /// object. We sort the stores to their base objects to reduce the cost of the 3090 /// quadratic search on the stores. TODO: We can further reduce this cost 3091 /// if we flush the chain creation every time we run into a memory barrier. 3092 unsigned collectStores(BasicBlock *BB, BoUpSLP &R); 3093 3094 /// \brief Try to vectorize a chain that starts at two arithmetic instrs. 3095 bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R); 3096 3097 /// \brief Try to vectorize a list of operands. 3098 /// \@param BuildVector A list of users to ignore for the purpose of 3099 /// scheduling and that don't need extracting. 3100 /// \returns true if a value was vectorized. 3101 bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 3102 ArrayRef<Value *> BuildVector = None, 3103 bool allowReorder = false); 3104 3105 /// \brief Try to vectorize a chain that may start at the operands of \V; 3106 bool tryToVectorize(BinaryOperator *V, BoUpSLP &R); 3107 3108 /// \brief Vectorize the stores that were collected in StoreRefs. 3109 bool vectorizeStoreChains(BoUpSLP &R); 3110 3111 /// \brief Scan the basic block and look for patterns that are likely to start 3112 /// a vectorization chain. 3113 bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R); 3114 3115 bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold, 3116 BoUpSLP &R); 3117 3118 bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold, 3119 BoUpSLP &R); 3120 private: 3121 StoreListMap StoreRefs; 3122 }; 3123 3124 /// \brief Check that the Values in the slice in VL array are still existent in 3125 /// the WeakVH array. 3126 /// Vectorization of part of the VL array may cause later values in the VL array 3127 /// to become invalid. We track when this has happened in the WeakVH array. 3128 static bool hasValueBeenRAUWed(ArrayRef<Value *> &VL, 3129 SmallVectorImpl<WeakVH> &VH, 3130 unsigned SliceBegin, 3131 unsigned SliceSize) { 3132 for (unsigned i = SliceBegin; i < SliceBegin + SliceSize; ++i) 3133 if (VH[i] != VL[i]) 3134 return true; 3135 3136 return false; 3137 } 3138 3139 bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain, 3140 int CostThreshold, BoUpSLP &R) { 3141 unsigned ChainLen = Chain.size(); 3142 DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen 3143 << "\n"); 3144 Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType(); 3145 unsigned Sz = DL->getTypeSizeInBits(StoreTy); 3146 unsigned VF = MinVecRegSize / Sz; 3147 3148 if (!isPowerOf2_32(Sz) || VF < 2) 3149 return false; 3150 3151 // Keep track of values that were deleted by vectorizing in the loop below. 3152 SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end()); 3153 3154 bool Changed = false; 3155 // Look for profitable vectorizable trees at all offsets, starting at zero. 3156 for (unsigned i = 0, e = ChainLen; i < e; ++i) { 3157 if (i + VF > e) 3158 break; 3159 3160 // Check that a previous iteration of this loop did not delete the Value. 3161 if (hasValueBeenRAUWed(Chain, TrackValues, i, VF)) 3162 continue; 3163 3164 DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i 3165 << "\n"); 3166 ArrayRef<Value *> Operands = Chain.slice(i, VF); 3167 3168 R.buildTree(Operands); 3169 3170 int Cost = R.getTreeCost(); 3171 3172 DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n"); 3173 if (Cost < CostThreshold) { 3174 DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n"); 3175 R.vectorizeTree(); 3176 3177 // Move to the next bundle. 3178 i += VF - 1; 3179 Changed = true; 3180 } 3181 } 3182 3183 return Changed; 3184 } 3185 3186 bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores, 3187 int costThreshold, BoUpSLP &R) { 3188 SetVector<Value *> Heads, Tails; 3189 SmallDenseMap<Value *, Value *> ConsecutiveChain; 3190 3191 // We may run into multiple chains that merge into a single chain. We mark the 3192 // stores that we vectorized so that we don't visit the same store twice. 3193 BoUpSLP::ValueSet VectorizedStores; 3194 bool Changed = false; 3195 3196 // Do a quadratic search on all of the given stores and find 3197 // all of the pairs of stores that follow each other. 3198 for (unsigned i = 0, e = Stores.size(); i < e; ++i) { 3199 for (unsigned j = 0; j < e; ++j) { 3200 if (i == j) 3201 continue; 3202 3203 if (R.isConsecutiveAccess(Stores[i], Stores[j])) { 3204 Tails.insert(Stores[j]); 3205 Heads.insert(Stores[i]); 3206 ConsecutiveChain[Stores[i]] = Stores[j]; 3207 } 3208 } 3209 } 3210 3211 // For stores that start but don't end a link in the chain: 3212 for (SetVector<Value *>::iterator it = Heads.begin(), e = Heads.end(); 3213 it != e; ++it) { 3214 if (Tails.count(*it)) 3215 continue; 3216 3217 // We found a store instr that starts a chain. Now follow the chain and try 3218 // to vectorize it. 3219 BoUpSLP::ValueList Operands; 3220 Value *I = *it; 3221 // Collect the chain into a list. 3222 while (Tails.count(I) || Heads.count(I)) { 3223 if (VectorizedStores.count(I)) 3224 break; 3225 Operands.push_back(I); 3226 // Move to the next value in the chain. 3227 I = ConsecutiveChain[I]; 3228 } 3229 3230 bool Vectorized = vectorizeStoreChain(Operands, costThreshold, R); 3231 3232 // Mark the vectorized stores so that we don't vectorize them again. 3233 if (Vectorized) 3234 VectorizedStores.insert(Operands.begin(), Operands.end()); 3235 Changed |= Vectorized; 3236 } 3237 3238 return Changed; 3239 } 3240 3241 3242 unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) { 3243 unsigned count = 0; 3244 StoreRefs.clear(); 3245 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 3246 StoreInst *SI = dyn_cast<StoreInst>(it); 3247 if (!SI) 3248 continue; 3249 3250 // Don't touch volatile stores. 3251 if (!SI->isSimple()) 3252 continue; 3253 3254 // Check that the pointer points to scalars. 3255 Type *Ty = SI->getValueOperand()->getType(); 3256 if (Ty->isAggregateType() || Ty->isVectorTy()) 3257 continue; 3258 3259 // Find the base pointer. 3260 Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL); 3261 3262 // Save the store locations. 3263 StoreRefs[Ptr].push_back(SI); 3264 count++; 3265 } 3266 return count; 3267 } 3268 3269 bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) { 3270 if (!A || !B) 3271 return false; 3272 Value *VL[] = { A, B }; 3273 return tryToVectorizeList(VL, R, None, true); 3274 } 3275 3276 bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 3277 ArrayRef<Value *> BuildVector, 3278 bool allowReorder) { 3279 if (VL.size() < 2) 3280 return false; 3281 3282 DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size() << ".\n"); 3283 3284 // Check that all of the parts are scalar instructions of the same type. 3285 Instruction *I0 = dyn_cast<Instruction>(VL[0]); 3286 if (!I0) 3287 return false; 3288 3289 unsigned Opcode0 = I0->getOpcode(); 3290 3291 Type *Ty0 = I0->getType(); 3292 unsigned Sz = DL->getTypeSizeInBits(Ty0); 3293 unsigned VF = MinVecRegSize / Sz; 3294 3295 for (int i = 0, e = VL.size(); i < e; ++i) { 3296 Type *Ty = VL[i]->getType(); 3297 if (Ty->isAggregateType() || Ty->isVectorTy()) 3298 return false; 3299 Instruction *Inst = dyn_cast<Instruction>(VL[i]); 3300 if (!Inst || Inst->getOpcode() != Opcode0) 3301 return false; 3302 } 3303 3304 bool Changed = false; 3305 3306 // Keep track of values that were deleted by vectorizing in the loop below. 3307 SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end()); 3308 3309 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 3310 unsigned OpsWidth = 0; 3311 3312 if (i + VF > e) 3313 OpsWidth = e - i; 3314 else 3315 OpsWidth = VF; 3316 3317 if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2) 3318 break; 3319 3320 // Check that a previous iteration of this loop did not delete the Value. 3321 if (hasValueBeenRAUWed(VL, TrackValues, i, OpsWidth)) 3322 continue; 3323 3324 DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations " 3325 << "\n"); 3326 ArrayRef<Value *> Ops = VL.slice(i, OpsWidth); 3327 3328 ArrayRef<Value *> BuildVectorSlice; 3329 if (!BuildVector.empty()) 3330 BuildVectorSlice = BuildVector.slice(i, OpsWidth); 3331 3332 R.buildTree(Ops, BuildVectorSlice); 3333 // TODO: check if we can allow reordering also for other cases than 3334 // tryToVectorizePair() 3335 if (allowReorder && R.shouldReorder()) { 3336 assert(Ops.size() == 2); 3337 assert(BuildVectorSlice.empty()); 3338 Value *ReorderedOps[] = { Ops[1], Ops[0] }; 3339 R.buildTree(ReorderedOps, None); 3340 } 3341 int Cost = R.getTreeCost(); 3342 3343 if (Cost < -SLPCostThreshold) { 3344 DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); 3345 Value *VectorizedRoot = R.vectorizeTree(); 3346 3347 // Reconstruct the build vector by extracting the vectorized root. This 3348 // way we handle the case where some elements of the vector are undefined. 3349 // (return (inserelt <4 xi32> (insertelt undef (opd0) 0) (opd1) 2)) 3350 if (!BuildVectorSlice.empty()) { 3351 // The insert point is the last build vector instruction. The vectorized 3352 // root will precede it. This guarantees that we get an instruction. The 3353 // vectorized tree could have been constant folded. 3354 Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back()); 3355 unsigned VecIdx = 0; 3356 for (auto &V : BuildVectorSlice) { 3357 IRBuilder<true, NoFolder> Builder( 3358 ++BasicBlock::iterator(InsertAfter)); 3359 InsertElementInst *IE = cast<InsertElementInst>(V); 3360 Instruction *Extract = cast<Instruction>(Builder.CreateExtractElement( 3361 VectorizedRoot, Builder.getInt32(VecIdx++))); 3362 IE->setOperand(1, Extract); 3363 IE->removeFromParent(); 3364 IE->insertAfter(Extract); 3365 InsertAfter = IE; 3366 } 3367 } 3368 // Move to the next bundle. 3369 i += VF - 1; 3370 Changed = true; 3371 } 3372 } 3373 3374 return Changed; 3375 } 3376 3377 bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) { 3378 if (!V) 3379 return false; 3380 3381 // Try to vectorize V. 3382 if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R)) 3383 return true; 3384 3385 BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0)); 3386 BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1)); 3387 // Try to skip B. 3388 if (B && B->hasOneUse()) { 3389 BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); 3390 BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); 3391 if (tryToVectorizePair(A, B0, R)) { 3392 return true; 3393 } 3394 if (tryToVectorizePair(A, B1, R)) { 3395 return true; 3396 } 3397 } 3398 3399 // Try to skip A. 3400 if (A && A->hasOneUse()) { 3401 BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 3402 BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 3403 if (tryToVectorizePair(A0, B, R)) { 3404 return true; 3405 } 3406 if (tryToVectorizePair(A1, B, R)) { 3407 return true; 3408 } 3409 } 3410 return 0; 3411 } 3412 3413 /// \brief Generate a shuffle mask to be used in a reduction tree. 3414 /// 3415 /// \param VecLen The length of the vector to be reduced. 3416 /// \param NumEltsToRdx The number of elements that should be reduced in the 3417 /// vector. 3418 /// \param IsPairwise Whether the reduction is a pairwise or splitting 3419 /// reduction. A pairwise reduction will generate a mask of 3420 /// <0,2,...> or <1,3,..> while a splitting reduction will generate 3421 /// <2,3, undef,undef> for a vector of 4 and NumElts = 2. 3422 /// \param IsLeft True will generate a mask of even elements, odd otherwise. 3423 static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx, 3424 bool IsPairwise, bool IsLeft, 3425 IRBuilder<> &Builder) { 3426 assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask"); 3427 3428 SmallVector<Constant *, 32> ShuffleMask( 3429 VecLen, UndefValue::get(Builder.getInt32Ty())); 3430 3431 if (IsPairwise) 3432 // Build a mask of 0, 2, ... (left) or 1, 3, ... (right). 3433 for (unsigned i = 0; i != NumEltsToRdx; ++i) 3434 ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft); 3435 else 3436 // Move the upper half of the vector to the lower half. 3437 for (unsigned i = 0; i != NumEltsToRdx; ++i) 3438 ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i); 3439 3440 return ConstantVector::get(ShuffleMask); 3441 } 3442 3443 3444 /// Model horizontal reductions. 3445 /// 3446 /// A horizontal reduction is a tree of reduction operations (currently add and 3447 /// fadd) that has operations that can be put into a vector as its leaf. 3448 /// For example, this tree: 3449 /// 3450 /// mul mul mul mul 3451 /// \ / \ / 3452 /// + + 3453 /// \ / 3454 /// + 3455 /// This tree has "mul" as its reduced values and "+" as its reduction 3456 /// operations. A reduction might be feeding into a store or a binary operation 3457 /// feeding a phi. 3458 /// ... 3459 /// \ / 3460 /// + 3461 /// | 3462 /// phi += 3463 /// 3464 /// Or: 3465 /// ... 3466 /// \ / 3467 /// + 3468 /// | 3469 /// *p = 3470 /// 3471 class HorizontalReduction { 3472 SmallVector<Value *, 16> ReductionOps; 3473 SmallVector<Value *, 32> ReducedVals; 3474 3475 BinaryOperator *ReductionRoot; 3476 PHINode *ReductionPHI; 3477 3478 /// The opcode of the reduction. 3479 unsigned ReductionOpcode; 3480 /// The opcode of the values we perform a reduction on. 3481 unsigned ReducedValueOpcode; 3482 /// The width of one full horizontal reduction operation. 3483 unsigned ReduxWidth; 3484 /// Should we model this reduction as a pairwise reduction tree or a tree that 3485 /// splits the vector in halves and adds those halves. 3486 bool IsPairwiseReduction; 3487 3488 public: 3489 HorizontalReduction() 3490 : ReductionRoot(nullptr), ReductionPHI(nullptr), ReductionOpcode(0), 3491 ReducedValueOpcode(0), ReduxWidth(0), IsPairwiseReduction(false) {} 3492 3493 /// \brief Try to find a reduction tree. 3494 bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B, 3495 const DataLayout *DL) { 3496 assert((!Phi || 3497 std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) && 3498 "Thi phi needs to use the binary operator"); 3499 3500 // We could have a initial reductions that is not an add. 3501 // r *= v1 + v2 + v3 + v4 3502 // In such a case start looking for a tree rooted in the first '+'. 3503 if (Phi) { 3504 if (B->getOperand(0) == Phi) { 3505 Phi = nullptr; 3506 B = dyn_cast<BinaryOperator>(B->getOperand(1)); 3507 } else if (B->getOperand(1) == Phi) { 3508 Phi = nullptr; 3509 B = dyn_cast<BinaryOperator>(B->getOperand(0)); 3510 } 3511 } 3512 3513 if (!B) 3514 return false; 3515 3516 Type *Ty = B->getType(); 3517 if (Ty->isVectorTy()) 3518 return false; 3519 3520 ReductionOpcode = B->getOpcode(); 3521 ReducedValueOpcode = 0; 3522 ReduxWidth = MinVecRegSize / DL->getTypeSizeInBits(Ty); 3523 ReductionRoot = B; 3524 ReductionPHI = Phi; 3525 3526 if (ReduxWidth < 4) 3527 return false; 3528 3529 // We currently only support adds. 3530 if (ReductionOpcode != Instruction::Add && 3531 ReductionOpcode != Instruction::FAdd) 3532 return false; 3533 3534 // Post order traverse the reduction tree starting at B. We only handle true 3535 // trees containing only binary operators. 3536 SmallVector<std::pair<BinaryOperator *, unsigned>, 32> Stack; 3537 Stack.push_back(std::make_pair(B, 0)); 3538 while (!Stack.empty()) { 3539 BinaryOperator *TreeN = Stack.back().first; 3540 unsigned EdgeToVist = Stack.back().second++; 3541 bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode; 3542 3543 // Only handle trees in the current basic block. 3544 if (TreeN->getParent() != B->getParent()) 3545 return false; 3546 3547 // Each tree node needs to have one user except for the ultimate 3548 // reduction. 3549 if (!TreeN->hasOneUse() && TreeN != B) 3550 return false; 3551 3552 // Postorder vist. 3553 if (EdgeToVist == 2 || IsReducedValue) { 3554 if (IsReducedValue) { 3555 // Make sure that the opcodes of the operations that we are going to 3556 // reduce match. 3557 if (!ReducedValueOpcode) 3558 ReducedValueOpcode = TreeN->getOpcode(); 3559 else if (ReducedValueOpcode != TreeN->getOpcode()) 3560 return false; 3561 ReducedVals.push_back(TreeN); 3562 } else { 3563 // We need to be able to reassociate the adds. 3564 if (!TreeN->isAssociative()) 3565 return false; 3566 ReductionOps.push_back(TreeN); 3567 } 3568 // Retract. 3569 Stack.pop_back(); 3570 continue; 3571 } 3572 3573 // Visit left or right. 3574 Value *NextV = TreeN->getOperand(EdgeToVist); 3575 BinaryOperator *Next = dyn_cast<BinaryOperator>(NextV); 3576 if (Next) 3577 Stack.push_back(std::make_pair(Next, 0)); 3578 else if (NextV != Phi) 3579 return false; 3580 } 3581 return true; 3582 } 3583 3584 /// \brief Attempt to vectorize the tree found by 3585 /// matchAssociativeReduction. 3586 bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) { 3587 if (ReducedVals.empty()) 3588 return false; 3589 3590 unsigned NumReducedVals = ReducedVals.size(); 3591 if (NumReducedVals < ReduxWidth) 3592 return false; 3593 3594 Value *VectorizedTree = nullptr; 3595 IRBuilder<> Builder(ReductionRoot); 3596 FastMathFlags Unsafe; 3597 Unsafe.setUnsafeAlgebra(); 3598 Builder.SetFastMathFlags(Unsafe); 3599 unsigned i = 0; 3600 3601 for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) { 3602 V.buildTree(makeArrayRef(&ReducedVals[i], ReduxWidth), ReductionOps); 3603 3604 // Estimate cost. 3605 int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]); 3606 if (Cost >= -SLPCostThreshold) 3607 break; 3608 3609 DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost 3610 << ". (HorRdx)\n"); 3611 3612 // Vectorize a tree. 3613 DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc(); 3614 Value *VectorizedRoot = V.vectorizeTree(); 3615 3616 // Emit a reduction. 3617 Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder); 3618 if (VectorizedTree) { 3619 Builder.SetCurrentDebugLocation(Loc); 3620 VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree, 3621 ReducedSubTree, "bin.rdx"); 3622 } else 3623 VectorizedTree = ReducedSubTree; 3624 } 3625 3626 if (VectorizedTree) { 3627 // Finish the reduction. 3628 for (; i < NumReducedVals; ++i) { 3629 Builder.SetCurrentDebugLocation( 3630 cast<Instruction>(ReducedVals[i])->getDebugLoc()); 3631 VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree, 3632 ReducedVals[i]); 3633 } 3634 // Update users. 3635 if (ReductionPHI) { 3636 assert(ReductionRoot && "Need a reduction operation"); 3637 ReductionRoot->setOperand(0, VectorizedTree); 3638 ReductionRoot->setOperand(1, ReductionPHI); 3639 } else 3640 ReductionRoot->replaceAllUsesWith(VectorizedTree); 3641 } 3642 return VectorizedTree != nullptr; 3643 } 3644 3645 private: 3646 3647 /// \brief Calcuate the cost of a reduction. 3648 int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) { 3649 Type *ScalarTy = FirstReducedVal->getType(); 3650 Type *VecTy = VectorType::get(ScalarTy, ReduxWidth); 3651 3652 int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true); 3653 int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false); 3654 3655 IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost; 3656 int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost; 3657 3658 int ScalarReduxCost = 3659 ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, VecTy); 3660 3661 DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost 3662 << " for reduction that starts with " << *FirstReducedVal 3663 << " (It is a " 3664 << (IsPairwiseReduction ? "pairwise" : "splitting") 3665 << " reduction)\n"); 3666 3667 return VecReduxCost - ScalarReduxCost; 3668 } 3669 3670 static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L, 3671 Value *R, const Twine &Name = "") { 3672 if (Opcode == Instruction::FAdd) 3673 return Builder.CreateFAdd(L, R, Name); 3674 return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name); 3675 } 3676 3677 /// \brief Emit a horizontal reduction of the vectorized value. 3678 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) { 3679 assert(VectorizedValue && "Need to have a vectorized tree node"); 3680 assert(isPowerOf2_32(ReduxWidth) && 3681 "We only handle power-of-two reductions for now"); 3682 3683 Value *TmpVec = VectorizedValue; 3684 for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) { 3685 if (IsPairwiseReduction) { 3686 Value *LeftMask = 3687 createRdxShuffleMask(ReduxWidth, i, true, true, Builder); 3688 Value *RightMask = 3689 createRdxShuffleMask(ReduxWidth, i, true, false, Builder); 3690 3691 Value *LeftShuf = Builder.CreateShuffleVector( 3692 TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l"); 3693 Value *RightShuf = Builder.CreateShuffleVector( 3694 TmpVec, UndefValue::get(TmpVec->getType()), (RightMask), 3695 "rdx.shuf.r"); 3696 TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf, 3697 "bin.rdx"); 3698 } else { 3699 Value *UpperHalf = 3700 createRdxShuffleMask(ReduxWidth, i, false, false, Builder); 3701 Value *Shuf = Builder.CreateShuffleVector( 3702 TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf"); 3703 TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf, "bin.rdx"); 3704 } 3705 } 3706 3707 // The result is in the first element of the vector. 3708 return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 3709 } 3710 }; 3711 3712 /// \brief Recognize construction of vectors like 3713 /// %ra = insertelement <4 x float> undef, float %s0, i32 0 3714 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 3715 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 3716 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 3717 /// 3718 /// Returns true if it matches 3719 /// 3720 static bool findBuildVector(InsertElementInst *FirstInsertElem, 3721 SmallVectorImpl<Value *> &BuildVector, 3722 SmallVectorImpl<Value *> &BuildVectorOpds) { 3723 if (!isa<UndefValue>(FirstInsertElem->getOperand(0))) 3724 return false; 3725 3726 InsertElementInst *IE = FirstInsertElem; 3727 while (true) { 3728 BuildVector.push_back(IE); 3729 BuildVectorOpds.push_back(IE->getOperand(1)); 3730 3731 if (IE->use_empty()) 3732 return false; 3733 3734 InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back()); 3735 if (!NextUse) 3736 return true; 3737 3738 // If this isn't the final use, make sure the next insertelement is the only 3739 // use. It's OK if the final constructed vector is used multiple times 3740 if (!IE->hasOneUse()) 3741 return false; 3742 3743 IE = NextUse; 3744 } 3745 3746 return false; 3747 } 3748 3749 static bool PhiTypeSorterFunc(Value *V, Value *V2) { 3750 return V->getType() < V2->getType(); 3751 } 3752 3753 bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { 3754 bool Changed = false; 3755 SmallVector<Value *, 4> Incoming; 3756 SmallSet<Value *, 16> VisitedInstrs; 3757 3758 bool HaveVectorizedPhiNodes = true; 3759 while (HaveVectorizedPhiNodes) { 3760 HaveVectorizedPhiNodes = false; 3761 3762 // Collect the incoming values from the PHIs. 3763 Incoming.clear(); 3764 for (BasicBlock::iterator instr = BB->begin(), ie = BB->end(); instr != ie; 3765 ++instr) { 3766 PHINode *P = dyn_cast<PHINode>(instr); 3767 if (!P) 3768 break; 3769 3770 if (!VisitedInstrs.count(P)) 3771 Incoming.push_back(P); 3772 } 3773 3774 // Sort by type. 3775 std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc); 3776 3777 // Try to vectorize elements base on their type. 3778 for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(), 3779 E = Incoming.end(); 3780 IncIt != E;) { 3781 3782 // Look for the next elements with the same type. 3783 SmallVector<Value *, 4>::iterator SameTypeIt = IncIt; 3784 while (SameTypeIt != E && 3785 (*SameTypeIt)->getType() == (*IncIt)->getType()) { 3786 VisitedInstrs.insert(*SameTypeIt); 3787 ++SameTypeIt; 3788 } 3789 3790 // Try to vectorize them. 3791 unsigned NumElts = (SameTypeIt - IncIt); 3792 DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts << ")\n"); 3793 if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R)) { 3794 // Success start over because instructions might have been changed. 3795 HaveVectorizedPhiNodes = true; 3796 Changed = true; 3797 break; 3798 } 3799 3800 // Start over at the next instruction of a different type (or the end). 3801 IncIt = SameTypeIt; 3802 } 3803 } 3804 3805 VisitedInstrs.clear(); 3806 3807 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) { 3808 // We may go through BB multiple times so skip the one we have checked. 3809 if (!VisitedInstrs.insert(it).second) 3810 continue; 3811 3812 if (isa<DbgInfoIntrinsic>(it)) 3813 continue; 3814 3815 // Try to vectorize reductions that use PHINodes. 3816 if (PHINode *P = dyn_cast<PHINode>(it)) { 3817 // Check that the PHI is a reduction PHI. 3818 if (P->getNumIncomingValues() != 2) 3819 return Changed; 3820 Value *Rdx = 3821 (P->getIncomingBlock(0) == BB 3822 ? (P->getIncomingValue(0)) 3823 : (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1) 3824 : nullptr)); 3825 // Check if this is a Binary Operator. 3826 BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx); 3827 if (!BI) 3828 continue; 3829 3830 // Try to match and vectorize a horizontal reduction. 3831 HorizontalReduction HorRdx; 3832 if (ShouldVectorizeHor && 3833 HorRdx.matchAssociativeReduction(P, BI, DL) && 3834 HorRdx.tryToReduce(R, TTI)) { 3835 Changed = true; 3836 it = BB->begin(); 3837 e = BB->end(); 3838 continue; 3839 } 3840 3841 Value *Inst = BI->getOperand(0); 3842 if (Inst == P) 3843 Inst = BI->getOperand(1); 3844 3845 if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) { 3846 // We would like to start over since some instructions are deleted 3847 // and the iterator may become invalid value. 3848 Changed = true; 3849 it = BB->begin(); 3850 e = BB->end(); 3851 continue; 3852 } 3853 3854 continue; 3855 } 3856 3857 // Try to vectorize horizontal reductions feeding into a store. 3858 if (ShouldStartVectorizeHorAtStore) 3859 if (StoreInst *SI = dyn_cast<StoreInst>(it)) 3860 if (BinaryOperator *BinOp = 3861 dyn_cast<BinaryOperator>(SI->getValueOperand())) { 3862 HorizontalReduction HorRdx; 3863 if (((HorRdx.matchAssociativeReduction(nullptr, BinOp, DL) && 3864 HorRdx.tryToReduce(R, TTI)) || 3865 tryToVectorize(BinOp, R))) { 3866 Changed = true; 3867 it = BB->begin(); 3868 e = BB->end(); 3869 continue; 3870 } 3871 } 3872 3873 // Try to vectorize horizontal reductions feeding into a return. 3874 if (ReturnInst *RI = dyn_cast<ReturnInst>(it)) 3875 if (RI->getNumOperands() != 0) 3876 if (BinaryOperator *BinOp = 3877 dyn_cast<BinaryOperator>(RI->getOperand(0))) { 3878 DEBUG(dbgs() << "SLP: Found a return to vectorize.\n"); 3879 if (tryToVectorizePair(BinOp->getOperand(0), 3880 BinOp->getOperand(1), R)) { 3881 Changed = true; 3882 it = BB->begin(); 3883 e = BB->end(); 3884 continue; 3885 } 3886 } 3887 3888 // Try to vectorize trees that start at compare instructions. 3889 if (CmpInst *CI = dyn_cast<CmpInst>(it)) { 3890 if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) { 3891 Changed = true; 3892 // We would like to start over since some instructions are deleted 3893 // and the iterator may become invalid value. 3894 it = BB->begin(); 3895 e = BB->end(); 3896 continue; 3897 } 3898 3899 for (int i = 0; i < 2; ++i) { 3900 if (BinaryOperator *BI = dyn_cast<BinaryOperator>(CI->getOperand(i))) { 3901 if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) { 3902 Changed = true; 3903 // We would like to start over since some instructions are deleted 3904 // and the iterator may become invalid value. 3905 it = BB->begin(); 3906 e = BB->end(); 3907 } 3908 } 3909 } 3910 continue; 3911 } 3912 3913 // Try to vectorize trees that start at insertelement instructions. 3914 if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) { 3915 SmallVector<Value *, 16> BuildVector; 3916 SmallVector<Value *, 16> BuildVectorOpds; 3917 if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds)) 3918 continue; 3919 3920 // Vectorize starting with the build vector operands ignoring the 3921 // BuildVector instructions for the purpose of scheduling and user 3922 // extraction. 3923 if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) { 3924 Changed = true; 3925 it = BB->begin(); 3926 e = BB->end(); 3927 } 3928 3929 continue; 3930 } 3931 } 3932 3933 return Changed; 3934 } 3935 3936 bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) { 3937 bool Changed = false; 3938 // Attempt to sort and vectorize each of the store-groups. 3939 for (StoreListMap::iterator it = StoreRefs.begin(), e = StoreRefs.end(); 3940 it != e; ++it) { 3941 if (it->second.size() < 2) 3942 continue; 3943 3944 DEBUG(dbgs() << "SLP: Analyzing a store chain of length " 3945 << it->second.size() << ".\n"); 3946 3947 // Process the stores in chunks of 16. 3948 for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI+=16) { 3949 unsigned Len = std::min<unsigned>(CE - CI, 16); 3950 Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len), 3951 -SLPCostThreshold, R); 3952 } 3953 } 3954 return Changed; 3955 } 3956 3957 } // end anonymous namespace 3958 3959 char SLPVectorizer::ID = 0; 3960 static const char lv_name[] = "SLP Vectorizer"; 3961 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false) 3962 INITIALIZE_AG_DEPENDENCY(AliasAnalysis) 3963 INITIALIZE_AG_DEPENDENCY(TargetTransformInfo) 3964 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 3965 INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) 3966 INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 3967 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false) 3968 3969 namespace llvm { 3970 Pass *createSLPVectorizerPass() { return new SLPVectorizer(); } 3971 } 3972