//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/VectorUtils.h"
#include <algorithm>
#include <map>
#include <memory>

using namespace llvm;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(false), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

namespace {

static const unsigned MinVecRegSize = 128;

static const unsigned RecursionMaxDepth = 12;

/// \returns the parent basic block if all of the instructions in \p VL
/// are in the same block or null otherwise.
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return nullptr;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return nullptr;

    if (BB != I->getParent())
      return nullptr;
  }
  return BB;
}

/// \returns True if all of the values in \p VL are constants.
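/// For example, a bundle of literals such as { i32 0, i32 1 } is
/// all-constant, while a bundle containing any SSA value (say, some %x,
/// used here purely for illustration) is not.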
static bool allConstant(ArrayRef<Value *> VL) {
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    if (!isa<Constant>(VL[i]))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

///\returns Opcode that can be clubbed with \p Op to create an alternate
/// sequence which can later be merged as a ShuffleVector instruction.
static unsigned getAltOpcode(unsigned Op) {
  switch (Op) {
  case Instruction::FAdd:
    return Instruction::FSub;
  case Instruction::FSub:
    return Instruction::FAdd;
  case Instruction::Add:
    return Instruction::Sub;
  case Instruction::Sub:
    return Instruction::Add;
  default:
    return 0;
  }
}

///\returns bool representing if Opcode \p Op can be part
/// of an alternate sequence which can later be merged as
/// a ShuffleVector instruction.
static bool canCombineAsAltInst(unsigned Op) {
  if (Op == Instruction::FAdd || Op == Instruction::FSub ||
      Op == Instruction::Sub || Op == Instruction::Add)
    return true;
  return false;
}

/// \returns the ShuffleVector opcode if the instructions in \p VL form an
/// alternating fadd,fsub / fsub,fadd / add,sub / sub,add sequence
/// (i.e. opcodes of fadd,fsub,fadd,fsub,...), or zero otherwise.
static unsigned isAltInst(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  unsigned Opcode = I0->getOpcode();
  unsigned AltOpcode = getAltOpcode(Opcode);
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || I->getOpcode() != ((i & 1) ? AltOpcode : Opcode))
      return 0;
  }
  return Instruction::ShuffleVector;
}

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode()) {
      if (canCombineAsAltInst(Opcode) && i == 1)
        return isAltInst(VL);
      return 0;
    }
  }
  return Opcode;
}

/// \returns \p I after propagating metadata from \p VL.
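/// Only metadata kinds that can be merged conservatively (TBAA, alias
/// scope, noalias and fpmath) are combined across the bundle; any other
/// kind is dropped from \p I.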
static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (unsigned i = 0, n = Metadata.size(); i != n; ++i) {
    unsigned Kind = Metadata[i].first;
    MDNode *MD = Metadata[i].second;

    for (int i = 1, e = VL.size(); MD && i != e; i++) {
      Instruction *I = cast<Instruction>(VL[i]);
      MDNode *IMD = I->getMetadata(Kind);

      switch (Kind) {
      default:
        MD = nullptr; // Remove unknown metadata
        break;
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
      case LLVMContext::MD_noalias:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      }
    }
    I->setMetadata(Kind, MD);
  }
  return I;
}

/// \returns The type that all of the values in \p VL have or null if there
/// are different types.
static Type* getSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return nullptr;

  return Ty;
}

/// \returns True if the ExtractElement instructions in VL can be vectorized
/// to use the original vector.
static bool CanReuseExtract(ArrayRef<Value *> VL) {
  assert(Instruction::ExtractElement == getSameOpcode(VL) && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *VL0 = VL[0];
  ExtractElementInst *E0 = cast<ExtractElementInst>(VL0);
  Value *Vec = E0->getOperand(0);

  // We have to extract from the same vector type.
  unsigned NElts = Vec->getType()->getVectorNumElements();

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
  ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1));
  if (!CI || CI->getZExtValue())
    return false;

  for (unsigned i = 1, e = VL.size(); i < e; ++i) {
    ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));

    if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec)
      return false;
  }

  return true;
}

static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                           SmallVectorImpl<Value *> &Left,
                                           SmallVectorImpl<Value *> &Right) {

  SmallVector<Value *, 16> OrigLeft, OrigRight;

  bool AllSameOpcodeLeft = true;
  bool AllSameOpcodeRight = true;
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *I = cast<Instruction>(VL[i]);
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);

    OrigLeft.push_back(V0);
    OrigRight.push_back(V1);

    Instruction *I0 = dyn_cast<Instruction>(V0);
    Instruction *I1 = dyn_cast<Instruction>(V1);

    // Check whether all operands on one side have the same opcode. In this
    // case we want to preserve the original order and not make things worse
    // by reordering.
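    // A non-instruction operand (null I0/I1 below) immediately breaks the
    // same-opcode chain for its side.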
    AllSameOpcodeLeft = I0;
    AllSameOpcodeRight = I1;

    if (i && AllSameOpcodeLeft) {
      if (Instruction *P0 = dyn_cast<Instruction>(OrigLeft[i - 1])) {
        if (P0->getOpcode() != I0->getOpcode())
          AllSameOpcodeLeft = false;
      } else
        AllSameOpcodeLeft = false;
    }
    if (i && AllSameOpcodeRight) {
      if (Instruction *P1 = dyn_cast<Instruction>(OrigRight[i - 1])) {
        if (P1->getOpcode() != I1->getOpcode())
          AllSameOpcodeRight = false;
      } else
        AllSameOpcodeRight = false;
    }

    // Sort two opcodes. In the code below we try to preserve the ability to
    // use broadcast of values instead of individual inserts.
    // vl1 = load
    // vl2 = phi
    // vr1 = load
    // vr2 = vr1
    //         = vl1 x vr1
    //         = vl2 x vr2
    // If we just sorted according to opcode we would leave the first line
    // intact but we would swap vl2 with vr2 because opcode(phi) >
    // opcode(load).
    //         = vl1 x vr1
    //         = vr2 x vl2
    // Because vr2 and vr1 are from the same load we lose the opportunity of a
    // broadcast for the packed right side in the backend: we have [vr1, vl2]
    // instead of [vr1, vr2=vr1].
    if (I0 && I1) {
      if (!i && I0->getOpcode() > I1->getOpcode()) {
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() > I1->getOpcode() && Right[i - 1] != I1) {
        // Try not to destroy a broadcast for no apparent benefit.
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() == I1->getOpcode() && Right[i - 1] == I0) {
        // Try to preserve broadcasts.
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() == I1->getOpcode() && Left[i - 1] == I1) {
        // Try to preserve broadcasts.
        Left.push_back(I1);
        Right.push_back(I0);
      } else {
        Left.push_back(I0);
        Right.push_back(I1);
      }
      continue;
    }
    // Only one operand is an instruction: put it on the right.
    if (I0) {
      Left.push_back(V1);
      Right.push_back(I0);
      continue;
    }
    Left.push_back(V0);
    Right.push_back(V1);
  }

  bool LeftBroadcast = isSplat(Left);
  bool RightBroadcast = isSplat(Right);

  // Don't reorder if the operands were good to begin with.
  if (!(LeftBroadcast || RightBroadcast) &&
      (AllSameOpcodeRight || AllSameOpcodeLeft)) {
    Left = OrigLeft;
    Right = OrigRight;
  }
}

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;

  BoUpSLP(Function *Func, ScalarEvolution *Se, const DataLayout *Dl,
          TargetTransformInfo *Tti, TargetLibraryInfo *TLi, AliasAnalysis *Aa,
          LoopInfo *Li, DominatorTree *Dt)
      : NumLoadsWantToKeepOrder(0), NumLoadsWantToChangeOrder(0),
        F(Func), SE(Se), DL(Dl), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt),
        Builder(Se->getContext()) {}

  /// \brief Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  int getSpillCost();

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
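  /// The cost is computed as (vector cost - scalar cost) by getEntryCost(),
  /// so e.g. a result of -4 means the vectorized tree is expected to be four
  /// cost units cheaper than the equivalent scalar code.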
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumLoadsWantToKeepOrder = 0;
    NumLoadsWantToChangeOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
  }

  /// \returns true if the memory operations A and B are consecutive.
  bool isConsecutiveAccess(Value *A, Value *B);

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// \returns true if it is beneficial to reverse the vector order.
  bool shouldReorder() const {
    return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
  }

private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This may happen in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL) const;

  /// \brief Take the pointer operand from the Load/Store instruction.
  /// \returns NULL if this is not a valid Load/Store instruction.
  static Value *getPointerOperand(Value *I);

  /// \brief Take the address space operand from the Load/Store instruction.
  /// \returns -1 if this is not a valid Load/Store instruction.
  static unsigned getAddressSpaceOperand(Value *I);

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even when the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  struct TreeEntry {
    TreeEntry() : Scalars(), VectorizedValue(nullptr),
                  NeedToGather(0) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
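    /// It is set by vectorizeTree(TreeEntry *) once the bundle is widened.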
    Value *VectorizedValue;

    /// Do we need to gather this sequence?
    bool NeedToGather;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) {
    VectorizableTree.push_back(TreeEntry());
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      MustGather.insert(VL.begin(), VL.end());
    }
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value *, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L)
        : Scalar(S), User(U), Lane(L) {}
    // Which scalar in our function.
    Value *Scalar;
    // The user that uses the scalar.
    llvm::User *User;
    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User).
  UserList ExternalUses;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;
  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;

  /// Contains all scheduling relevant data for an instruction.
  /// A ScheduleData either represents a single instruction or a member of an
  /// instruction bundle (= a group of instructions which is combined into a
  /// vector instruction).
  struct ScheduleData {

    // The initial value for the dependency counters. It means that the
    // dependencies are not calculated yet.
    enum { InvalidDeps = -1 };

    ScheduleData()
        : Inst(nullptr), FirstInBundle(nullptr), NextInBundle(nullptr),
          NextLoadStore(nullptr), SchedulingRegionID(0), SchedulingPriority(0),
          Dependencies(InvalidDeps), UnscheduledDeps(InvalidDeps),
          UnscheduledDepsInBundle(InvalidDeps), IsScheduled(false) {}

    void init(int BlockSchedulingRegionID) {
      FirstInBundle = this;
      NextInBundle = nullptr;
      NextLoadStore = nullptr;
      IsScheduled = false;
      SchedulingRegionID = BlockSchedulingRegionID;
      UnscheduledDepsInBundle = UnscheduledDeps;
      clearDependencies();
    }

    /// Returns true if the dependency information has been calculated.
    bool hasValidDependencies() const { return Dependencies != InvalidDeps; }

    /// Returns true for single instructions and for bundle representatives
    /// (= the head of a bundle).
    bool isSchedulingEntity() const { return FirstInBundle == this; }

    /// Returns true if it represents an instruction bundle and not only a
    /// single instruction.
    bool isPartOfBundle() const {
      return NextInBundle != nullptr || FirstInBundle != this;
    }

    /// Returns true if it is ready for scheduling, i.e. it has no more
    /// unscheduled depending instructions/bundles.
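    /// Readiness is a bundle-wide property: UnscheduledDepsInBundle on the
    /// head aggregates the counters of all bundle members.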
    bool isReady() const {
      assert(isSchedulingEntity() &&
             "can't consider non-scheduling entity for ready list");
      return UnscheduledDepsInBundle == 0 && !IsScheduled;
    }

    /// Modifies the number of unscheduled dependencies, also updating it for
    /// the whole bundle.
    int incrementUnscheduledDeps(int Incr) {
      UnscheduledDeps += Incr;
      return FirstInBundle->UnscheduledDepsInBundle += Incr;
    }

    /// Sets the number of unscheduled dependencies to the number of
    /// dependencies.
    void resetUnscheduledDeps() {
      incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
    }

    /// Clears all dependency information.
    void clearDependencies() {
      Dependencies = InvalidDeps;
      resetUnscheduledDeps();
      MemoryDependencies.clear();
    }

    void dump(raw_ostream &os) const {
      if (!isSchedulingEntity()) {
        os << "/ " << *Inst;
      } else if (NextInBundle) {
        os << '[' << *Inst;
        ScheduleData *SD = NextInBundle;
        while (SD) {
          os << ';' << *SD->Inst;
          SD = SD->NextInBundle;
        }
        os << ']';
      } else {
        os << *Inst;
      }
    }

    Instruction *Inst;

    /// Points to the head in an instruction bundle (and always to this for
    /// single instructions).
    ScheduleData *FirstInBundle;

    /// Single linked list of all instructions in a bundle. Null if it is a
    /// single instruction.
    ScheduleData *NextInBundle;

    /// Single linked list of all memory instructions (e.g. load, store, call)
    /// in the block - until the end of the scheduling region.
    ScheduleData *NextLoadStore;

    /// The dependent memory instructions.
    /// This list is derived on demand in calculateDependencies().
    SmallVector<ScheduleData *, 4> MemoryDependencies;

    /// This ScheduleData is in the current scheduling region if this matches
    /// the current SchedulingRegionID of BlockScheduling.
    int SchedulingRegionID;

    /// Used for getting a "good" final ordering of instructions.
    int SchedulingPriority;

    /// The number of dependencies. It consists of the number of users of the
    /// instruction plus the number of dependent memory instructions (if any).
    /// This value is calculated on demand.
    /// If InvalidDeps, the number of dependencies is not calculated yet.
    int Dependencies;

    /// The number of dependencies minus the number of dependencies of
    /// scheduled instructions. As soon as this is zero, the
    /// instruction/bundle gets ready for scheduling.
    /// Note that this is negative as long as Dependencies is not calculated.
    int UnscheduledDeps;

    /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for
    /// single instructions.
    int UnscheduledDepsInBundle;

    /// True if this instruction is scheduled (or considered as scheduled in
    /// the dry-run).
    bool IsScheduled;
  };

#ifndef NDEBUG
  friend raw_ostream &operator<<(raw_ostream &os,
                                 const BoUpSLP::ScheduleData &SD);
#endif

  /// Contains all scheduling data for a basic block.
  ///
  struct BlockScheduling {

    BlockScheduling(BasicBlock *BB)
        : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize),
          ScheduleStart(nullptr), ScheduleEnd(nullptr),
          FirstLoadStoreInRegion(nullptr), LastLoadStoreInRegion(nullptr),
          // Make sure that the initial SchedulingRegionID is greater than the
          // initial SchedulingRegionID in ScheduleData (which is 0).
          SchedulingRegionID(1) {}

    void clear() {
      ReadyInsts.clear();
      ScheduleStart = nullptr;
      ScheduleEnd = nullptr;
      FirstLoadStoreInRegion = nullptr;
      LastLoadStoreInRegion = nullptr;

      // Make a new scheduling region, i.e. all existing ScheduleData is not
      // in the new region yet.
      ++SchedulingRegionID;
    }

    ScheduleData *getScheduleData(Value *V) {
      ScheduleData *SD = ScheduleDataMap[V];
      if (SD && SD->SchedulingRegionID == SchedulingRegionID)
        return SD;
      return nullptr;
    }

    bool isInSchedulingRegion(ScheduleData *SD) {
      return SD->SchedulingRegionID == SchedulingRegionID;
    }

    /// Marks an instruction as scheduled and puts all dependent ready
    /// instructions into the ready-list.
    template <typename ReadyListType>
    void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
      SD->IsScheduled = true;
      DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");

      ScheduleData *BundleMember = SD;
      while (BundleMember) {
        // Handle the def-use chain dependencies.
        for (Use &U : BundleMember->Inst->operands()) {
          ScheduleData *OpDef = getScheduleData(U.get());
          if (OpDef && OpDef->hasValidDependencies() &&
              OpDef->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = OpDef->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (def): " << *DepBundle << "\n");
          }
        }
        // Handle the memory dependencies.
        for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
          if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (mem): " << *DepBundle << "\n");
          }
        }
        BundleMember = BundleMember->NextInBundle;
      }
    }

    /// Put all instructions into the ReadyList which are ready for scheduling.
    template <typename ReadyListType>
    void initialFillReadyList(ReadyListType &ReadyList) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
        ScheduleData *SD = getScheduleData(I);
        if (SD->isSchedulingEntity() && SD->isReady()) {
          ReadyList.insert(SD);
          DEBUG(dbgs() << "SLP: initially in ready list: " << *I << "\n");
        }
      }
    }

    /// Checks if a bundle of instructions can be scheduled, i.e. has no
    /// cyclic dependencies. This is only a dry-run, no instructions are
    /// actually moved at this stage.
    bool tryScheduleBundle(ArrayRef<Value *> VL, AliasAnalysis *AA);

    /// Un-bundles a group of instructions.
    void cancelScheduling(ArrayRef<Value *> VL);

    /// Extends the scheduling region so that V is inside the region.
    void extendSchedulingRegion(Value *V);

    /// Initialize the ScheduleData structures for new instructions in the
    /// scheduling region.
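    /// New ScheduleData objects are allocated from ScheduleDataChunks and
    /// recorded in ScheduleDataMap, where they are recycled across
    /// vectorization iterations.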
    void initScheduleData(Instruction *FromI, Instruction *ToI,
                          ScheduleData *PrevLoadStore,
                          ScheduleData *NextLoadStore);

    /// Updates the dependency information of a bundle and of all instructions/
    /// bundles which depend on the original bundle.
    void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
                               AliasAnalysis *AA);

    /// Sets all instructions in the scheduling region to un-scheduled.
    void resetSchedule();

    BasicBlock *BB;

    /// Simple memory allocation for ScheduleData.
    std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;

    /// The size of a ScheduleData array in ScheduleDataChunks.
    int ChunkSize;

    /// The allocator position in the current chunk, which is the last entry
    /// of ScheduleDataChunks.
    int ChunkPos;

    /// Attaches ScheduleData to Instruction.
    /// Note that the mapping survives during all vectorization iterations, i.e.
    /// ScheduleData structures are recycled.
    DenseMap<Value *, ScheduleData *> ScheduleDataMap;

    struct ReadyList : SmallVector<ScheduleData *, 8> {
      void insert(ScheduleData *SD) { push_back(SD); }
    };

    /// The ready-list for scheduling (only used for the dry-run).
    ReadyList ReadyInsts;

    /// The first instruction of the scheduling region.
    Instruction *ScheduleStart;

    /// The first instruction _after_ the scheduling region.
    Instruction *ScheduleEnd;

    /// The first memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *FirstLoadStoreInRegion;

    /// The last memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *LastLoadStoreInRegion;

    /// The ID of the scheduling region. For a new vectorization iteration this
    /// is incremented which "removes" all ScheduleData from the region.
    int SchedulingRegionID;
  };

  /// Attaches the BlockScheduling structures to basic blocks.
  DenseMap<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;

  /// Performs the "real" scheduling. Done before vectorization is actually
  /// performed in a basic block.
  void scheduleBlock(BlockScheduling *BS);

  /// List of users to ignore during scheduling and that don't need extracting.
  ArrayRef<Value *> UserIgnoreList;

  // Number of load-bundles, which contain consecutive loads.
  int NumLoadsWantToKeepOrder;

  // Number of load-bundles of size 2, which are consecutive loads if reversed.
  int NumLoadsWantToChangeOrder;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  const DataLayout *DL;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;
};

#ifndef NDEBUG
raw_ostream &operator<<(raw_ostream &os, const BoUpSLP::ScheduleData &SD) {
  SD.dump(os);
  return os;
}
#endif

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ArrayRef<Value *> UserIgnoreLst) {
  deleteTree();
  UserIgnoreList = UserIgnoreLst;
  if (!getSameType(Roots))
    return;
  buildTree_rec(Roots, 0);

  // Collect the values that we need to extract from the tree.
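  // Any scalar with a user outside the tree (and outside UserIgnoreList)
  // must later be made available with an extractelement instruction; the
  // ExternalUses list collected here drives that extraction.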
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      for (User *U : Scalar->users()) {
        DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        // Skip in-tree scalars that become vectors.
        if (ScalarToTreeEntry.count(U)) {
          DEBUG(dbgs() << "SLP: \tInternal user will be removed:" <<
                *U << ".\n");
          int Idx = ScalarToTreeEntry[U]; (void) Idx;
          assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
          continue;
        }
        Instruction *UserInst = dyn_cast<Instruction>(U);
        if (!UserInst)
          continue;

        // Ignore users in the user ignore list.
        if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UserInst) !=
            UserIgnoreList.end())
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
      }
    }
  }
}

void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
  bool SameTy = getSameType(VL); (void)SameTy;
  bool isAltShuffle = false;
  assert(SameTy && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false);
    return;
  }

  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false);
      return;
    }
  unsigned Opcode = getSameOpcode(VL);

  // If getSameOpcode returned ShuffleVector but the scalars themselves are
  // not shufflevector instructions, this bundle is an alternating opcode
  // sequence (e.g. add,sub,add,sub).
  if (Opcode == Instruction::ShuffleVector) {
    Instruction *I0 = dyn_cast<Instruction>(VL[0]);
    unsigned Op = I0->getOpcode();
    if (Op != Instruction::ShuffleVector)
      isAltShuffle = true;
  }

  // If all of the operands are identical or constant we have a simple
  // solution.
  if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) || !Opcode) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Check if this is a duplicate of another entry.
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
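  // (Such a scalar is already a member of a different bundle; widening it a
  // second time would create a second, conflicting vector definition.)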
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is already in tree.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // If any of the scalars appears in the table OR it is marked as a value
  // that needs to stay scalar then we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i]) || MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar. \n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  BasicBlock *BB = cast<Instruction>(VL0)->getParent();

  if (!DT->isReachableFromEntry(BB)) {
    // Don't go into unreachable blocks. They may contain instructions with
    // dependency cycles which confuse the final scheduling.
    DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
    newTreeEntry(VL, false);
    return;
  }

  // Check that every instruction appears once in this bundle.
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    for (unsigned j = i + 1; j < e; ++j)
      if (VL[i] == VL[j]) {
        DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
        newTreeEntry(VL, false);
        return;
      }

  auto &BSRef = BlocksSchedules[BB];
  if (!BSRef) {
    BSRef = llvm::make_unique<BlockScheduling>(BB);
  }
  BlockScheduling &BS = *BSRef.get();

  if (!BS.tryScheduleBundle(VL, AA)) {
    DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
    BS.cancelScheduling(VL);
    newTreeEntry(VL, false);
    return;
  }
  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = dyn_cast<PHINode>(VL0);

    // Check for terminator values (e.g. invoke).
    for (unsigned j = 0; j < VL.size(); ++j)
      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        TerminatorInst *Term = dyn_cast<TerminatorInst>(
            cast<PHINode>(VL[j])->getIncomingValueForBlock(PH->getIncomingBlock(i)));
        if (Term) {
          DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          return;
        }
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<PHINode>(VL[j])->getIncomingValueForBlock(
            PH->getIncomingBlock(i)));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ExtractElement: {
    bool Reuse = CanReuseExtract(VL);
    if (Reuse) {
      DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
    } else {
      BS.cancelScheduling(VL);
    }
    newTreeEntry(VL, Reuse);
    return;
  }
  case Instruction::Load: {
    // Check if the loads are consecutive or if we need to swizzle them.
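    // "Consecutive" means VL[i+1] loads from the address immediately after
    // VL[i], as established from constant pointer offsets and SCEV in
    // isConsecutiveAccess(). A reversed pair of size 2 is still counted, so
    // that shouldReorder() can suggest flipping the whole tree.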
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
      LoadInst *L = cast<LoadInst>(VL[i]);
      if (!L->isSimple()) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
        return;
      }
      if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
        if (VL.size() == 2 && isConsecutiveAccess(VL[1], VL[0])) {
          ++NumLoadsWantToChangeOrder;
        }
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
        return;
      }
    }
    ++NumLoadsWantToKeepOrder;
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of loads.\n");
    return;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();
    for (unsigned i = 0; i < VL.size(); ++i) {
      Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
      if (Ty != SrcTy || Ty->isAggregateType() || Ty->isVectorTy()) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of casts.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Check that all of the compares have the same predicate.
    CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
    Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
    for (unsigned i = 1, e = VL.size(); i < e; ++i) {
      CmpInst *Cmp = cast<CmpInst>(VL[i]);
      if (Cmp->getPredicate() != P0 ||
          Cmp->getOperand(0)->getType() != ComparedTy) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of compares.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of bin op.\n");

    // Sort operands of the instructions so that each side is more likely to
    // have the same opcode.
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
      ValueList Left, Right;
      reorderInputsAccordingToOpcode(VL, Left, Right);
      buildTree_rec(Left, Depth + 1);
      buildTree_rec(Right, Depth + 1);
      return;
    }

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::GetElementPtr: {
    // We don't combine GEPs with complicated (nested) indexing.
    for (unsigned j = 0; j < VL.size(); ++j) {
      if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
        DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        return;
      }
    }

    // We combine only GEPs with a single use.
    for (unsigned j = 0; j < VL.size(); ++j) {
      if (cast<Instruction>(VL[j])->getNumUses() > 1) {
        DEBUG(dbgs() << "SLP: not-vectorizable GEP (multiple uses).\n");
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        return;
      }
    }

    // We can't combine several GEPs into one vector if they operate on
    // different types.
    Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
    for (unsigned j = 0; j < VL.size(); ++j) {
      Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
      if (Ty0 != CurTy) {
        DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        return;
      }
    }

    // We don't combine GEPs with non-constant indexes.
    for (unsigned j = 0; j < VL.size(); ++j) {
      auto Op = cast<Instruction>(VL[j])->getOperand(1);
      if (!isa<ConstantInt>(Op)) {
        DEBUG(
            dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
    for (unsigned i = 0, e = 2; i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::Store: {
    // Check if the stores are consecutive or if we need to swizzle them.
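    // Unlike the load case there is no reversed-order bookkeeping here: one
    // non-consecutive pair immediately demotes the bundle to a gather.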
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
      if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
        return;
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of stores.\n");

    ValueList Operands;
    for (unsigned j = 0; j < VL.size(); ++j)
      Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));

    buildTree_rec(Operands, Depth + 1);
    return;
  }
  case Instruction::Call: {
    // Check if the calls are all to the same vectorizable intrinsic.
    CallInst *CI = cast<CallInst>(VL[0]);
    // Check if this is an Intrinsic call or something that can be
    // represented by an intrinsic call.
    Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
    if (!isTriviallyVectorizable(ID)) {
      BS.cancelScheduling(VL);
      newTreeEntry(VL, false);
      DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
      return;
    }
    Function *Int = CI->getCalledFunction();
    Value *A1I = nullptr;
    if (hasVectorInstrinsicScalarOpd(ID, 1))
      A1I = CI->getArgOperand(1);
    for (unsigned i = 1, e = VL.size(); i != e; ++i) {
      CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
      if (!CI2 || CI2->getCalledFunction() != Int ||
          getIntrinsicIDForCall(CI2, TLI) != ID) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
                     << "\n");
        return;
      }
      // ctlz, cttz and powi are special intrinsics whose second argument
      // should be the same in order for them to be vectorized.
      if (hasVectorInstrinsicScalarOpd(ID, 1)) {
        Value *A1J = CI2->getArgOperand(1);
        if (A1I != A1J) {
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
                       << " argument " << A1I << "!=" << A1J
                       << "\n");
          return;
        }
      }
    }

    newTreeEntry(VL, true);
    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j) {
        CallInst *CI2 = dyn_cast<CallInst>(VL[j]);
        Operands.push_back(CI2->getArgOperand(i));
      }
      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ShuffleVector: {
    // If this is not an alternating sequence of opcodes like add-sub
    // then do not vectorize this instruction.
    if (!isAltShuffle) {
      BS.cancelScheduling(VL);
      newTreeEntry(VL, false);
      DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
      return;
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  default:
    BS.cancelScheduling(VL);
    newTreeEntry(VL, false);
    DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
    return;
  }
}

int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value *> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  if (E->NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isSplat(VL)) {
      return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    }
    return getGatherCost(E->Scalars);
  }
  unsigned Opcode = getSameOpcode(VL);
  assert(Opcode && getSameType(VL) && getSameBlock(VL) && "Invalid VL");
  Instruction *VL0 = cast<Instruction>(VL[0]);
  switch (Opcode) {
  case Instruction::PHI: {
    return 0;
  }
  case Instruction::ExtractElement: {
    if (CanReuseExtract(VL)) {
      int DeadCost = 0;
      for (unsigned i = 0, e = VL.size(); i < e; ++i) {
        ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
        if (E->hasOneUse())
          // Take credit for instruction that will become dead.
          DeadCost +=
              TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
      }
      return -DeadCost;
    }
    return getGatherCost(VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();

    // Calculate the cost of this instruction.
    int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
                                                       VL0->getType(), SrcTy);

    VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
    int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
    return VecCost - ScalarCost;
  }
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Calculate the cost of this instruction.
    int ScalarCost = 0;
    int VecCost = 0;
    if (Opcode == Instruction::FCmp || Opcode == Instruction::ICmp ||
        Opcode == Instruction::Select) {
      VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
      ScalarCost = VecTy->getNumElements() *
                   TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
      VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
    } else {
      // Certain instructions can be cheaper to vectorize if they have a
      // constant second vector operand.
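      // For example, a shift or a division by a uniform power-of-two
      // constant is often cheaper than the fully general form; the operand
      // kind and OP_PowerOf2 property computed below tell TTI which case
      // applies.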
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;
      TargetTransformInfo::OperandValueProperties Op1VP =
          TargetTransformInfo::OP_None;
      TargetTransformInfo::OperandValueProperties Op2VP =
          TargetTransformInfo::OP_None;

      // If all operands are exactly the same ConstantInt then set the
      // operand kind to OK_UniformConstantValue.
      // If instead not all operands are constants, then set the operand kind
      // to OK_AnyValue. If all operands are constants but not the same,
      // then set the operand kind to OK_NonUniformConstantValue.
      ConstantInt *CInt = nullptr;
      for (unsigned i = 0; i < VL.size(); ++i) {
        const Instruction *I = cast<Instruction>(VL[i]);
        if (!isa<ConstantInt>(I->getOperand(1))) {
          Op2VK = TargetTransformInfo::OK_AnyValue;
          break;
        }
        if (i == 0) {
          CInt = cast<ConstantInt>(I->getOperand(1));
          continue;
        }
        if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
            CInt != cast<ConstantInt>(I->getOperand(1)))
          Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      }
      // FIXME: Currently the cost model modification for division by a
      // power of 2 is handled only for X86. Add support for other targets.
      if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt &&
          CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;

      ScalarCost = VecTy->getNumElements() *
                   TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK,
                                               Op1VP, Op2VP);
      VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK,
                                            Op1VP, Op2VP);
    }
    return VecCost - ScalarCost;
  }
  case Instruction::GetElementPtr: {
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_UniformConstantValue;

    int ScalarCost =
        VecTy->getNumElements() *
        TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
    int VecCost =
        TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);

    return VecCost - ScalarCost;
  }
  case Instruction::Load: {
    // Cost of wide load - cost of scalar loads.
    int ScalarLdCost = VecTy->getNumElements() *
                       TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
    int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
    return VecLdCost - ScalarLdCost;
  }
  case Instruction::Store: {
    // We know that we can merge the stores. Calculate the cost.
    int ScalarStCost = VecTy->getNumElements() *
                       TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
    int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
    return VecStCost - ScalarStCost;
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);

    // Calculate the cost of the scalar and vector calls.
    SmallVector<Type *, 4> ScalarTys, VecTys;
    for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op) {
      ScalarTys.push_back(CI->getArgOperand(op)->getType());
      VecTys.push_back(VectorType::get(CI->getArgOperand(op)->getType(),
                                       VecTy->getNumElements()));
    }

    int ScalarCallCost = VecTy->getNumElements() *
                         TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys);

    int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys);

    DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
                 << " (" << VecCallCost << "-" << ScalarCallCost << ")"
                 << " for " << *CI << "\n");

    return VecCallCost - ScalarCallCost;
  }
  case Instruction::ShuffleVector: {
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_AnyValue;
    int ScalarCost = 0;
    int VecCost = 0;
    for (unsigned i = 0; i < VL.size(); ++i) {
      Instruction *I = cast<Instruction>(VL[i]);
      if (!I)
        break;
      ScalarCost +=
          TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
    }
    // VecCost is equal to the sum of the cost of creating 2 vectors
    // and the cost of creating the shuffle.
    Instruction *I0 = cast<Instruction>(VL[0]);
    VecCost =
        TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK);
    Instruction *I1 = cast<Instruction>(VL[1]);
    VecCost +=
        TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK);
    VecCost +=
        TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0);
    return VecCost - ScalarCost;
  }
  default:
    llvm_unreachable("Unknown instruction");
  }
}

bool BoUpSLP::isFullyVectorizableTinyTree() {
  DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
        VectorizableTree.size() << " is fully vectorizable.\n");

  // We only handle trees of height 2.
  if (VectorizableTree.size() != 2)
    return false;

  // Handle splat stores.
  if (!VectorizableTree[0].NeedToGather && isSplat(VectorizableTree[1].Scalars))
    return true;

  // Gathering cost would be too much for tiny trees.
  if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
    return false;

  return true;
}

int BoUpSLP::getSpillCost() {
  // Walk from the bottom of the tree to the top, tracking which values are
  // live. When we see a call instruction that is not part of our tree,
  // query TTI to see if there is a cost to keeping values live over it
  // (for example, if spills and fills are required).
  unsigned BundleWidth = VectorizableTree.front().Scalars.size();
  int Cost = 0;

  SmallPtrSet<Instruction *, 4> LiveValues;
  Instruction *PrevInst = nullptr;

  for (unsigned N = 0; N < VectorizableTree.size(); ++N) {
    Instruction *Inst = dyn_cast<Instruction>(VectorizableTree[N].Scalars[0]);
    if (!Inst)
      continue;

    if (!PrevInst) {
      PrevInst = Inst;
      continue;
    }

    DEBUG(
      dbgs() << "SLP: #LV: " << LiveValues.size();
      for (auto *X : LiveValues)
        dbgs() << " " << X->getName();
      dbgs() << ", Looking at ";
      Inst->dump();
    );

    // Update LiveValues.
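    // LiveValues is maintained bottom-up: drop the value we just moved past
    // and add its in-tree operands, which must be live above this point.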
    LiveValues.erase(PrevInst);
    for (auto &J : PrevInst->operands()) {
      if (isa<Instruction>(&*J) && ScalarToTreeEntry.count(&*J))
        LiveValues.insert(cast<Instruction>(&*J));
    }

    // Now find the sequence of instructions between PrevInst and Inst.
    BasicBlock::reverse_iterator InstIt(Inst), PrevInstIt(PrevInst);
    --PrevInstIt;
    while (InstIt != PrevInstIt) {
      if (PrevInstIt == PrevInst->getParent()->rend()) {
        PrevInstIt = Inst->getParent()->rbegin();
        continue;
      }

      if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) {
        SmallVector<Type *, 4> V;
        for (auto *II : LiveValues)
          V.push_back(VectorType::get(II->getType(), BundleWidth));
        Cost += TTI->getCostOfKeepingLiveOverCall(V);
      }

      ++PrevInstIt;
    }

    PrevInst = Inst;
  }

  DEBUG(dbgs() << "SLP: SpillCost=" << Cost << "\n");
  return Cost;
}

int BoUpSLP::getTreeCost() {
  int Cost = 0;
  DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
        VectorizableTree.size() << ".\n");

  // We only vectorize tiny trees if they are fully vectorizable.
  if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) {
    if (!VectorizableTree.size()) {
      assert(!ExternalUses.size() && "We should not have any external users");
    }
    return INT_MAX;
  }

  unsigned BundleWidth = VectorizableTree[0].Scalars.size();

  for (unsigned i = 0, e = VectorizableTree.size(); i != e; ++i) {
    int C = getEntryCost(&VectorizableTree[i]);
    DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
                 << *VectorizableTree[i].Scalars[0] << " .\n");
    Cost += C;
  }

  SmallSet<Value *, 16> ExtractCostCalculated;
  int ExtractCost = 0;
  for (UserList::iterator I = ExternalUses.begin(), E = ExternalUses.end();
       I != E; ++I) {
    // We only add extract cost once for the same scalar.
    if (!ExtractCostCalculated.insert(I->Scalar))
      continue;

    VectorType *VecTy = VectorType::get(I->Scalar->getType(), BundleWidth);
    ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                           I->Lane);
  }

  Cost += getSpillCost();

  DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost << ".\n");
  return Cost + ExtractCost;
}

int BoUpSLP::getGatherCost(Type *Ty) {
  int Cost = 0;
  for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
    Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
  return Cost;
}

int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  return getGatherCost(VecTy);
}

Value *BoUpSLP::getPointerOperand(Value *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

unsigned BoUpSLP::getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers of the same type.
  if (PtrA == PtrB || PtrA->getType() != PtrB->getType())
    return false;

  unsigned PtrBitWidth = DL->getPointerSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(PtrBitWidth, DL->getTypeStoreSize(Ty));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetB);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the size.
  APInt BaseDelta = Size - OffsetDelta;

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE->getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE->getSCEV(PtrB);
  const SCEV *C = SE->getConstant(BaseDelta);
  const SCEV *X = SE->getAddExpr(PtrSCEVA, C);
  return X == PtrSCEVB;
}

void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
  Instruction *VL0 = cast<Instruction>(VL[0]);
  BasicBlock::iterator NextInst = VL0;
  ++NextInst;
  Builder.SetInsertPoint(VL0->getParent(), NextInst);
  Builder.SetCurrentDebugLocation(VL0->getDebugLoc());
}

Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
  Value *Vec = UndefValue::get(Ty);
  // Generate the 'InsertElement' instructions.
  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
    if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(Insrt);
      CSEBlocks.insert(Insrt->getParent());

      // Add to our 'need-to-extract' list.
      if (ScalarToTreeEntry.count(VL[i])) {
        int Idx = ScalarToTreeEntry[VL[i]];
        TreeEntry *E = &VectorizableTree[Idx];
        // Find which lane we need to extract.
        int FoundLane = -1;
        for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for?
          if (E->Scalars[Lane] == VL[i]) {
            FoundLane = Lane;
            break;
          }
        }
        assert(FoundLane >= 0 && "Could not find the correct lane");
        ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
      }
    }
  }

  return Vec;
}

Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
  SmallDenseMap<Value*, int>::const_iterator Entry
    = ScalarToTreeEntry.find(VL[0]);
  if (Entry != ScalarToTreeEntry.end()) {
    int Idx = Entry->second;
    const TreeEntry *En = &VectorizableTree[Idx];
    if (En->isSame(VL) && En->VectorizedValue)
      return En->VectorizedValue;
  }
  return nullptr;
}

Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    if (E->isSame(VL))
      return vectorizeTree(E);
  }

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  return Gather(VL, VecTy);
}

Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
  Type *ScalarTy = VL0->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());

  if (E->NeedToGather) {
    setInsertPointAfterBundle(E->Scalars);
    return Gather(E->Scalars, VecTy);
  }

  unsigned Opcode = getSameOpcode(E->Scalars);

  switch (Opcode) {
  case Instruction::PHI: {
    // cast<>, not dyn_cast<>: the opcode guarantees this is a PHINode.
    PHINode *PH = cast<PHINode>(VL0);
    Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
    Builder.SetCurrentDebugLocation(PH->getDebugLoc());
    PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
    E->VectorizedValue = NewPhi;

    // PHINodes may have multiple entries from the same block. We want to
    // visit every block once.
    SmallSet<BasicBlock*, 4> VisitedBBs;

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      BasicBlock *IBB = PH->getIncomingBlock(i);

      if (!VisitedBBs.insert(IBB)) {
        NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
        continue;
      }

      // Prepare the operand vector.
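      // For a bundle {phi0, phi1} this collects, for the current incoming
      // block IBB, the scalars {phi0(IBB), phi1(IBB)}; vectorizing that list
      // yields the vector incoming value for IBB.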
      for (unsigned j = 0; j < E->Scalars.size(); ++j)
        Operands.push_back(cast<PHINode>(E->Scalars[j])->
                           getIncomingValueForBlock(IBB));

      Builder.SetInsertPoint(IBB->getTerminator());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      Value *Vec = vectorizeTree(Operands);
      NewPhi->addIncoming(Vec, IBB);
    }

    assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
           "Invalid number of incoming values");
    return NewPhi;
  }

  case Instruction::ExtractElement: {
    if (CanReuseExtract(E->Scalars)) {
      Value *V = VL0->getOperand(0);
      E->VectorizedValue = V;
      return V;
    }
    return Gather(E->Scalars, VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    ValueList INVL;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i)
      INVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));

    setInsertPointAfterBundle(E->Scalars);

    Value *InVec = vectorizeTree(INVL);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CastInst *CI = cast<CastInst>(VL0);
    Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::FCmp:
  case Instruction::ICmp: {
    ValueList LHSV, RHSV;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      LHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      RHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *L = vectorizeTree(LHSV);
    Value *R = vectorizeTree(RHSV);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
    Value *V;
    if (Opcode == Instruction::FCmp)
      V = Builder.CreateFCmp(P0, L, R);
    else
      V = Builder.CreateICmp(P0, L, R);

    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::Select: {
    ValueList TrueVec, FalseVec, CondVec;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      CondVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      TrueVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
      FalseVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(2));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *Cond = vectorizeTree(CondVec);
    Value *True = vectorizeTree(TrueVec);
    Value *False = vectorizeTree(FalseVec);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    Value *V = Builder.CreateSelect(Cond, True, False);
    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    ValueList LHSVL, RHSVL;
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative())
      reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL);
    else
      for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
        LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
        RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
      }

    setInsertPointAfterBundle(E->Scalars);

    Value *LHS = vectorizeTree(LHSVL);
    Value *RHS = vectorizeTree(RHSVL);

    if (LHS == RHS && isa<Instruction>(LHS)) {
      assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order");
    }

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
    Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
    E->VectorizedValue = V;
    ++NumVectorInstructions;

    if (Instruction *I = dyn_cast<Instruction>(V))
      return propagateMetadata(I, E->Scalars);

    return V;
  }
  case Instruction::Load: {
    // Loads are inserted at the head of the tree because we don't want to
    // sink them all the way down past store instructions.
    setInsertPointAfterBundle(E->Scalars);

    LoadInst *LI = cast<LoadInst>(VL0);
    Type *ScalarLoadTy = LI->getType();
    unsigned AS = LI->getPointerAddressSpace();

    Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));
    unsigned Alignment = LI->getAlignment();
    LI = Builder.CreateLoad(VecPtr);
    if (!Alignment)
      Alignment = DL->getABITypeAlignment(ScalarLoadTy);
    LI->setAlignment(Alignment);
    E->VectorizedValue = LI;
    ++NumVectorInstructions;
    return propagateMetadata(LI, E->Scalars);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(VL0);
    unsigned Alignment = SI->getAlignment();
    unsigned AS = SI->getPointerAddressSpace();

    ValueList ValueOp;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i)
      ValueOp.push_back(cast<StoreInst>(E->Scalars[i])->getValueOperand());

    setInsertPointAfterBundle(E->Scalars);

    Value *VecValue = vectorizeTree(ValueOp);
    Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));
    StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
    if (!Alignment)
      Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType());
    S->setAlignment(Alignment);
    E->VectorizedValue = S;
    ++NumVectorInstructions;
    return propagateMetadata(S, E->Scalars);
  }
  case Instruction::GetElementPtr: {
    setInsertPointAfterBundle(E->Scalars);

    ValueList Op0VL;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i)
      Op0VL.push_back(cast<GetElementPtrInst>(E->Scalars[i])->getOperand(0));

    Value *Op0 = vectorizeTree(Op0VL);

    std::vector<Value *> OpVecs;
    for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
         ++j) {
      ValueList OpVL;
      for (int i = 0, e = E->Scalars.size(); i < e; ++i)
        OpVL.push_back(cast<GetElementPtrInst>(E->Scalars[i])->getOperand(j));

      Value *OpVec = vectorizeTree(OpVL);
      OpVecs.push_back(OpVec);
    }

    Value *V = Builder.CreateGEP(Op0, OpVecs);
    E->VectorizedValue = V;
    ++NumVectorInstructions;

    if (Instruction *I = dyn_cast<Instruction>(V))
      return propagateMetadata(I, E->Scalars);

    return V;
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    setInsertPointAfterBundle(E->Scalars);
    Function *FI;
    Intrinsic::ID IID = Intrinsic::not_intrinsic;
    if (CI && (FI = CI->getCalledFunction())) {
      IID = (Intrinsic::ID) FI->getIntrinsicID();
    }
    std::vector<Value *> OpVecs;
    for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
      ValueList OpVL;
      // ctlz, cttz and powi are special intrinsics whose second argument is
      // a scalar. This argument should not be vectorized.
      if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) {
        CallInst *CEI = cast<CallInst>(E->Scalars[0]);
        OpVecs.push_back(CEI->getArgOperand(j));
        continue;
      }
      for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
        CallInst *CEI = cast<CallInst>(E->Scalars[i]);
        OpVL.push_back(CEI->getArgOperand(j));
      }

      Value *OpVec = vectorizeTree(OpVL);
      DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
      OpVecs.push_back(OpVec);
    }

    Module *M = F->getParent();
    Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
    Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) };
    Function *CF = Intrinsic::getDeclaration(M, ID, Tys);
    Value *V = Builder.CreateCall(CF, OpVecs);
    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::ShuffleVector: {
    ValueList LHSVL, RHSVL;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
    }
    setInsertPointAfterBundle(E->Scalars);

    Value *LHS = vectorizeTree(LHSVL);
    Value *RHS = vectorizeTree(RHSVL);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    // Create a vector of LHS op1 RHS.
    BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0);
    Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS);

    // Create a vector of LHS op2 RHS.
    Instruction *VL1 = cast<Instruction>(E->Scalars[1]);
    BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1);
    Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS);

    // Create an appropriate shuffle to take the alternate operations from
    // the two vectors.
    std::vector<Constant *> Mask(E->Scalars.size());
    unsigned e = E->Scalars.size();
    for (unsigned i = 0; i < e; ++i) {
      if (i & 1)
        Mask[i] = Builder.getInt32(e + i);
      else
        Mask[i] = Builder.getInt32(i);
    }

    Value *ShuffleMask = ConstantVector::get(Mask);

    Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
    E->VectorizedValue = V;
    ++NumVectorInstructions;
    if (Instruction *I = dyn_cast<Instruction>(V))
      return propagateMetadata(I, E->Scalars);

    return V;
  }
  default:
    llvm_unreachable("unknown inst");
  }
  return nullptr;
}

Value *BoUpSLP::vectorizeTree() {

  // All blocks must be scheduled before any instructions are inserted.
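  // Scheduling reorders the instructions inside each region; doing it up
  // front keeps the insertion points computed below valid while the new
  // vector instructions are emitted.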
  for (auto &BSIter : BlocksSchedules) {
    scheduleBlock(BSIter.second.get());
  }

  Builder.SetInsertPoint(F->getEntryBlock().begin());
  vectorizeTree(&VectorizableTree[0]);

  DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values.\n");

  // Extract all of the elements with the external uses.
  for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end();
       it != e; ++it) {
    Value *Scalar = it->Scalar;
    llvm::User *User = it->User;

    // Skip users that we already RAUWed. This happens when one instruction
    // has multiple uses of the same value.
    if (std::find(Scalar->user_begin(), Scalar->user_end(), User) ==
        Scalar->user_end())
      continue;
    assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");

    int Idx = ScalarToTreeEntry[Scalar];
    TreeEntry *E = &VectorizableTree[Idx];
    assert(!E->NeedToGather && "Extracting from a gather list");

    Value *Vec = E->VectorizedValue;
    assert(Vec && "Can't find vectorizable value");

    Value *Lane = Builder.getInt32(it->Lane);
    // Generate extracts for out-of-tree users.
    // Find the insertion point for the extractelement lane.
    if (isa<Instruction>(Vec)) {
      if (PHINode *PH = dyn_cast<PHINode>(User)) {
        for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
          if (PH->getIncomingValue(i) == Scalar) {
            Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
            Value *Ex = Builder.CreateExtractElement(Vec, Lane);
            CSEBlocks.insert(PH->getIncomingBlock(i));
            PH->setOperand(i, Ex);
          }
        }
      } else {
        Builder.SetInsertPoint(cast<Instruction>(User));
        Value *Ex = Builder.CreateExtractElement(Vec, Lane);
        CSEBlocks.insert(cast<Instruction>(User)->getParent());
        User->replaceUsesOfWith(Scalar, Ex);
      }
    } else {
      Builder.SetInsertPoint(F->getEntryBlock().begin());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      CSEBlocks.insert(&F->getEntryBlock());
      User->replaceUsesOfWith(Scalar, Ex);
    }

    DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
  }

  // For each vectorized value:
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];
      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      assert(Entry->VectorizedValue && "Can't find vectorizable value");

      Type *Ty = Scalar->getType();
      if (!Ty->isVoidTy()) {
#ifndef NDEBUG
        for (User *U : Scalar->users()) {
          DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");

          assert((ScalarToTreeEntry.count(U) ||
                  // It is legal to replace users in the ignorelist by undef.
                  (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), U) !=
                   UserIgnoreList.end())) &&
                 "Replacing out-of-tree value with undef");
        }
#endif
        Value *Undef = UndefValue::get(Ty);
        Scalar->replaceAllUsesWith(Undef);
      }
      DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
      cast<Instruction>(Scalar)->eraseFromParent();
    }
  }

  Builder.ClearInsertionPoint();

  return VectorizableTree[0].VectorizedValue;
}

void BoUpSLP::optimizeGatherSequence() {
  DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
        << " gather sequence instructions.\n");
  // LICM InsertElementInst sequences.
  for (SetVector<Instruction *>::iterator it = GatherSeq.begin(),
       e = GatherSeq.end(); it != e; ++it) {
    InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it);

    if (!Insert)
      continue;

    // Check if this block is inside a loop.
    Loop *L = LI->getLoopFor(Insert->getParent());
    if (!L)
      continue;

    // Check if it has a preheader.
    BasicBlock *PreHeader = L->getLoopPreheader();
    if (!PreHeader)
      continue;

    // If the vector or the element that we insert into it are
    // instructions that are defined in this basic block then we can't
    // hoist this instruction.
    Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
    Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
    if (CurrVec && L->contains(CurrVec))
      continue;
    if (NewElem && L->contains(NewElem))
      continue;

    // We can hoist this instruction. Move it to the pre-header.
    Insert->moveBefore(PreHeader->getTerminator());
  }

  // Make a list of all reachable blocks in our CSE queue.
  SmallVector<const DomTreeNode *, 8> CSEWorkList;
  CSEWorkList.reserve(CSEBlocks.size());
  for (BasicBlock *BB : CSEBlocks)
    if (DomTreeNode *N = DT->getNode(BB)) {
      assert(DT->isReachableFromEntry(N));
      CSEWorkList.push_back(N);
    }

  // Sort blocks by domination. This ensures we visit a block after all blocks
  // dominating it are visited.
  std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(),
                   [this](const DomTreeNode *A, const DomTreeNode *B) {
    return DT->properlyDominates(A, B);
  });

  // Perform an O(N^2) search over the gather sequences and merge identical
  // instructions. TODO: We can further optimize this scan if we split the
  // instructions into different buckets based on the insert lane.
  SmallVector<Instruction *, 16> Visited;
  for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
    assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
           "Worklist not sorted properly!");
    BasicBlock *BB = (*I)->getBlock();
    // For all instructions in blocks containing gather sequences:
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
      Instruction *In = it++;
      if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
        continue;

      // Check if we can replace this instruction with any of the
      // visited instructions.
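      // For example, if two bundles gathered the same scalars, the second
      // insertelement chain is identical to the first; when the first one's
      // block dominates this one, the duplicate is erased below.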
      for (SmallVectorImpl<Instruction *>::iterator v = Visited.begin(),
           ve = Visited.end(); v != ve; ++v) {
        if (In->isIdenticalTo(*v) &&
            DT->dominates((*v)->getParent(), In->getParent())) {
          In->replaceAllUsesWith(*v);
          In->eraseFromParent();
          In = nullptr;
          break;
        }
      }
      if (In) {
        assert(std::find(Visited.begin(), Visited.end(), In) == Visited.end());
        Visited.push_back(In);
      }
    }
  }
  CSEBlocks.clear();
  GatherSeq.clear();
}

// Groups the instructions into a bundle (which is then a single scheduling
// entity) and schedules instructions until the bundle gets ready.
bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL,
                                                 AliasAnalysis *AA) {
  if (isa<PHINode>(VL[0]))
    return true;

  // Initialize the instruction bundle.
  Instruction *OldScheduleEnd = ScheduleEnd;
  ScheduleData *PrevInBundle = nullptr;
  ScheduleData *Bundle = nullptr;
  bool ReSchedule = false;
  DEBUG(dbgs() << "SLP: bundle: " << *VL[0] << "\n");
  for (Value *V : VL) {
    extendSchedulingRegion(V);
    ScheduleData *BundleMember = getScheduleData(V);
    assert(BundleMember &&
           "no ScheduleData for bundle member (maybe not in same basic block)");
    if (BundleMember->IsScheduled) {
      // A bundle member was scheduled as a single instruction before and now
      // needs to be scheduled as part of the bundle. We just get rid of the
      // existing schedule.
      DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
            << " was already scheduled\n");
      ReSchedule = true;
    }
    assert(BundleMember->isSchedulingEntity() &&
           "bundle member already part of other bundle");
    if (PrevInBundle) {
      PrevInBundle->NextInBundle = BundleMember;
    } else {
      Bundle = BundleMember;
    }
    BundleMember->UnscheduledDepsInBundle = 0;
    Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;

    // Group the instructions to a bundle.
    BundleMember->FirstInBundle = Bundle;
    PrevInBundle = BundleMember;
  }
  if (ScheduleEnd != OldScheduleEnd) {
    // The scheduling region got new instructions at the lower end (or it is a
    // new region for the first bundle). This makes it necessary to
    // recalculate all dependencies.
    // It is seldom that this needs to be done a second time after adding the
    // initial bundle to the region.
    for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
      ScheduleData *SD = getScheduleData(I);
      SD->clearDependencies();
    }
    ReSchedule = true;
  }
  if (ReSchedule) {
    resetSchedule();
    initialFillReadyList(ReadyInsts);
  }

  DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
        << BB->getName() << "\n");

  calculateDependencies(Bundle, true, AA);

  // Now try to schedule the new bundle. As soon as the bundle is "ready" it
  // means that there are no cyclic dependencies and we can schedule it.
  // Note that it's important that we don't "schedule" the bundle yet (see
  // cancelScheduling).
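  // If the bundle never becomes ready, some member depends (through a chain
  // of in-region instructions) on another member of the same bundle, i.e.
  // vectorizing the bundle would create a cycle, and the caller gives up.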
  while (!Bundle->isReady() && !ReadyInsts.empty()) {

    ScheduleData *pickedSD = ReadyInsts.back();
    ReadyInsts.pop_back();

    if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) {
      schedule(pickedSD, ReadyInsts);
    }
  }
  return Bundle->isReady();
}

void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL) {
  if (isa<PHINode>(VL[0]))
    return;

  ScheduleData *Bundle = getScheduleData(VL[0]);
  DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
  assert(!Bundle->IsScheduled &&
         "Can't cancel bundle which is already scheduled");
  assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
         "tried to unbundle something which is not a bundle");

  // Un-bundle: make single instructions out of the bundle.
  ScheduleData *BundleMember = Bundle;
  while (BundleMember) {
    assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
    BundleMember->FirstInBundle = BundleMember;
    ScheduleData *Next = BundleMember->NextInBundle;
    BundleMember->NextInBundle = nullptr;
    BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
    if (BundleMember->UnscheduledDepsInBundle == 0) {
      ReadyInsts.insert(BundleMember);
    }
    BundleMember = Next;
  }
}

void BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V) {
  if (getScheduleData(V))
    return;
  Instruction *I = dyn_cast<Instruction>(V);
  assert(I && "bundle member must be an instruction");
  assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
  if (!ScheduleStart) {
    // It's the first instruction in the new region.
    initScheduleData(I, I->getNextNode(), nullptr, nullptr);
    ScheduleStart = I;
    ScheduleEnd = I->getNextNode();
    assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
    DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
    return;
  }
  // Search up and down at the same time, because we don't know if the new
  // instruction is above or below the existing scheduling region.
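  // The two iterators below advance in lock-step, one instruction per
  // direction per round, so the region grows by the minimal amount needed
  // to cover I whichever side it is on.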
  BasicBlock::reverse_iterator UpIter(ScheduleStart);
  BasicBlock::reverse_iterator UpperEnd = BB->rend();
  BasicBlock::iterator DownIter(ScheduleEnd);
  BasicBlock::iterator LowerEnd = BB->end();
  for (;;) {
    if (UpIter != UpperEnd) {
      if (&*UpIter == I) {
        initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
        ScheduleStart = I;
        DEBUG(dbgs() << "SLP: extend schedule region start to " << *I << "\n");
        return;
      }
      UpIter++;
    }
    if (DownIter != LowerEnd) {
      if (&*DownIter == I) {
        initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
                         nullptr);
        ScheduleEnd = I->getNextNode();
        assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
        DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n");
        return;
      }
      DownIter++;
    }
    assert((UpIter != UpperEnd || DownIter != LowerEnd) &&
           "instruction not found in block");
  }
}

void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
                                                Instruction *ToI,
                                                ScheduleData *PrevLoadStore,
                                                ScheduleData *NextLoadStore) {
  ScheduleData *CurrentLoadStore = PrevLoadStore;
  for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
    ScheduleData *SD = ScheduleDataMap[I];
    if (!SD) {
      // Allocate a new ScheduleData for the instruction.
      if (ChunkPos >= ChunkSize) {
        ScheduleDataChunks.push_back(
            llvm::make_unique<ScheduleData[]>(ChunkSize));
        ChunkPos = 0;
      }
      SD = &(ScheduleDataChunks.back()[ChunkPos++]);
      ScheduleDataMap[I] = SD;
      SD->Inst = I;
    }
    assert(!isInSchedulingRegion(SD) &&
           "new ScheduleData already in scheduling region");
    SD->init(SchedulingRegionID);

    if (I->mayReadOrWriteMemory()) {
      // Update the linked list of memory accessing instructions.
      if (CurrentLoadStore) {
        CurrentLoadStore->NextLoadStore = SD;
      } else {
        FirstLoadStoreInRegion = SD;
      }
      CurrentLoadStore = SD;
    }
  }
  if (NextLoadStore) {
    if (CurrentLoadStore)
      CurrentLoadStore->NextLoadStore = NextLoadStore;
  } else {
    LastLoadStoreInRegion = CurrentLoadStore;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static AliasAnalysis::Location getLocation(Instruction *I, AliasAnalysis *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return AA->getLocation(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return AA->getLocation(LI);
  return AliasAnalysis::Location();
}

void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
                                                     bool InsertInReadyList,
                                                     AliasAnalysis *AA) {
  assert(SD->isSchedulingEntity());

  SmallVector<ScheduleData *, 10> WorkList;
  WorkList.push_back(SD);

  while (!WorkList.empty()) {
    ScheduleData *SD = WorkList.back();
    WorkList.pop_back();

    ScheduleData *BundleMember = SD;
    while (BundleMember) {
      assert(isInSchedulingRegion(BundleMember));
      if (!BundleMember->hasValidDependencies()) {

        DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n");
        BundleMember->Dependencies = 0;
        BundleMember->resetUnscheduledDeps();

        // Handle def-use chain dependencies.
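        // Dependencies run from an instruction to its users: the scheduler
        // places instructions bottom-up (see scheduleBlock), so an
        // instruction becomes ready only after all of its in-region users
        // have been scheduled.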
        for (User *U : BundleMember->Inst->users()) {
          if (isa<Instruction>(U)) {
            ScheduleData *UseSD = getScheduleData(U);
            if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
              BundleMember->Dependencies++;
              ScheduleData *DestBundle = UseSD->FirstInBundle;
              if (!DestBundle->IsScheduled) {
                BundleMember->incrementUnscheduledDeps(1);
              }
              if (!DestBundle->hasValidDependencies()) {
                WorkList.push_back(DestBundle);
              }
            }
          } else {
            // I'm not sure if this can ever happen. But we need to be safe.
            // This makes the instruction/bundle never ready, which eventually
            // disables vectorization.
            BundleMember->Dependencies++;
            BundleMember->incrementUnscheduledDeps(1);
          }
        }

        // Handle the memory dependencies.
        ScheduleData *DepDest = BundleMember->NextLoadStore;
        if (DepDest) {
          AliasAnalysis::Location SrcLoc = getLocation(BundleMember->Inst, AA);
          bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();

          while (DepDest) {
            assert(isInSchedulingRegion(DepDest));
            if (SrcMayWrite || DepDest->Inst->mayWriteToMemory()) {
              AliasAnalysis::Location DstLoc = getLocation(DepDest->Inst, AA);
              if (!SrcLoc.Ptr || !DstLoc.Ptr || AA->alias(SrcLoc, DstLoc)) {
                DepDest->MemoryDependencies.push_back(BundleMember);
                BundleMember->Dependencies++;
                ScheduleData *DestBundle = DepDest->FirstInBundle;
                if (!DestBundle->IsScheduled) {
                  BundleMember->incrementUnscheduledDeps(1);
                }
                if (!DestBundle->hasValidDependencies()) {
                  WorkList.push_back(DestBundle);
                }
              }
            }
            DepDest = DepDest->NextLoadStore;
          }
        }
      }
      BundleMember = BundleMember->NextInBundle;
    }
    if (InsertInReadyList && SD->isReady()) {
      ReadyInsts.push_back(SD);
      DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n");
    }
  }
}

void BoUpSLP::BlockScheduling::resetSchedule() {
  assert(ScheduleStart &&
         "tried to reset schedule on block which has not been scheduled");
  for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
    ScheduleData *SD = getScheduleData(I);
    assert(isInSchedulingRegion(SD));
    SD->IsScheduled = false;
    SD->resetUnscheduledDeps();
  }
  ReadyInsts.clear();
}

void BoUpSLP::scheduleBlock(BlockScheduling *BS) {

  if (!BS->ScheduleStart)
    return;

  DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");

  BS->resetSchedule();

  // For the real scheduling we use a more sophisticated ready-list: it is
  // sorted by the original instruction location. This lets the final schedule
  // be as close as possible to the original instruction order.
  struct ScheduleDataCompare {
    bool operator()(ScheduleData *SD1, ScheduleData *SD2) {
      return SD2->SchedulingPriority < SD1->SchedulingPriority;
    }
  };
  std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;

  // Ensure that all dependency data is updated and fill the ready-list with
  // initial instructions.
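  // SchedulingPriority below is the bundle's index in the original top-down
  // order; the comparator above makes the ready-list yield the entry that
  // was lowest in the block first, matching the bottom-up placement loop.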
  int Idx = 0;
  int NumToSchedule = 0;
  for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
       I = I->getNextNode()) {
    ScheduleData *SD = BS->getScheduleData(I);
    assert(
        SD->isPartOfBundle() == (ScalarToTreeEntry.count(SD->Inst) != 0) &&
        "scheduler and vectorizer have different opinion on what is a bundle");
    SD->FirstInBundle->SchedulingPriority = Idx++;
    if (SD->isSchedulingEntity()) {
      BS->calculateDependencies(SD, false, AA);
      NumToSchedule++;
    }
  }
  BS->initialFillReadyList(ReadyInsts);

  Instruction *LastScheduledInst = BS->ScheduleEnd;

  // Do the "real" scheduling.
  while (!ReadyInsts.empty()) {
    ScheduleData *picked = *ReadyInsts.begin();
    ReadyInsts.erase(ReadyInsts.begin());

    // Move the scheduled instruction(s) to their dedicated places, if not
    // there yet.
    ScheduleData *BundleMember = picked;
    while (BundleMember) {
      Instruction *pickedInst = BundleMember->Inst;
      if (LastScheduledInst->getNextNode() != pickedInst) {
        BS->BB->getInstList().remove(pickedInst);
        BS->BB->getInstList().insert(LastScheduledInst, pickedInst);
      }
      LastScheduledInst = pickedInst;
      BundleMember = BundleMember->NextInBundle;
    }

    BS->schedule(picked, ReadyInsts);
    NumToSchedule--;
  }
  assert(NumToSchedule == 0 && "could not schedule all instructions");

  // Avoid duplicate scheduling of the block.
  BS->ScheduleStart = nullptr;
}

/// The SLPVectorizer Pass.
struct SLPVectorizer : public FunctionPass {
  typedef SmallVector<StoreInst *, 8> StoreList;
  typedef MapVector<Value *, StoreList> StoreListMap;

  /// Pass identification, replacement for typeid
  static char ID;

  explicit SLPVectorizer() : FunctionPass(ID) {
    initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
  }

  ScalarEvolution *SE;
  const DataLayout *DL;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;

  bool runOnFunction(Function &F) override {
    if (skipOptnoneFunction(F))
      return false;

    SE = &getAnalysis<ScalarEvolution>();
    DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
    DL = DLP ? &DLP->getDataLayout() : nullptr;
    TTI = &getAnalysis<TargetTransformInfo>();
    TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
    AA = &getAnalysis<AliasAnalysis>();
    LI = &getAnalysis<LoopInfo>();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

    StoreRefs.clear();
    bool Changed = false;

    // If the target claims to have no vector registers don't attempt
    // vectorization.
    if (!TTI->getNumberOfRegisters(true))
      return false;

    // Must have DataLayout. We can't require it because some tests run w/o
    // triple.
    if (!DL)
      return false;

    // Don't vectorize when the attribute NoImplicitFloat is used.
    if (F.hasFnAttribute(Attribute::NoImplicitFloat))
      return false;

    DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");

    // Use the bottom up slp vectorizer to construct chains that start with
    // store instructions.
    BoUpSLP R(&F, SE, DL, TTI, TLI, AA, LI, DT);

    // Scan the blocks in the function in post order.
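    // Post order visits a block only after its successors; e.g. for a simple
    // chain entry -> body -> exit, the visit order is exit, body, entry.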
    for (po_iterator<BasicBlock*> it = po_begin(&F.getEntryBlock()),
         e = po_end(&F.getEntryBlock()); it != e; ++it) {
      BasicBlock *BB = *it;
      // Vectorize trees that end at stores.
      if (unsigned count = collectStores(BB, R)) {
        (void)count;
        DEBUG(dbgs() << "SLP: Found " << count << " stores to vectorize.\n");
        Changed |= vectorizeStoreChains(R);
      }

      // Vectorize trees that end at reductions.
      Changed |= vectorizeChainsInBlock(BB, R);
    }

    if (Changed) {
      R.optimizeGatherSequence();
      DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
      DEBUG(verifyFunction(F));
    }
    return Changed;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    FunctionPass::getAnalysisUsage(AU);
    AU.addRequired<ScalarEvolution>();
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<TargetTransformInfo>();
    AU.addRequired<LoopInfo>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfo>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.setPreservesCFG();
  }

private:

  /// \brief Collect memory references and sort them according to their base
  /// object. We sort the stores to their base objects to reduce the cost of
  /// the quadratic search on the stores. TODO: We can further reduce this
  /// cost if we flush the chain creation every time we run into a memory
  /// barrier.
  unsigned collectStores(BasicBlock *BB, BoUpSLP &R);

  /// \brief Try to vectorize a chain that starts at two arithmetic instrs.
  bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R);

  /// \brief Try to vectorize a list of operands.
  /// \param BuildVector A list of users to ignore for the purpose of
  ///                    scheduling and that don't need extracting.
  /// \returns true if a value was vectorized.
  bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
                          ArrayRef<Value *> BuildVector = None,
                          bool allowReorder = false);

  /// \brief Try to vectorize a chain that may start at the operands of \p V.
  bool tryToVectorize(BinaryOperator *V, BoUpSLP &R);

  /// \brief Vectorize the stores that were collected in StoreRefs.
  bool vectorizeStoreChains(BoUpSLP &R);

  /// \brief Scan the basic block and look for patterns that are likely to
  /// start a vectorization chain.
  bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R);

  bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold,
                           BoUpSLP &R);

  bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold,
                       BoUpSLP &R);
private:
  StoreListMap StoreRefs;
};

/// \brief Check that the Values in the slice of the VL array still exist in
/// the WeakVH array.
/// Vectorization of part of the VL array may cause later values in the VL
/// array to become invalid. We track when this has happened in the WeakVH
/// array.
static bool hasValueBeenRAUWed(ArrayRef<Value *> &VL,
                               SmallVectorImpl<WeakVH> &VH,
                               unsigned SliceBegin,
                               unsigned SliceSize) {
  for (unsigned i = SliceBegin; i < SliceBegin + SliceSize; ++i)
    if (VH[i] != VL[i])
      return true;

  return false;
}

bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
                                        int CostThreshold, BoUpSLP &R) {
  unsigned ChainLen = Chain.size();
  DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
        << "\n");
  Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType();
  unsigned Sz = DL->getTypeSizeInBits(StoreTy);
  unsigned VF = MinVecRegSize / Sz;

  if (!isPowerOf2_32(Sz) || VF < 2)
    return false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());

  bool Changed = false;
  // Look for profitable vectorizable trees at all offsets, starting at zero.
  for (unsigned i = 0, e = ChainLen; i < e; ++i) {
    if (i + VF > e)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
          << "\n");
    ArrayRef<Value *> Operands = Chain.slice(i, VF);

    R.buildTree(Operands);

    int Cost = R.getTreeCost();

    DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
    if (Cost < CostThreshold) {
      DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
                                    int costThreshold, BoUpSLP &R) {
  SetVector<Value *> Heads, Tails;
  SmallDenseMap<Value *, Value *> ConsecutiveChain;

  // We may run into multiple chains that merge into a single chain. We mark
  // the stores that we vectorized so that we don't visit the same store twice.
  BoUpSLP::ValueSet VectorizedStores;
  bool Changed = false;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
    for (unsigned j = 0; j < e; ++j) {
      if (i == j)
        continue;

      if (R.isConsecutiveAccess(Stores[i], Stores[j])) {
        Tails.insert(Stores[j]);
        Heads.insert(Stores[i]);
        ConsecutiveChain[Stores[i]] = Stores[j];
      }
    }
  }

  // For stores that start but don't end a link in the chain:
  for (SetVector<Value *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and try
    // to vectorize it.
    BoUpSLP::ValueList Operands;
    Value *I = *it;
    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (VectorizedStores.count(I))
        break;
      Operands.push_back(I);
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    bool Vectorized = vectorizeStoreChain(Operands, costThreshold, R);

    // Mark the vectorized stores so that we don't vectorize them again.
    if (Vectorized)
      VectorizedStores.insert(Operands.begin(), Operands.end());
    Changed |= Vectorized;
  }

  return Changed;
}


unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
  unsigned count = 0;
  StoreRefs.clear();
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    StoreInst *SI = dyn_cast<StoreInst>(it);
    if (!SI)
      continue;

    // Don't touch volatile stores.
    if (!SI->isSimple())
      continue;

    // Check that the pointer points to scalars.
    Type *Ty = SI->getValueOperand()->getType();
    if (Ty->isAggregateType() || Ty->isVectorTy())
      continue;

    // Find the base pointer.
    Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL);

    // Save the store locations.
    StoreRefs[Ptr].push_back(SI);
    count++;
  }
  return count;
}

bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
  if (!A || !B)
    return false;
  Value *VL[] = { A, B };
  return tryToVectorizeList(VL, R, None, true);
}

bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
                                       ArrayRef<Value *> BuildVector,
                                       bool allowReorder) {
  if (VL.size() < 2)
    return false;

  DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size() <<
        ".\n");

  // Check that all of the parts are scalar instructions of the same type.
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;

  unsigned Opcode0 = I0->getOpcode();

  Type *Ty0 = I0->getType();
  unsigned Sz = DL->getTypeSizeInBits(Ty0);
  unsigned VF = MinVecRegSize / Sz;

  for (int i = 0, e = VL.size(); i < e; ++i) {
    Type *Ty = VL[i]->getType();
    if (Ty->isAggregateType() || Ty->isVectorTy())
      return false;
    Instruction *Inst = dyn_cast<Instruction>(VL[i]);
    if (!Inst || Inst->getOpcode() != Opcode0)
      return false;
  }

  bool Changed = false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());

  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    unsigned OpsWidth = 0;

    if (i + VF > e)
      OpsWidth = e - i;
    else
      OpsWidth = VF;

    if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(VL, TrackValues, i, OpsWidth))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
          << "\n");
    ArrayRef<Value *> Ops = VL.slice(i, OpsWidth);

    ArrayRef<Value *> BuildVectorSlice;
    if (!BuildVector.empty())
      BuildVectorSlice = BuildVector.slice(i, OpsWidth);

    R.buildTree(Ops, BuildVectorSlice);
    // TODO: check if we can allow reordering also for other cases than
    // tryToVectorizePair().
    if (allowReorder && R.shouldReorder()) {
      assert(Ops.size() == 2);
      assert(BuildVectorSlice.empty());
      Value *ReorderedOps[] = { Ops[1], Ops[0] };
      R.buildTree(ReorderedOps, None);
    }
    int Cost = R.getTreeCost();

    if (Cost < -SLPCostThreshold) {
      DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
      Value *VectorizedRoot = R.vectorizeTree();

      // Reconstruct the build vector by extracting the vectorized root. This
      // way we handle the case where some elements of the vector are
      // undefined.
      //  (return (insertelt <4 x i32> (insertelt undef (opd0) 0) (opd1) 2))
      if (!BuildVectorSlice.empty()) {
        // The insert point is the last build vector instruction. The
        // vectorized root will precede it. This guarantees that we get an
        // instruction. The vectorized tree could have been constant folded.
        Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back());
        unsigned VecIdx = 0;
        for (auto &V : BuildVectorSlice) {
          IRBuilder<true, NoFolder> Builder(
              ++BasicBlock::iterator(InsertAfter));
          InsertElementInst *IE = cast<InsertElementInst>(V);
          Instruction *Extract = cast<Instruction>(Builder.CreateExtractElement(
              VectorizedRoot, Builder.getInt32(VecIdx++)));
          IE->setOperand(1, Extract);
          IE->removeFromParent();
          IE->insertAfter(Extract);
          InsertAfter = IE;
        }
      }
      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
  if (!V)
    return false;

  // Try to vectorize V.
  if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R))
    return true;

  BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0));
  BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1));
  // Try to skip B.
  if (B && B->hasOneUse()) {
    BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
    BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
    if (tryToVectorizePair(A, B0, R)) {
      return true;
    }
    if (tryToVectorizePair(A, B1, R)) {
      return true;
    }
  }

  // Try to skip A.
  if (A && A->hasOneUse()) {
    BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
    BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
    if (tryToVectorizePair(A0, B, R)) {
      return true;
    }
    if (tryToVectorizePair(A1, B, R)) {
      return true;
    }
  }
  return false;
}

/// \brief Generate a shuffle mask to be used in a reduction tree.
///
/// \param VecLen The length of the vector to be reduced.
/// \param NumEltsToRdx The number of elements that should be reduced in the
///        vector.
/// \param IsPairwise Whether the reduction is a pairwise or splitting
///        reduction. A pairwise reduction will generate a mask of
///        <0,2,...> or <1,3,..> while a splitting reduction will generate
///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
/// \param IsLeft True will generate a mask of even elements, odd otherwise.
static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
                                   bool IsPairwise, bool IsLeft,
                                   IRBuilder<> &Builder) {
  assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");

  SmallVector<Constant *, 32> ShuffleMask(
      VecLen, UndefValue::get(Builder.getInt32Ty()));

  if (IsPairwise)
    // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
  else
    // Move the upper half of the vector to the lower half.
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);

  return ConstantVector::get(ShuffleMask);
}


/// Model horizontal reductions.
///
/// A horizontal reduction is a tree of reduction operations (currently add and
/// fadd) that has operations that can be put into a vector as its leaves.
/// For example, this tree:
///
///   mul mul mul mul
///    \  /    \  /
///     +       +
///      \     /
///         +
///
/// This tree has "mul" as its reduced values and "+" as its reduction
/// operations. A reduction might be feeding into a store or a binary operation
/// feeding a phi.
///
///     ...
///     \  /
///      +
///      |
///   phi +=
///
/// Or:
///
///     ...
///     \  /
///      +
///      |
///    *p =
///
class HorizontalReduction {
  SmallVector<Value *, 16> ReductionOps;
  SmallVector<Value *, 32> ReducedVals;

  BinaryOperator *ReductionRoot;
  PHINode *ReductionPHI;

  /// The opcode of the reduction.
  unsigned ReductionOpcode;
  /// The opcode of the values we perform a reduction on.
  unsigned ReducedValueOpcode;
  /// The width of one full horizontal reduction operation.
  unsigned ReduxWidth;
  /// Should we model this reduction as a pairwise reduction tree or a tree
  /// that splits the vector in halves and adds those halves.
  bool IsPairwiseReduction;

public:
  HorizontalReduction()
      : ReductionRoot(nullptr), ReductionPHI(nullptr), ReductionOpcode(0),
        ReducedValueOpcode(0), ReduxWidth(0), IsPairwiseReduction(false) {}

  /// \brief Try to find a reduction tree.
  bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B,
                                 const DataLayout *DL) {
    assert((!Phi ||
            std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) &&
           "This phi needs to use the binary operator");

    // We could have an initial reduction that is not an add.
    //  r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted in the first '+'.
    if (Phi) {
      if (B->getOperand(0) == Phi) {
        Phi = nullptr;
        B = dyn_cast<BinaryOperator>(B->getOperand(1));
      } else if (B->getOperand(1) == Phi) {
        Phi = nullptr;
        B = dyn_cast<BinaryOperator>(B->getOperand(0));
      }
    }

    if (!B)
      return false;

    Type *Ty = B->getType();
    if (Ty->isVectorTy())
      return false;

    ReductionOpcode = B->getOpcode();
    ReducedValueOpcode = 0;
    ReduxWidth = MinVecRegSize / DL->getTypeSizeInBits(Ty);
    ReductionRoot = B;
    ReductionPHI = Phi;

    if (ReduxWidth < 4)
      return false;

    // We currently only support adds.
    if (ReductionOpcode != Instruction::Add &&
        ReductionOpcode != Instruction::FAdd)
      return false;

    // Post-order traverse the reduction tree starting at B. We only handle
    // true trees containing only binary operators.
    SmallVector<std::pair<BinaryOperator *, unsigned>, 32> Stack;
    Stack.push_back(std::make_pair(B, 0));
    while (!Stack.empty()) {
      BinaryOperator *TreeN = Stack.back().first;
      unsigned EdgeToVist = Stack.back().second++;
      bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;

      // Only handle trees in the current basic block.
      if (TreeN->getParent() != B->getParent())
        return false;

      // Each tree node needs to have one user except for the ultimate
      // reduction.
      if (!TreeN->hasOneUse() && TreeN != B)
        return false;

      // Post-order visit.
3276 if (EdgeToVist == 2 || IsReducedValue) { 3277 if (IsReducedValue) { 3278 // Make sure that the opcodes of the operations that we are going to 3279 // reduce match. 3280 if (!ReducedValueOpcode) 3281 ReducedValueOpcode = TreeN->getOpcode(); 3282 else if (ReducedValueOpcode != TreeN->getOpcode()) 3283 return false; 3284 ReducedVals.push_back(TreeN); 3285 } else { 3286 // We need to be able to reassociate the adds. 3287 if (!TreeN->isAssociative()) 3288 return false; 3289 ReductionOps.push_back(TreeN); 3290 } 3291 // Retract. 3292 Stack.pop_back(); 3293 continue; 3294 } 3295 3296 // Visit left or right. 3297 Value *NextV = TreeN->getOperand(EdgeToVist); 3298 BinaryOperator *Next = dyn_cast<BinaryOperator>(NextV); 3299 if (Next) 3300 Stack.push_back(std::make_pair(Next, 0)); 3301 else if (NextV != Phi) 3302 return false; 3303 } 3304 return true; 3305 } 3306 3307 /// \brief Attempt to vectorize the tree found by 3308 /// matchAssociativeReduction. 3309 bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) { 3310 if (ReducedVals.empty()) 3311 return false; 3312 3313 unsigned NumReducedVals = ReducedVals.size(); 3314 if (NumReducedVals < ReduxWidth) 3315 return false; 3316 3317 Value *VectorizedTree = nullptr; 3318 IRBuilder<> Builder(ReductionRoot); 3319 FastMathFlags Unsafe; 3320 Unsafe.setUnsafeAlgebra(); 3321 Builder.SetFastMathFlags(Unsafe); 3322 unsigned i = 0; 3323 3324 for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) { 3325 V.buildTree(makeArrayRef(&ReducedVals[i], ReduxWidth), ReductionOps); 3326 3327 // Estimate cost. 3328 int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]); 3329 if (Cost >= -SLPCostThreshold) 3330 break; 3331 3332 DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost 3333 << ". (HorRdx)\n"); 3334 3335 // Vectorize a tree. 3336 DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc(); 3337 Value *VectorizedRoot = V.vectorizeTree(); 3338 3339 // Emit a reduction. 3340 Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder); 3341 if (VectorizedTree) { 3342 Builder.SetCurrentDebugLocation(Loc); 3343 VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree, 3344 ReducedSubTree, "bin.rdx"); 3345 } else 3346 VectorizedTree = ReducedSubTree; 3347 } 3348 3349 if (VectorizedTree) { 3350 // Finish the reduction. 3351 for (; i < NumReducedVals; ++i) { 3352 Builder.SetCurrentDebugLocation( 3353 cast<Instruction>(ReducedVals[i])->getDebugLoc()); 3354 VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree, 3355 ReducedVals[i]); 3356 } 3357 // Update users. 3358 if (ReductionPHI) { 3359 assert(ReductionRoot && "Need a reduction operation"); 3360 ReductionRoot->setOperand(0, VectorizedTree); 3361 ReductionRoot->setOperand(1, ReductionPHI); 3362 } else 3363 ReductionRoot->replaceAllUsesWith(VectorizedTree); 3364 } 3365 return VectorizedTree != nullptr; 3366 } 3367 3368 private: 3369 3370 /// \brief Calcuate the cost of a reduction. 3371 int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) { 3372 Type *ScalarTy = FirstReducedVal->getType(); 3373 Type *VecTy = VectorType::get(ScalarTy, ReduxWidth); 3374 3375 int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true); 3376 int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false); 3377 3378 IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost; 3379 int VecReduxCost = IsPairwiseReduction ? 

    int ScalarReduxCost =
        ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, VecTy);

    DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
                 << " for reduction that starts with " << *FirstReducedVal
                 << " (It is a "
                 << (IsPairwiseReduction ? "pairwise" : "splitting")
                 << " reduction)\n");

    return VecReduxCost - ScalarReduxCost;
  }

  static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L,
                            Value *R, const Twine &Name = "") {
    if (Opcode == Instruction::FAdd)
      return Builder.CreateFAdd(L, R, Name);
    return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name);
  }

  /// \brief Emit a horizontal reduction of the vectorized value.
  Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) {
    assert(VectorizedValue && "Need to have a vectorized tree node");
    Instruction *ValToReduce = dyn_cast<Instruction>(VectorizedValue);
    assert(isPowerOf2_32(ReduxWidth) &&
           "We only handle power-of-two reductions for now");

    Value *TmpVec = ValToReduce;
    for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
      if (IsPairwiseReduction) {
        Value *LeftMask =
            createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
        Value *RightMask =
            createRdxShuffleMask(ReduxWidth, i, true, false, Builder);

        Value *LeftShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), LeftMask,
            "rdx.shuf.l");
        Value *RightShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), RightMask,
            "rdx.shuf.r");
        TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf,
                             "bin.rdx");
      } else {
        Value *UpperHalf =
            createRdxShuffleMask(ReduxWidth, i, false, false, Builder);
        Value *Shuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf");
        TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf,
                             "bin.rdx");
      }
    }

    // The result is in the first element of the vector.
    return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
  }
};
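
// For illustration, emitReduction's splitting form reduces a <4 x float> %v
// with IR along these lines (a sketch with abbreviated names, not emitted
// verbatim):
//   %s0 = shufflevector <4 x float> %v, <4 x float> undef,
//                       <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
//   %r0 = fadd <4 x float> %v, %s0
//   %s1 = shufflevector <4 x float> %r0, <4 x float> undef,
//                       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
//   %r1 = fadd <4 x float> %r0, %s1
//   %res = extractelement <4 x float> %r1, i32 0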

/// \brief Recognize construction of vectors like
///  %ra = insertelement <4 x float> undef, float %s0, i32 0
///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
///
/// Returns true if it matches
///
static bool findBuildVector(InsertElementInst *FirstInsertElem,
                            SmallVectorImpl<Value *> &BuildVector,
                            SmallVectorImpl<Value *> &BuildVectorOpds) {
  if (!isa<UndefValue>(FirstInsertElem->getOperand(0)))
    return false;

  InsertElementInst *IE = FirstInsertElem;
  while (true) {
    BuildVector.push_back(IE);
    BuildVectorOpds.push_back(IE->getOperand(1));

    if (IE->use_empty())
      return false;

    InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back());
    if (!NextUse)
      return true;

    // If this isn't the final use, make sure the next insertelement is the
    // only use. It's OK if the final constructed vector is used multiple
    // times.
    if (!IE->hasOneUse())
      return false;

    IE = NextUse;
  }

  return false;
}

static bool PhiTypeSorterFunc(Value *V, Value *V2) {
  return V->getType() < V2->getType();
}

bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallSet<Value *, 16> VisitedInstrs;

  bool HaveVectorizedPhiNodes = true;
  while (HaveVectorizedPhiNodes) {
    HaveVectorizedPhiNodes = false;

    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (BasicBlock::iterator instr = BB->begin(), ie = BB->end(); instr != ie;
         ++instr) {
      PHINode *P = dyn_cast<PHINode>(instr);
      if (!P)
        break;

      if (!VisitedInstrs.count(P))
        Incoming.push_back(P);
    }

    // Sort by type.
    std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);

    // Try to vectorize elements based on their type.
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
                                           E = Incoming.end();
         IncIt != E;) {

      // Look for the next elements with the same type.
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs ("
                   << NumElts << ")\n");
      if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R)) {
        // Success. Start over because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
    // We may go through BB multiple times so skip the ones we have already
    // checked.
    if (!VisitedInstrs.insert(it))
      continue;

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;
      Value *Rdx =
          (P->getIncomingBlock(0) == BB
               ? (P->getIncomingValue(0))
               : (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1)
                                               : nullptr));
      // Check if this is a binary operator.
      BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx);
      if (!BI)
        continue;

      // Try to match and vectorize a horizontal reduction.
      HorizontalReduction HorRdx;
      if (ShouldVectorizeHor &&
          HorRdx.matchAssociativeReduction(P, BI, DL) &&
          HorRdx.tryToReduce(R, TTI)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
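
      // No horizontal reduction matched (or the option is off); fall back to
      // trying to vectorize the tree rooted at the operand of the binary
      // operator that is not the PHI itself.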
      Value *Inst = BI->getOperand(0);
      if (Inst == P)
        Inst = BI->getOperand(1);

      if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      continue;
    }

    // Try to vectorize horizontal reductions feeding into a store.
    if (ShouldStartVectorizeHorAtStore)
      if (StoreInst *SI = dyn_cast<StoreInst>(it))
        if (BinaryOperator *BinOp =
                dyn_cast<BinaryOperator>(SI->getValueOperand())) {
          HorizontalReduction HorRdx;
          if (((HorRdx.matchAssociativeReduction(nullptr, BinOp, DL) &&
                HorRdx.tryToReduce(R, TTI)) ||
               tryToVectorize(BinOp, R))) {
            Changed = true;
            it = BB->begin();
            e = BB->end();
            continue;
          }
        }

    // Try to vectorize trees that start at compare instructions.
    if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
      if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
        Changed = true;
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        it = BB->begin();
        e = BB->end();
        continue;
      }

      for (int i = 0; i < 2; ++i) {
        if (BinaryOperator *BI = dyn_cast<BinaryOperator>(CI->getOperand(i))) {
          if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) {
            Changed = true;
            // We would like to start over since some instructions are deleted
            // and the iterator may become invalid.
            it = BB->begin();
            e = BB->end();
          }
        }
      }
      continue;
    }

    // Try to vectorize trees that start at insertelement instructions.
    if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) {
      SmallVector<Value *, 16> BuildVector;
      SmallVector<Value *, 16> BuildVectorOpds;
      if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds))
        continue;

      // Vectorize starting with the build vector operands ignoring the
      // BuildVector instructions for the purpose of scheduling and user
      // extraction.
      if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }

      continue;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = StoreRefs.begin(), e = StoreRefs.end();
       it != e; ++it) {
    if (it->second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                 << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len),
                                 -SLPCostThreshold, R);
    }
  }
  return Changed;
}

} // end anonymous namespace

char SLPVectorizer::ID = 0;
static const char lv_name[] = "SLP Vectorizer";
INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

namespace llvm {
Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
}
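
// For reference, a minimal way to run this pass from C++ (a sketch; the usual
// route is `opt -slp-vectorizer` on the command line):
//   PassManager PM;                      // llvm/PassManager.h
//   PM.add(createSLPVectorizerPass());
//   PM.run(M);                           // M is the Module to optimize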