//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
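// Illustrative example (not part of the original commentary): given four
// consecutive scalar stores
//
//   store float %a0, float* %p0
//   store float %a1, float* %p1    ; %p1 == %p0 + 1
//   store float %a2, float* %p2    ; %p2 == %p0 + 2
//   store float %a3, float* %p3    ; %p3 == %p0 + 3
//
// the pass walks the use-def chains feeding %a0..%a3 bottom-up and, if the
// cost model agrees, replaces the group with a single <4 x float> store.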
#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/VectorUtils.h"
#include <algorithm>
#include <map>

using namespace llvm;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(false), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

namespace {

static const unsigned MinVecRegSize = 128;

static const unsigned RecursionMaxDepth = 12;

/// A helper class for numbering instructions in multiple blocks.
/// Numbers start at zero for each basic block.
struct BlockNumbering {

  BlockNumbering(BasicBlock *Bb) : BB(Bb), Valid(false) {}

  void numberInstructions() {
    unsigned Loc = 0;
    InstrIdx.clear();
    InstrVec.clear();
    // Number the instructions in the block.
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
      InstrIdx[it] = Loc++;
      InstrVec.push_back(it);
      assert(InstrVec[InstrIdx[it]] == it && "Invalid allocation");
    }
    Valid = true;
  }

  int getIndex(Instruction *I) {
    assert(I->getParent() == BB && "Invalid instruction");
    if (!Valid)
      numberInstructions();
    assert(InstrIdx.count(I) && "Unknown instruction");
    return InstrIdx[I];
  }

  Instruction *getInstruction(unsigned loc) {
    if (!Valid)
      numberInstructions();
    assert(InstrVec.size() > loc && "Invalid Index");
    return InstrVec[loc];
  }

  void forget() { Valid = false; }

private:
  /// The block we are numbering.
  BasicBlock *BB;
  /// Is the block numbered.
  bool Valid;
  /// Maps instructions to numbers and back.
  SmallDenseMap<Instruction *, int> InstrIdx;
  /// Maps integers to Instructions.
  SmallVector<Instruction *, 32> InstrVec;
};

/// \returns the parent basic block if all of the instructions in \p VL
/// are in the same block or null otherwise.
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return nullptr;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return nullptr;

    if (BB != I->getParent())
      return nullptr;
  }
  return BB;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    if (!isa<Constant>(VL[i]))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode())
      return 0;
  }
  return Opcode;
}

/// \returns \p I after propagating metadata from \p VL.
static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (unsigned i = 0, n = Metadata.size(); i != n; ++i) {
    unsigned Kind = Metadata[i].first;
    MDNode *MD = Metadata[i].second;

    for (int i = 1, e = VL.size(); MD && i != e; i++) {
      Instruction *I = cast<Instruction>(VL[i]);
      MDNode *IMD = I->getMetadata(Kind);

      switch (Kind) {
      default:
        MD = nullptr; // Remove unknown metadata
        break;
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      }
    }
    I->setMetadata(Kind, MD);
  }
  return I;
}
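// Illustrative note (not from the original source): when a bundle of four
// scalar loads each carrying !tbaa metadata is replaced by one vector load,
// propagateMetadata() keeps the most generic TBAA tag that covers all four;
// any metadata kind other than !tbaa or !fpmath is dropped from the result.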
/// \returns The type that all of the values in \p VL have or null if there
/// are different types.
static Type* getSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return nullptr;

  return Ty;
}

/// \returns True if the ExtractElement instructions in VL can be vectorized
/// to use the original vector.
static bool CanReuseExtract(ArrayRef<Value *> VL) {
  assert(Instruction::ExtractElement == getSameOpcode(VL) && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *VL0 = VL[0];
  ExtractElementInst *E0 = cast<ExtractElementInst>(VL0);
  Value *Vec = E0->getOperand(0);

  // We have to extract from the same vector type.
  unsigned NElts = Vec->getType()->getVectorNumElements();

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
  ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1));
  if (!CI || CI->getZExtValue())
    return false;

  for (unsigned i = 1, e = VL.size(); i < e; ++i) {
    ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));

    if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec)
      return false;
  }

  return true;
}

static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                           SmallVectorImpl<Value *> &Left,
                                           SmallVectorImpl<Value *> &Right) {

  SmallVector<Value *, 16> OrigLeft, OrigRight;

  bool AllSameOpcodeLeft = true;
  bool AllSameOpcodeRight = true;
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *I = cast<Instruction>(VL[i]);
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);

    OrigLeft.push_back(V0);
    OrigRight.push_back(V1);

    Instruction *I0 = dyn_cast<Instruction>(V0);
    Instruction *I1 = dyn_cast<Instruction>(V1);

    // Check whether all operands on one side have the same opcode. In this
    // case we want to preserve the original order and not make things worse
    // by reordering.
    AllSameOpcodeLeft = I0;
    AllSameOpcodeRight = I1;

    if (i && AllSameOpcodeLeft) {
      if (Instruction *P0 = dyn_cast<Instruction>(OrigLeft[i - 1])) {
        if (P0->getOpcode() != I0->getOpcode())
          AllSameOpcodeLeft = false;
      } else
        AllSameOpcodeLeft = false;
    }
    if (i && AllSameOpcodeRight) {
      if (Instruction *P1 = dyn_cast<Instruction>(OrigRight[i - 1])) {
        if (P1->getOpcode() != I1->getOpcode())
          AllSameOpcodeRight = false;
      } else
        AllSameOpcodeRight = false;
    }

    // Sort two opcodes. In the code below we try to preserve the ability to
    // use broadcast of values instead of individual inserts.
    // vl1 = load
    // vl2 = phi
    // vr1 = load
    // vr2 = vr1
    //         = vl1 x vr1
    //         = vl2 x vr2
    // If we just sorted according to opcode we would leave the first line
    // intact but we would swap vl2 with vr2 because opcode(phi) > opcode(load).
    //         = vl1 x vr1
    //         = vr2 x vl2
    // Because vr2 and vr1 are from the same load we lose the opportunity of a
    // broadcast for the packed right side in the backend: we have [vr1, vl2]
    // instead of [vr1, vr2=vr1].
    if (I0 && I1) {
      if (!i && I0->getOpcode() > I1->getOpcode()) {
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() > I1->getOpcode() && Right[i - 1] != I1) {
        // Try not to destroy a broadcast for no apparent benefit.
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() == I1->getOpcode() && Right[i - 1] == I0) {
        // Try to preserve broadcasts.
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() == I1->getOpcode() && Left[i - 1] == I1) {
        // Try to preserve broadcasts.
        Left.push_back(I1);
        Right.push_back(I0);
      } else {
        Left.push_back(I0);
        Right.push_back(I1);
      }
      continue;
    }
    // Only one of the operands is an instruction; put the instruction on the
    // right.
    if (I0) {
      Left.push_back(V1);
      Right.push_back(I0);
      continue;
    }
    Left.push_back(V0);
    Right.push_back(V1);
  }

  bool LeftBroadcast = isSplat(Left);
  bool RightBroadcast = isSplat(Right);

  // Don't reorder if the operands were good to begin with.
  if (!(LeftBroadcast || RightBroadcast) &&
      (AllSameOpcodeRight || AllSameOpcodeLeft)) {
    Left = OrigLeft;
    Right = OrigRight;
  }
}

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;

  BoUpSLP(Function *Func, ScalarEvolution *Se, const DataLayout *Dl,
          TargetTransformInfo *Tti, TargetLibraryInfo *TLi, AliasAnalysis *Aa,
          LoopInfo *Li, DominatorTree *Dt)
      : F(Func), SE(Se), DL(Dl), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt),
        Builder(Se->getContext()) {}

  /// \brief Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    MemBarrierIgnoreList.clear();
  }

  /// \returns true if the memory operations A and B are consecutive.
  bool isConsecutiveAccess(Value *A, Value *B);

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. They may happen in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL) const;

  /// \brief Take the pointer operand from the Load/Store instruction.
  /// \returns NULL if this is not a valid Load/Store instruction.
  static Value *getPointerOperand(Value *I);

  /// \brief Take the address space operand from the Load/Store instruction.
  /// \returns -1 if this is not a valid Load/Store instruction.
  static unsigned getAddressSpaceOperand(Value *I);

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \returns the AA location that is being accessed by the instruction.
  AliasAnalysis::Location getLocation(Instruction *I);

  /// \brief Checks if it is possible to sink an instruction from
  /// \p Src to \p Dst.
  /// \returns the pointer to the barrier instruction if we can't sink.
  Value *getSinkBarrier(Instruction *Src, Instruction *Dst);

  /// \returns the index of the last instruction in the BB from \p VL.
  int getLastIndex(ArrayRef<Value *> VL);

  /// \returns the last instruction in the bundle \p VL.
  Instruction *getLastInstruction(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even if the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  struct TreeEntry {
    TreeEntry() : Scalars(), VectorizedValue(nullptr), LastScalarIndex(0),
                  NeedToGather(0) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// The index in the basic block of the last scalar.
    int LastScalarIndex;

    /// Do we need to gather this sequence?
    bool NeedToGather;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) {
    VectorizableTree.push_back(TreeEntry());
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      Last->LastScalarIndex = getLastIndex(VL);
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      Last->LastScalarIndex = 0;
      MustGather.insert(VL.begin(), VL.end());
    }
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value*, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser (Value *S, llvm::User *U, int L) :
      Scalar(S), User(U), Lane(L){};
    // Which scalar in our function.
    Value *Scalar;
    // Which user that uses the scalar.
    llvm::User *User;
    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User).
  UserList ExternalUses;

  /// A list of instructions to ignore while sinking
  /// memory instructions. This map must be reset between runs of getCost.
  ValueSet MemBarrierIgnoreList;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;
  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;

  /// Numbers instructions in different blocks.
  DenseMap<BasicBlock *, BlockNumbering> BlocksNumbers;

  /// \brief Get the corresponding instruction numbering list for a given
  /// BasicBlock. The list is allocated lazily.
  BlockNumbering &getBlockNumbering(BasicBlock *BB) {
    auto I = BlocksNumbers.insert(std::make_pair(BB, BlockNumbering(BB)));
    return I.first->second;
  }

  /// List of users to ignore during scheduling and that don't need extracting.
  ArrayRef<Value *> UserIgnoreList;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  const DataLayout *DL;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;
};

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ArrayRef<Value *> UserIgnoreLst) {
  deleteTree();
  UserIgnoreList = UserIgnoreLst;
  if (!getSameType(Roots))
    return;
  buildTree_rec(Roots, 0);

  // Collect the values that we need to extract from the tree.
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      for (User *U : Scalar->users()) {
        DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        // Skip in-tree scalars that become vectors.
        if (ScalarToTreeEntry.count(U)) {
          DEBUG(dbgs() << "SLP: \tInternal user will be removed:" <<
                *U << ".\n");
          int Idx = ScalarToTreeEntry[U]; (void) Idx;
          assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
          continue;
        }
        Instruction *UserInst = dyn_cast<Instruction>(U);
        if (!UserInst)
          continue;

        // Ignore users in the user ignore list.
        if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UserInst) !=
            UserIgnoreList.end())
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
      }
    }
  }
}
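// Illustrative walk-through (not from the original source): for a root bundle
// of four consecutive stores, buildTree_rec first records the stores as a
// vectorizable entry, then recurses into the bundle of the four stored values
// (e.g. four adds), then into the adds' left and right operand bundles, and so
// on until it reaches bundles that must be gathered (constants, non-consecutive
// loads, values from other blocks, ...) or the recursion depth limit.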

void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
  bool SameTy = getSameType(VL); (void)SameTy;
  assert(SameTy && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false);
    return;
  }

  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false);
      return;
    }

  // If all of the operands are identical or constant we have a simple
  // solution.
  if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) ||
      !getSameOpcode(VL)) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Check if this is a duplicate of another entry.
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is already in tree.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // If any of the scalars appears in the table OR it is marked as a value that
  // needs to stay scalar then we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i]) || MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar. \n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  int MyLastIndex = getLastIndex(VL);
  BasicBlock *BB = cast<Instruction>(VL0)->getParent();

  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *Scalar = cast<Instruction>(VL[i]);
    DEBUG(dbgs() << "SLP: Checking users of " << *Scalar << ". \n");
    for (User *U : Scalar->users()) {
      DEBUG(dbgs() << "SLP: \tUser " << *U << ". \n");
      Instruction *UI = dyn_cast<Instruction>(U);
      if (!UI) {
        DEBUG(dbgs() << "SLP: Gathering due to unknown user. \n");
        newTreeEntry(VL, false);
        return;
      }

      // We don't care if the user is in a different basic block.
      BasicBlock *UserBlock = UI->getParent();
      if (UserBlock != BB) {
        DEBUG(dbgs() << "SLP: User from a different basic block "
              << *UI << ". \n");
        continue;
      }

      // If this is a PHINode within this basic block then we can place the
      // extract wherever we want.
      if (isa<PHINode>(*UI)) {
        DEBUG(dbgs() << "SLP: \tWe can schedule PHIs:" << *UI << ". \n");
        continue;
      }

      // Check if this is a safe in-tree user.
      if (ScalarToTreeEntry.count(UI)) {
        int Idx = ScalarToTreeEntry[UI];
        int VecLocation = VectorizableTree[Idx].LastScalarIndex;
        if (VecLocation <= MyLastIndex) {
          DEBUG(dbgs() << "SLP: Gathering due to unschedulable vector. \n");
          newTreeEntry(VL, false);
          return;
        }
        DEBUG(dbgs() << "SLP: In-tree user (" << *UI << ") at #" <<
              VecLocation << " vector value (" << *Scalar << ") at #"
              << MyLastIndex << ".\n");
        continue;
      }

      // Ignore users in the user ignore list.
      if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UI) !=
          UserIgnoreList.end())
        continue;

      // Make sure that we can schedule this unknown user.
      BlockNumbering &BN = getBlockNumbering(BB);
      int UserIndex = BN.getIndex(UI);
      if (UserIndex < MyLastIndex) {

        DEBUG(dbgs() << "SLP: Can't schedule extractelement for "
              << *UI << ". \n");
        newTreeEntry(VL, false);
        return;
      }
    }
  }

  // Check that every instruction appears once in this bundle.
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    for (unsigned j = i + 1; j < e; ++j)
      if (VL[i] == VL[j]) {
        DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
        newTreeEntry(VL, false);
        return;
      }

  // Check that instructions in this bundle don't reference other instructions.
  // The runtime of this check is O(N * N-1 * uses(N)) and a typical N is 4.
  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    for (User *U : VL[i]->users()) {
      for (unsigned j = 0; j < e; ++j) {
        if (i != j && U == VL[j]) {
          DEBUG(dbgs() << "SLP: Intra-bundle dependencies!" << *U << ". \n");
          newTreeEntry(VL, false);
          return;
        }
      }
    }
  }

  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  unsigned Opcode = getSameOpcode(VL);

  // Check if it is safe to sink the loads or the stores.
  if (Opcode == Instruction::Load || Opcode == Instruction::Store) {
    Instruction *Last = getLastInstruction(VL);

    for (unsigned i = 0, e = VL.size(); i < e; ++i) {
      if (VL[i] == Last)
        continue;
      Value *Barrier = getSinkBarrier(cast<Instruction>(VL[i]), Last);
      if (Barrier) {
        DEBUG(dbgs() << "SLP: Can't sink " << *VL[i] << "\n down to " << *Last
              << "\n because of " << *Barrier << ". Gathering.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
  }

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = dyn_cast<PHINode>(VL0);

    // Check for terminator values (e.g. invoke).
    for (unsigned j = 0; j < VL.size(); ++j)
      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        TerminatorInst *Term = dyn_cast<TerminatorInst>(
            cast<PHINode>(VL[j])->getIncomingValueForBlock(
                PH->getIncomingBlock(i)));
        if (Term) {
          DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
          newTreeEntry(VL, false);
          return;
        }
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<PHINode>(VL[j])->getIncomingValueForBlock(
            PH->getIncomingBlock(i)));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ExtractElement: {
    bool Reuse = CanReuseExtract(VL);
    if (Reuse) {
      DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
    }
    newTreeEntry(VL, Reuse);
    return;
  }
  case Instruction::Load: {
    // Check if the loads are consecutive or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
      LoadInst *L = cast<LoadInst>(VL[i]);
      if (!L->isSimple() || !isConsecutiveAccess(VL[i], VL[i + 1])) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Need to swizzle loads.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of loads.\n");
    return;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();
    for (unsigned i = 0; i < VL.size(); ++i) {
      Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
      if (Ty != SrcTy || Ty->isAggregateType() || Ty->isVectorTy()) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of casts.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Check that all of the compares have the same predicate.
    CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
    Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
    for (unsigned i = 1, e = VL.size(); i < e; ++i) {
      CmpInst *Cmp = cast<CmpInst>(VL[i]);
      if (Cmp->getPredicate() != P0 ||
          Cmp->getOperand(0)->getType() != ComparedTy) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of compares.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of bin op.\n");

    // Sort operands of the instructions so that each side is more likely to
    // have the same opcode.
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
      ValueList Left, Right;
      reorderInputsAccordingToOpcode(VL, Left, Right);
      BasicBlock *LeftBB = getSameBlock(Left);
      BasicBlock *RightBB = getSameBlock(Right);
      // If we have common uses on separate paths in the tree make sure we
      // process the one with greater common depth first.
      // We can use block numbering to determine the subtree traversal, as the
      // earlier user has to come in between the common use and the later user.
      if (LeftBB && RightBB && LeftBB == RightBB &&
          getLastIndex(Right) > getLastIndex(Left)) {
        buildTree_rec(Right, Depth + 1);
        buildTree_rec(Left, Depth + 1);
      } else {
        buildTree_rec(Left, Depth + 1);
        buildTree_rec(Right, Depth + 1);
      }
      return;
    }

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::GetElementPtr: {
    // We don't combine GEPs with complicated (nested) indexing.
    for (unsigned j = 0; j < VL.size(); ++j) {
      if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
        DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
        newTreeEntry(VL, false);
        return;
      }
    }

    // We can't combine several GEPs into one vector if they operate on
    // different types.
    Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
    for (unsigned j = 0; j < VL.size(); ++j) {
      Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
      if (Ty0 != CurTy) {
        DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
        newTreeEntry(VL, false);
        return;
      }
    }

    // We don't combine GEPs with non-constant indexes.
    for (unsigned j = 0; j < VL.size(); ++j) {
      auto Op = cast<Instruction>(VL[j])->getOperand(1);
      if (!isa<ConstantInt>(Op)) {
        DEBUG(
            dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
        newTreeEntry(VL, false);
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
    for (unsigned i = 0, e = 2; i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::Store: {
    // Check if the stores are consecutive or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
      if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
        return;
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of stores.\n");

    ValueList Operands;
    for (unsigned j = 0; j < VL.size(); ++j)
      Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));

    // We can ignore these values because we are sinking them down.
    MemBarrierIgnoreList.insert(VL.begin(), VL.end());
    buildTree_rec(Operands, Depth + 1);
    return;
  }
  case Instruction::Call: {
    // Check if the calls are all to the same vectorizable intrinsic.
    CallInst *CI = cast<CallInst>(VL[0]);
    // Check if this is an Intrinsic call or something that can be
    // represented by an intrinsic call.
    Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
    if (!isTriviallyVectorizable(ID)) {
      newTreeEntry(VL, false);
      DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
      return;
    }
    Function *Int = CI->getCalledFunction();
    Value *A1I = nullptr;
    if (hasVectorInstrinsicScalarOpd(ID, 1))
      A1I = CI->getArgOperand(1);
    for (unsigned i = 1, e = VL.size(); i != e; ++i) {
      CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
      if (!CI2 || CI2->getCalledFunction() != Int ||
          getIntrinsicIDForCall(CI2, TLI) != ID) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
                     << "\n");
        return;
      }
      // ctlz, cttz and powi are special intrinsics whose second argument
      // should be the same in order for them to be vectorized.
      if (hasVectorInstrinsicScalarOpd(ID, 1)) {
        Value *A1J = CI2->getArgOperand(1);
        if (A1I != A1J) {
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
                       << " argument " << A1I << "!=" << A1J
                       << "\n");
          return;
        }
      }
    }

    newTreeEntry(VL, true);
    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j) {
        CallInst *CI2 = dyn_cast<CallInst>(VL[j]);
        Operands.push_back(CI2->getArgOperand(i));
      }
      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  default:
    newTreeEntry(VL, false);
    DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
    return;
  }
}

int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value*> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  if (E->NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isSplat(VL)) {
      return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    }
    return getGatherCost(E->Scalars);
  }

  assert(getSameOpcode(VL) && getSameType(VL) && getSameBlock(VL) &&
         "Invalid VL");
  Instruction *VL0 = cast<Instruction>(VL[0]);
  unsigned Opcode = VL0->getOpcode();
  switch (Opcode) {
  case Instruction::PHI: {
    return 0;
  }
  case Instruction::ExtractElement: {
    if (CanReuseExtract(VL)) {
      int DeadCost = 0;
      for (unsigned i = 0, e = VL.size(); i < e; ++i) {
        ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
        if (E->hasOneUse())
          // Take credit for instruction that will become dead.
          DeadCost +=
              TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
      }
      return -DeadCost;
    }
    return getGatherCost(VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();

    // Calculate the cost of this instruction.
    int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
                                                       VL0->getType(), SrcTy);

    VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
    int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
    return VecCost - ScalarCost;
  }
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Calculate the cost of this instruction.
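    // Illustrative example (not from the original source): for a bundle of
    // four i32 adds whose second operands are all the constant 7, Op2VK below
    // ends up as OK_UniformConstantValue and the returned value is the cost of
    // one <4 x i32> add minus four times the cost of a scalar i32 add; a
    // negative result favors the vector form.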
    int ScalarCost = 0;
    int VecCost = 0;
    if (Opcode == Instruction::FCmp || Opcode == Instruction::ICmp ||
        Opcode == Instruction::Select) {
      VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
      ScalarCost = VecTy->getNumElements() *
          TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
      VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
    } else {
      // Certain instructions can be cheaper to vectorize if they have a
      // constant second vector operand.
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;

      // If all operands are exactly the same ConstantInt then set the
      // operand kind to OK_UniformConstantValue.
      // If instead not all operands are constants, then set the operand kind
      // to OK_AnyValue. If all operands are constants but not the same,
      // then set the operand kind to OK_NonUniformConstantValue.
      ConstantInt *CInt = nullptr;
      for (unsigned i = 0; i < VL.size(); ++i) {
        const Instruction *I = cast<Instruction>(VL[i]);
        if (!isa<ConstantInt>(I->getOperand(1))) {
          Op2VK = TargetTransformInfo::OK_AnyValue;
          break;
        }
        if (i == 0) {
          CInt = cast<ConstantInt>(I->getOperand(1));
          continue;
        }
        if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
            CInt != cast<ConstantInt>(I->getOperand(1)))
          Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      }

      ScalarCost =
          VecTy->getNumElements() *
          TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK);
      VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK);
    }
    return VecCost - ScalarCost;
  }
  case Instruction::GetElementPtr: {
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_UniformConstantValue;

    int ScalarCost =
        VecTy->getNumElements() *
        TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
    int VecCost =
        TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);

    return VecCost - ScalarCost;
  }
  case Instruction::Load: {
    // Cost of wide load - cost of scalar loads.
    int ScalarLdCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
    int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
    return VecLdCost - ScalarLdCost;
  }
  case Instruction::Store: {
    // We know that we can merge the stores. Calculate the cost.
    int ScalarStCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
    int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
    return VecStCost - ScalarStCost;
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);

    // Calculate the cost of the scalar and vector calls.
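    // For example (illustrative, not from the original source): a bundle of
    // four calls to llvm.sqrt.f32 compares the cost of four scalar sqrt calls
    // against one llvm.sqrt.v4f32 call taking a <4 x float> argument.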
    SmallVector<Type*, 4> ScalarTys, VecTys;
    for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op) {
      ScalarTys.push_back(CI->getArgOperand(op)->getType());
      VecTys.push_back(VectorType::get(CI->getArgOperand(op)->getType(),
                                       VecTy->getNumElements()));
    }

    int ScalarCallCost = VecTy->getNumElements() *
        TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys);

    int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys);

    DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
          << " (" << VecCallCost << "-" << ScalarCallCost << ")"
          << " for " << *CI << "\n");

    return VecCallCost - ScalarCallCost;
  }
  default:
    llvm_unreachable("Unknown instruction");
  }
}

bool BoUpSLP::isFullyVectorizableTinyTree() {
  DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
        VectorizableTree.size() << " is fully vectorizable.\n");

  // We only handle trees of height 2.
  if (VectorizableTree.size() != 2)
    return false;

  // Handle splat stores.
  if (!VectorizableTree[0].NeedToGather && isSplat(VectorizableTree[1].Scalars))
    return true;

  // Gathering cost would be too much for tiny trees.
  if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
    return false;

  return true;
}

int BoUpSLP::getTreeCost() {
  int Cost = 0;
  DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
        VectorizableTree.size() << ".\n");

  // We only vectorize tiny trees if they are fully vectorizable.
  if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) {
    if (!VectorizableTree.size()) {
      assert(!ExternalUses.size() && "We should not have any external users");
    }
    return INT_MAX;
  }

  unsigned BundleWidth = VectorizableTree[0].Scalars.size();

  for (unsigned i = 0, e = VectorizableTree.size(); i != e; ++i) {
    int C = getEntryCost(&VectorizableTree[i]);
    DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
          << *VectorizableTree[i].Scalars[0] << ".\n");
    Cost += C;
  }

  SmallSet<Value *, 16> ExtractCostCalculated;
  int ExtractCost = 0;
  for (UserList::iterator I = ExternalUses.begin(), E = ExternalUses.end();
       I != E; ++I) {
    // We only add extract cost once for the same scalar.
    if (!ExtractCostCalculated.insert(I->Scalar))
      continue;

    VectorType *VecTy = VectorType::get(I->Scalar->getType(), BundleWidth);
    ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                           I->Lane);
  }

  DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost << ".\n");
  return Cost + ExtractCost;
}
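// Illustrative note (not from the original source): getTreeCost() sums
// VecCost - ScalarCost over all bundles and adds the cost of extracting
// in-tree scalars for external users, so a negative total means the vector
// form is cheaper. Per the slp-threshold option above (default 0), the pass
// is only meant to vectorize when the gain beats that threshold.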
int BoUpSLP::getGatherCost(Type *Ty) {
  int Cost = 0;
  for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
    Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
  return Cost;
}

int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  return getGatherCost(VecTy);
}

AliasAnalysis::Location BoUpSLP::getLocation(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return AA->getLocation(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return AA->getLocation(LI);
  return AliasAnalysis::Location();
}

Value *BoUpSLP::getPointerOperand(Value *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return nullptr;
}

unsigned BoUpSLP::getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers of the same type.
  if (PtrA == PtrB || PtrA->getType() != PtrB->getType())
    return false;

  unsigned PtrBitWidth = DL->getPointerSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(PtrBitWidth, DL->getTypeStoreSize(Ty));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetB);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the base pointer delta that is needed to make the final delta
  // equal to the size.
  APInt BaseDelta = Size - OffsetDelta;

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE->getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE->getSCEV(PtrB);
  const SCEV *C = SE->getConstant(BaseDelta);
  const SCEV *X = SE->getAddExpr(PtrSCEVA, C);
  return X == PtrSCEVB;
}
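// Illustrative example (not from the original source): for
//   %p0 = getelementptr inbounds i32* %base, i64 0
//   %p1 = getelementptr inbounds i32* %base, i64 1
// the accumulated constant offsets differ by exactly the store size of i32
// (4 bytes), so isConsecutiveAccess(load %p0, load %p1) returns true; when the
// stripped bases are not trivially identical, SCEV is asked whether
// base(A) + delta equals base(B).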
Value *BoUpSLP::getSinkBarrier(Instruction *Src, Instruction *Dst) {
  assert(Src->getParent() == Dst->getParent() && "Not the same BB");
  BasicBlock::iterator I = Src, E = Dst;
  /// Scan all of the instructions from SRC to DST and check if
  /// the source may alias.
  for (++I; I != E; ++I) {
    // Ignore store instructions that are marked as 'ignore'.
    if (MemBarrierIgnoreList.count(I))
      continue;
    if (Src->mayWriteToMemory()) /* Write */ {
      if (!I->mayReadOrWriteMemory())
        continue;
    } else /* Read */ {
      if (!I->mayWriteToMemory())
        continue;
    }
    AliasAnalysis::Location A = getLocation(&*I);
    AliasAnalysis::Location B = getLocation(Src);

    if (!A.Ptr || !B.Ptr || AA->alias(A, B))
      return I;
  }
  return nullptr;
}

int BoUpSLP::getLastIndex(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && "Invalid block");
  BlockNumbering &BN = getBlockNumbering(BB);

  int MaxIdx = BN.getIndex(BB->getFirstNonPHI());
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  return MaxIdx;
}

Instruction *BoUpSLP::getLastInstruction(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && "Invalid block");
  BlockNumbering &BN = getBlockNumbering(BB);

  int MaxIdx = BN.getIndex(cast<Instruction>(VL[0]));
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  Instruction *I = BN.getInstruction(MaxIdx);
  assert(I && "bad location");
  return I;
}

void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
  Instruction *VL0 = cast<Instruction>(VL[0]);
  Instruction *LastInst = getLastInstruction(VL);
  BasicBlock::iterator NextInst = LastInst;
  ++NextInst;
  Builder.SetInsertPoint(VL0->getParent(), NextInst);
  Builder.SetCurrentDebugLocation(VL0->getDebugLoc());
}
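// Illustrative example (not from the original source): gathering the scalars
// {%a, %b} of type float builds the insertelement chain
//   %v0 = insertelement <2 x float> undef, float %a, i32 0
//   %v1 = insertelement <2 x float> %v0, float %b, i32 1
// and records each insert in GatherSeq/CSEBlocks so that the later CSE/LICM
// pass over gather sequences can clean them up.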
Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
  Value *Vec = UndefValue::get(Ty);
  // Generate the 'InsertElement' instruction.
  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
    if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(Insrt);
      CSEBlocks.insert(Insrt->getParent());

      // Add to our 'need-to-extract' list.
      if (ScalarToTreeEntry.count(VL[i])) {
        int Idx = ScalarToTreeEntry[VL[i]];
        TreeEntry *E = &VectorizableTree[Idx];
        // Find which lane we need to extract.
        int FoundLane = -1;
        for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for?
          if (E->Scalars[Lane] == VL[i]) {
            FoundLane = Lane;
            break;
          }
        }
        assert(FoundLane >= 0 && "Could not find the correct lane");
        ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
      }
    }
  }

  return Vec;
}

Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
  SmallDenseMap<Value*, int>::const_iterator Entry
    = ScalarToTreeEntry.find(VL[0]);
  if (Entry != ScalarToTreeEntry.end()) {
    int Idx = Entry->second;
    const TreeEntry *En = &VectorizableTree[Idx];
    if (En->isSame(VL) && En->VectorizedValue)
      return En->VectorizedValue;
  }
  return nullptr;
}

Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    if (E->isSame(VL))
      return vectorizeTree(E);
  }

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  return Gather(VL, VecTy);
}

Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
  Type *ScalarTy = VL0->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());

  if (E->NeedToGather) {
    setInsertPointAfterBundle(E->Scalars);
    return Gather(E->Scalars, VecTy);
  }

  unsigned Opcode = VL0->getOpcode();
  assert(Opcode == getSameOpcode(E->Scalars) && "Invalid opcode");

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = dyn_cast<PHINode>(VL0);
    Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
    Builder.SetCurrentDebugLocation(PH->getDebugLoc());
    PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
    E->VectorizedValue = NewPhi;

    // PHINodes may have multiple entries from the same block. We want to
    // visit every block once.
    SmallSet<BasicBlock*, 4> VisitedBBs;

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      BasicBlock *IBB = PH->getIncomingBlock(i);

      if (!VisitedBBs.insert(IBB)) {
        NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
        continue;
      }

      // Prepare the operand vector.
      for (unsigned j = 0; j < E->Scalars.size(); ++j)
        Operands.push_back(cast<PHINode>(E->Scalars[j])->
                           getIncomingValueForBlock(IBB));

      Builder.SetInsertPoint(IBB->getTerminator());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      Value *Vec = vectorizeTree(Operands);
      NewPhi->addIncoming(Vec, IBB);
    }

    assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
           "Invalid number of incoming values");
    return NewPhi;
  }

  case Instruction::ExtractElement: {
    if (CanReuseExtract(E->Scalars)) {
      Value *V = VL0->getOperand(0);
      E->VectorizedValue = V;
      return V;
    }
    return Gather(E->Scalars, VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    ValueList INVL;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i)
      INVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));

    setInsertPointAfterBundle(E->Scalars);

    Value *InVec = vectorizeTree(INVL);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CastInst *CI = dyn_cast<CastInst>(VL0);
    Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::FCmp:
  case Instruction::ICmp: {
    ValueList LHSV, RHSV;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      LHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      RHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *L = vectorizeTree(LHSV);
    Value *R = vectorizeTree(RHSV);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
    Value *V;
    if (Opcode == Instruction::FCmp)
      V = Builder.CreateFCmp(P0, L, R);
    else
      V = Builder.CreateICmp(P0, L, R);

    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Select: {
    ValueList TrueVec, FalseVec, CondVec;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      CondVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      TrueVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
      FalseVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(2));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *Cond = vectorizeTree(CondVec);
    Value *True = vectorizeTree(TrueVec);
    Value *False = vectorizeTree(FalseVec);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    Value *V = Builder.CreateSelect(Cond, True, False);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
case Instruction::Xor: { 1667 ValueList LHSVL, RHSVL; 1668 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) 1669 reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL); 1670 else 1671 for (int i = 0, e = E->Scalars.size(); i < e; ++i) { 1672 LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0)); 1673 RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1)); 1674 } 1675 1676 setInsertPointAfterBundle(E->Scalars); 1677 1678 Value *LHS = vectorizeTree(LHSVL); 1679 Value *RHS = vectorizeTree(RHSVL); 1680 1681 if (LHS == RHS && isa<Instruction>(LHS)) { 1682 assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order"); 1683 } 1684 1685 if (Value *V = alreadyVectorized(E->Scalars)) 1686 return V; 1687 1688 BinaryOperator *BinOp = cast<BinaryOperator>(VL0); 1689 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS); 1690 E->VectorizedValue = V; 1691 1692 if (Instruction *I = dyn_cast<Instruction>(V)) 1693 return propagateMetadata(I, E->Scalars); 1694 1695 return V; 1696 } 1697 case Instruction::Load: { 1698 // Loads are inserted at the head of the tree because we don't want to 1699 // sink them all the way down past store instructions. 1700 setInsertPointAfterBundle(E->Scalars); 1701 1702 LoadInst *LI = cast<LoadInst>(VL0); 1703 unsigned AS = LI->getPointerAddressSpace(); 1704 1705 Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(), 1706 VecTy->getPointerTo(AS)); 1707 unsigned Alignment = LI->getAlignment(); 1708 LI = Builder.CreateLoad(VecPtr); 1709 if (!Alignment) 1710 Alignment = DL->getABITypeAlignment(LI->getPointerOperand()->getType()); 1711 LI->setAlignment(Alignment); 1712 E->VectorizedValue = LI; 1713 return propagateMetadata(LI, E->Scalars); 1714 } 1715 case Instruction::Store: { 1716 StoreInst *SI = cast<StoreInst>(VL0); 1717 unsigned Alignment = SI->getAlignment(); 1718 unsigned AS = SI->getPointerAddressSpace(); 1719 1720 ValueList ValueOp; 1721 for (int i = 0, e = E->Scalars.size(); i < e; ++i) 1722 ValueOp.push_back(cast<StoreInst>(E->Scalars[i])->getValueOperand()); 1723 1724 setInsertPointAfterBundle(E->Scalars); 1725 1726 Value *VecValue = vectorizeTree(ValueOp); 1727 Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(), 1728 VecTy->getPointerTo(AS)); 1729 StoreInst *S = Builder.CreateStore(VecValue, VecPtr); 1730 if (!Alignment) 1731 Alignment = DL->getABITypeAlignment(SI->getPointerOperand()->getType()); 1732 S->setAlignment(Alignment); 1733 E->VectorizedValue = S; 1734 return propagateMetadata(S, E->Scalars); 1735 } 1736 case Instruction::GetElementPtr: { 1737 setInsertPointAfterBundle(E->Scalars); 1738 1739 ValueList Op0VL; 1740 for (int i = 0, e = E->Scalars.size(); i < e; ++i) 1741 Op0VL.push_back(cast<GetElementPtrInst>(E->Scalars[i])->getOperand(0)); 1742 1743 Value *Op0 = vectorizeTree(Op0VL); 1744 1745 std::vector<Value *> OpVecs; 1746 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e; 1747 ++j) { 1748 ValueList OpVL; 1749 for (int i = 0, e = E->Scalars.size(); i < e; ++i) 1750 OpVL.push_back(cast<GetElementPtrInst>(E->Scalars[i])->getOperand(j)); 1751 1752 Value *OpVec = vectorizeTree(OpVL); 1753 OpVecs.push_back(OpVec); 1754 } 1755 1756 Value *V = Builder.CreateGEP(Op0, OpVecs); 1757 E->VectorizedValue = V; 1758 1759 if (Instruction *I = dyn_cast<Instruction>(V)) 1760 return propagateMetadata(I, E->Scalars); 1761 1762 return V; 1763 } 1764 case Instruction::Call: { 1765 CallInst *CI = cast<CallInst>(VL0); 1766 setInsertPointAfterBundle(E->Scalars); 1767 Function *FI; 1768 
Intrinsic::ID IID = Intrinsic::not_intrinsic;
1769     if (CI && (FI = CI->getCalledFunction())) {
1770       IID = (Intrinsic::ID) FI->getIntrinsicID();
1771     }
1772     std::vector<Value *> OpVecs;
1773     for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
1774       ValueList OpVL;
1775       // ctlz, cttz and powi are special intrinsics whose second argument
1776       // is a scalar. This argument should not be vectorized.
1777       if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) {
1778         CallInst *CEI = cast<CallInst>(E->Scalars[0]);
1779         OpVecs.push_back(CEI->getArgOperand(j));
1780         continue;
1781       }
1782       for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
1783         CallInst *CEI = cast<CallInst>(E->Scalars[i]);
1784         OpVL.push_back(CEI->getArgOperand(j));
1785       }
1786
1787       Value *OpVec = vectorizeTree(OpVL);
1788       DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
1789       OpVecs.push_back(OpVec);
1790     }
1791
1792     Module *M = F->getParent();
1793     Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
1794     Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) };
1795     Function *CF = Intrinsic::getDeclaration(M, ID, Tys);
1796     Value *V = Builder.CreateCall(CF, OpVecs);
1797     E->VectorizedValue = V;
1798     return V;
1799   }
1800   default:
1801     llvm_unreachable("unknown inst");
1802   }
1803   return nullptr;
1804 }
1805
1806 Value *BoUpSLP::vectorizeTree() {
1807   Builder.SetInsertPoint(F->getEntryBlock().begin());
1808   vectorizeTree(&VectorizableTree[0]);
1809
1810   DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values.\n");
1811
1812   // Extract all of the elements with the external uses.
1813   for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end();
1814        it != e; ++it) {
1815     Value *Scalar = it->Scalar;
1816     llvm::User *User = it->User;
1817
1818     // Skip users that we have already RAUWed. This happens when one
1819     // instruction has multiple uses of the same value.
1820     if (std::find(Scalar->user_begin(), Scalar->user_end(), User) ==
1821         Scalar->user_end())
1822       continue;
1823     assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");
1824
1825     int Idx = ScalarToTreeEntry[Scalar];
1826     TreeEntry *E = &VectorizableTree[Idx];
1827     assert(!E->NeedToGather && "Extracting from a gather list");
1828
1829     Value *Vec = E->VectorizedValue;
1830     assert(Vec && "Can't find vectorizable value");
1831
1832     Value *Lane = Builder.getInt32(it->Lane);
1833     // Generate extracts for out-of-tree users.
1834     // Find the insertion point for the extractelement lane.
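    // There are three cases below: a PHI user gets the extract at the end of
    // the matching incoming block; an ordinary instruction user gets it
    // immediately before itself; and if Vec is not an instruction (e.g. it
    // was folded to a constant) the extract goes into the entry block.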
1835     if (isa<Instruction>(Vec)) {
1836       if (PHINode *PH = dyn_cast<PHINode>(User)) {
1837         for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
1838           if (PH->getIncomingValue(i) == Scalar) {
1839             Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
1840             Value *Ex = Builder.CreateExtractElement(Vec, Lane);
1841             CSEBlocks.insert(PH->getIncomingBlock(i));
1842             PH->setOperand(i, Ex);
1843           }
1844         }
1845       } else {
1846         Builder.SetInsertPoint(cast<Instruction>(User));
1847         Value *Ex = Builder.CreateExtractElement(Vec, Lane);
1848         CSEBlocks.insert(cast<Instruction>(User)->getParent());
1849         User->replaceUsesOfWith(Scalar, Ex);
1850       }
1851     } else {
1852       Builder.SetInsertPoint(F->getEntryBlock().begin());
1853       Value *Ex = Builder.CreateExtractElement(Vec, Lane);
1854       CSEBlocks.insert(&F->getEntryBlock());
1855       User->replaceUsesOfWith(Scalar, Ex);
1856     }
1857
1858     DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
1859   }
1860
1861   // For each vectorized value:
1862   for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
1863     TreeEntry *Entry = &VectorizableTree[EIdx];
1864
1865     // For each lane:
1866     for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
1867       Value *Scalar = Entry->Scalars[Lane];
1868
1869       // No need to handle users of gathered values.
1870       if (Entry->NeedToGather)
1871         continue;
1872
1873       assert(Entry->VectorizedValue && "Can't find vectorizable value");
1874
1875       Type *Ty = Scalar->getType();
1876       if (!Ty->isVoidTy()) {
1877 #ifndef NDEBUG
1878         for (User *U : Scalar->users()) {
1879           DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
1880
1881           assert((ScalarToTreeEntry.count(U) ||
1882                   // It is legal to replace users in the ignorelist by undef.
1883                   (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), U) !=
1884                    UserIgnoreList.end())) &&
1885                  "Replacing out-of-tree value with undef");
1886         }
1887 #endif
1888         Value *Undef = UndefValue::get(Ty);
1889         Scalar->replaceAllUsesWith(Undef);
1890       }
1891       DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
1892       cast<Instruction>(Scalar)->eraseFromParent();
1893     }
1894   }
1895
1896   for (auto &BN : BlocksNumbers)
1897     BN.second.forget();
1898
1899   Builder.ClearInsertionPoint();
1900
1901   return VectorizableTree[0].VectorizedValue;
1902 }
1903
1904 void BoUpSLP::optimizeGatherSequence() {
1905   DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
1906                << " gather sequence instructions.\n");
1907   // LICM InsertElementInst sequences.
1908   for (SetVector<Instruction *>::iterator it = GatherSeq.begin(),
1909        e = GatherSeq.end(); it != e; ++it) {
1910     InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it);
1911
1912     if (!Insert)
1913       continue;
1914
1915     // Check if this block is inside a loop.
1916     Loop *L = LI->getLoopFor(Insert->getParent());
1917     if (!L)
1918       continue;
1919
1920     // Check if it has a preheader.
1921     BasicBlock *PreHeader = L->getLoopPreheader();
1922     if (!PreHeader)
1923       continue;
1924
1925     // If the vector or the element that we insert into it are
1926     // instructions that are defined in this basic block then we can't
1927     // hoist this instruction.
1928     Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
1929     Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
1930     if (CurrVec && L->contains(CurrVec))
1931       continue;
1932     if (NewElem && L->contains(NewElem))
1933       continue;
1934
1935     // We can hoist this instruction. Move it to the pre-header.
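    // For example (hypothetical IR, for illustration only): an insert
    //   loop:
    //     %v = insertelement <4 x float> undef, float %x, i32 0
    // whose vector and element operands are both defined outside the loop is
    // moved into the preheader, so the gather is materialized only once.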
1936 Insert->moveBefore(PreHeader->getTerminator()); 1937 } 1938 1939 // Make a list of all reachable blocks in our CSE queue. 1940 SmallVector<const DomTreeNode *, 8> CSEWorkList; 1941 CSEWorkList.reserve(CSEBlocks.size()); 1942 for (BasicBlock *BB : CSEBlocks) 1943 if (DomTreeNode *N = DT->getNode(BB)) { 1944 assert(DT->isReachableFromEntry(N)); 1945 CSEWorkList.push_back(N); 1946 } 1947 1948 // Sort blocks by domination. This ensures we visit a block after all blocks 1949 // dominating it are visited. 1950 std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(), 1951 [this](const DomTreeNode *A, const DomTreeNode *B) { 1952 return DT->properlyDominates(A, B); 1953 }); 1954 1955 // Perform O(N^2) search over the gather sequences and merge identical 1956 // instructions. TODO: We can further optimize this scan if we split the 1957 // instructions into different buckets based on the insert lane. 1958 SmallVector<Instruction *, 16> Visited; 1959 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 1960 assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 1961 "Worklist not sorted properly!"); 1962 BasicBlock *BB = (*I)->getBlock(); 1963 // For all instructions in blocks containing gather sequences: 1964 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) { 1965 Instruction *In = it++; 1966 if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In)) 1967 continue; 1968 1969 // Check if we can replace this instruction with any of the 1970 // visited instructions. 1971 for (SmallVectorImpl<Instruction *>::iterator v = Visited.begin(), 1972 ve = Visited.end(); 1973 v != ve; ++v) { 1974 if (In->isIdenticalTo(*v) && 1975 DT->dominates((*v)->getParent(), In->getParent())) { 1976 In->replaceAllUsesWith(*v); 1977 In->eraseFromParent(); 1978 In = nullptr; 1979 break; 1980 } 1981 } 1982 if (In) { 1983 assert(std::find(Visited.begin(), Visited.end(), In) == Visited.end()); 1984 Visited.push_back(In); 1985 } 1986 } 1987 } 1988 CSEBlocks.clear(); 1989 GatherSeq.clear(); 1990 } 1991 1992 /// The SLPVectorizer Pass. 1993 struct SLPVectorizer : public FunctionPass { 1994 typedef SmallVector<StoreInst *, 8> StoreList; 1995 typedef MapVector<Value *, StoreList> StoreListMap; 1996 1997 /// Pass identification, replacement for typeid 1998 static char ID; 1999 2000 explicit SLPVectorizer() : FunctionPass(ID) { 2001 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry()); 2002 } 2003 2004 ScalarEvolution *SE; 2005 const DataLayout *DL; 2006 TargetTransformInfo *TTI; 2007 TargetLibraryInfo *TLI; 2008 AliasAnalysis *AA; 2009 LoopInfo *LI; 2010 DominatorTree *DT; 2011 2012 bool runOnFunction(Function &F) override { 2013 if (skipOptnoneFunction(F)) 2014 return false; 2015 2016 SE = &getAnalysis<ScalarEvolution>(); 2017 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>(); 2018 DL = DLP ? &DLP->getDataLayout() : nullptr; 2019 TTI = &getAnalysis<TargetTransformInfo>(); 2020 TLI = getAnalysisIfAvailable<TargetLibraryInfo>(); 2021 AA = &getAnalysis<AliasAnalysis>(); 2022 LI = &getAnalysis<LoopInfo>(); 2023 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2024 2025 StoreRefs.clear(); 2026 bool Changed = false; 2027 2028 // If the target claims to have no vector registers don't attempt 2029 // vectorization. 2030 if (!TTI->getNumberOfRegisters(true)) 2031 return false; 2032 2033 // Must have DataLayout. We can't require it because some tests run w/o 2034 // triple. 
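    // (DL is consulted below, e.g. for type sizes when choosing the vector
    // factor of a store chain, so without it we simply bail out.)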
2035     if (!DL)
2036       return false;
2037
2038     // Don't vectorize when the attribute NoImplicitFloat is used.
2039     if (F.hasFnAttribute(Attribute::NoImplicitFloat))
2040       return false;
2041
2042     DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
2043
2044     // Use the bottom up slp vectorizer to construct chains that start with
2045     // store instructions.
2046     BoUpSLP R(&F, SE, DL, TTI, TLI, AA, LI, DT);
2047
2048     // Scan the blocks in the function in post order.
2049     for (po_iterator<BasicBlock*> it = po_begin(&F.getEntryBlock()),
2050          e = po_end(&F.getEntryBlock()); it != e; ++it) {
2051       BasicBlock *BB = *it;
2052
2053       // Vectorize trees that end at stores.
2054       if (unsigned count = collectStores(BB, R)) {
2055         (void)count;
2056         DEBUG(dbgs() << "SLP: Found " << count << " stores to vectorize.\n");
2057         Changed |= vectorizeStoreChains(R);
2058       }
2059
2060       // Vectorize trees that end at reductions.
2061       Changed |= vectorizeChainsInBlock(BB, R);
2062     }
2063
2064     if (Changed) {
2065       R.optimizeGatherSequence();
2066       DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
2067       DEBUG(verifyFunction(F));
2068     }
2069     return Changed;
2070   }
2071
2072   void getAnalysisUsage(AnalysisUsage &AU) const override {
2073     FunctionPass::getAnalysisUsage(AU);
2074     AU.addRequired<ScalarEvolution>();
2075     AU.addRequired<AliasAnalysis>();
2076     AU.addRequired<TargetTransformInfo>();
2077     AU.addRequired<LoopInfo>();
2078     AU.addRequired<DominatorTreeWrapperPass>();
2079     AU.addPreserved<LoopInfo>();
2080     AU.addPreserved<DominatorTreeWrapperPass>();
2081     AU.setPreservesCFG();
2082   }
2083
2084 private:
2085
2086   /// \brief Collect memory references and sort them according to their base
2087   /// object. We sort the stores to their base objects to reduce the cost of the
2088   /// quadratic search on the stores. TODO: We can further reduce this cost
2089   /// if we flush the chain creation every time we run into a memory barrier.
2090   unsigned collectStores(BasicBlock *BB, BoUpSLP &R);
2091
2092   /// \brief Try to vectorize a chain that starts at two arithmetic instrs.
2093   bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R);
2094
2095   /// \brief Try to vectorize a list of operands.
2096   /// \param BuildVector A list of users to ignore for the purpose of
2097   /// scheduling and that don't need extracting.
2098   /// \returns true if a value was vectorized.
2099   bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
2100                           ArrayRef<Value *> BuildVector = None);
2101
2102   /// \brief Try to vectorize a chain that may start at the operands of \p V.
2103   bool tryToVectorize(BinaryOperator *V, BoUpSLP &R);
2104
2105   /// \brief Vectorize the stores that were collected in StoreRefs.
2106   bool vectorizeStoreChains(BoUpSLP &R);
2107
2108   /// \brief Scan the basic block and look for patterns that are likely to start
2109   /// a vectorization chain.
2110   bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R);
2111
2112   bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold,
2113                            BoUpSLP &R);
2114
2115   bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold,
2116                        BoUpSLP &R);
2117 private:
2118   StoreListMap StoreRefs;
2119 };
2120
2121 /// \brief Check that the Values in the slice in VL array are still existent in
2122 /// the WeakVH array.
2123 /// Vectorization of part of the VL array may cause later values in the VL array
2124 /// to become invalid. We track when this has happened in the WeakVH array.
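/// For example (an illustrative scenario): vectorizing the first four stores
/// of an eight-store chain erases those four StoreInsts; their WeakVH
/// entries become null and stop comparing equal to the stale pointers still
/// held in VL, so any later slice that overlaps them is skipped.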
2125 static bool hasValueBeenRAUWed(ArrayRef<Value *> &VL, 2126 SmallVectorImpl<WeakVH> &VH, 2127 unsigned SliceBegin, 2128 unsigned SliceSize) { 2129 for (unsigned i = SliceBegin; i < SliceBegin + SliceSize; ++i) 2130 if (VH[i] != VL[i]) 2131 return true; 2132 2133 return false; 2134 } 2135 2136 bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain, 2137 int CostThreshold, BoUpSLP &R) { 2138 unsigned ChainLen = Chain.size(); 2139 DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen 2140 << "\n"); 2141 Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType(); 2142 unsigned Sz = DL->getTypeSizeInBits(StoreTy); 2143 unsigned VF = MinVecRegSize / Sz; 2144 2145 if (!isPowerOf2_32(Sz) || VF < 2) 2146 return false; 2147 2148 // Keep track of values that were deleted by vectorizing in the loop below. 2149 SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end()); 2150 2151 bool Changed = false; 2152 // Look for profitable vectorizable trees at all offsets, starting at zero. 2153 for (unsigned i = 0, e = ChainLen; i < e; ++i) { 2154 if (i + VF > e) 2155 break; 2156 2157 // Check that a previous iteration of this loop did not delete the Value. 2158 if (hasValueBeenRAUWed(Chain, TrackValues, i, VF)) 2159 continue; 2160 2161 DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i 2162 << "\n"); 2163 ArrayRef<Value *> Operands = Chain.slice(i, VF); 2164 2165 R.buildTree(Operands); 2166 2167 int Cost = R.getTreeCost(); 2168 2169 DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n"); 2170 if (Cost < CostThreshold) { 2171 DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n"); 2172 R.vectorizeTree(); 2173 2174 // Move to the next bundle. 2175 i += VF - 1; 2176 Changed = true; 2177 } 2178 } 2179 2180 return Changed; 2181 } 2182 2183 bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores, 2184 int costThreshold, BoUpSLP &R) { 2185 SetVector<Value *> Heads, Tails; 2186 SmallDenseMap<Value *, Value *> ConsecutiveChain; 2187 2188 // We may run into multiple chains that merge into a single chain. We mark the 2189 // stores that we vectorized so that we don't visit the same store twice. 2190 BoUpSLP::ValueSet VectorizedStores; 2191 bool Changed = false; 2192 2193 // Do a quadratic search on all of the given stores and find 2194 // all of the pairs of stores that follow each other. 2195 for (unsigned i = 0, e = Stores.size(); i < e; ++i) { 2196 for (unsigned j = 0; j < e; ++j) { 2197 if (i == j) 2198 continue; 2199 2200 if (R.isConsecutiveAccess(Stores[i], Stores[j])) { 2201 Tails.insert(Stores[j]); 2202 Heads.insert(Stores[i]); 2203 ConsecutiveChain[Stores[i]] = Stores[j]; 2204 } 2205 } 2206 } 2207 2208 // For stores that start but don't end a link in the chain: 2209 for (SetVector<Value *>::iterator it = Heads.begin(), e = Heads.end(); 2210 it != e; ++it) { 2211 if (Tails.count(*it)) 2212 continue; 2213 2214 // We found a store instr that starts a chain. Now follow the chain and try 2215 // to vectorize it. 2216 BoUpSLP::ValueList Operands; 2217 Value *I = *it; 2218 // Collect the chain into a list. 2219 while (Tails.count(I) || Heads.count(I)) { 2220 if (VectorizedStores.count(I)) 2221 break; 2222 Operands.push_back(I); 2223 // Move to the next value in the chain. 2224 I = ConsecutiveChain[I]; 2225 } 2226 2227 bool Vectorized = vectorizeStoreChain(Operands, costThreshold, R); 2228 2229 // Mark the vectorized stores so that we don't vectorize them again. 
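    // (The chains found above may overlap, i.e. one store can occur in
    // several Head-to-Tail walks, so this set keeps a store from being
    // vectorized twice.)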
2230     if (Vectorized)
2231       VectorizedStores.insert(Operands.begin(), Operands.end());
2232     Changed |= Vectorized;
2233   }
2234
2235   return Changed;
2236 }
2237
2238
2239 unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
2240   unsigned count = 0;
2241   StoreRefs.clear();
2242   for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
2243     StoreInst *SI = dyn_cast<StoreInst>(it);
2244     if (!SI)
2245       continue;
2246
2247     // Don't touch volatile stores.
2248     if (!SI->isSimple())
2249       continue;
2250
2251     // Check that the pointer points to scalars.
2252     Type *Ty = SI->getValueOperand()->getType();
2253     if (Ty->isAggregateType() || Ty->isVectorTy())
2254       continue;
2255
2256     // Find the base pointer.
2257     Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL);
2258
2259     // Save the store locations.
2260     StoreRefs[Ptr].push_back(SI);
2261     count++;
2262   }
2263   return count;
2264 }
2265
2266 bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
2267   if (!A || !B)
2268     return false;
2269   Value *VL[] = { A, B };
2270   return tryToVectorizeList(VL, R);
2271 }
2272
2273 bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
2274                                        ArrayRef<Value *> BuildVector) {
2275   if (VL.size() < 2)
2276     return false;
2277
2278   DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size() << ".\n");
2279
2280   // Check that all of the parts are scalar instructions of the same type.
2281   Instruction *I0 = dyn_cast<Instruction>(VL[0]);
2282   if (!I0)
2283     return false;
2284
2285   unsigned Opcode0 = I0->getOpcode();
2286
2287   Type *Ty0 = I0->getType();
2288   unsigned Sz = DL->getTypeSizeInBits(Ty0);
2289   unsigned VF = MinVecRegSize / Sz;
2290
2291   for (int i = 0, e = VL.size(); i < e; ++i) {
2292     Type *Ty = VL[i]->getType();
2293     if (Ty->isAggregateType() || Ty->isVectorTy())
2294       return false;
2295     Instruction *Inst = dyn_cast<Instruction>(VL[i]);
2296     if (!Inst || Inst->getOpcode() != Opcode0)
2297       return false;
2298   }
2299
2300   bool Changed = false;
2301
2302   // Keep track of values that were deleted by vectorizing in the loop below.
2303   SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());
2304
2305   for (unsigned i = 0, e = VL.size(); i < e; ++i) {
2306     unsigned OpsWidth = 0;
2307
2308     if (i + VF > e)
2309       OpsWidth = e - i;
2310     else
2311       OpsWidth = VF;
2312
2313     if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
2314       break;
2315
2316     // Check that a previous iteration of this loop did not delete the Value.
2317     if (hasValueBeenRAUWed(VL, TrackValues, i, OpsWidth))
2318       continue;
2319
2320     DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations"
2321                  << "\n");
2322     ArrayRef<Value *> Ops = VL.slice(i, OpsWidth);
2323
2324     ArrayRef<Value *> BuildVectorSlice;
2325     if (!BuildVector.empty())
2326       BuildVectorSlice = BuildVector.slice(i, OpsWidth);
2327
2328     R.buildTree(Ops, BuildVectorSlice);
2329     int Cost = R.getTreeCost();
2330
2331     if (Cost < -SLPCostThreshold) {
2332       DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
2333       Value *VectorizedRoot = R.vectorizeTree();
2334
2335       // Reconstruct the build vector by extracting the vectorized root. This
2336       // way we handle the case where some elements of the vector are undefined.
2337       // (return (insertelement <4 x i32> (insertelement undef (opd0) 0) (opd1) 2))
2338       if (!BuildVectorSlice.empty()) {
2339         // The insert point is the last build vector instruction. The vectorized
2340         // root will precede it. This guarantees that we get an instruction. The
2341         // vectorized tree could have been constant folded.
2342         Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back());
2343         unsigned VecIdx = 0;
2344         for (auto &V : BuildVectorSlice) {
2345           IRBuilder<true, NoFolder> Builder(
2346               ++BasicBlock::iterator(InsertAfter));
2347           InsertElementInst *IE = cast<InsertElementInst>(V);
2348           Instruction *Extract = cast<Instruction>(Builder.CreateExtractElement(
2349               VectorizedRoot, Builder.getInt32(VecIdx++)));
2350           IE->setOperand(1, Extract);
2351           IE->removeFromParent();
2352           IE->insertAfter(Extract);
2353           InsertAfter = IE;
2354         }
2355       }
2356       // Move to the next bundle.
2357       i += VF - 1;
2358       Changed = true;
2359     }
2360   }
2361
2362   return Changed;
2363 }
2364
2365 bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
2366   if (!V)
2367     return false;
2368
2369   // Try to vectorize V.
2370   if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R))
2371     return true;
2372
2373   BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0));
2374   BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1));
2375   // Try to skip B.
2376   if (B && B->hasOneUse()) {
2377     BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
2378     BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
2379     if (tryToVectorizePair(A, B0, R)) {
2380       B->moveBefore(V);
2381       return true;
2382     }
2383     if (tryToVectorizePair(A, B1, R)) {
2384       B->moveBefore(V);
2385       return true;
2386     }
2387   }
2388
2389   // Try to skip A.
2390   if (A && A->hasOneUse()) {
2391     BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
2392     BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
2393     if (tryToVectorizePair(A0, B, R)) {
2394       A->moveBefore(V);
2395       return true;
2396     }
2397     if (tryToVectorizePair(A1, B, R)) {
2398       A->moveBefore(V);
2399       return true;
2400     }
2401   }
2402   return false;
2403 }
2404
2405 /// \brief Generate a shuffle mask to be used in a reduction tree.
2406 ///
2407 /// \param VecLen The length of the vector to be reduced.
2408 /// \param NumEltsToRdx The number of elements that should be reduced in the
2409 ///        vector.
2410 /// \param IsPairwise Whether the reduction is a pairwise or splitting
2411 ///        reduction. A pairwise reduction will generate a mask of
2412 ///        <0,2,...> or <1,3,..> while a splitting reduction will generate
2413 ///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
2414 /// \param IsLeft True will generate a mask of even elements, odd otherwise.
2415 static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
2416                                    bool IsPairwise, bool IsLeft,
2417                                    IRBuilder<> &Builder) {
2418   assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");
2419
2420   SmallVector<Constant *, 32> ShuffleMask(
2421       VecLen, UndefValue::get(Builder.getInt32Ty()));
2422
2423   if (IsPairwise)
2424     // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
2425     for (unsigned i = 0; i != NumEltsToRdx; ++i)
2426       ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
2427   else
2428     // Move the upper half of the vector to the lower half.
2429     for (unsigned i = 0; i != NumEltsToRdx; ++i)
2430       ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);
2431
2432   return ConstantVector::get(ShuffleMask);
2433 }
2434
2435
2436 /// Model horizontal reductions.
2437 ///
2438 /// A horizontal reduction is a tree of reduction operations (currently add and
2439 /// fadd) that has operations that can be put into a vector as its leaf.
2440 /// For example, this tree:
2441 ///
2442 ///   mul mul mul mul
2443 ///    \  /    \  /
2444 ///     +       +
2445 ///      \     /
2446 ///         +
2447 /// This tree has "mul" as its reduced values and "+" as its reduction
2448 /// operations. A reduction might be feeding into a store or a binary operation
2449 /// feeding a phi.
2450 ///    ...
2451 ///    \  /
2452 ///     +
2453 ///     |
2454 ///  phi +=
2455 ///
2456 ///  Or:
2457 ///    ...
2458 ///    \  /
2459 ///     +
2460 ///     |
2461 ///   *p =
2462 ///
2463 class HorizontalReduction {
2464   SmallVector<Value *, 16> ReductionOps;
2465   SmallVector<Value *, 32> ReducedVals;
2466
2467   BinaryOperator *ReductionRoot;
2468   PHINode *ReductionPHI;
2469
2470   /// The opcode of the reduction.
2471   unsigned ReductionOpcode;
2472   /// The opcode of the values we perform a reduction on.
2473   unsigned ReducedValueOpcode;
2474   /// The width of one full horizontal reduction operation.
2475   unsigned ReduxWidth;
2476   /// Should we model this reduction as a pairwise reduction tree or a tree that
2477   /// splits the vector in halves and adds those halves.
2478   bool IsPairwiseReduction;
2479
2480 public:
2481   HorizontalReduction()
2482       : ReductionRoot(nullptr), ReductionPHI(nullptr), ReductionOpcode(0),
2483         ReducedValueOpcode(0), ReduxWidth(0), IsPairwiseReduction(false) {}
2484
2485   /// \brief Try to find a reduction tree.
2486   bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B,
2487                                  const DataLayout *DL) {
2488     assert((!Phi ||
2489             std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) &&
2490            "The phi needs to use the binary operator");
2491
2492     // We could have an initial reduction that is not an add.
2493     //   r *= v1 + v2 + v3 + v4
2494     // In such a case start looking for a tree rooted in the first '+'.
2495     if (Phi) {
2496       if (B->getOperand(0) == Phi) {
2497         Phi = nullptr;
2498         B = dyn_cast<BinaryOperator>(B->getOperand(1));
2499       } else if (B->getOperand(1) == Phi) {
2500         Phi = nullptr;
2501         B = dyn_cast<BinaryOperator>(B->getOperand(0));
2502       }
2503     }
2504
2505     if (!B)
2506       return false;
2507
2508     Type *Ty = B->getType();
2509     if (Ty->isVectorTy())
2510       return false;
2511
2512     ReductionOpcode = B->getOpcode();
2513     ReducedValueOpcode = 0;
2514     ReduxWidth = MinVecRegSize / DL->getTypeSizeInBits(Ty);
2515     ReductionRoot = B;
2516     ReductionPHI = Phi;
2517
2518     if (ReduxWidth < 4)
2519       return false;
2520
2521     // We currently only support adds.
2522     if (ReductionOpcode != Instruction::Add &&
2523         ReductionOpcode != Instruction::FAdd)
2524       return false;
2525
2526     // Post order traverse the reduction tree starting at B. We only handle
2527     // true trees containing only binary operators.
2528     SmallVector<std::pair<BinaryOperator *, unsigned>, 32> Stack;
2529     Stack.push_back(std::make_pair(B, 0));
2530     while (!Stack.empty()) {
2531       BinaryOperator *TreeN = Stack.back().first;
2532       unsigned EdgeToVisit = Stack.back().second++;
2533       bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;
2534
2535       // Only handle trees in the current basic block.
2536       if (TreeN->getParent() != B->getParent())
2537         return false;
2538
2539       // Each tree node needs to have one user except for the ultimate
2540       // reduction.
2541       if (!TreeN->hasOneUse() && TreeN != B)
2542         return false;
2543
2544       // Postorder visit.
2545       if (EdgeToVisit == 2 || IsReducedValue) {
2546         if (IsReducedValue) {
2547           // Make sure that the opcodes of the operations that we are going to
2548           // reduce match.
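          // For example, in the '(mul + mul) + (mul + mul)' tree above the
          // first 'mul' seen fixes ReducedValueOpcode, and every remaining
          // leaf must match it or the whole candidate is rejected.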
2549           if (!ReducedValueOpcode)
2550             ReducedValueOpcode = TreeN->getOpcode();
2551           else if (ReducedValueOpcode != TreeN->getOpcode())
2552             return false;
2553           ReducedVals.push_back(TreeN);
2554         } else {
2555           // We need to be able to reassociate the adds.
2556           if (!TreeN->isAssociative())
2557             return false;
2558           ReductionOps.push_back(TreeN);
2559         }
2560         // Retract.
2561         Stack.pop_back();
2562         continue;
2563       }
2564
2565       // Visit left or right.
2566       Value *NextV = TreeN->getOperand(EdgeToVisit);
2567       BinaryOperator *Next = dyn_cast<BinaryOperator>(NextV);
2568       if (Next)
2569         Stack.push_back(std::make_pair(Next, 0));
2570       else if (NextV != Phi)
2571         return false;
2572     }
2573     return true;
2574   }
2575
2576   /// \brief Attempt to vectorize the tree found by
2577   /// matchAssociativeReduction.
2578   bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
2579     if (ReducedVals.empty())
2580       return false;
2581
2582     unsigned NumReducedVals = ReducedVals.size();
2583     if (NumReducedVals < ReduxWidth)
2584       return false;
2585
2586     Value *VectorizedTree = nullptr;
2587     IRBuilder<> Builder(ReductionRoot);
2588     FastMathFlags Unsafe;
2589     Unsafe.setUnsafeAlgebra();
2590     Builder.SetFastMathFlags(Unsafe);
2591     unsigned i = 0;
2592
2593     for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
2594       ArrayRef<Value *> ValsToReduce(&ReducedVals[i], ReduxWidth);
2595       V.buildTree(ValsToReduce, ReductionOps);
2596
2597       // Estimate cost.
2598       int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]);
2599       if (Cost >= -SLPCostThreshold)
2600         break;
2601
2602       DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
2603                    << ". (HorRdx)\n");
2604
2605       // Vectorize a tree.
2606       DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
2607       Value *VectorizedRoot = V.vectorizeTree();
2608
2609       // Emit a reduction.
2610       Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder);
2611       if (VectorizedTree) {
2612         Builder.SetCurrentDebugLocation(Loc);
2613         VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
2614                                      ReducedSubTree, "bin.rdx");
2615       } else
2616         VectorizedTree = ReducedSubTree;
2617     }
2618
2619     if (VectorizedTree) {
2620       // Finish the reduction.
2621       for (; i < NumReducedVals; ++i) {
2622         Builder.SetCurrentDebugLocation(
2623             cast<Instruction>(ReducedVals[i])->getDebugLoc());
2624         VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
2625                                      ReducedVals[i]);
2626       }
2627       // Update users.
2628       if (ReductionPHI) {
2629         assert(ReductionRoot && "Need a reduction operation");
2630         ReductionRoot->setOperand(0, VectorizedTree);
2631         ReductionRoot->setOperand(1, ReductionPHI);
2632       } else
2633         ReductionRoot->replaceAllUsesWith(VectorizedTree);
2634     }
2635     return VectorizedTree != nullptr;
2636   }
2637
2638 private:
2639
2640   /// \brief Calculate the cost of a reduction.
2641   int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) {
2642     Type *ScalarTy = FirstReducedVal->getType();
2643     Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);
2644
2645     int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true);
2646     int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false);
2647
2648     IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
2649     int VecReduxCost = IsPairwiseReduction ?
PairwiseRdxCost : SplittingRdxCost; 2650 2651 int ScalarReduxCost = 2652 ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, VecTy); 2653 2654 DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost 2655 << " for reduction that starts with " << *FirstReducedVal 2656 << " (It is a " 2657 << (IsPairwiseReduction ? "pairwise" : "splitting") 2658 << " reduction)\n"); 2659 2660 return VecReduxCost - ScalarReduxCost; 2661 } 2662 2663 static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L, 2664 Value *R, const Twine &Name = "") { 2665 if (Opcode == Instruction::FAdd) 2666 return Builder.CreateFAdd(L, R, Name); 2667 return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name); 2668 } 2669 2670 /// \brief Emit a horizontal reduction of the vectorized value. 2671 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) { 2672 assert(VectorizedValue && "Need to have a vectorized tree node"); 2673 Instruction *ValToReduce = dyn_cast<Instruction>(VectorizedValue); 2674 assert(isPowerOf2_32(ReduxWidth) && 2675 "We only handle power-of-two reductions for now"); 2676 2677 Value *TmpVec = ValToReduce; 2678 for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) { 2679 if (IsPairwiseReduction) { 2680 Value *LeftMask = 2681 createRdxShuffleMask(ReduxWidth, i, true, true, Builder); 2682 Value *RightMask = 2683 createRdxShuffleMask(ReduxWidth, i, true, false, Builder); 2684 2685 Value *LeftShuf = Builder.CreateShuffleVector( 2686 TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l"); 2687 Value *RightShuf = Builder.CreateShuffleVector( 2688 TmpVec, UndefValue::get(TmpVec->getType()), (RightMask), 2689 "rdx.shuf.r"); 2690 TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf, 2691 "bin.rdx"); 2692 } else { 2693 Value *UpperHalf = 2694 createRdxShuffleMask(ReduxWidth, i, false, false, Builder); 2695 Value *Shuf = Builder.CreateShuffleVector( 2696 TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf"); 2697 TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf, "bin.rdx"); 2698 } 2699 } 2700 2701 // The result is in the first element of the vector. 2702 return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 2703 } 2704 }; 2705 2706 /// \brief Recognize construction of vectors like 2707 /// %ra = insertelement <4 x float> undef, float %s0, i32 0 2708 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 2709 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 2710 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 2711 /// 2712 /// Returns true if it matches 2713 /// 2714 static bool findBuildVector(InsertElementInst *FirstInsertElem, 2715 SmallVectorImpl<Value *> &BuildVector, 2716 SmallVectorImpl<Value *> &BuildVectorOpds) { 2717 if (!isa<UndefValue>(FirstInsertElem->getOperand(0))) 2718 return false; 2719 2720 InsertElementInst *IE = FirstInsertElem; 2721 while (true) { 2722 BuildVector.push_back(IE); 2723 BuildVectorOpds.push_back(IE->getOperand(1)); 2724 2725 if (IE->use_empty()) 2726 return false; 2727 2728 InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back()); 2729 if (!NextUse) 2730 return true; 2731 2732 // If this isn't the final use, make sure the next insertelement is the only 2733 // use. 
It's OK if the final constructed vector is used multiple times.
2734     if (!IE->hasOneUse())
2735       return false;
2736
2737     IE = NextUse;
2738   }
2739
2740   return false;
2741 }
2742
2743 static bool PhiTypeSorterFunc(Value *V, Value *V2) {
2744   return V->getType() < V2->getType();
2745 }
2746
2747 bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
2748   bool Changed = false;
2749   SmallVector<Value *, 4> Incoming;
2750   SmallSet<Value *, 16> VisitedInstrs;
2751
2752   bool HaveVectorizedPhiNodes = true;
2753   while (HaveVectorizedPhiNodes) {
2754     HaveVectorizedPhiNodes = false;
2755
2756     // Collect the incoming values from the PHIs.
2757     Incoming.clear();
2758     for (BasicBlock::iterator instr = BB->begin(), ie = BB->end(); instr != ie;
2759          ++instr) {
2760       PHINode *P = dyn_cast<PHINode>(instr);
2761       if (!P)
2762         break;
2763
2764       if (!VisitedInstrs.count(P))
2765         Incoming.push_back(P);
2766     }
2767
2768     // Sort by type.
2769     std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);
2770
2771     // Try to vectorize elements based on their type.
2772     for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
2773          E = Incoming.end();
2774          IncIt != E;) {
2775
2776       // Look for the next elements with the same type.
2777       SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
2778       while (SameTypeIt != E &&
2779              (*SameTypeIt)->getType() == (*IncIt)->getType()) {
2780         VisitedInstrs.insert(*SameTypeIt);
2781         ++SameTypeIt;
2782       }
2783
2784       // Try to vectorize them.
2785       unsigned NumElts = (SameTypeIt - IncIt);
2786       DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts << ")\n");
2787       if (NumElts > 1 &&
2788           tryToVectorizeList(ArrayRef<Value *>(IncIt, NumElts), R)) {
2789         // Success, start over because instructions might have been changed.
2790         HaveVectorizedPhiNodes = true;
2791         Changed = true;
2792         break;
2793       }
2794
2795       // Start over at the next instruction of a different type (or the end).
2796       IncIt = SameTypeIt;
2797     }
2798   }
2799
2800   VisitedInstrs.clear();
2801
2802   for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
2803     // We may go through BB multiple times so skip the ones we have checked.
2804     if (!VisitedInstrs.insert(it))
2805       continue;
2806
2807     if (isa<DbgInfoIntrinsic>(it))
2808       continue;
2809
2810     // Try to vectorize reductions that use PHINodes.
2811     if (PHINode *P = dyn_cast<PHINode>(it)) {
2812       // Check that the PHI is a reduction PHI.
2813       if (P->getNumIncomingValues() != 2)
2814         return Changed;
2815       Value *Rdx =
2816           (P->getIncomingBlock(0) == BB
2817                ? (P->getIncomingValue(0))
2818                : (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1)
2819                                                : nullptr));
2820       // Check if this is a Binary Operator.
2821       BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx);
2822       if (!BI)
2823         continue;
2824
2825       // Try to match and vectorize a horizontal reduction.
2826       HorizontalReduction HorRdx;
2827       if (ShouldVectorizeHor &&
2828           HorRdx.matchAssociativeReduction(P, BI, DL) &&
2829           HorRdx.tryToReduce(R, TTI)) {
2830         Changed = true;
2831         it = BB->begin();
2832         e = BB->end();
2833         continue;
2834       }
2835
2836       Value *Inst = BI->getOperand(0);
2837       if (Inst == P)
2838         Inst = BI->getOperand(1);
2839
2840       if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) {
2841         // We would like to start over since some instructions are deleted
2842         // and the iterator may become invalid.
2843         Changed = true;
2844         it = BB->begin();
2845         e = BB->end();
2846         continue;
2847       }
2848
2849       continue;
2850     }
2851
2852     // Try to vectorize horizontal reductions feeding into a store.
2853     if (ShouldStartVectorizeHorAtStore)
2854       if (StoreInst *SI = dyn_cast<StoreInst>(it))
2855         if (BinaryOperator *BinOp =
2856                 dyn_cast<BinaryOperator>(SI->getValueOperand())) {
2857           HorizontalReduction HorRdx;
2858           if (((HorRdx.matchAssociativeReduction(nullptr, BinOp, DL) &&
2859                 HorRdx.tryToReduce(R, TTI)) ||
2860                tryToVectorize(BinOp, R))) {
2861             Changed = true;
2862             it = BB->begin();
2863             e = BB->end();
2864             continue;
2865           }
2866         }
2867
2868     // Try to vectorize trees that start at compare instructions.
2869     if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
2870       if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
2871         Changed = true;
2872         // We would like to start over since some instructions are deleted
2873         // and the iterator may become invalid.
2874         it = BB->begin();
2875         e = BB->end();
2876         continue;
2877       }
2878
2879       for (int i = 0; i < 2; ++i) {
2880         if (BinaryOperator *BI = dyn_cast<BinaryOperator>(CI->getOperand(i))) {
2881           if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) {
2882             Changed = true;
2883             // We would like to start over since some instructions are deleted
2884             // and the iterator may become invalid.
2885             it = BB->begin();
2886             e = BB->end();
2887           }
2888         }
2889       }
2890       continue;
2891     }
2892
2893     // Try to vectorize trees that start at insertelement instructions.
2894     if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) {
2895       SmallVector<Value *, 16> BuildVector;
2896       SmallVector<Value *, 16> BuildVectorOpds;
2897       if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds))
2898         continue;
2899
2900       // Vectorize starting with the build vector operands ignoring the
2901       // BuildVector instructions for the purpose of scheduling and user
2902       // extraction.
2903       if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) {
2904         Changed = true;
2905         it = BB->begin();
2906         e = BB->end();
2907       }
2908
2909       continue;
2910     }
2911   }
2912
2913   return Changed;
2914 }
2915
2916 bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) {
2917   bool Changed = false;
2918   // Attempt to sort and vectorize each of the store-groups.
2919   for (StoreListMap::iterator it = StoreRefs.begin(), e = StoreRefs.end();
2920        it != e; ++it) {
2921     if (it->second.size() < 2)
2922       continue;
2923
2924     DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
2925                  << it->second.size() << ".\n");
2926
2927     // Process the stores in chunks of 16.
2928     for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
2929       unsigned Len = std::min<unsigned>(CE - CI, 16);
2930       ArrayRef<StoreInst *> Chunk(&it->second[CI], Len);
2931       Changed |= vectorizeStores(Chunk, -SLPCostThreshold, R);
2932     }
2933   }
2934   return Changed;
2935 }
2936
2937 } // end anonymous namespace
2938
2939 char SLPVectorizer::ID = 0;
2940 static const char lv_name[] = "SLP Vectorizer";
2941 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
2942 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
2943 INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
2944 INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
2945 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
2946 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)
2947
2948 namespace llvm {
2949 Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
2950 }
2951
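// A minimal usage sketch (illustrative only; the wrapper function and setup
// below are assumptions, not part of this file). With the legacy pass
// manager of this era the pass is scheduled through the factory above:
//
//   #include "llvm/PassManager.h"
//   #include "llvm/Transforms/Vectorize.h"
//
//   void runSLP(llvm::Module &M) {
//     llvm::PassManager PM;
//     PM.add(llvm::createSLPVectorizerPass());
//     PM.run(M);  // required analyses are scheduled on demand
//   }
//
// From the command line the equivalent is: opt -basicaa -slp-vectorizer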