//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <map>

using namespace llvm;

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number"));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(false), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

namespace {

static const unsigned MinVecRegSize = 128;

static const unsigned RecursionMaxDepth = 12;

/// A helper class for numbering instructions in multiple blocks.
/// Numbers start at zero for each basic block.
struct BlockNumbering {

  BlockNumbering(BasicBlock *Bb) : BB(Bb), Valid(false) {}

  BlockNumbering() : BB(0), Valid(false) {}

  void numberInstructions() {
    unsigned Loc = 0;
    InstrIdx.clear();
    InstrVec.clear();
    // Number the instructions in the block.
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
      InstrIdx[it] = Loc++;
      InstrVec.push_back(it);
      assert(InstrVec[InstrIdx[it]] == it && "Invalid allocation");
    }
    Valid = true;
  }

  int getIndex(Instruction *I) {
    assert(I->getParent() == BB && "Invalid instruction");
    if (!Valid)
      numberInstructions();
    assert(InstrIdx.count(I) && "Unknown instruction");
    return InstrIdx[I];
  }

  Instruction *getInstruction(unsigned loc) {
    if (!Valid)
      numberInstructions();
    assert(InstrVec.size() > loc && "Invalid Index");
    return InstrVec[loc];
  }

  void forget() { Valid = false; }

private:
  /// The block we are numbering.
  BasicBlock *BB;
  /// Is the block numbered.
  bool Valid;
  /// Maps instructions to numbers and back.
  SmallDenseMap<Instruction *, int> InstrIdx;
  /// Maps integers to Instructions.
  SmallVector<Instruction *, 32> InstrVec;
};
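
// Illustrative sketch (not part of the original pass): for a block such as
//   %a = load i32* %p
//   %b = add i32 %a, 1
//   store i32 %b, i32* %p
// BlockNumbering assigns %a -> 0, %b -> 1 and the store -> 2, so scheduling
// questions such as "is this user placed after the whole bundle?" reduce to
// integer comparisons, e.g. (hypothetical usage):
//   BlockNumbering BN(BB);
//   bool After = BN.getIndex(UserInst) > BN.getIndex(BundleLastInst);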

/// \returns the parent basic block if all of the instructions in \p VL
/// are in the same block or null otherwise.
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return 0;

    if (BB != I->getParent())
      return 0;
  }
  return BB;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    if (!isa<Constant>(VL[i]))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode())
      return 0;
  }
  return Opcode;
}

/// \returns \p I after propagating metadata from \p VL.
static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (unsigned i = 0, n = Metadata.size(); i != n; ++i) {
    unsigned Kind = Metadata[i].first;
    MDNode *MD = Metadata[i].second;

    for (int i = 1, e = VL.size(); MD && i != e; i++) {
      Instruction *I = cast<Instruction>(VL[i]);
      MDNode *IMD = I->getMetadata(Kind);

      switch (Kind) {
      default:
        MD = 0; // Remove unknown metadata
        break;
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      }
    }
    I->setMetadata(Kind, MD);
  }
  return I;
}
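
// Illustrative sketch (not part of the original pass): when two scalar loads
// carrying different !tbaa tags are merged into one vector load, the merged
// instruction may only keep metadata that is valid for every lane, so the
// TBAA tags are widened to their most generic common ancestor and any
// metadata kind not handled above (e.g. !range) is dropped entirely.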

/// \returns The type that all of the values in \p VL have or null if there
/// are different types.
static Type* getSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return 0;

  return Ty;
}

/// \returns True if the ExtractElement instructions in VL can be vectorized
/// to use the original vector.
static bool CanReuseExtract(ArrayRef<Value *> VL) {
  assert(Instruction::ExtractElement == getSameOpcode(VL) && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *VL0 = VL[0];
  ExtractElementInst *E0 = cast<ExtractElementInst>(VL0);
  Value *Vec = E0->getOperand(0);

  // We have to extract from the same vector type.
  unsigned NElts = Vec->getType()->getVectorNumElements();

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
  ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1));
  if (!CI || CI->getZExtValue())
    return false;

  for (unsigned i = 1, e = VL.size(); i < e; ++i) {
    ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));

    if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec)
      return false;
  }

  return true;
}
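
// Illustrative sketch (not part of the original pass): a bundle such as
//   %e0 = extractelement <4 x float> %v, i32 0
//   %e1 = extractelement <4 x float> %v, i32 1
//   %e2 = extractelement <4 x float> %v, i32 2
//   %e3 = extractelement <4 x float> %v, i32 3
// reads every lane of %v in order, so the "vectorized" form of the bundle is
// simply %v itself and no gather sequence has to be emitted.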

static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                           SmallVectorImpl<Value *> &Left,
                                           SmallVectorImpl<Value *> &Right) {

  SmallVector<Value *, 16> OrigLeft, OrigRight;

  bool AllSameOpcodeLeft = true;
  bool AllSameOpcodeRight = true;
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *I = cast<Instruction>(VL[i]);
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);

    OrigLeft.push_back(V0);
    OrigRight.push_back(V1);

    Instruction *I0 = dyn_cast<Instruction>(V0);
    Instruction *I1 = dyn_cast<Instruction>(V1);

    // Check whether all operands on one side have the same opcode. In this
    // case we want to preserve the original order and not make things worse
    // by reordering.
    AllSameOpcodeLeft = I0;
    AllSameOpcodeRight = I1;

    if (i && AllSameOpcodeLeft) {
      if (Instruction *P0 = dyn_cast<Instruction>(OrigLeft[i-1])) {
        if (P0->getOpcode() != I0->getOpcode())
          AllSameOpcodeLeft = false;
      } else
        AllSameOpcodeLeft = false;
    }
    if (i && AllSameOpcodeRight) {
      if (Instruction *P1 = dyn_cast<Instruction>(OrigRight[i-1])) {
        if (P1->getOpcode() != I1->getOpcode())
          AllSameOpcodeRight = false;
      } else
        AllSameOpcodeRight = false;
    }

    // Sort two opcodes. In the code below we try to preserve the ability to
    // use broadcast of values instead of individual inserts.
    // vl1 = load
    // vl2 = phi
    // vr1 = load
    // vr2 = vr1
    //     = vl1 x vr1
    //     = vl2 x vr2
    // If we just sorted according to opcode we would leave the first line
    // intact but we would swap vl2 with vr2 because opcode(phi) > opcode(load).
    //     = vl1 x vr1
    //     = vr2 x vl2
    // Because vr2 and vr1 are from the same load we lose the opportunity of a
    // broadcast for the packed right side in the backend: we have [vr1, vl2]
    // instead of [vr1, vr2=vr1].
    if (I0 && I1) {
      if (!i && I0->getOpcode() > I1->getOpcode()) {
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() > I1->getOpcode() && Right[i-1] != I1) {
        // Try not to destroy a broadcast for no apparent benefit.
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() == I1->getOpcode() && Right[i-1] == I0) {
        // Try to preserve broadcasts.
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() == I1->getOpcode() && Left[i-1] == I1) {
        // Try to preserve broadcasts.
        Left.push_back(I1);
        Right.push_back(I0);
      } else {
        Left.push_back(I0);
        Right.push_back(I1);
      }
      continue;
    }
    // Only one side is an instruction; put it on the right.
    if (I0) {
      Left.push_back(V1);
      Right.push_back(I0);
      continue;
    }
    Left.push_back(V0);
    Right.push_back(V1);
  }

  bool LeftBroadcast = isSplat(Left);
  bool RightBroadcast = isSplat(Right);

  // Don't reorder if the operands were good to begin with.
  if (!(LeftBroadcast || RightBroadcast) &&
      (AllSameOpcodeRight || AllSameOpcodeLeft)) {
    Left = OrigLeft;
    Right = OrigRight;
  }
}
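
// Illustrative driver sketch (assumed usage, not part of the original pass):
// the helpers above feed the BoUpSLP class below, whose expected calling
// sequence is roughly
//   BoUpSLP R(F, SE, DL, TTI, AA, LI, DT);
//   R.buildTree(Bundle);                  // grow the vectorizable tree
//   if (R.getTreeCost() < -SLPCostThreshold)
//     R.vectorizeTree();                  // profitable: emit vector code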

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;

  BoUpSLP(Function *Func, ScalarEvolution *Se, const DataLayout *Dl,
          TargetTransformInfo *Tti, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt) :
    F(Func), SE(Se), DL(Dl), TTI(Tti), AA(Aa), LI(Li), DT(Dt),
    Builder(Se->getContext()) {
    // Setup the block numbering utility for all of the blocks in the
    // function.
    for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it) {
      BasicBlock *BB = it;
      BlocksNumbers[BB] = BlockNumbering(BB);
    }
  }

  /// \brief Vectorize the tree that was built by buildTree().
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// \returns the vectorization cost of the subtree that starts at the root.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    MemBarrierIgnoreList.clear();
  }

  /// \returns true if the memory operations A and B are consecutive.
  bool isConsecutiveAccess(Value *A, Value *B);

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();
private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This may happen in trees that contain cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL) const;

  /// \brief Take the pointer operand from the Load/Store instruction.
  /// \returns NULL if this is not a valid Load/Store instruction.
  static Value *getPointerOperand(Value *I);

  /// \brief Take the address space operand from the Load/Store instruction.
  /// \returns -1 if this is not a valid Load/Store instruction.
  static unsigned getAddressSpaceOperand(Value *I);

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \returns the AA location that is being accessed by the instruction.
  AliasAnalysis::Location getLocation(Instruction *I);

  /// \brief Checks if it is possible to sink an instruction from
  /// \p Src to \p Dst.
  /// \returns the pointer to the barrier instruction if we can't sink.
  Value *getSinkBarrier(Instruction *Src, Instruction *Dst);

  /// \returns the index of the last instruction in the BB from \p VL.
  int getLastIndex(ArrayRef<Value *> VL);

  /// \returns the last Instruction in the bundle \p VL.
  Instruction *getLastInstruction(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even when the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  struct TreeEntry {
    TreeEntry() : Scalars(), VectorizedValue(0), LastScalarIndex(0),
                  NeedToGather(0) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// The index in the basic block of the last scalar.
    int LastScalarIndex;

    /// Do we need to gather this sequence?
    bool NeedToGather;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) {
    VectorizableTree.push_back(TreeEntry());
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      Last->LastScalarIndex = getLastIndex(VL);
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      Last->LastScalarIndex = 0;
      MustGather.insert(VL.begin(), VL.end());
    }
    return Last;
  }
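
  // Illustrative sketch (not part of the original pass): for the scalar code
  //   a[0] = b[0] + c[0];
  //   a[1] = b[1] + c[1];
  // buildTree produces entries roughly like
  //   VectorizableTree[0] = { store a[0], store a[1] }  NeedToGather = false
  //   VectorizableTree[1] = { add0, add1 }              NeedToGather = false
  //   VectorizableTree[2] = { load b[0], load b[1] }    NeedToGather = false
  //   VectorizableTree[3] = { load c[0], load c[1] }    NeedToGather = false
  // A bundle that cannot be vectorized gets NeedToGather = true and is later
  // materialized with insertelement instructions.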

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value*, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L) :
      Scalar(S), User(U), Lane(L) {}
    // Which scalar in our function.
    Value *Scalar;
    // Which user that uses the scalar.
    llvm::User *User;
    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User).
  UserList ExternalUses;

  /// A list of instructions to ignore while sinking
  /// memory instructions. This set must be reset between runs of getCost.
  ValueSet MemBarrierIgnoreList;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;
  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;

  /// Numbers instructions in different blocks.
  DenseMap<BasicBlock *, BlockNumbering> BlocksNumbers;

  /// List of users to ignore during scheduling and that don't need extracting.
  ArrayRef<Value *> UserIgnoreList;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  const DataLayout *DL;
  TargetTransformInfo *TTI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;
};

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ArrayRef<Value *> UserIgnoreLst) {
  deleteTree();
  UserIgnoreList = UserIgnoreLst;
  if (!getSameType(Roots))
    return;
  buildTree_rec(Roots, 0);

  // Collect the values that we need to extract from the tree.
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      for (User *U : Scalar->users()) {
        DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        // Skip in-tree scalars that become vectors.
        if (ScalarToTreeEntry.count(U)) {
          DEBUG(dbgs() << "SLP: \tInternal user will be removed:" <<
                *U << ".\n");
          int Idx = ScalarToTreeEntry[U]; (void) Idx;
          assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
          continue;
        }
        Instruction *UserInst = dyn_cast<Instruction>(U);
        if (!UserInst)
          continue;

        // Ignore users in the user ignore list.
        if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UserInst) !=
            UserIgnoreList.end())
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
      }
    }
  }
}
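
// Illustrative sketch (not part of the original pass): if %add1 from a
// vectorized bundle is also used by a call that stays scalar, buildTree
// records (%add1, call, lane 1) in ExternalUses, and vectorizeTree later
// emits
//   %ex = extractelement <4 x i32> %vec.add, i32 1
// in front of the call and rewires the call to use %ex.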

void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
  bool SameTy = getSameType(VL); (void)SameTy;
  assert(SameTy && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false);
    return;
  }

  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false);
      return;
    }

  // If all of the operands are identical or constant we have a simple solution.
  if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) ||
      !getSameOpcode(VL)) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Check if this is a duplicate of another entry.
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is already in tree.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // If any of the scalars appears in the table OR it is marked as a value that
  // needs to stay scalar then we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i]) || MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar. \n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  int MyLastIndex = getLastIndex(VL);
  BasicBlock *BB = cast<Instruction>(VL0)->getParent();

  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *Scalar = cast<Instruction>(VL[i]);
    DEBUG(dbgs() << "SLP: Checking users of " << *Scalar << ". \n");
    for (User *U : Scalar->users()) {
      DEBUG(dbgs() << "SLP: \tUser " << *U << ". \n");
      Instruction *UI = dyn_cast<Instruction>(U);
      if (!UI) {
        DEBUG(dbgs() << "SLP: Gathering due to unknown user. \n");
        newTreeEntry(VL, false);
        return;
      }

      // We don't care if the user is in a different basic block.
      BasicBlock *UserBlock = UI->getParent();
      if (UserBlock != BB) {
        DEBUG(dbgs() << "SLP: User from a different basic block "
              << *UI << ". \n");
        continue;
      }

      // If this is a PHINode within this basic block then we can place the
      // extract wherever we want.
      if (isa<PHINode>(*UI)) {
        DEBUG(dbgs() << "SLP: \tWe can schedule PHIs:" << *UI << ". \n");
        continue;
      }

      // Check if this is a safe in-tree user.
      if (ScalarToTreeEntry.count(UI)) {
        int Idx = ScalarToTreeEntry[UI];
        int VecLocation = VectorizableTree[Idx].LastScalarIndex;
        if (VecLocation <= MyLastIndex) {
          DEBUG(dbgs() << "SLP: Gathering due to unschedulable vector.\n");
          newTreeEntry(VL, false);
          return;
        }
        DEBUG(dbgs() << "SLP: In-tree user (" << *UI << ") at #" <<
              VecLocation << " vector value (" << *Scalar << ") at #"
              << MyLastIndex << ".\n");
        continue;
      }

      // Ignore users in the user ignore list.
      if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UI) !=
          UserIgnoreList.end())
        continue;

      // Make sure that we can schedule this unknown user.
      BlockNumbering &BN = BlocksNumbers[BB];
      int UserIndex = BN.getIndex(UI);
      if (UserIndex < MyLastIndex) {
        DEBUG(dbgs() << "SLP: Can't schedule extractelement for "
              << *UI << ". \n");
        newTreeEntry(VL, false);
        return;
      }
    }
  }
\n"); 704 newTreeEntry(VL, false); 705 return; 706 } 707 DEBUG(dbgs() << "SLP: In-tree user (" << *UI << ") at #" << 708 VecLocation << " vector value (" << *Scalar << ") at #" 709 << MyLastIndex << ".\n"); 710 continue; 711 } 712 713 // Ignore users in the user ignore list. 714 if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UI) != 715 UserIgnoreList.end()) 716 continue; 717 718 // Make sure that we can schedule this unknown user. 719 BlockNumbering &BN = BlocksNumbers[BB]; 720 int UserIndex = BN.getIndex(UI); 721 if (UserIndex < MyLastIndex) { 722 723 DEBUG(dbgs() << "SLP: Can't schedule extractelement for " 724 << *UI << ". \n"); 725 newTreeEntry(VL, false); 726 return; 727 } 728 } 729 } 730 731 // Check that every instructions appears once in this bundle. 732 for (unsigned i = 0, e = VL.size(); i < e; ++i) 733 for (unsigned j = i+1; j < e; ++j) 734 if (VL[i] == VL[j]) { 735 DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 736 newTreeEntry(VL, false); 737 return; 738 } 739 740 // Check that instructions in this bundle don't reference other instructions. 741 // The runtime of this check is O(N * N-1 * uses(N)) and a typical N is 4. 742 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 743 for (User *U : VL[i]->users()) { 744 for (unsigned j = 0; j < e; ++j) { 745 if (i != j && U == VL[j]) { 746 DEBUG(dbgs() << "SLP: Intra-bundle dependencies!" << *U << ". \n"); 747 newTreeEntry(VL, false); 748 return; 749 } 750 } 751 } 752 } 753 754 DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 755 756 unsigned Opcode = getSameOpcode(VL); 757 758 // Check if it is safe to sink the loads or the stores. 759 if (Opcode == Instruction::Load || Opcode == Instruction::Store) { 760 Instruction *Last = getLastInstruction(VL); 761 762 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 763 if (VL[i] == Last) 764 continue; 765 Value *Barrier = getSinkBarrier(cast<Instruction>(VL[i]), Last); 766 if (Barrier) { 767 DEBUG(dbgs() << "SLP: Can't sink " << *VL[i] << "\n down to " << *Last 768 << "\n because of " << *Barrier << ". Gathering.\n"); 769 newTreeEntry(VL, false); 770 return; 771 } 772 } 773 } 774 775 switch (Opcode) { 776 case Instruction::PHI: { 777 PHINode *PH = dyn_cast<PHINode>(VL0); 778 779 // Check for terminator values (e.g. invoke). 780 for (unsigned j = 0; j < VL.size(); ++j) 781 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 782 TerminatorInst *Term = dyn_cast<TerminatorInst>( 783 cast<PHINode>(VL[j])->getIncomingValueForBlock(PH->getIncomingBlock(i))); 784 if (Term) { 785 DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n"); 786 newTreeEntry(VL, false); 787 return; 788 } 789 } 790 791 newTreeEntry(VL, true); 792 DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 793 794 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 795 ValueList Operands; 796 // Prepare the operand vector. 797 for (unsigned j = 0; j < VL.size(); ++j) 798 Operands.push_back(cast<PHINode>(VL[j])->getIncomingValueForBlock( 799 PH->getIncomingBlock(i))); 800 801 buildTree_rec(Operands, Depth + 1); 802 } 803 return; 804 } 805 case Instruction::ExtractElement: { 806 bool Reuse = CanReuseExtract(VL); 807 if (Reuse) { 808 DEBUG(dbgs() << "SLP: Reusing extract sequence.\n"); 809 } 810 newTreeEntry(VL, Reuse); 811 return; 812 } 813 case Instruction::Load: { 814 // Check if the loads are consecutive or of we need to swizzle them. 
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();
    for (unsigned i = 0; i < VL.size(); ++i) {
      Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
      if (Ty != SrcTy || Ty->isAggregateType() || Ty->isVectorTy()) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of casts.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Check that all of the compares have the same predicate.
    CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
    Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
    for (unsigned i = 1, e = VL.size(); i < e; ++i) {
      CmpInst *Cmp = cast<CmpInst>(VL[i]);
      if (Cmp->getPredicate() != P0 ||
          Cmp->getOperand(0)->getType() != ComparedTy) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of compares.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of bin op.\n");

    // Sort operands of the instructions so that each side is more likely to
    // have the same opcode.
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
      ValueList Left, Right;
      reorderInputsAccordingToOpcode(VL, Left, Right);
      buildTree_rec(Left, Depth + 1);
      buildTree_rec(Right, Depth + 1);
      return;
    }

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::Store: {
    // Check if the stores are consecutive or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
      if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
        return;
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of stores.\n");

    ValueList Operands;
    for (unsigned j = 0; j < VL.size(); ++j)
      Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));

    // We can ignore these values because we are sinking them down.
    MemBarrierIgnoreList.insert(VL.begin(), VL.end());
    buildTree_rec(Operands, Depth + 1);
    return;
  }
  case Instruction::Call: {
    // Check if the calls are all to the same vectorizable intrinsic.
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(VL[0]);
    if (II == NULL) {
      newTreeEntry(VL, false);
      DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
      return;
    }

    Intrinsic::ID ID = II->getIntrinsicID();

    for (unsigned i = 1, e = VL.size(); i != e; ++i) {
      IntrinsicInst *II2 = dyn_cast<IntrinsicInst>(VL[i]);
      if (!II2 || II2->getIntrinsicID() != ID) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: mismatched calls:" << *II << "!=" << *VL[i]
              << "\n");
        return;
      }
    }

    newTreeEntry(VL, true);
    for (unsigned i = 0, e = II->getNumArgOperands(); i != e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j) {
        IntrinsicInst *II2 = dyn_cast<IntrinsicInst>(VL[j]);
        Operands.push_back(II2->getArgOperand(i));
      }
      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  default:
    newTreeEntry(VL, false);
    DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
    return;
  }
}

int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value*> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  if (E->NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isSplat(VL)) {
      return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    }
    return getGatherCost(E->Scalars);
  }

  assert(getSameOpcode(VL) && getSameType(VL) && getSameBlock(VL) &&
         "Invalid VL");
  Instruction *VL0 = cast<Instruction>(VL[0]);
  unsigned Opcode = VL0->getOpcode();
  switch (Opcode) {
  case Instruction::PHI: {
    return 0;
  }
  case Instruction::ExtractElement: {
    if (CanReuseExtract(VL)) {
      int DeadCost = 0;
      for (unsigned i = 0, e = VL.size(); i < e; ++i) {
        ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
        if (E->hasOneUse())
          // Take credit for instruction that will become dead.
          DeadCost +=
              TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
      }
      return -DeadCost;
    }
    return getGatherCost(VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();

    // Calculate the cost of this instruction.
    int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
                                                       VL0->getType(), SrcTy);

    VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
    int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
    return VecCost - ScalarCost;
  }
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Calculate the cost of this instruction.
    int ScalarCost = 0;
    int VecCost = 0;
    if (Opcode == Instruction::FCmp || Opcode == Instruction::ICmp ||
        Opcode == Instruction::Select) {
      VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
      ScalarCost = VecTy->getNumElements() *
          TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
      VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
    } else {
      // Certain instructions can be cheaper to vectorize if they have a
      // constant second vector operand.
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;

      // If all operands are exactly the same ConstantInt then set the
      // operand kind to OK_UniformConstantValue.
      // If instead not all operands are constants, then set the operand kind
      // to OK_AnyValue. If all operands are constants but not the same,
      // then set the operand kind to OK_NonUniformConstantValue.
      ConstantInt *CInt = NULL;
      for (unsigned i = 0; i < VL.size(); ++i) {
        const Instruction *I = cast<Instruction>(VL[i]);
        if (!isa<ConstantInt>(I->getOperand(1))) {
          Op2VK = TargetTransformInfo::OK_AnyValue;
          break;
        }
        if (i == 0) {
          CInt = cast<ConstantInt>(I->getOperand(1));
          continue;
        }
        if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
            CInt != cast<ConstantInt>(I->getOperand(1)))
          Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      }

      ScalarCost =
          VecTy->getNumElements() *
          TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK);
      VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK);
    }
    return VecCost - ScalarCost;
  }
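
  // Illustrative cost sketch (hypothetical numbers, not from any particular
  // target): for a bundle of four i32 adds whose second operand is always
  // the constant 7, Op2VK is OK_UniformConstantValue. If the target reports
  // a scalar add cost of 1 and a <4 x i32> add cost of 1, the entry cost is
  //   VecCost - ScalarCost = 1 - 4 * 1 = -3,
  // i.e. vectorizing this bundle is modeled as saving three units.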
  case Instruction::Load: {
    // Cost of wide load - cost of scalar loads.
    int ScalarLdCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
    int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
    return VecLdCost - ScalarLdCost;
  }
  case Instruction::Store: {
    // We know that we can merge the stores. Calculate the cost.
    int ScalarStCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
    int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
    return VecStCost - ScalarStCost;
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    IntrinsicInst *II = cast<IntrinsicInst>(CI);
    Intrinsic::ID ID = II->getIntrinsicID();

    // Calculate the cost of the scalar and vector calls.
    SmallVector<Type*, 4> ScalarTys, VecTys;
    for (unsigned op = 0, opc = II->getNumArgOperands(); op != opc; ++op) {
      ScalarTys.push_back(CI->getArgOperand(op)->getType());
      VecTys.push_back(VectorType::get(CI->getArgOperand(op)->getType(),
                                       VecTy->getNumElements()));
    }

    int ScalarCallCost = VecTy->getNumElements() *
        TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys);

    int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys);

    DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
          << " (" << VecCallCost << "-" << ScalarCallCost << ")"
          << " for " << *II << "\n");

    return VecCallCost - ScalarCallCost;
  }
  default:
    llvm_unreachable("Unknown instruction");
  }
}

bool BoUpSLP::isFullyVectorizableTinyTree() {
  DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
        VectorizableTree.size() << " is fully vectorizable.\n");

  // We only handle trees of height 2.
  if (VectorizableTree.size() != 2)
    return false;

  // Handle splat stores.
  if (!VectorizableTree[0].NeedToGather && isSplat(VectorizableTree[1].Scalars))
    return true;

  // Gathering cost would be too much for tiny trees.
  if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
    return false;

  return true;
}
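
// Illustrative sketch (not part of the original pass): a height-2 tree such
// as the splat-store chain
//   a[0] = x; a[1] = x; a[2] = x; a[3] = x;
// is accepted even though it is tiny, because the splat operand needs no
// per-lane gather; a height-2 tree whose second entry must be gathered is
// rejected since the gather cost would dominate any saving.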

int BoUpSLP::getTreeCost() {
  int Cost = 0;
  DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
        VectorizableTree.size() << ".\n");

  // We only vectorize tiny trees if they are fully vectorizable.
  if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) {
    if (!VectorizableTree.size()) {
      assert(!ExternalUses.size() && "We should not have any external users");
    }
    return INT_MAX;
  }

  unsigned BundleWidth = VectorizableTree[0].Scalars.size();

  for (unsigned i = 0, e = VectorizableTree.size(); i != e; ++i) {
    int C = getEntryCost(&VectorizableTree[i]);
    DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
          << *VectorizableTree[i].Scalars[0] << " .\n");
    Cost += C;
  }

  SmallSet<Value *, 16> ExtractCostCalculated;
  int ExtractCost = 0;
  for (UserList::iterator I = ExternalUses.begin(), E = ExternalUses.end();
       I != E; ++I) {
    // We only add extract cost once for the same scalar.
    if (!ExtractCostCalculated.insert(I->Scalar))
      continue;

    VectorType *VecTy = VectorType::get(I->Scalar->getType(), BundleWidth);
    ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                           I->Lane);
  }

  DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost << ".\n");
  return Cost + ExtractCost;
}

int BoUpSLP::getGatherCost(Type *Ty) {
  int Cost = 0;
  for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
    Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
  return Cost;
}

int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  return getGatherCost(VecTy);
}

AliasAnalysis::Location BoUpSLP::getLocation(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return AA->getLocation(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return AA->getLocation(LI);
  return AliasAnalysis::Location();
}

Value *BoUpSLP::getPointerOperand(Value *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return 0;
}

unsigned BoUpSLP::getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}
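
// Illustrative sketch (hypothetical per-lane costs): gathering four scalars
// into a <4 x float> is modeled by getGatherCost as four insertelement
// operations; with a cost of 1 per lane the gather contributes +4 to the
// tree cost, which the vectorized entries must recover elsewhere for the
// tree to be profitable.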

bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers of the same type.
  if (PtrA == PtrB || PtrA->getType() != PtrB->getType())
    return false;

  unsigned PtrBitWidth = DL->getPointerSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(PtrBitWidth, DL->getTypeStoreSize(Ty));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetB);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the size.
  APInt BaseDelta = Size - OffsetDelta;

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE->getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE->getSCEV(PtrB);
  const SCEV *C = SE->getConstant(BaseDelta);
  const SCEV *X = SE->getAddExpr(PtrSCEVA, C);
  return X == PtrSCEVB;
}

Value *BoUpSLP::getSinkBarrier(Instruction *Src, Instruction *Dst) {
  assert(Src->getParent() == Dst->getParent() && "Not the same BB");
  BasicBlock::iterator I = Src, E = Dst;
  /// Scan all of the instructions from SRC to DST and check if
  /// the source may alias any of them.
  for (++I; I != E; ++I) {
    // Ignore store instructions that are marked as 'ignore'.
    if (MemBarrierIgnoreList.count(I))
      continue;
    if (Src->mayWriteToMemory()) /* Write */ {
      if (!I->mayReadOrWriteMemory())
        continue;
    } else /* Read */ {
      if (!I->mayWriteToMemory())
        continue;
    }
    AliasAnalysis::Location A = getLocation(&*I);
    AliasAnalysis::Location B = getLocation(Src);

    if (!A.Ptr || !B.Ptr || AA->alias(A, B))
      return I;
  }
  return 0;
}
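
// Illustrative sketch (not part of the original pass): in isConsecutiveAccess
// above, for PtrA = &p[4] and PtrB = &p[5] stripping in-bounds constant
// offsets reduces both pointers to p with byte offsets 4*Size and 5*Size, so
// OffsetDelta == Size and the accesses are consecutive. When the stripped
// bases still differ syntactically, SCEV is asked the equivalent question:
// does SCEV(PtrA) + (Size - OffsetDelta) equal SCEV(PtrB)?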

int BoUpSLP::getLastIndex(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && BlocksNumbers.count(BB) && "Invalid block");
  BlockNumbering &BN = BlocksNumbers[BB];

  int MaxIdx = BN.getIndex(BB->getFirstNonPHI());
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  return MaxIdx;
}

Instruction *BoUpSLP::getLastInstruction(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && BlocksNumbers.count(BB) && "Invalid block");
  BlockNumbering &BN = BlocksNumbers[BB];

  int MaxIdx = BN.getIndex(cast<Instruction>(VL[0]));
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  Instruction *I = BN.getInstruction(MaxIdx);
  assert(I && "bad location");
  return I;
}

void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
  Instruction *VL0 = cast<Instruction>(VL[0]);
  Instruction *LastInst = getLastInstruction(VL);
  BasicBlock::iterator NextInst = LastInst;
  ++NextInst;
  Builder.SetInsertPoint(VL0->getParent(), NextInst);
  Builder.SetCurrentDebugLocation(VL0->getDebugLoc());
}

Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
  Value *Vec = UndefValue::get(Ty);
  // Generate the 'InsertElement' instruction.
  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
    if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(Insrt);
      CSEBlocks.insert(Insrt->getParent());

      // Add to our 'need-to-extract' list.
      if (ScalarToTreeEntry.count(VL[i])) {
        int Idx = ScalarToTreeEntry[VL[i]];
        TreeEntry *E = &VectorizableTree[Idx];
        // Find which lane we need to extract.
        int FoundLane = -1;
        for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for?
          if (E->Scalars[Lane] == VL[i]) {
            FoundLane = Lane;
            break;
          }
        }
        assert(FoundLane >= 0 && "Could not find the correct lane");
        ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
      }
    }
  }

  return Vec;
}
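
// Illustrative sketch (not part of the original pass): Gather({%a, %b},
// <2 x i32>) emits
//   %v0 = insertelement <2 x i32> undef, i32 %a, i32 0
//   %v1 = insertelement <2 x i32> %v0, i32 %b, i32 1
// and records the inserts in GatherSeq so optimizeGatherSequence can later
// hoist or CSE them.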

Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
  SmallDenseMap<Value*, int>::const_iterator Entry
    = ScalarToTreeEntry.find(VL[0]);
  if (Entry != ScalarToTreeEntry.end()) {
    int Idx = Entry->second;
    const TreeEntry *En = &VectorizableTree[Idx];
    if (En->isSame(VL) && En->VectorizedValue)
      return En->VectorizedValue;
  }
  return 0;
}

Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    if (E->isSame(VL))
      return vectorizeTree(E);
  }

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  return Gather(VL, VecTy);
}

Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
  Type *ScalarTy = VL0->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());

  if (E->NeedToGather) {
    setInsertPointAfterBundle(E->Scalars);
    return Gather(E->Scalars, VecTy);
  }

  unsigned Opcode = VL0->getOpcode();
  assert(Opcode == getSameOpcode(E->Scalars) && "Invalid opcode");

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = dyn_cast<PHINode>(VL0);
    Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
    Builder.SetCurrentDebugLocation(PH->getDebugLoc());
    PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
    E->VectorizedValue = NewPhi;

    // PHINodes may have multiple entries from the same block. We want to
    // visit every block once.
    SmallSet<BasicBlock*, 4> VisitedBBs;

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      BasicBlock *IBB = PH->getIncomingBlock(i);

      if (!VisitedBBs.insert(IBB)) {
        NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
        continue;
      }

      // Prepare the operand vector.
      for (unsigned j = 0; j < E->Scalars.size(); ++j)
        Operands.push_back(cast<PHINode>(E->Scalars[j])->
                           getIncomingValueForBlock(IBB));

      Builder.SetInsertPoint(IBB->getTerminator());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      Value *Vec = vectorizeTree(Operands);
      NewPhi->addIncoming(Vec, IBB);
    }

    assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
           "Invalid number of incoming values");
    return NewPhi;
  }

  case Instruction::ExtractElement: {
    if (CanReuseExtract(E->Scalars)) {
      Value *V = VL0->getOperand(0);
      E->VectorizedValue = V;
      return V;
    }
    return Gather(E->Scalars, VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    ValueList INVL;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i)
      INVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));

    setInsertPointAfterBundle(E->Scalars);

    Value *InVec = vectorizeTree(INVL);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CastInst *CI = dyn_cast<CastInst>(VL0);
    Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::FCmp:
  case Instruction::ICmp: {
    ValueList LHSV, RHSV;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      LHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      RHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *L = vectorizeTree(LHSV);
    Value *R = vectorizeTree(RHSV);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
    Value *V;
    if (Opcode == Instruction::FCmp)
      V = Builder.CreateFCmp(P0, L, R);
    else
      V = Builder.CreateICmp(P0, L, R);

    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Select: {
    ValueList TrueVec, FalseVec, CondVec;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      CondVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      TrueVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
      FalseVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(2));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *Cond = vectorizeTree(CondVec);
    Value *True = vectorizeTree(TrueVec);
    Value *False = vectorizeTree(FalseVec);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    Value *V = Builder.CreateSelect(Cond, True, False);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    ValueList LHSVL, RHSVL;
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative())
      reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL);
    else
      for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
        LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
        RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
      }

    setInsertPointAfterBundle(E->Scalars);

    Value *LHS = vectorizeTree(LHSVL);
    Value *RHS = vectorizeTree(RHSVL);

    if (LHS == RHS && isa<Instruction>(LHS)) {
      assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order");
    }

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
    Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
    E->VectorizedValue = V;

    if (Instruction *I = dyn_cast<Instruction>(V))
      return propagateMetadata(I, E->Scalars);

    return V;
  }
  case Instruction::Load: {
    // Loads are inserted at the head of the tree because we don't want to
    // sink them all the way down past store instructions.
    setInsertPointAfterBundle(E->Scalars);

    LoadInst *LI = cast<LoadInst>(VL0);
    unsigned AS = LI->getPointerAddressSpace();

    Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));
    unsigned Alignment = LI->getAlignment();
    LI = Builder.CreateLoad(VecPtr);
    LI->setAlignment(Alignment);
    E->VectorizedValue = LI;
    return propagateMetadata(LI, E->Scalars);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(VL0);
    unsigned Alignment = SI->getAlignment();
    unsigned AS = SI->getPointerAddressSpace();

    ValueList ValueOp;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i)
      ValueOp.push_back(cast<StoreInst>(E->Scalars[i])->getValueOperand());

    setInsertPointAfterBundle(E->Scalars);

    Value *VecValue = vectorizeTree(ValueOp);
    Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));
    StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
    S->setAlignment(Alignment);
    E->VectorizedValue = S;
    return propagateMetadata(S, E->Scalars);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);

    setInsertPointAfterBundle(E->Scalars);
    std::vector<Value *> OpVecs;
    for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
      ValueList OpVL;
      for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
        CallInst *CEI = cast<CallInst>(E->Scalars[i]);
        OpVL.push_back(CEI->getArgOperand(j));
      }

      Value *OpVec = vectorizeTree(OpVL);
      DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
      OpVecs.push_back(OpVec);
    }

    Module *M = F->getParent();
    IntrinsicInst *II = cast<IntrinsicInst>(CI);
    Intrinsic::ID ID = II->getIntrinsicID();
    Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) };
    Function *CF = Intrinsic::getDeclaration(M, ID, Tys);
    Value *V = Builder.CreateCall(CF, OpVecs);
    E->VectorizedValue = V;
    return V;
  }
  default:
    llvm_unreachable("unknown inst");
  }
  return 0;
}
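
// Illustrative sketch (not part of the original pass): in the Load case
// above, a bundle of two consecutive loads
//   %a = load float* %p
//   %b = load float* %q        ; where %q == %p + 1
// is rewritten as
//   %vp = bitcast float* %p to <2 x float>*
//   %va = load <2 x float>* %vp
// with the first load's alignment reapplied and mergeable metadata
// propagated from every lane.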
  // Extract all of the elements with the external uses.
  for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end();
       it != e; ++it) {
    Value *Scalar = it->Scalar;
    llvm::User *User = it->User;

    // Skip users that we already RAUWed. This happens when one instruction
    // has multiple uses of the same value.
    if (std::find(Scalar->user_begin(), Scalar->user_end(), User) ==
        Scalar->user_end())
      continue;
    assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");

    int Idx = ScalarToTreeEntry[Scalar];
    TreeEntry *E = &VectorizableTree[Idx];
    assert(!E->NeedToGather && "Extracting from a gather list");

    Value *Vec = E->VectorizedValue;
    assert(Vec && "Can't find vectorizable value");

    Value *Lane = Builder.getInt32(it->Lane);
    // Generate extracts for out-of-tree users.
    // Find the insertion point for the extractelement lane.
    if (isa<Instruction>(Vec)) {
      if (PHINode *PH = dyn_cast<PHINode>(User)) {
        for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
          if (PH->getIncomingValue(i) == Scalar) {
            Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
            Value *Ex = Builder.CreateExtractElement(Vec, Lane);
            CSEBlocks.insert(PH->getIncomingBlock(i));
            PH->setOperand(i, Ex);
          }
        }
      } else {
        Builder.SetInsertPoint(cast<Instruction>(User));
        Value *Ex = Builder.CreateExtractElement(Vec, Lane);
        CSEBlocks.insert(cast<Instruction>(User)->getParent());
        User->replaceUsesOfWith(Scalar, Ex);
      }
    } else {
      Builder.SetInsertPoint(F->getEntryBlock().begin());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      CSEBlocks.insert(&F->getEntryBlock());
      User->replaceUsesOfWith(Scalar, Ex);
    }

    DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
  }

  // For each vectorized value:
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      assert(Entry->VectorizedValue && "Can't find vectorizable value");

      Type *Ty = Scalar->getType();
      if (!Ty->isVoidTy()) {
#ifndef NDEBUG
        for (User *U : Scalar->users()) {
          DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");

          assert((ScalarToTreeEntry.count(U) ||
                  // It is legal to replace users in the ignorelist by undef.
                  (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), U) !=
                   UserIgnoreList.end())) &&
                 "Replacing out-of-tree value with undef");
        }
#endif
        Value *Undef = UndefValue::get(Ty);
        Scalar->replaceAllUsesWith(Undef);
      }
      DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
      cast<Instruction>(Scalar)->eraseFromParent();
    }
  }

  for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it) {
    BlocksNumbers[it].forget();
  }
  Builder.ClearInsertionPoint();

  return VectorizableTree[0].VectorizedValue;
}

void BoUpSLP::optimizeGatherSequence() {
  DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
               << " gather sequence instructions.\n");
  // LICM InsertElementInst sequences.
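  // For illustration (hypothetical IR): an insertelement such as
  //   loop:
  //     %g = insertelement <4 x float> undef, float %inv, i32 0
  // is moved into the loop preheader below, provided that neither the vector
  // operand nor %inv is defined inside the loop.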
  for (SetVector<Instruction *>::iterator it = GatherSeq.begin(),
                                          e = GatherSeq.end();
       it != e; ++it) {
    InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it);

    if (!Insert)
      continue;

    // Check if this block is inside a loop.
    Loop *L = LI->getLoopFor(Insert->getParent());
    if (!L)
      continue;

    // Check if it has a preheader.
    BasicBlock *PreHeader = L->getLoopPreheader();
    if (!PreHeader)
      continue;

    // If the vector or the element that we insert into it are
    // instructions that are defined inside this loop then we can't
    // hoist this instruction.
    Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
    Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
    if (CurrVec && L->contains(CurrVec))
      continue;
    if (NewElem && L->contains(NewElem))
      continue;

    // We can hoist this instruction. Move it to the pre-header.
    Insert->moveBefore(PreHeader->getTerminator());
  }

  // Sort blocks by domination. This ensures we visit a block after all blocks
  // dominating it are visited.
  SmallVector<BasicBlock *, 8> CSEWorkList(CSEBlocks.begin(), CSEBlocks.end());
  std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(),
                   [this](const BasicBlock *A, const BasicBlock *B) {
                     return DT->properlyDominates(A, B);
                   });

  // Perform O(N^2) search over the gather sequences and merge identical
  // instructions. TODO: We can further optimize this scan if we split the
  // instructions into different buckets based on the insert lane.
  SmallVector<Instruction *, 16> Visited;
  for (SmallVectorImpl<BasicBlock *>::iterator I = CSEWorkList.begin(),
                                               E = CSEWorkList.end();
       I != E; ++I) {
    assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
           "Worklist not sorted properly!");
    BasicBlock *BB = *I;
    // For all instructions in blocks containing gather sequences:
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
      Instruction *In = it++;
      if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
        continue;

      // Check if we can replace this instruction with any of the
      // visited instructions.
      for (SmallVectorImpl<Instruction *>::iterator v = Visited.begin(),
                                                    ve = Visited.end();
           v != ve; ++v) {
        if (In->isIdenticalTo(*v) &&
            DT->dominates((*v)->getParent(), In->getParent())) {
          In->replaceAllUsesWith(*v);
          In->eraseFromParent();
          In = 0;
          break;
        }
      }
      if (In) {
        assert(std::find(Visited.begin(), Visited.end(), In) == Visited.end());
        Visited.push_back(In);
      }
    }
  }
  CSEBlocks.clear();
  GatherSeq.clear();
}

/// The SLPVectorizer Pass.
struct SLPVectorizer : public FunctionPass {
  typedef SmallVector<StoreInst *, 8> StoreList;
  typedef MapVector<Value *, StoreList> StoreListMap;

  /// Pass identification, replacement for typeid
  static char ID;

  explicit SLPVectorizer() : FunctionPass(ID) {
    initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
  }

  ScalarEvolution *SE;
  const DataLayout *DL;
  TargetTransformInfo *TTI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;

  bool runOnFunction(Function &F) override {
    if (skipOptnoneFunction(F))
      return false;

    SE = &getAnalysis<ScalarEvolution>();
    DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
    DL = DLP ? &DLP->getDataLayout() : 0;
    TTI = &getAnalysis<TargetTransformInfo>();
    AA = &getAnalysis<AliasAnalysis>();
    LI = &getAnalysis<LoopInfo>();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

    StoreRefs.clear();
    bool Changed = false;

    // If the target claims to have no vector registers don't attempt
    // vectorization.
    if (!TTI->getNumberOfRegisters(true))
      return false;

    // Must have DataLayout. We can't require it because some tests run w/o
    // triple.
    if (!DL)
      return false;

    // Don't vectorize when the attribute NoImplicitFloat is used.
    if (F.hasFnAttribute(Attribute::NoImplicitFloat))
      return false;

    DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");

    // Use the bottom-up SLP vectorizer to construct chains that start with
    // the store instructions.
    BoUpSLP R(&F, SE, DL, TTI, AA, LI, DT);

    // Scan the blocks in the function in post order.
    for (po_iterator<BasicBlock*> it = po_begin(&F.getEntryBlock()),
         e = po_end(&F.getEntryBlock()); it != e; ++it) {
      BasicBlock *BB = *it;

      // Vectorize trees that end at stores.
      if (unsigned count = collectStores(BB, R)) {
        (void)count;
        DEBUG(dbgs() << "SLP: Found " << count << " stores to vectorize.\n");
        Changed |= vectorizeStoreChains(R);
      }

      // Vectorize trees that end at reductions.
      Changed |= vectorizeChainsInBlock(BB, R);
    }

    if (Changed) {
      R.optimizeGatherSequence();
      DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
      DEBUG(verifyFunction(F));
    }
    return Changed;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    FunctionPass::getAnalysisUsage(AU);
    AU.addRequired<ScalarEvolution>();
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<TargetTransformInfo>();
    AU.addRequired<LoopInfo>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfo>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.setPreservesCFG();
  }

private:

  /// \brief Collect memory references and sort them according to their base
  /// object. We sort the stores to their base objects to reduce the cost of
  /// the quadratic search on the stores. TODO: We can further reduce this
  /// cost if we flush the chain creation every time we run into a memory
  /// barrier.
  unsigned collectStores(BasicBlock *BB, BoUpSLP &R);

  /// \brief Try to vectorize a chain that starts at two arithmetic instrs.
  bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R);

  /// \brief Try to vectorize a list of operands.
  /// \param BuildVector A list of users to ignore for the purpose of
  ///                    scheduling and that don't need extracting.
  /// \returns true if a value was vectorized.
  bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
                          ArrayRef<Value *> BuildVector = None);

  /// \brief Try to vectorize a chain that may start at the operands of \p V.
  bool tryToVectorize(BinaryOperator *V, BoUpSLP &R);

  /// \brief Vectorize the stores that were collected in StoreRefs.
  bool vectorizeStoreChains(BoUpSLP &R);

  /// \brief Scan the basic block and look for patterns that are likely to
  /// start a vectorization chain.
  bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R);

  bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold,
                           BoUpSLP &R);

  bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold,
                       BoUpSLP &R);
private:
  StoreListMap StoreRefs;
};

/// \brief Check that the values in the slice of the \p VL array are still
/// present in the WeakVH array.
/// Vectorization of part of the VL array may cause later values in the VL
/// array to become invalid. We track when this has happened in the WeakVH
/// array.
static bool hasValueBeenRAUWed(ArrayRef<Value *> &VL,
                               SmallVectorImpl<WeakVH> &VH,
                               unsigned SliceBegin,
                               unsigned SliceSize) {
  for (unsigned i = SliceBegin; i < SliceBegin + SliceSize; ++i)
    if (VH[i] != VL[i])
      return true;

  return false;
}

bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
                                        int CostThreshold, BoUpSLP &R) {
  unsigned ChainLen = Chain.size();
  DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
               << "\n");
  Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType();
  unsigned Sz = DL->getTypeSizeInBits(StoreTy);
  unsigned VF = MinVecRegSize / Sz;

  if (!isPowerOf2_32(Sz) || VF < 2)
    return false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());

  bool Changed = false;
  // Look for profitable vectorizable trees at all offsets, starting at zero.
  for (unsigned i = 0, e = ChainLen; i < e; ++i) {
    if (i + VF > e)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
                 << "\n");
    ArrayRef<Value *> Operands = Chain.slice(i, VF);

    R.buildTree(Operands);

    int Cost = R.getTreeCost();

    DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
    if (Cost < CostThreshold) {
      DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
                                    int costThreshold, BoUpSLP &R) {
  SetVector<Value *> Heads, Tails;
  SmallDenseMap<Value *, Value *> ConsecutiveChain;

  // We may run into multiple chains that merge into a single chain. We mark
  // the stores that we vectorized so that we don't visit the same store
  // twice.
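  // For illustration (hypothetical IR): four stores to a[0], a[1], a[2] and
  // a[3], discovered in any order, are linked into the single chain
  //   a[0] -> a[1] -> a[2] -> a[3]
  // and each store is considered for vectorization at most once.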
  BoUpSLP::ValueSet VectorizedStores;
  bool Changed = false;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
    for (unsigned j = 0; j < e; ++j) {
      if (i == j)
        continue;

      if (R.isConsecutiveAccess(Stores[i], Stores[j])) {
        Tails.insert(Stores[j]);
        Heads.insert(Stores[i]);
        ConsecutiveChain[Stores[i]] = Stores[j];
      }
    }
  }

  // For stores that start but don't end a link in the chain:
  for (SetVector<Value *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and
    // try to vectorize it.
    BoUpSLP::ValueList Operands;
    Value *I = *it;
    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (VectorizedStores.count(I))
        break;
      Operands.push_back(I);
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    bool Vectorized = vectorizeStoreChain(Operands, costThreshold, R);

    // Mark the vectorized stores so that we don't vectorize them again.
    if (Vectorized)
      VectorizedStores.insert(Operands.begin(), Operands.end());
    Changed |= Vectorized;
  }

  return Changed;
}

unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
  unsigned count = 0;
  StoreRefs.clear();
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    StoreInst *SI = dyn_cast<StoreInst>(it);
    if (!SI)
      continue;

    // Don't touch volatile stores.
    if (!SI->isSimple())
      continue;

    // Check that the pointer points to scalars.
    Type *Ty = SI->getValueOperand()->getType();
    if (Ty->isAggregateType() || Ty->isVectorTy())
      return 0;

    // Find the base pointer.
    Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL);

    // Save the store locations.
    StoreRefs[Ptr].push_back(SI);
    count++;
  }
  return count;
}

bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
  if (!A || !B)
    return false;
  Value *VL[] = { A, B };
  return tryToVectorizeList(VL, R);
}

bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
                                       ArrayRef<Value *> BuildVector) {
  if (VL.size() < 2)
    return false;

  DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size()
               << ".\n");

  // Check that all of the parts are scalar instructions of the same type.
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;

  unsigned Opcode0 = I0->getOpcode();

  Type *Ty0 = I0->getType();
  unsigned Sz = DL->getTypeSizeInBits(Ty0);
  unsigned VF = MinVecRegSize / Sz;

  for (int i = 0, e = VL.size(); i < e; ++i) {
    Type *Ty = VL[i]->getType();
    if (Ty->isAggregateType() || Ty->isVectorTy())
      return false;
    Instruction *Inst = dyn_cast<Instruction>(VL[i]);
    if (!Inst || Inst->getOpcode() != Opcode0)
      return false;
  }

  bool Changed = false;

  // Keep track of values that were deleted by vectorizing in the loop below.
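  // A WeakVH follows its value across RAUW and becomes null when the value is
  // deleted, so comparing a snapshot against the original VL entries detects
  // both cases (see hasValueBeenRAUWed above).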
  SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());

  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    unsigned OpsWidth = 0;

    if (i + VF > e)
      OpsWidth = e - i;
    else
      OpsWidth = VF;

    if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(VL, TrackValues, i, OpsWidth))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
                 << "\n");
    ArrayRef<Value *> Ops = VL.slice(i, OpsWidth);

    ArrayRef<Value *> BuildVectorSlice;
    if (!BuildVector.empty())
      BuildVectorSlice = BuildVector.slice(i, OpsWidth);

    R.buildTree(Ops, BuildVectorSlice);
    int Cost = R.getTreeCost();

    if (Cost < -SLPCostThreshold) {
      DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
      Value *VectorizedRoot = R.vectorizeTree();

      // Reconstruct the build vector by extracting the vectorized root. This
      // way we handle the case where some elements of the vector are
      // undefined.
      // (return (insertelt <4 x i32> (insertelt undef (opd0) 0) (opd1) 2))
      if (!BuildVectorSlice.empty()) {
        Instruction *InsertAfter = cast<Instruction>(VectorizedRoot);
        for (auto &V : BuildVectorSlice) {
          InsertElementInst *IE = cast<InsertElementInst>(V);
          IRBuilder<> Builder(++BasicBlock::iterator(InsertAfter));
          Instruction *Extract = cast<Instruction>(
              Builder.CreateExtractElement(VectorizedRoot, IE->getOperand(2)));
          IE->setOperand(1, Extract);
          IE->removeFromParent();
          IE->insertAfter(Extract);
          InsertAfter = IE;
        }
      }
      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
  if (!V)
    return false;

  // Try to vectorize V.
  if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R))
    return true;

  BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0));
  BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1));
  // Try to skip B.
  if (B && B->hasOneUse()) {
    BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
    BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
    if (tryToVectorizePair(A, B0, R)) {
      B->moveBefore(V);
      return true;
    }
    if (tryToVectorizePair(A, B1, R)) {
      B->moveBefore(V);
      return true;
    }
  }

  // Try to skip A.
  if (A && A->hasOneUse()) {
    BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
    BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
    if (tryToVectorizePair(A0, B, R)) {
      A->moveBefore(V);
      return true;
    }
    if (tryToVectorizePair(A1, B, R)) {
      A->moveBefore(V);
      return true;
    }
  }
  return false;
}

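// For illustration (values follow directly from the code below): with
// VecLen = 8 and NumEltsToRdx = 4, the pairwise "left" mask is
// <0,2,4,6,undef,...>, the pairwise "right" mask is <1,3,5,7,undef,...>, and
// the splitting mask is <4,5,6,7,undef,...>.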
/// \brief Generate a shuffle mask to be used in a reduction tree.
///
/// \param VecLen The length of the vector to be reduced.
/// \param NumEltsToRdx The number of elements that should be reduced in the
///        vector.
/// \param IsPairwise Whether the reduction is a pairwise or splitting
///        reduction. A pairwise reduction will generate a mask of
///        <0,2,...> or <1,3,..> while a splitting reduction will generate
///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
/// \param IsLeft True will generate a mask of even elements, odd otherwise.
static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
                                   bool IsPairwise, bool IsLeft,
                                   IRBuilder<> &Builder) {
  assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");

  SmallVector<Constant *, 32> ShuffleMask(
      VecLen, UndefValue::get(Builder.getInt32Ty()));

  if (IsPairwise)
    // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
  else
    // Move the upper half of the vector to the lower half.
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);

  return ConstantVector::get(ShuffleMask);
}

/// Model horizontal reductions.
///
/// A horizontal reduction is a tree of reduction operations (currently add
/// and fadd) that has operations that can be put into a vector as its leaf.
/// For example, this tree:
///
/// mul mul mul mul
///  \  /    \  /
///   +       +
///    \     /
///       +
/// This tree has "mul" as its reduced values and "+" as its reduction
/// operations. A reduction might be feeding into a store or a binary operation
/// feeding a phi.
///    ...
///    \  /
///     +
///     |
///  phi +=
///
///  Or:
///    ...
///    \  /
///     +
///     |
///   *p =
///
class HorizontalReduction {
  SmallVector<Value *, 16> ReductionOps;
  SmallVector<Value *, 32> ReducedVals;

  BinaryOperator *ReductionRoot;
  PHINode *ReductionPHI;

  /// The opcode of the reduction.
  unsigned ReductionOpcode;
  /// The opcode of the values we perform a reduction on.
  unsigned ReducedValueOpcode;
  /// The width of one full horizontal reduction operation.
  unsigned ReduxWidth;
  /// Should we model this reduction as a pairwise reduction tree or a tree
  /// that splits the vector in halves and adds those halves.
  bool IsPairwiseReduction;

public:
  HorizontalReduction()
      : ReductionRoot(0), ReductionPHI(0), ReductionOpcode(0),
        ReducedValueOpcode(0), ReduxWidth(0), IsPairwiseReduction(false) {}

  /// \brief Try to find a reduction tree.
  bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B,
                                 const DataLayout *DL) {
    assert((!Phi ||
            std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) &&
           "This phi needs to use the binary operator");

    // We could have an initial reduction that is not an add.
    //   r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted in the first '+'.
    if (Phi) {
      if (B->getOperand(0) == Phi) {
        Phi = 0;
        B = dyn_cast<BinaryOperator>(B->getOperand(1));
      } else if (B->getOperand(1) == Phi) {
        Phi = 0;
        B = dyn_cast<BinaryOperator>(B->getOperand(0));
      }
    }

    if (!B)
      return false;

    Type *Ty = B->getType();
    if (Ty->isVectorTy())
      return false;

    ReductionOpcode = B->getOpcode();
    ReducedValueOpcode = 0;
    ReduxWidth = MinVecRegSize / DL->getTypeSizeInBits(Ty);
    ReductionRoot = B;
    ReductionPHI = Phi;

    if (ReduxWidth < 4)
      return false;

    // We currently only support adds.
    if (ReductionOpcode != Instruction::Add &&
        ReductionOpcode != Instruction::FAdd)
      return false;

    // Post-order traverse the reduction tree starting at B. We only handle
    // true trees containing only binary operators.
    SmallVector<std::pair<BinaryOperator *, unsigned>, 32> Stack;
    Stack.push_back(std::make_pair(B, 0));
    while (!Stack.empty()) {
      BinaryOperator *TreeN = Stack.back().first;
      unsigned EdgeToVist = Stack.back().second++;
      bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;

      // Only handle trees in the current basic block.
      if (TreeN->getParent() != B->getParent())
        return false;

      // Each tree node needs to have one user except for the ultimate
      // reduction.
      if (!TreeN->hasOneUse() && TreeN != B)
        return false;

      // Postorder visit.
      if (EdgeToVist == 2 || IsReducedValue) {
        if (IsReducedValue) {
          // Make sure that the opcodes of the operations that we are going to
          // reduce match.
          if (!ReducedValueOpcode)
            ReducedValueOpcode = TreeN->getOpcode();
          else if (ReducedValueOpcode != TreeN->getOpcode())
            return false;
          ReducedVals.push_back(TreeN);
        } else {
          // We need to be able to reassociate the adds.
          if (!TreeN->isAssociative())
            return false;
          ReductionOps.push_back(TreeN);
        }
        // Retract.
        Stack.pop_back();
        continue;
      }

      // Visit left or right.
      Value *NextV = TreeN->getOperand(EdgeToVist);
      BinaryOperator *Next = dyn_cast<BinaryOperator>(NextV);
      if (Next)
        Stack.push_back(std::make_pair(Next, 0));
      else if (NextV != Phi)
        return false;
    }
    return true;
  }

  /// \brief Attempt to vectorize the tree found by
  /// matchAssociativeReduction.
  bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
    if (ReducedVals.empty())
      return false;

    unsigned NumReducedVals = ReducedVals.size();
    if (NumReducedVals < ReduxWidth)
      return false;

    Value *VectorizedTree = 0;
    IRBuilder<> Builder(ReductionRoot);
    FastMathFlags Unsafe;
    Unsafe.setUnsafeAlgebra();
    Builder.SetFastMathFlags(Unsafe);
    unsigned i = 0;

    for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
      ArrayRef<Value *> ValsToReduce(&ReducedVals[i], ReduxWidth);
      V.buildTree(ValsToReduce, ReductionOps);

      // Estimate cost.
      int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]);
      if (Cost >= -SLPCostThreshold)
        break;

      DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
                   << ". (HorRdx)\n");

      // Vectorize a tree.
      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
      Value *VectorizedRoot = V.vectorizeTree();

      // Emit a reduction.
      Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder);
      if (VectorizedTree) {
        Builder.SetCurrentDebugLocation(Loc);
        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
                                     ReducedSubTree, "bin.rdx");
      } else
        VectorizedTree = ReducedSubTree;
    }

    if (VectorizedTree) {
      // Finish the reduction.
      for (; i < NumReducedVals; ++i) {
        Builder.SetCurrentDebugLocation(
            cast<Instruction>(ReducedVals[i])->getDebugLoc());
        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
                                     ReducedVals[i]);
      }
      // Update users.
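      // For a phi-rooted reduction (e.g. "sum += ...") the scalar root is
      // rewired to add the vectorized tree back into the phi; otherwise every
      // user of the root sees the vectorized value directly.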
      if (ReductionPHI) {
        assert(ReductionRoot != NULL && "Need a reduction operation");
        ReductionRoot->setOperand(0, VectorizedTree);
        ReductionRoot->setOperand(1, ReductionPHI);
      } else
        ReductionRoot->replaceAllUsesWith(VectorizedTree);
    }
    return VectorizedTree != 0;
  }

private:

  /// \brief Calculate the cost of a reduction.
  int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) {
    Type *ScalarTy = FirstReducedVal->getType();
    Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);

    int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true);
    int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false);

    IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
    int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;

    int ScalarReduxCost =
        ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, VecTy);

    DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
                 << " for reduction that starts with " << *FirstReducedVal
                 << " (It is a "
                 << (IsPairwiseReduction ? "pairwise" : "splitting")
                 << " reduction)\n");

    return VecReduxCost - ScalarReduxCost;
  }

  static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L,
                            Value *R, const Twine &Name = "") {
    if (Opcode == Instruction::FAdd)
      return Builder.CreateFAdd(L, R, Name);
    return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name);
  }

  /// \brief Emit a horizontal reduction of the vectorized value.
  Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) {
    assert(VectorizedValue && "Need to have a vectorized tree node");
    Instruction *ValToReduce = dyn_cast<Instruction>(VectorizedValue);
    assert(isPowerOf2_32(ReduxWidth) &&
           "We only handle power-of-two reductions for now");

    Value *TmpVec = ValToReduce;
    for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
      if (IsPairwiseReduction) {
        Value *LeftMask =
            createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
        Value *RightMask =
            createRdxShuffleMask(ReduxWidth, i, true, false, Builder);

        Value *LeftShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l");
        Value *RightShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), RightMask,
            "rdx.shuf.r");
        TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf,
                             "bin.rdx");
      } else {
        Value *UpperHalf =
            createRdxShuffleMask(ReduxWidth, i, false, false, Builder);
        Value *Shuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf");
        TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf, "bin.rdx");
      }
    }

    // The result is in the first element of the vector.
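    // For illustration, a splitting reduction of a hypothetical <4 x float>
    // %v emits:
    //   %s1 = shufflevector <4 x float> %v,  undef, <2, 3, undef, undef>
    //   %r1 = fadd <4 x float> %v, %s1
    //   %s2 = shufflevector <4 x float> %r1, undef, <1, undef, undef, undef>
    //   %r2 = fadd <4 x float> %r1, %s2
    // and the scalar result is lane 0 of %r2.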
    return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
  }
};

/// \brief Recognize construction of vectors like
///  %ra = insertelement <4 x float> undef, float %s0, i32 0
///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
///
/// Returns true if it matches
///
static bool findBuildVector(InsertElementInst *FirstInsertElem,
                            SmallVectorImpl<Value *> &BuildVector,
                            SmallVectorImpl<Value *> &BuildVectorOpds) {
  if (!isa<UndefValue>(FirstInsertElem->getOperand(0)))
    return false;

  InsertElementInst *IE = FirstInsertElem;
  while (true) {
    BuildVector.push_back(IE);
    BuildVectorOpds.push_back(IE->getOperand(1));

    if (IE->use_empty())
      return false;

    InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back());
    if (!NextUse)
      return true;

    // If this isn't the final use, make sure the next insertelement is the
    // only use. It's OK if the final constructed vector is used multiple
    // times.
    if (!IE->hasOneUse())
      return false;

    IE = NextUse;
  }

  return false;
}

static bool PhiTypeSorterFunc(Value *V, Value *V2) {
  return V->getType() < V2->getType();
}

bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallSet<Value *, 16> VisitedInstrs;

  bool HaveVectorizedPhiNodes = true;
  while (HaveVectorizedPhiNodes) {
    HaveVectorizedPhiNodes = false;

    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (BasicBlock::iterator instr = BB->begin(), ie = BB->end(); instr != ie;
         ++instr) {
      PHINode *P = dyn_cast<PHINode>(instr);
      if (!P)
        break;

      if (!VisitedInstrs.count(P))
        Incoming.push_back(P);
    }

    // Sort by type.
    std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);

    // Try to vectorize elements based on their type.
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
                                           E = Incoming.end();
         IncIt != E;) {

      // Look for the next elements with the same type.
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts
                   << ")\n");
      if (NumElts > 1 &&
          tryToVectorizeList(ArrayRef<Value *>(IncIt, NumElts), R)) {
        // Success; start over because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
    // We may go through BB multiple times so skip the instructions we have
    // already checked.
    if (!VisitedInstrs.insert(it))
      continue;

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
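    // For illustration (hypothetical IR), a candidate looks like:
    //   loop:
    //     %sum = phi i32 [ 0, %entry ], [ %sum.next, %loop ]
    //     ...
    //     %sum.next = add i32 %sum, %x
    // where %sum.next may root a tree of adds that can be reduced.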
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;
      Value *Rdx =
          (P->getIncomingBlock(0) == BB
               ? (P->getIncomingValue(0))
               : (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1) : 0));
      // Check if this is a Binary Operator.
      BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx);
      if (!BI)
        continue;

      // Try to match and vectorize a horizontal reduction.
      HorizontalReduction HorRdx;
      if (ShouldVectorizeHor &&
          HorRdx.matchAssociativeReduction(P, BI, DL) &&
          HorRdx.tryToReduce(R, TTI)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      Value *Inst = BI->getOperand(0);
      if (Inst == P)
        Inst = BI->getOperand(1);

      if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become an invalid value.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      continue;
    }

    // Try to vectorize horizontal reductions feeding into a store.
    if (ShouldStartVectorizeHorAtStore)
      if (StoreInst *SI = dyn_cast<StoreInst>(it))
        if (BinaryOperator *BinOp =
                dyn_cast<BinaryOperator>(SI->getValueOperand())) {
          HorizontalReduction HorRdx;
          if (((HorRdx.matchAssociativeReduction(0, BinOp, DL) &&
                HorRdx.tryToReduce(R, TTI)) ||
               tryToVectorize(BinOp, R))) {
            Changed = true;
            it = BB->begin();
            e = BB->end();
            continue;
          }
        }

    // Try to vectorize trees that start at compare instructions.
    if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
      if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
        Changed = true;
        // We would like to start over since some instructions are deleted
        // and the iterator may become an invalid value.
        it = BB->begin();
        e = BB->end();
        continue;
      }

      for (int i = 0; i < 2; ++i) {
        if (BinaryOperator *BI = dyn_cast<BinaryOperator>(CI->getOperand(i))) {
          if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) {
            Changed = true;
            // We would like to start over since some instructions are deleted
            // and the iterator may become an invalid value.
            it = BB->begin();
            e = BB->end();
          }
        }
      }
      continue;
    }

    // Try to vectorize trees that start at insertelement instructions.
    if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) {
      SmallVector<Value *, 16> BuildVector;
      SmallVector<Value *, 16> BuildVectorOpds;
      if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds))
        continue;

      // Vectorize starting with the build vector operands, ignoring the
      // BuildVector instructions for the purpose of scheduling and user
      // extraction.
      if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }

      continue;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
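  // collectStores grouped the stores by underlying base object, and each
  // group is processed in chunks of at most 16 below so that the quadratic
  // pairing in vectorizeStores stays cheap.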
  for (StoreListMap::iterator it = StoreRefs.begin(), e = StoreRefs.end();
       it != e; ++it) {
    if (it->second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                 << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      ArrayRef<StoreInst *> Chunk(&it->second[CI], Len);
      Changed |= vectorizeStores(Chunk, -SLPCostThreshold, R);
    }
  }
  return Changed;
}

} // end anonymous namespace

char SLPVectorizer::ID = 0;
static const char lv_name[] = "SLP Vectorizer";
INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

namespace llvm {
Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
}
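// A minimal usage sketch (illustrative only, not part of this file; assumes a
// pass-manager setup of this LLVM era, with PM and M provided by the caller):
//   PassManager PM;                      // from llvm/PassManager.h
//   PM.add(createSLPVectorizerPass());
//   PM.run(M);                           // M is an llvm::Module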