//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
// "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <map>

using namespace llvm;

static cl::opt<int>
SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                 cl::desc("Only vectorize if you gain more than this "
                          "number "));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(false), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

namespace {

static const unsigned MinVecRegSize = 128;

static const unsigned RecursionMaxDepth = 12;

/// A helper class for numbering instructions in multiple blocks.
/// Numbers start at zero for each basic block.
struct BlockNumbering {

  BlockNumbering(BasicBlock *Bb) : BB(Bb), Valid(false) {}

  BlockNumbering() : BB(0), Valid(false) {}

  void numberInstructions() {
    unsigned Loc = 0;
    InstrIdx.clear();
    InstrVec.clear();
    // Number the instructions in the block.
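    // For example, after numbering a block containing instructions I0, I1, I2
    // (in that order), InstrIdx[I1] == 1 and InstrVec[1] == I1, so we can map
    // an instruction to its position and back in constant time.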
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
      InstrIdx[it] = Loc++;
      InstrVec.push_back(it);
      assert(InstrVec[InstrIdx[it]] == it && "Invalid allocation");
    }
    Valid = true;
  }

  int getIndex(Instruction *I) {
    assert(I->getParent() == BB && "Invalid instruction");
    if (!Valid)
      numberInstructions();
    assert(InstrIdx.count(I) && "Unknown instruction");
    return InstrIdx[I];
  }

  Instruction *getInstruction(unsigned loc) {
    if (!Valid)
      numberInstructions();
    assert(InstrVec.size() > loc && "Invalid Index");
    return InstrVec[loc];
  }

  void forget() { Valid = false; }

private:
  /// The block we are numbering.
  BasicBlock *BB;
  /// Is the block numbered.
  bool Valid;
  /// Maps instructions to numbers and back.
  SmallDenseMap<Instruction *, int> InstrIdx;
  /// Maps integers to Instructions.
  SmallVector<Instruction *, 32> InstrVec;
};

/// \returns the parent basic block if all of the instructions in \p VL
/// are in the same block or null otherwise.
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return 0;

    if (BB != I->getParent())
      return 0;
  }
  return BB;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    if (!isa<Constant>(VL[i]))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode())
      return 0;
  }
  return Opcode;
}

/// \returns The type that all of the values in \p VL have or null if there
/// are different types.
static Type* getSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return 0;

  return Ty;
}

/// \returns True if the ExtractElement instructions in VL can be vectorized
/// to use the original vector.
static bool CanReuseExtract(ArrayRef<Value *> VL) {
  assert(Instruction::ExtractElement == getSameOpcode(VL) && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *VL0 = VL[0];
  ExtractElementInst *E0 = cast<ExtractElementInst>(VL0);
  Value *Vec = E0->getOperand(0);

  // We have to extract from the same vector type.
  unsigned NElts = Vec->getType()->getVectorNumElements();

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
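  // For example, the bundle
  //   %e0 = extractelement <4 x i32> %vec, i32 0
  //   %e1 = extractelement <4 x i32> %vec, i32 1
  //   %e2 = extractelement <4 x i32> %vec, i32 2
  //   %e3 = extractelement <4 x i32> %vec, i32 3
  // extracts lanes 0..3 in order from a single source, so the "vectorized"
  // form is simply %vec itself and no new shuffle is needed.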
  ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1));
  if (!CI || CI->getZExtValue())
    return false;

  for (unsigned i = 1, e = VL.size(); i < e; ++i) {
    ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));

    if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec)
      return false;
  }

  return true;
}

static bool all_equal(SmallVectorImpl<Value *> &V) {
  Value *First = V[0];
  for (int i = 1, e = V.size(); i != e; ++i)
    if (V[i] != First)
      return false;
  return true;
}

static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                           SmallVectorImpl<Value *> &Left,
                                           SmallVectorImpl<Value *> &Right) {

  SmallVector<Value *, 16> OrigLeft, OrigRight;

  bool AllSameOpcodeLeft = true;
  bool AllSameOpcodeRight = true;
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *I = cast<Instruction>(VL[i]);
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);

    OrigLeft.push_back(V0);
    OrigRight.push_back(V1);

    Instruction *I0 = dyn_cast<Instruction>(V0);
    Instruction *I1 = dyn_cast<Instruction>(V1);

    // Check whether all operands on one side have the same opcode. In this
    // case we want to preserve the original order and not make things worse
    // by reordering.
    AllSameOpcodeLeft = I0;
    AllSameOpcodeRight = I1;

    if (i && AllSameOpcodeLeft) {
      if (Instruction *P0 = dyn_cast<Instruction>(OrigLeft[i-1])) {
        if (P0->getOpcode() != I0->getOpcode())
          AllSameOpcodeLeft = false;
      } else
        AllSameOpcodeLeft = false;
    }
    if (i && AllSameOpcodeRight) {
      if (Instruction *P1 = dyn_cast<Instruction>(OrigRight[i-1])) {
        if (P1->getOpcode() != I1->getOpcode())
          AllSameOpcodeRight = false;
      } else
        AllSameOpcodeRight = false;
    }

    // Sort two opcodes. In the code below we try to preserve the ability to
    // use broadcast of values instead of individual inserts.
    // vl1 = load
    // vl2 = phi
    // vr1 = load
    // vr2 = vr1
    //     = vl1 x vr1
    //     = vl2 x vr2
    // If we just sorted according to opcode we would leave the first line
    // intact but we would swap vl2 with vr2 because opcode(phi) > opcode(load).
    //     = vl1 x vr1
    //     = vr2 x vl2
    // Because vr2 and vr1 are from the same load we lose the opportunity of a
    // broadcast for the packed right side in the backend: we have [vr1, vl2]
    // instead of [vr1, vr2=vr1].
    if (I0 && I1) {
      if (!i && I0->getOpcode() > I1->getOpcode()) {
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() > I1->getOpcode() && Right[i-1] != I1) {
        // Try not to destroy a broadcast for no apparent benefit.
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() == I1->getOpcode() && Right[i-1] == I0) {
        // Try to preserve broadcasts.
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() == I1->getOpcode() && Left[i-1] == I1) {
        // Try to preserve broadcasts.
        Left.push_back(I1);
        Right.push_back(I0);
      } else {
        Left.push_back(I0);
        Right.push_back(I1);
      }
      continue;
    }
    // One opcode, put the instruction on the right.
    if (I0) {
      Left.push_back(V1);
      Right.push_back(I0);
      continue;
    }
    Left.push_back(V0);
    Right.push_back(V1);
  }

  bool LeftBroadcast = all_equal(Left);
  bool RightBroadcast = all_equal(Right);

  // Don't reorder if the operands were good to begin with.
  if (!(LeftBroadcast || RightBroadcast) &&
      (AllSameOpcodeRight || AllSameOpcodeLeft)) {
    Left = OrigLeft;
    Right = OrigRight;
  }
}

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;

  BoUpSLP(Function *Func, ScalarEvolution *Se, DataLayout *Dl,
          TargetTransformInfo *Tti, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt) :
    F(Func), SE(Se), DL(Dl), TTI(Tti), AA(Aa), LI(Li), DT(Dt),
    Builder(Se->getContext()) {
    // Setup the block numbering utility for all of the blocks in the
    // function.
    for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it) {
      BasicBlock *BB = it;
      BlocksNumbers[BB] = BlockNumbering(BB);
    }
  }

  /// \brief Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots and is possibly
  /// used by a reduction of \p RdxOps.
  void buildTree(ArrayRef<Value *> Roots, ValueSet *RdxOps = 0);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    RdxOps = 0;
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    MemBarrierIgnoreList.clear();
  }

  /// \returns true if the memory operations A and B are consecutive.
  bool isConsecutiveAccess(Value *A, Value *B);

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();
private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This may happen in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL) const;

  /// \brief Take the pointer operand from the Load/Store instruction.
  /// \returns NULL if this is not a valid Load/Store instruction.
  static Value *getPointerOperand(Value *I);

  /// \brief Take the address space operand from the Load/Store instruction.
  /// \returns -1 if this is not a valid Load/Store instruction.
  static unsigned getAddressSpaceOperand(Value *I);

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \returns the AA location that is being accessed by the instruction.
  AliasAnalysis::Location getLocation(Instruction *I);

  /// \brief Checks if it is possible to sink an instruction from
  /// \p Src to \p Dst.
  /// \returns the pointer to the barrier instruction if we can't sink.
  Value *getSinkBarrier(Instruction *Src, Instruction *Dst);

  /// \returns the index of the last instruction in the BB from \p VL.
  int getLastIndex(ArrayRef<Value *> VL);

  /// \returns the last Instruction in the bundle \p VL.
  Instruction *getLastInstruction(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even when the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  struct TreeEntry {
    TreeEntry() : Scalars(), VectorizedValue(0), LastScalarIndex(0),
                  NeedToGather(0) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// The index in the basic block of the last scalar.
    int LastScalarIndex;

    /// Do we need to gather this sequence?
    bool NeedToGather;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) {
    VectorizableTree.push_back(TreeEntry());
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      Last->LastScalarIndex = getLastIndex(VL);
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      Last->LastScalarIndex = 0;
      MustGather.insert(VL.begin(), VL.end());
    }
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value*, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L) :
      Scalar(S), User(U), Lane(L) {}
    // Which scalar in our function.
    Value *Scalar;
    // Which user that uses the scalar.
    llvm::User *User;
    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User).
  UserList ExternalUses;

  /// A list of instructions to ignore while sinking
  /// memory instructions. This map must be reset between runs of getCost.
  ValueSet MemBarrierIgnoreList;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;

  /// Numbers instructions in different blocks.
  DenseMap<BasicBlock *, BlockNumbering> BlocksNumbers;

  /// Reduction operators.
  ValueSet *RdxOps;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  DataLayout *DL;
  TargetTransformInfo *TTI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;
};

void BoUpSLP::buildTree(ArrayRef<Value *> Roots, ValueSet *Rdx) {
  deleteTree();
  RdxOps = Rdx;
  if (!getSameType(Roots))
    return;
  buildTree_rec(Roots, 0);

  // Collect the values that we need to extract from the tree.
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      for (Value::use_iterator User = Scalar->use_begin(),
           UE = Scalar->use_end(); User != UE; ++User) {
        DEBUG(dbgs() << "SLP: Checking user:" << **User << ".\n");

        bool Gathered = MustGather.count(*User);

        // Skip in-tree scalars that become vectors.
        if (ScalarToTreeEntry.count(*User) && !Gathered) {
          DEBUG(dbgs() << "SLP: \tInternal user will be removed:" <<
                **User << ".\n");
          int Idx = ScalarToTreeEntry[*User]; (void) Idx;
          assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
          continue;
        }
        Instruction *UserInst = dyn_cast<Instruction>(*User);
        if (!UserInst)
          continue;

        // Ignore uses that are part of the reduction.
        if (Rdx && std::find(Rdx->begin(), Rdx->end(), UserInst) != Rdx->end())
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << **User << " from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, *User, Lane));
      }
    }
  }
}

void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
  bool SameTy = getSameType(VL); (void)SameTy;
  assert(SameTy && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false);
    return;
  }

  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false);
      return;
    }

  // If all of the operands are identical or constant we have a simple
  // solution.
  if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) ||
      !getSameOpcode(VL)) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.
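  // If the bundle exactly matches an existing tree entry we have a diamond
  // merge: for example, (a + b) feeding both (a + b) * c and (a + b) * d. We
  // reuse the existing entry instead of vectorizing the same bundle twice.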

  // Check if this is a duplicate of another entry.
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is already in tree.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // If any of the scalars appears in the table OR it is marked as a value that
  // needs to stay scalar then we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i]) || MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar. \n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  int MyLastIndex = getLastIndex(VL);
  BasicBlock *BB = cast<Instruction>(VL0)->getParent();

  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *Scalar = cast<Instruction>(VL[i]);
    DEBUG(dbgs() << "SLP: Checking users of " << *Scalar << ". \n");
    for (Value::use_iterator U = Scalar->use_begin(), UE = Scalar->use_end();
         U != UE; ++U) {
      DEBUG(dbgs() << "SLP: \tUser " << **U << ". \n");
      Instruction *User = dyn_cast<Instruction>(*U);
      if (!User) {
        DEBUG(dbgs() << "SLP: Gathering due to unknown user. \n");
        newTreeEntry(VL, false);
        return;
      }

      // We don't care if the user is in a different basic block.
      BasicBlock *UserBlock = User->getParent();
      if (UserBlock != BB) {
        DEBUG(dbgs() << "SLP: User from a different basic block "
              << *User << ". \n");
        continue;
      }

      // If this is a PHINode within this basic block then we can place the
      // extract wherever we want.
      if (isa<PHINode>(*User)) {
        DEBUG(dbgs() << "SLP: \tWe can schedule PHIs:" << *User << ". \n");
        continue;
      }

      // Check if this is a safe in-tree user.
      if (ScalarToTreeEntry.count(User)) {
        int Idx = ScalarToTreeEntry[User];
        int VecLocation = VectorizableTree[Idx].LastScalarIndex;
        if (VecLocation <= MyLastIndex) {
          DEBUG(dbgs() << "SLP: Gathering due to unschedulable vector. \n");
          newTreeEntry(VL, false);
          return;
        }
        DEBUG(dbgs() << "SLP: In-tree user (" << *User << ") at #" <<
              VecLocation << " vector value (" << *Scalar << ") at #"
              << MyLastIndex << ".\n");
        continue;
      }

      // This user is part of the reduction.
      if (RdxOps && RdxOps->count(User))
        continue;

      // Make sure that we can schedule this unknown user.
      BlockNumbering &BN = BlocksNumbers[BB];
      int UserIndex = BN.getIndex(User);
      if (UserIndex < MyLastIndex) {
        DEBUG(dbgs() << "SLP: Can't schedule extractelement for "
              << *User << ". \n");
        newTreeEntry(VL, false);
        return;
      }
    }
  }

  // Check that every instruction appears once in this bundle.
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    for (unsigned j = i+1; j < e; ++j)
      if (VL[i] == VL[j]) {
        DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
        newTreeEntry(VL, false);
        return;
      }

  // Check that instructions in this bundle don't reference other instructions.
  // The runtime of this check is O(N * N-1 * uses(N)) and a typical N is 4.
  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    for (Value::use_iterator U = VL[i]->use_begin(), UE = VL[i]->use_end();
         U != UE; ++U) {
      for (unsigned j = 0; j < e; ++j) {
        if (i != j && *U == VL[j]) {
          DEBUG(dbgs() << "SLP: Intra-bundle dependencies!" << **U << ". \n");
          newTreeEntry(VL, false);
          return;
        }
      }
    }
  }

  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  unsigned Opcode = getSameOpcode(VL);

  // Check if it is safe to sink the loads or the stores.
  if (Opcode == Instruction::Load || Opcode == Instruction::Store) {
    Instruction *Last = getLastInstruction(VL);

    for (unsigned i = 0, e = VL.size(); i < e; ++i) {
      if (VL[i] == Last)
        continue;
      Value *Barrier = getSinkBarrier(cast<Instruction>(VL[i]), Last);
      if (Barrier) {
        DEBUG(dbgs() << "SLP: Can't sink " << *VL[i] << "\n down to " << *Last
              << "\n because of " << *Barrier << ". Gathering.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
  }

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = dyn_cast<PHINode>(VL0);

    // Check for terminator values (e.g. invoke).
    for (unsigned j = 0; j < VL.size(); ++j)
      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        TerminatorInst *Term =
            dyn_cast<TerminatorInst>(cast<PHINode>(VL[j])->getIncomingValue(i));
        if (Term) {
          DEBUG(dbgs()
                << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
          newTreeEntry(VL, false);
          return;
        }
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<PHINode>(VL[j])->getIncomingValue(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ExtractElement: {
    bool Reuse = CanReuseExtract(VL);
    if (Reuse) {
      DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
    }
    newTreeEntry(VL, Reuse);
    return;
  }
  case Instruction::Load: {
    // Check if the loads are consecutive or if we need to swizzle them.
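    // For example, simple loads of a[0], a[1], a[2], a[3] (in bundle order)
    // can become a single wide load, while loads of a[0], a[2], a[1], a[3]
    // would need a swizzle, so we gather them instead.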
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
      LoadInst *L = cast<LoadInst>(VL[i]);
      if (!L->isSimple() || !isConsecutiveAccess(VL[i], VL[i + 1])) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Need to swizzle loads.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of loads.\n");
    return;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();
    for (unsigned i = 0; i < VL.size(); ++i) {
      Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
      if (Ty != SrcTy || Ty->isAggregateType() || Ty->isVectorTy()) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of casts.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Check that all of the compares have the same predicate.
    CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
    Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
    for (unsigned i = 1, e = VL.size(); i < e; ++i) {
      CmpInst *Cmp = cast<CmpInst>(VL[i]);
      if (Cmp->getPredicate() != P0 ||
          Cmp->getOperand(0)->getType() != ComparedTy) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of compares.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of bin op.\n");

    // Sort operands of the instructions so that each side is more likely to
    // have the same opcode.
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
      ValueList Left, Right;
      reorderInputsAccordingToOpcode(VL, Left, Right);
      buildTree_rec(Left, Depth + 1);
      buildTree_rec(Right, Depth + 1);
      return;
    }

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::Store: {
    // Check if the stores are consecutive or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
      if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Non consecutive store.\n");
        return;
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of stores.\n");

    ValueList Operands;
    for (unsigned j = 0; j < VL.size(); ++j)
      Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));

    // We can ignore these values because we are sinking them down.
    MemBarrierIgnoreList.insert(VL.begin(), VL.end());
    buildTree_rec(Operands, Depth + 1);
    return;
  }
  default:
    newTreeEntry(VL, false);
    DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
    return;
  }
}

int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value*> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  if (E->NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isSplat(VL)) {
      return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    }
    return getGatherCost(E->Scalars);
  }

  assert(getSameOpcode(VL) && getSameType(VL) && getSameBlock(VL) &&
         "Invalid VL");
  Instruction *VL0 = cast<Instruction>(VL[0]);
  unsigned Opcode = VL0->getOpcode();
  switch (Opcode) {
  case Instruction::PHI: {
    return 0;
  }
  case Instruction::ExtractElement: {
    if (CanReuseExtract(VL))
      return 0;
    return getGatherCost(VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();

    // Calculate the cost of this instruction.
    int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
                                                       VL0->getType(), SrcTy);

    VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
    int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
    return VecCost - ScalarCost;
  }
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Calculate the cost of this instruction.
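    // The returned value is VecCost - ScalarCost, so a negative result means
    // the vector form is cheaper. For example, on a target where a scalar i32
    // add and a <4 x i32> add both cost 1, a bundle of four adds yields
    // 1 - 4 = -3, which is profitable.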
    int ScalarCost = 0;
    int VecCost = 0;
    if (Opcode == Instruction::FCmp || Opcode == Instruction::ICmp ||
        Opcode == Instruction::Select) {
      VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
      ScalarCost = VecTy->getNumElements() *
          TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
      VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
    } else {
      // Certain instructions can be cheaper to vectorize if they have a
      // constant second vector operand.
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;

      // Check whether all second operands are constant.
      for (unsigned i = 0; i < VL.size(); ++i)
        if (!isa<ConstantInt>(cast<Instruction>(VL[i])->getOperand(1))) {
          Op2VK = TargetTransformInfo::OK_AnyValue;
          break;
        }

      ScalarCost =
          VecTy->getNumElements() *
          TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK);
      VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK);
    }
    return VecCost - ScalarCost;
  }
  case Instruction::Load: {
    // Cost of wide load - cost of scalar loads.
    int ScalarLdCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
    int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
    return VecLdCost - ScalarLdCost;
  }
  case Instruction::Store: {
    // We know that we can merge the stores. Calculate the cost.
    int ScalarStCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
    int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
    return VecStCost - ScalarStCost;
  }
  default:
    llvm_unreachable("Unknown instruction");
  }
}

bool BoUpSLP::isFullyVectorizableTinyTree() {
  DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
        VectorizableTree.size() << " is fully vectorizable.\n");

  // We only handle trees of height 2.
  if (VectorizableTree.size() != 2)
    return false;

  // Gathering cost would be too much for tiny trees.
  if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
    return false;

  return true;
}

int BoUpSLP::getTreeCost() {
  int Cost = 0;
  DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
        VectorizableTree.size() << ".\n");

  // We only vectorize tiny trees if they are fully vectorizable.
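  // For example, a tree of height two where a bundle of consecutive stores
  // is fed directly by a bundle of consecutive loads is fully vectorizable;
  // if either entry needs a gather, the tiny tree is rejected and we return
  // INT_MAX below.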
  if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) {
    if (!VectorizableTree.size()) {
      assert(!ExternalUses.size() && "We should not have any external users");
    }
    return INT_MAX;
  }

  unsigned BundleWidth = VectorizableTree[0].Scalars.size();

  for (unsigned i = 0, e = VectorizableTree.size(); i != e; ++i) {
    int C = getEntryCost(&VectorizableTree[i]);
    DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
          << *VectorizableTree[i].Scalars[0] << ".\n");
    Cost += C;
  }

  int ExtractCost = 0;
  for (UserList::iterator I = ExternalUses.begin(), E = ExternalUses.end();
       I != E; ++I) {

    VectorType *VecTy = VectorType::get(I->Scalar->getType(), BundleWidth);
    ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                           I->Lane);
  }

  DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost << ".\n");
  return Cost + ExtractCost;
}

int BoUpSLP::getGatherCost(Type *Ty) {
  int Cost = 0;
  for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
    Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
  return Cost;
}

int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  return getGatherCost(VecTy);
}

AliasAnalysis::Location BoUpSLP::getLocation(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return AA->getLocation(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return AA->getLocation(LI);
  return AliasAnalysis::Location();
}

Value *BoUpSLP::getPointerOperand(Value *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return 0;
}

unsigned BoUpSLP::getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers of the same type.
  if (PtrA == PtrB || PtrA->getType() != PtrB->getType())
    return false;

  unsigned PtrBitWidth = DL->getPointerSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(PtrBitWidth, DL->getTypeStoreSize(Ty));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetB);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the base pointer delta needed to make the final delta equal to
  // the size.
  APInt BaseDelta = Size - OffsetDelta;

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE->getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE->getSCEV(PtrB);
  const SCEV *C = SE->getConstant(BaseDelta);
  const SCEV *X = SE->getAddExpr(PtrSCEVA, C);
  return X == PtrSCEVB;
}

Value *BoUpSLP::getSinkBarrier(Instruction *Src, Instruction *Dst) {
  assert(Src->getParent() == Dst->getParent() && "Not the same BB");
  BasicBlock::iterator I = Src, E = Dst;
  // Scan all of the instructions from SRC to DST and check if
  // the source may alias.
  for (++I; I != E; ++I) {
    // Ignore store instructions that are marked as 'ignore'.
    if (MemBarrierIgnoreList.count(I))
      continue;
    if (Src->mayWriteToMemory()) /* Write */ {
      if (!I->mayReadOrWriteMemory())
        continue;
    } else /* Read */ {
      if (!I->mayWriteToMemory())
        continue;
    }
    AliasAnalysis::Location A = getLocation(&*I);
    AliasAnalysis::Location B = getLocation(Src);

    if (!A.Ptr || !B.Ptr || AA->alias(A, B))
      return I;
  }
  return 0;
}

int BoUpSLP::getLastIndex(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && BlocksNumbers.count(BB) && "Invalid block");
  BlockNumbering &BN = BlocksNumbers[BB];

  int MaxIdx = BN.getIndex(BB->getFirstNonPHI());
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  return MaxIdx;
}

Instruction *BoUpSLP::getLastInstruction(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && BlocksNumbers.count(BB) && "Invalid block");
  BlockNumbering &BN = BlocksNumbers[BB];

  int MaxIdx = BN.getIndex(cast<Instruction>(VL[0]));
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  Instruction *I = BN.getInstruction(MaxIdx);
  assert(I && "bad location");
  return I;
}

void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
  Instruction *VL0 = cast<Instruction>(VL[0]);
  Instruction *LastInst = getLastInstruction(VL);
  BasicBlock::iterator NextInst = LastInst;
  ++NextInst;
  Builder.SetInsertPoint(VL0->getParent(), NextInst);
  Builder.SetCurrentDebugLocation(VL0->getDebugLoc());
}

Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
  Value *Vec = UndefValue::get(Ty);
  // Generate the 'InsertElement' instruction.
  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
    if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(Insrt);

      // Add to our 'need-to-extract' list.
      if (ScalarToTreeEntry.count(VL[i])) {
        int Idx = ScalarToTreeEntry[VL[i]];
        TreeEntry *E = &VectorizableTree[Idx];
        // Find which lane we need to extract.
        int FoundLane = -1;
        for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for?
          if (E->Scalars[Lane] == VL[i]) {
            FoundLane = Lane;
            break;
          }
        }
        assert(FoundLane >= 0 && "Could not find the correct lane");
        ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
      }
    }
  }

  return Vec;
}

Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
  SmallDenseMap<Value*, int>::const_iterator Entry
    = ScalarToTreeEntry.find(VL[0]);
  if (Entry != ScalarToTreeEntry.end()) {
    int Idx = Entry->second;
    const TreeEntry *En = &VectorizableTree[Idx];
    if (En->isSame(VL) && En->VectorizedValue)
      return En->VectorizedValue;
  }
  return 0;
}

Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    if (E->isSame(VL))
      return vectorizeTree(E);
  }

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  return Gather(VL, VecTy);
}

Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
  Type *ScalarTy = VL0->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());

  if (E->NeedToGather) {
    setInsertPointAfterBundle(E->Scalars);
    return Gather(E->Scalars, VecTy);
  }

  unsigned Opcode = VL0->getOpcode();
  assert(Opcode == getSameOpcode(E->Scalars) && "Invalid opcode");

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = dyn_cast<PHINode>(VL0);
    Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
    Builder.SetCurrentDebugLocation(PH->getDebugLoc());
    PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
    E->VectorizedValue = NewPhi;

    // PHINodes may have multiple entries from the same block. We want to
    // visit every block once.
    SmallSet<BasicBlock*, 4> VisitedBBs;

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      BasicBlock *IBB = PH->getIncomingBlock(i);

      if (!VisitedBBs.insert(IBB)) {
        NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
        continue;
      }

      // Prepare the operand vector.
      for (unsigned j = 0; j < E->Scalars.size(); ++j)
        Operands.push_back(cast<PHINode>(E->Scalars[j])->
                           getIncomingValueForBlock(IBB));

      Builder.SetInsertPoint(IBB->getTerminator());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      Value *Vec = vectorizeTree(Operands);
      NewPhi->addIncoming(Vec, IBB);
    }

    assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
           "Invalid number of incoming values");
    return NewPhi;
  }

  case Instruction::ExtractElement: {
    if (CanReuseExtract(E->Scalars)) {
      Value *V = VL0->getOperand(0);
      E->VectorizedValue = V;
      return V;
    }
    return Gather(E->Scalars, VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    ValueList INVL;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i)
      INVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));

    setInsertPointAfterBundle(E->Scalars);

    Value *InVec = vectorizeTree(INVL);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CastInst *CI = dyn_cast<CastInst>(VL0);
    Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::FCmp:
  case Instruction::ICmp: {
    ValueList LHSV, RHSV;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      LHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      RHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *L = vectorizeTree(LHSV);
    Value *R = vectorizeTree(RHSV);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
    Value *V;
    if (Opcode == Instruction::FCmp)
      V = Builder.CreateFCmp(P0, L, R);
    else
      V = Builder.CreateICmp(P0, L, R);

    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Select: {
    ValueList TrueVec, FalseVec, CondVec;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      CondVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      TrueVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
      FalseVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(2));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *Cond = vectorizeTree(CondVec);
    Value *True = vectorizeTree(TrueVec);
    Value *False = vectorizeTree(FalseVec);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    Value *V = Builder.CreateSelect(Cond, True, False);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    ValueList LHSVL, RHSVL;
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative())
      reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL);
    else
      for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
        LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
        RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
      }

    setInsertPointAfterBundle(E->Scalars);

    Value *LHS = vectorizeTree(LHSVL);
    Value *RHS = vectorizeTree(RHSVL);

    if (LHS == RHS && isa<Instruction>(LHS)) {
      assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order");
    }

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
    Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Load: {
    // Loads are inserted at the head of the tree because we don't want to
    // sink them all the way down past store instructions.
    setInsertPointAfterBundle(E->Scalars);

    LoadInst *LI = cast<LoadInst>(VL0);
    unsigned AS = LI->getPointerAddressSpace();

    Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));
    unsigned Alignment = LI->getAlignment();
    LI = Builder.CreateLoad(VecPtr);
    LI->setAlignment(Alignment);
    E->VectorizedValue = LI;
    return LI;
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(VL0);
    unsigned Alignment = SI->getAlignment();
    unsigned AS = SI->getPointerAddressSpace();

    ValueList ValueOp;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i)
      ValueOp.push_back(cast<StoreInst>(E->Scalars[i])->getValueOperand());

    setInsertPointAfterBundle(E->Scalars);

    Value *VecValue = vectorizeTree(ValueOp);
    Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));
    StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
    S->setAlignment(Alignment);
    E->VectorizedValue = S;
    return S;
  }
  default:
    llvm_unreachable("unknown inst");
  }
  return 0;
}

Value *BoUpSLP::vectorizeTree() {
  Builder.SetInsertPoint(F->getEntryBlock().begin());
  vectorizeTree(&VectorizableTree[0]);

  DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values.\n");

  // Extract all of the elements with the external uses.
  for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end();
       it != e; ++it) {
    Value *Scalar = it->Scalar;
    llvm::User *User = it->User;

    // Skip users that we already RAUW. This happens when one instruction
    // has multiple uses of the same value.
    if (std::find(Scalar->use_begin(), Scalar->use_end(), User) ==
        Scalar->use_end())
      continue;
    assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");

    int Idx = ScalarToTreeEntry[Scalar];
    TreeEntry *E = &VectorizableTree[Idx];
    assert(!E->NeedToGather && "Extracting from a gather list");

    Value *Vec = E->VectorizedValue;
    assert(Vec && "Can't find vectorizable value");

    Value *Lane = Builder.getInt32(it->Lane);
    // Generate extracts for out-of-tree users.
    // Find the insertion point for the extractelement lane.
    if (PHINode *PN = dyn_cast<PHINode>(Vec)) {
      Builder.SetInsertPoint(PN->getParent()->getFirstInsertionPt());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      User->replaceUsesOfWith(Scalar, Ex);
    } else if (isa<Instruction>(Vec)) {
      if (PHINode *PH = dyn_cast<PHINode>(User)) {
        for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
          if (PH->getIncomingValue(i) == Scalar) {
            Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
            Value *Ex = Builder.CreateExtractElement(Vec, Lane);
            PH->setOperand(i, Ex);
          }
        }
      } else {
        Builder.SetInsertPoint(cast<Instruction>(User));
        Value *Ex = Builder.CreateExtractElement(Vec, Lane);
        User->replaceUsesOfWith(Scalar, Ex);
      }
    } else {
      Builder.SetInsertPoint(F->getEntryBlock().begin());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      User->replaceUsesOfWith(Scalar, Ex);
    }

    DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
  }

  // For each vectorized value:
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      assert(Entry->VectorizedValue && "Can't find vectorizable value");

      Type *Ty = Scalar->getType();
      if (!Ty->isVoidTy()) {
        for (Value::use_iterator User = Scalar->use_begin(),
             UE = Scalar->use_end(); User != UE; ++User) {
          DEBUG(dbgs() << "SLP: \tvalidating user:" << **User << ".\n");
          assert(!MustGather.count(*User) &&
                 "Replacing gathered value with undef");

          assert((ScalarToTreeEntry.count(*User) ||
                  // It is legal to replace the reduction users by undef.
                  (RdxOps && RdxOps->count(*User))) &&
                 "Replacing out-of-tree value with undef");
        }
        Value *Undef = UndefValue::get(Ty);
        Scalar->replaceAllUsesWith(Undef);
      }
      DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
      cast<Instruction>(Scalar)->eraseFromParent();
    }
  }

  for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it) {
    BlocksNumbers[it].forget();
  }
  Builder.ClearInsertionPoint();

  return VectorizableTree[0].VectorizedValue;
}

void BoUpSLP::optimizeGatherSequence() {
  DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
        << " gather sequence instructions.\n");
  // LICM InsertElementInst sequences.
  for (SetVector<Instruction *>::iterator it = GatherSeq.begin(),
       e = GatherSeq.end(); it != e; ++it) {
    InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it);

    if (!Insert)
      continue;

    // Check if this block is inside a loop.
    Loop *L = LI->getLoopFor(Insert->getParent());
    if (!L)
      continue;

    // Check if it has a preheader.
    BasicBlock *PreHeader = L->getLoopPreheader();
    if (!PreHeader)
      continue;

    // If the vector or the element that we insert into it are
    // instructions that are defined in this basic block then we can't
    // hoist this instruction.
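    // For example, an insertelement that builds a vector from loop-invariant
    // scalars can be hoisted to the preheader so the gather runs once instead
    // of on every iteration.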
    Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
    Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
    if (CurrVec && L->contains(CurrVec))
      continue;
    if (NewElem && L->contains(NewElem))
      continue;

    // We can hoist this instruction. Move it to the pre-header.
    Insert->moveBefore(PreHeader->getTerminator());
  }

  // Perform O(N^2) search over the gather sequences and merge identical
  // instructions. TODO: We can further optimize this scan if we split the
  // instructions into different buckets based on the insert lane.
  SmallPtrSet<Instruction*, 16> Visited;
  SmallVector<Instruction*, 16> ToRemove;
  ReversePostOrderTraversal<Function*> RPOT(F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator I = RPOT.begin(),
       E = RPOT.end(); I != E; ++I) {
    BasicBlock *BB = *I;
    // For all instructions in the function:
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
      Instruction *In = it;
      if ((!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In)) ||
          !GatherSeq.count(In))
        continue;

      // Check if we can replace this instruction with any of the
      // visited instructions.
      for (SmallPtrSet<Instruction*, 16>::iterator v = Visited.begin(),
           ve = Visited.end(); v != ve; ++v) {
        if (In->isIdenticalTo(*v) &&
            DT->dominates((*v)->getParent(), In->getParent())) {
          In->replaceAllUsesWith(*v);
          ToRemove.push_back(In);
          In = 0;
          break;
        }
      }
      if (In)
        Visited.insert(In);
    }
  }

  // Erase all of the instructions that we RAUWed.
  for (SmallVectorImpl<Instruction *>::iterator v = ToRemove.begin(),
       ve = ToRemove.end(); v != ve; ++v) {
    assert((*v)->getNumUses() == 0 && "Can't remove instructions with uses");
    (*v)->eraseFromParent();
  }
}

/// The SLPVectorizer Pass.
struct SLPVectorizer : public FunctionPass {
  typedef SmallVector<StoreInst *, 8> StoreList;
  typedef MapVector<Value *, StoreList> StoreListMap;

  /// Pass identification, replacement for typeid
  static char ID;

  explicit SLPVectorizer() : FunctionPass(ID) {
    initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
  }

  ScalarEvolution *SE;
  DataLayout *DL;
  TargetTransformInfo *TTI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;

  virtual bool runOnFunction(Function &F) {
    SE = &getAnalysis<ScalarEvolution>();
    DL = getAnalysisIfAvailable<DataLayout>();
    TTI = &getAnalysis<TargetTransformInfo>();
    AA = &getAnalysis<AliasAnalysis>();
    LI = &getAnalysis<LoopInfo>();
    DT = &getAnalysis<DominatorTree>();

    StoreRefs.clear();
    bool Changed = false;

    // If the target claims to have no vector registers don't attempt
    // vectorization.
    if (!TTI->getNumberOfRegisters(true))
      return false;

    // Must have DataLayout. We can't require it because some tests run w/o
    // triple.
    if (!DL)
      return false;

    // Don't vectorize when the attribute NoImplicitFloat is used.
    if (F.hasFnAttribute(Attribute::NoImplicitFloat))
      return false;

    DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");

    // Use the bottom up SLP vectorizer to construct chains that start with
    // the store instructions.
    BoUpSLP R(&F, SE, DL, TTI, AA, LI, DT);

    // Scan the blocks in the function in post order.
    for (po_iterator<BasicBlock*> it = po_begin(&F.getEntryBlock()),
         e = po_end(&F.getEntryBlock()); it != e; ++it) {
      BasicBlock *BB = *it;

      // Vectorize trees that end at stores.
      if (unsigned count = collectStores(BB, R)) {
        (void)count;
        DEBUG(dbgs() << "SLP: Found " << count << " stores to vectorize.\n");
        Changed |= vectorizeStoreChains(R);
      }

      // Vectorize trees that end at reductions.
      Changed |= vectorizeChainsInBlock(BB, R);
    }

    if (Changed) {
      R.optimizeGatherSequence();
      DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
      DEBUG(verifyFunction(F));
    }
    return Changed;
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    FunctionPass::getAnalysisUsage(AU);
    AU.addRequired<ScalarEvolution>();
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<TargetTransformInfo>();
    AU.addRequired<LoopInfo>();
    AU.addRequired<DominatorTree>();
    AU.addPreserved<LoopInfo>();
    AU.addPreserved<DominatorTree>();
    AU.setPreservesCFG();
  }

private:

  /// \brief Collect memory references and sort them according to their base
  /// object. We sort the stores by their base objects to reduce the cost of
  /// the quadratic search on the stores. TODO: We can further reduce this
  /// cost if we flush the chain creation every time we run into a memory
  /// barrier.
  unsigned collectStores(BasicBlock *BB, BoUpSLP &R);

  /// \brief Try to vectorize a chain that starts at two arithmetic instrs.
  bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R);

  /// \brief Try to vectorize a list of operands.
  /// \returns true if a value was vectorized.
  bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R);

  /// \brief Try to vectorize a chain that may start at the operands of \p V.
  bool tryToVectorize(BinaryOperator *V, BoUpSLP &R);

  /// \brief Vectorize the stores that were collected in StoreRefs.
  bool vectorizeStoreChains(BoUpSLP &R);

  /// \brief Scan the basic block and look for patterns that are likely to
  /// start a vectorization chain.
  bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R);

  bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold,
                           BoUpSLP &R);

  bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold,
                       BoUpSLP &R);
private:
  StoreListMap StoreRefs;
};

bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
                                        int CostThreshold, BoUpSLP &R) {
  unsigned ChainLen = Chain.size();
  DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
        << "\n");
  Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType();
  unsigned Sz = DL->getTypeSizeInBits(StoreTy);
  unsigned VF = MinVecRegSize / Sz;

  if (!isPowerOf2_32(Sz) || VF < 2)
    return false;

  bool Changed = false;
  // Look for profitable vectorizable trees at all offsets, starting at zero.
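  // Illustrative arithmetic: with MinVecRegSize = 128 and 32-bit stores,
  // VF = 128 / 32 = 4, so a chain of 6 stores is examined at offsets 0..2.
  // A successful bundle advances i past its VF stores so the same stores
  // are not vectorized twice.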
  for (unsigned i = 0, e = ChainLen; i < e; ++i) {
    if (i + VF > e)
      break;
    DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
          << "\n");
    ArrayRef<Value *> Operands = Chain.slice(i, VF);

    R.buildTree(Operands);

    int Cost = R.getTreeCost();

    DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
    if (Cost < CostThreshold) {
      DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
                                    int costThreshold, BoUpSLP &R) {
  SetVector<Value *> Heads, Tails;
  SmallDenseMap<Value *, Value *> ConsecutiveChain;

  // We may run into multiple chains that merge into a single chain. We mark
  // the stores that we vectorized so that we don't visit the same store
  // twice.
  BoUpSLP::ValueSet VectorizedStores;
  bool Changed = false;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
    for (unsigned j = 0; j < e; ++j) {
      if (i == j)
        continue;

      if (R.isConsecutiveAccess(Stores[i], Stores[j])) {
        Tails.insert(Stores[j]);
        Heads.insert(Stores[i]);
        ConsecutiveChain[Stores[i]] = Stores[j];
      }
    }
  }

  // For stores that start a chain but are not themselves the continuation
  // of another chain:
  for (SetVector<Value *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and
    // try to vectorize it.
    BoUpSLP::ValueList Operands;
    Value *I = *it;
    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (VectorizedStores.count(I))
        break;
      Operands.push_back(I);
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    bool Vectorized = vectorizeStoreChain(Operands, costThreshold, R);

    // Mark the vectorized stores so that we don't vectorize them again.
    if (Vectorized)
      VectorizedStores.insert(Operands.begin(), Operands.end());
    Changed |= Vectorized;
  }

  return Changed;
}

unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
  unsigned count = 0;
  StoreRefs.clear();
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    StoreInst *SI = dyn_cast<StoreInst>(it);
    if (!SI)
      continue;

    // Don't touch volatile or atomic stores.
    if (!SI->isSimple())
      continue;

    // Check that the pointer points to scalars.
    Type *Ty = SI->getValueOperand()->getType();
    if (Ty->isAggregateType() || Ty->isVectorTy())
      return 0;

    // Find the base pointer.
    Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL);

    // Save the store locations.
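    // (Illustrative note: GetUnderlyingObject looks through GEPs and
    // bitcasts, so stores to A[0], A[1], ... are all keyed by the same
    // underlying object A and land in one bucket.)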
    StoreRefs[Ptr].push_back(SI);
    count++;
  }
  return count;
}

bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
  if (!A || !B)
    return false;
  Value *VL[] = { A, B };
  return tryToVectorizeList(VL, R);
}

bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R) {
  if (VL.size() < 2)
    return false;

  DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size()
        << ".\n");

  // Check that all of the parts are scalar instructions of the same type.
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;

  unsigned Opcode0 = I0->getOpcode();

  Type *Ty0 = I0->getType();
  unsigned Sz = DL->getTypeSizeInBits(Ty0);
  unsigned VF = MinVecRegSize / Sz;

  for (int i = 0, e = VL.size(); i < e; ++i) {
    Type *Ty = VL[i]->getType();
    if (Ty->isAggregateType() || Ty->isVectorTy())
      return false;
    Instruction *Inst = dyn_cast<Instruction>(VL[i]);
    if (!Inst || Inst->getOpcode() != Opcode0)
      return false;
  }

  bool Changed = false;

  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    unsigned OpsWidth = 0;

    if (i + VF > e)
      OpsWidth = e - i;
    else
      OpsWidth = VF;

    if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
      break;

    DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations " << "\n");
    ArrayRef<Value *> Ops = VL.slice(i, OpsWidth);

    R.buildTree(Ops);
    int Cost = R.getTreeCost();

    if (Cost < -SLPCostThreshold) {
      DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
  if (!V)
    return false;

  // Try to vectorize V.
  if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R))
    return true;

  BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0));
  BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1));
  // Try to skip B.
  if (B && B->hasOneUse()) {
    BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
    BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
    if (tryToVectorizePair(A, B0, R)) {
      B->moveBefore(V);
      return true;
    }
    if (tryToVectorizePair(A, B1, R)) {
      B->moveBefore(V);
      return true;
    }
  }

  // Try to skip A.
  if (A && A->hasOneUse()) {
    BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
    BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
    if (tryToVectorizePair(A0, B, R)) {
      A->moveBefore(V);
      return true;
    }
    if (tryToVectorizePair(A1, B, R)) {
      A->moveBefore(V);
      return true;
    }
  }
  return false;
}

/// \brief Generate a shuffle mask to be used in a reduction tree.
///
/// \param VecLen The length of the vector to be reduced.
/// \param NumEltsToRdx The number of elements that should be reduced in the
///        vector.
/// \param IsPairwise Whether the reduction is a pairwise or splitting
///        reduction. A pairwise reduction will generate a mask of
///        <0,2,...> or <1,3,...> while a splitting reduction will generate
///        <2,3, undef,undef> for a vector of 4 and NumEltsToRdx = 2.
/// \param IsLeft True generates a mask of even elements, false of odd
///        elements.
static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
                                   bool IsPairwise, bool IsLeft,
                                   IRBuilder<> &Builder) {
  assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");

  SmallVector<Constant *, 32> ShuffleMask(
      VecLen, UndefValue::get(Builder.getInt32Ty()));

  if (IsPairwise)
    // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
  else
    // Move the upper half of the vector to the lower half.
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);

  return ConstantVector::get(ShuffleMask);
}

/// Model horizontal reductions.
///
/// A horizontal reduction is a tree of reduction operations (currently add
/// and fadd) that has operations that can be put into a vector as its
/// leaves. For example, this tree:
///
///   mul mul mul mul
///    \  /    \  /
///     +       +
///      \     /
///         +
///
/// This tree has "mul" as its reduced values and "+" as its reduction
/// operations. A reduction might be feeding into a store or a binary
/// operation feeding a phi.
///
///      ...
///      \  /
///       +
///       |
///   phi +=
///
/// Or:
///      ...
///      \  /
///       +
///       |
///     *p =
///
class HorizontalReduction {
  SmallPtrSet<Value *, 16> ReductionOps;
  SmallVector<Value *, 32> ReducedVals;

  BinaryOperator *ReductionRoot;
  PHINode *ReductionPHI;

  /// The opcode of the reduction.
  unsigned ReductionOpcode;
  /// The opcode of the values we perform a reduction on.
  unsigned ReducedValueOpcode;
  /// The width of one full horizontal reduction operation.
  unsigned ReduxWidth;
  /// Should we model this reduction as a pairwise reduction tree or a tree
  /// that splits the vector in halves and adds those halves.
  bool IsPairwiseReduction;

public:
  HorizontalReduction()
      : ReductionRoot(0), ReductionPHI(0), ReductionOpcode(0),
        ReducedValueOpcode(0), ReduxWidth(0), IsPairwiseReduction(false) {}

  /// \brief Try to find a reduction tree.
  bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B,
                                 DataLayout *DL) {
    assert((!Phi ||
            std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) &&
           "This phi needs to use the binary operator");

    // We could have an initial reduction that is not an add.
    //   r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted in the first '+'.
    if (Phi) {
      if (B->getOperand(0) == Phi) {
        Phi = 0;
        B = dyn_cast<BinaryOperator>(B->getOperand(1));
      } else if (B->getOperand(1) == Phi) {
        Phi = 0;
        B = dyn_cast<BinaryOperator>(B->getOperand(0));
      }
    }

    if (!B)
      return false;

    Type *Ty = B->getType();
    if (Ty->isVectorTy())
      return false;

    ReductionOpcode = B->getOpcode();
    ReducedValueOpcode = 0;
    ReduxWidth = MinVecRegSize / DL->getTypeSizeInBits(Ty);
    ReductionRoot = B;
    ReductionPHI = Phi;

    if (ReduxWidth < 4)
      return false;

    // We currently only support adds.
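    // (Illustrative: an integer 'add' or floating-point 'fadd' chain such as
    //   s = a[0] + a[1] + a[2] + a[3]
    // is matched; 'mul'-, 'and'- or min/max-style reduction trees are
    // rejected by the check below.)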
    if (ReductionOpcode != Instruction::Add &&
        ReductionOpcode != Instruction::FAdd)
      return false;

    // Post-order traverse the reduction tree starting at B. We only handle
    // true trees containing only binary operators.
    SmallVector<std::pair<BinaryOperator *, unsigned>, 32> Stack;
    Stack.push_back(std::make_pair(B, 0));
    while (!Stack.empty()) {
      BinaryOperator *TreeN = Stack.back().first;
      unsigned EdgeToVisit = Stack.back().second++;
      bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;

      // Only handle trees in the current basic block.
      if (TreeN->getParent() != B->getParent())
        return false;

      // Each tree node needs to have one user except for the ultimate
      // reduction.
      if (!TreeN->hasOneUse() && TreeN != B)
        return false;

      // Postorder visit.
      if (EdgeToVisit == 2 || IsReducedValue) {
        if (IsReducedValue) {
          // Make sure that the opcodes of the operations that we are going
          // to reduce match.
          if (!ReducedValueOpcode)
            ReducedValueOpcode = TreeN->getOpcode();
          else if (ReducedValueOpcode != TreeN->getOpcode())
            return false;
          ReducedVals.push_back(TreeN);
        } else {
          // We need to be able to reassociate the adds.
          if (!TreeN->isAssociative())
            return false;
          ReductionOps.insert(TreeN);
        }
        // Retract.
        Stack.pop_back();
        continue;
      }

      // Visit left or right.
      Value *NextV = TreeN->getOperand(EdgeToVisit);
      BinaryOperator *Next = dyn_cast<BinaryOperator>(NextV);
      if (Next)
        Stack.push_back(std::make_pair(Next, 0));
      else if (NextV != Phi)
        return false;
    }
    return true;
  }

  /// \brief Attempt to vectorize the tree found by
  /// matchAssociativeReduction.
  bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
    if (ReducedVals.empty())
      return false;

    unsigned NumReducedVals = ReducedVals.size();
    if (NumReducedVals < ReduxWidth)
      return false;

    Value *VectorizedTree = 0;
    IRBuilder<> Builder(ReductionRoot);
    FastMathFlags Unsafe;
    Unsafe.setUnsafeAlgebra();
    Builder.SetFastMathFlags(Unsafe);
    unsigned i = 0;

    for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
      ArrayRef<Value *> ValsToReduce(&ReducedVals[i], ReduxWidth);
      V.buildTree(ValsToReduce, &ReductionOps);

      // Estimate cost.
      int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]);
      if (Cost >= -SLPCostThreshold)
        break;

      DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
            << Cost << ". (HorRdx)\n");

      // Vectorize a tree.
      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
      Value *VectorizedRoot = V.vectorizeTree();

      // Emit a reduction.
      Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder);
      if (VectorizedTree) {
        Builder.SetCurrentDebugLocation(Loc);
        VectorizedTree = createBinOp(Builder, ReductionOpcode,
                                     VectorizedTree, ReducedSubTree,
                                     "bin.rdx");
      } else
        VectorizedTree = ReducedSubTree;
    }

    if (VectorizedTree) {
      // Finish the reduction.
      for (; i < NumReducedVals; ++i) {
        Builder.SetCurrentDebugLocation(
            cast<Instruction>(ReducedVals[i])->getDebugLoc());
        VectorizedTree = createBinOp(Builder, ReductionOpcode,
                                     VectorizedTree, ReducedVals[i]);
      }
      // Update users.
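      // (Illustrative: for a loop reduction 'sum += ...', the root binary
      // operator is rewritten as 'VectorizedTree <op> ReductionPHI' so the
      // PHI keeps feeding the running total; otherwise all users of the
      // root are redirected to the reduced value.)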
      if (ReductionPHI) {
        assert(ReductionRoot != NULL && "Need a reduction operation");
        ReductionRoot->setOperand(0, VectorizedTree);
        ReductionRoot->setOperand(1, ReductionPHI);
      } else
        ReductionRoot->replaceAllUsesWith(VectorizedTree);
    }
    return VectorizedTree != 0;
  }

private:

  /// \brief Calculate the cost of a reduction.
  int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) {
    Type *ScalarTy = FirstReducedVal->getType();
    Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);

    int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true);
    int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy,
                                                 false);

    IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
    int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost
                                           : SplittingRdxCost;

    int ScalarReduxCost =
        ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, VecTy);

    DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
          << " for reduction that starts with " << *FirstReducedVal
          << " (It is a "
          << (IsPairwiseReduction ? "pairwise" : "splitting")
          << " reduction)\n");

    return VecReduxCost - ScalarReduxCost;
  }

  static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L,
                            Value *R, const Twine &Name = "") {
    if (Opcode == Instruction::FAdd)
      return Builder.CreateFAdd(L, R, Name);
    return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name);
  }

  /// \brief Emit a horizontal reduction of the vectorized value.
  Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) {
    assert(VectorizedValue && "Need to have a vectorized tree node");
    Instruction *ValToReduce = dyn_cast<Instruction>(VectorizedValue);
    assert(isPowerOf2_32(ReduxWidth) &&
           "We only handle power-of-two reductions for now");

    Value *TmpVec = ValToReduce;
    for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
      if (IsPairwiseReduction) {
        Value *LeftMask =
            createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
        Value *RightMask =
            createRdxShuffleMask(ReduxWidth, i, true, false, Builder);

        Value *LeftShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), LeftMask,
            "rdx.shuf.l");
        Value *RightShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), RightMask,
            "rdx.shuf.r");
        TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf,
                             "bin.rdx");
      } else {
        Value *UpperHalf =
            createRdxShuffleMask(ReduxWidth, i, false, false, Builder);
        Value *Shuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf,
            "rdx.shuf");
        TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf,
                             "bin.rdx");
      }
    }

    // The result is in the first element of the vector.
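    // Illustrative trace for ReduxWidth == 4 in the splitting form:
    //   i == 2: tmp = tmp <op> shuffle(tmp, <2,3,undef,undef>)
    //   i == 1: tmp = tmp <op> shuffle(tmp, <1,undef,undef,undef>)
    // after which lane 0 holds the full reduction.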
    return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
  }
};

/// \brief Recognize construction of vectors like
///   %ra = insertelement <4 x float> undef, float %s0, i32 0
///   %rb = insertelement <4 x float> %ra, float %s1, i32 1
///   %rc = insertelement <4 x float> %rb, float %s2, i32 2
///   %rd = insertelement <4 x float> %rc, float %s3, i32 3
///
/// Returns true if it matches.
///
static bool findBuildVector(InsertElementInst *IE,
                            SmallVectorImpl<Value *> &Ops) {
  if (!isa<UndefValue>(IE->getOperand(0)))
    return false;

  while (true) {
    Ops.push_back(IE->getOperand(1));

    if (IE->use_empty())
      return false;

    InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->use_back());
    if (!NextUse)
      return true;

    // If this isn't the final use, make sure the next insertelement is the
    // only use. It's OK if the final constructed vector is used multiple
    // times.
    if (!IE->hasOneUse())
      return false;

    IE = NextUse;
  }

  return false;
}

static bool PhiTypeSorterFunc(Value *V, Value *V2) {
  return V->getType() < V2->getType();
}

bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallSet<Value *, 16> VisitedInstrs;

  bool HaveVectorizedPhiNodes = true;
  while (HaveVectorizedPhiNodes) {
    HaveVectorizedPhiNodes = false;

    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (BasicBlock::iterator instr = BB->begin(), ie = BB->end();
         instr != ie; ++instr) {
      PHINode *P = dyn_cast<PHINode>(instr);
      if (!P)
        break;

      if (!VisitedInstrs.count(P))
        Incoming.push_back(P);
    }

    // Sort by type.
    std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);

    // Try to vectorize elements based on their type.
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
         E = Incoming.end(); IncIt != E;) {

      // Look for the next elements with the same type.
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs ("
            << NumElts << ")\n");
      if (NumElts > 1 &&
          tryToVectorizeList(ArrayRef<Value *>(IncIt, NumElts), R)) {
        // Success: start over because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // We may go through BB multiple times so skip the one we have checked.
    if (!VisitedInstrs.insert(it))
      continue;

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;
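      // Find the incoming value defined in this block; it is the candidate
      // reduction root. (Illustrative: for a PHI such as
      //   %sum = phi i32 [ 0, %entry ], [ %add, %bb ]
      // scanned while visiting %bb, Rdx is %add.)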
      Value *Rdx =
          (P->getIncomingBlock(0) == BB
               ? (P->getIncomingValue(0))
               : (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1)
                                               : 0));
      // Check if this is a Binary Operator.
      BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx);
      if (!BI)
        continue;

      // Try to match and vectorize a horizontal reduction.
      HorizontalReduction HorRdx;
      if (ShouldVectorizeHor &&
          HorRdx.matchAssociativeReduction(P, BI, DL) &&
          HorRdx.tryToReduce(R, TTI)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      Value *Inst = BI->getOperand(0);
      if (Inst == P)
        Inst = BI->getOperand(1);

      if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      continue;
    }

    // Try to vectorize horizontal reductions feeding into a store.
    if (ShouldStartVectorizeHorAtStore)
      if (StoreInst *SI = dyn_cast<StoreInst>(it))
        if (BinaryOperator *BinOp =
                dyn_cast<BinaryOperator>(SI->getValueOperand())) {
          HorizontalReduction HorRdx;
          if (((HorRdx.matchAssociativeReduction(0, BinOp, DL) &&
                HorRdx.tryToReduce(R, TTI)) ||
               tryToVectorize(BinOp, R))) {
            Changed = true;
            it = BB->begin();
            e = BB->end();
            continue;
          }
        }

    // Try to vectorize trees that start at compare instructions.
    if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
      if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
        Changed = true;
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        it = BB->begin();
        e = BB->end();
        continue;
      }

      for (int i = 0; i < 2; ++i) {
        if (BinaryOperator *BI =
                dyn_cast<BinaryOperator>(CI->getOperand(i))) {
          if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) {
            Changed = true;
            // We would like to start over since some instructions are
            // deleted and the iterator may become invalid.
            it = BB->begin();
            e = BB->end();
          }
        }
      }
      continue;
    }

    // Try to vectorize trees that start at insertelement instructions.
    if (InsertElementInst *IE = dyn_cast<InsertElementInst>(it)) {
      SmallVector<Value *, 8> Ops;
      if (!findBuildVector(IE, Ops))
        continue;

      if (tryToVectorizeList(Ops, R)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }

      continue;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = StoreRefs.begin(), e = StoreRefs.end();
       it != e; ++it) {
    if (it->second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
          << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
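    // (Illustrative: the pairing search in vectorizeStores is quadratic in
    // the chunk length, so 16-store chunks cap it at 16 * 16 = 256
    // isConsecutiveAccess queries per chunk.)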
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      ArrayRef<StoreInst *> Chunk(&it->second[CI], Len);
      Changed |= vectorizeStores(Chunk, -SLPCostThreshold, R);
    }
  }
  return Changed;
}

} // end anonymous namespace

char SLPVectorizer::ID = 0;
static const char lv_name[] = "SLP Vectorizer";
INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

namespace llvm {
Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
}
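// Illustrative usage (a sketch, not part of this file): the pass is created
// through the factory above and scheduled like any other function pass, e.g.
//   FunctionPassManager FPM(&M);
//   FPM.add(createSLPVectorizerPass());
// or run from the command line with 'opt -slp-vectorizer'.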