//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <map>

using namespace llvm;

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number"));

static cl::opt<bool>
    ShouldVectorizeHor("slp-vectorize-hor", cl::init(false), cl::Hidden,
                       cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

namespace {

static const unsigned MinVecRegSize = 128;

static const unsigned RecursionMaxDepth = 12;

/// A helper class for numbering instructions in multiple blocks.
/// Numbers start at zero for each basic block.
struct BlockNumbering {

  BlockNumbering(BasicBlock *Bb) : BB(Bb), Valid(false) {}

  BlockNumbering() : BB(0), Valid(false) {}

  void numberInstructions() {
    unsigned Loc = 0;
    InstrIdx.clear();
    InstrVec.clear();
    // Number the instructions in the block.
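    // The indices give a cheap program-order position for each instruction;
    // getLastIndex and the scheduling checks in buildTree_rec compare them
    // to decide whether a bundle of scalars can be scheduled as one unit.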
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
      InstrIdx[it] = Loc++;
      InstrVec.push_back(it);
      assert(InstrVec[InstrIdx[it]] == it && "Invalid allocation");
    }
    Valid = true;
  }

  int getIndex(Instruction *I) {
    assert(I->getParent() == BB && "Invalid instruction");
    if (!Valid)
      numberInstructions();
    assert(InstrIdx.count(I) && "Unknown instruction");
    return InstrIdx[I];
  }

  Instruction *getInstruction(unsigned loc) {
    if (!Valid)
      numberInstructions();
    assert(InstrVec.size() > loc && "Invalid Index");
    return InstrVec[loc];
  }

  void forget() { Valid = false; }

private:
  /// The block we are numbering.
  BasicBlock *BB;
  /// Is the block numbered.
  bool Valid;
  /// Maps instructions to numbers and back.
  SmallDenseMap<Instruction *, int> InstrIdx;
  /// Maps integers to Instructions.
  SmallVector<Instruction *, 32> InstrVec;
};

/// \returns the parent basic block if all of the instructions in \p VL
/// are in the same block or null otherwise.
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return 0;

    if (BB != I->getParent())
      return 0;
  }
  return BB;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    if (!isa<Constant>(VL[i]))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode())
      return 0;
  }
  return Opcode;
}

/// \returns The type that all of the values in \p VL have or null if there
/// are different types.
static Type *getSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return 0;

  return Ty;
}

/// \returns True if the ExtractElement instructions in VL can be vectorized
/// to use the original vector.
static bool CanReuseExtract(ArrayRef<Value *> VL) {
  assert(Instruction::ExtractElement == getSameOpcode(VL) && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *VL0 = VL[0];
  ExtractElementInst *E0 = cast<ExtractElementInst>(VL0);
  Value *Vec = E0->getOperand(0);

  // We have to extract from the same vector type.
  unsigned NElts = Vec->getType()->getVectorNumElements();

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
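  // For example, the bundle {extractelement %v, 0; extractelement %v, 1; ...}
  // in lane order can simply reuse %v; any other index pattern cannot.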
  ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1));
  if (!CI || CI->getZExtValue())
    return false;

  for (unsigned i = 1, e = VL.size(); i < e; ++i) {
    ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));

    if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec)
      return false;
  }

  return true;
}

static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                           SmallVectorImpl<Value *> &Left,
                                           SmallVectorImpl<Value *> &Right) {

  SmallVector<Value *, 16> OrigLeft, OrigRight;

  bool AllSameOpcodeLeft = true;
  bool AllSameOpcodeRight = true;
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *I = cast<Instruction>(VL[i]);
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);

    OrigLeft.push_back(V0);
    OrigRight.push_back(V1);

    Instruction *I0 = dyn_cast<Instruction>(V0);
    Instruction *I1 = dyn_cast<Instruction>(V1);

    // Check whether all operands on one side have the same opcode. In this
    // case we want to preserve the original order and not make things worse
    // by reordering.
    AllSameOpcodeLeft = I0;
    AllSameOpcodeRight = I1;

    if (i && AllSameOpcodeLeft) {
      if (Instruction *P0 = dyn_cast<Instruction>(OrigLeft[i-1])) {
        if (P0->getOpcode() != I0->getOpcode())
          AllSameOpcodeLeft = false;
      } else
        AllSameOpcodeLeft = false;
    }
    if (i && AllSameOpcodeRight) {
      if (Instruction *P1 = dyn_cast<Instruction>(OrigRight[i-1])) {
        if (P1->getOpcode() != I1->getOpcode())
          AllSameOpcodeRight = false;
      } else
        AllSameOpcodeRight = false;
    }

    // Sort two opcodes. In the code below we try to preserve the ability to
    // use a broadcast of values instead of individual inserts.
    // vl1 = load
    // vl2 = phi
    // vr1 = load
    // vr2 = vr1
    //     = vl1 x vr1
    //     = vl2 x vr2
    // If we just sorted according to opcode we would leave the first line
    // intact but we would swap vl2 with vr2 because opcode(phi) >
    // opcode(load).
    //     = vl1 x vr1
    //     = vr2 x vl2
    // Because vr2 and vr1 are from the same load we lose the opportunity of a
    // broadcast for the packed right side in the backend: we have [vr1, vl2]
    // instead of [vr1, vr2=vr1].
    if (I0 && I1) {
      if (!i && I0->getOpcode() > I1->getOpcode()) {
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() > I1->getOpcode() && Right[i-1] != I1) {
        // Try not to destroy a broadcast for no apparent benefit.
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() == I1->getOpcode() && Right[i-1] == I0) {
        // Try to preserve broadcasts.
        Left.push_back(I1);
        Right.push_back(I0);
      } else if (i && I0->getOpcode() == I1->getOpcode() && Left[i-1] == I1) {
        // Try to preserve broadcasts.
        Left.push_back(I1);
        Right.push_back(I0);
      } else {
        Left.push_back(I0);
        Right.push_back(I1);
      }
      continue;
    }
    // Only one side is an instruction; put the instruction on the right.
    if (I0) {
      Left.push_back(V1);
      Right.push_back(I0);
      continue;
    }
    Left.push_back(V0);
    Right.push_back(V1);
  }

  bool LeftBroadcast = isSplat(Left);
  bool RightBroadcast = isSplat(Right);

  // Don't reorder if the operands were good to begin with.
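  // If the reordering did not produce a broadcast on either side, fall back
  // to the original order whenever one side already had uniform opcodes.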
  if (!(LeftBroadcast || RightBroadcast) &&
      (AllSameOpcodeRight || AllSameOpcodeLeft)) {
    Left = OrigLeft;
    Right = OrigRight;
  }
}

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;

  BoUpSLP(Function *Func, ScalarEvolution *Se, DataLayout *Dl,
          TargetTransformInfo *Tti, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt) :
    F(Func), SE(Se), DL(Dl), TTI(Tti), AA(Aa), LI(Li), DT(Dt),
    Builder(Se->getContext()) {
    // Set up the block numbering utility for all of the blocks in the
    // function.
    for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it) {
      BasicBlock *BB = it;
      BlocksNumbers[BB] = BlockNumbering(BB);
    }
  }

  /// \brief Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots and is possibly
  /// used by a reduction of \p RdxOps.
  void buildTree(ArrayRef<Value *> Roots, ValueSet *RdxOps = 0);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    RdxOps = 0;
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    MemBarrierIgnoreList.clear();
  }

  /// \returns true if the memory operations A and B are consecutive.
  bool isConsecutiveAccess(Value *A, Value *B);

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();
private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This can happen in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL) const;

  /// \brief Take the pointer operand from the Load/Store instruction.
  /// \returns NULL if this is not a valid Load/Store instruction.
  static Value *getPointerOperand(Value *I);

  /// \brief Take the address space operand from the Load/Store instruction.
  /// \returns -1 if this is not a valid Load/Store instruction.
  static unsigned getAddressSpaceOperand(Value *I);

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \returns the AA location that is being accessed by the instruction.
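  /// Instructions other than loads and stores yield an empty Location.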
  AliasAnalysis::Location getLocation(Instruction *I);

  /// \brief Checks if it is possible to sink an instruction from
  /// \p Src to \p Dst.
  /// \returns the pointer to the barrier instruction if we can't sink.
  Value *getSinkBarrier(Instruction *Src, Instruction *Dst);

  /// \returns the index of the last instruction in the BB from \p VL.
  int getLastIndex(ArrayRef<Value *> VL);

  /// \returns the last Instruction in the bundle \p VL.
  Instruction *getLastInstruction(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even when the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  struct TreeEntry {
    TreeEntry() : Scalars(), VectorizedValue(0), LastScalarIndex(0),
                  NeedToGather(0) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// The index in the basic block of the last scalar.
    int LastScalarIndex;

    /// Do we need to gather this sequence?
    bool NeedToGather;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) {
    VectorizableTree.push_back(TreeEntry());
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      Last->LastScalarIndex = getLastIndex(VL);
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      Last->LastScalarIndex = 0;
      MustGather.insert(VL.begin(), VL.end());
    }
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value*, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L) :
      Scalar(S), User(U), Lane(L) {}
    // Which scalar in our function.
    Value *Scalar;
    // Which user uses the scalar.
    llvm::User *User;
    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User).
  UserList ExternalUses;

  /// A list of instructions to ignore while sinking
  /// memory instructions. This map must be reset between runs of getCost.
  ValueSet MemBarrierIgnoreList;

  /// Holds all of the instructions that we gathered.
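  /// These are the candidates that optimizeGatherSequence() later tries to
  /// hoist out of loops and CSE across blocks.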
  SetVector<Instruction *> GatherSeq;

  /// Numbers instructions in different blocks.
  DenseMap<BasicBlock *, BlockNumbering> BlocksNumbers;

  /// Reduction operators.
  ValueSet *RdxOps;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  DataLayout *DL;
  TargetTransformInfo *TTI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;
};

void BoUpSLP::buildTree(ArrayRef<Value *> Roots, ValueSet *Rdx) {
  deleteTree();
  RdxOps = Rdx;
  if (!getSameType(Roots))
    return;
  buildTree_rec(Roots, 0);

  // Collect the values that we need to extract from the tree.
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      for (Value::use_iterator User = Scalar->use_begin(),
           UE = Scalar->use_end(); User != UE; ++User) {
        DEBUG(dbgs() << "SLP: Checking user:" << **User << ".\n");

        bool Gathered = MustGather.count(*User);

        // Skip in-tree scalars that become vectors.
        if (ScalarToTreeEntry.count(*User) && !Gathered) {
          DEBUG(dbgs() << "SLP: \tInternal user will be removed:" <<
                **User << ".\n");
          int Idx = ScalarToTreeEntry[*User];
          (void) Idx;
          assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
          continue;
        }
        Instruction *UserInst = dyn_cast<Instruction>(*User);
        if (!UserInst)
          continue;

        // Ignore uses that are part of the reduction.
        if (Rdx && std::find(Rdx->begin(), Rdx->end(), UserInst) != Rdx->end())
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << **User << " from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, *User, Lane));
      }
    }
  }
}


void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
  bool SameTy = getSameType(VL);
  (void)SameTy;
  assert(SameTy && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false);
    return;
  }

  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false);
      return;
    }

  // If all of the operands are identical or constant we have a simple
  // solution.
  if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) ||
      !getSameOpcode(VL)) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Check if this is a duplicate of another entry.
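  // A full match is a "diamond merge": two users share the same operand
  // bundle, so the existing tree entry is reused. A partial overlap cannot
  // be represented, so the bundle has to be gathered instead.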
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is already in tree.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // If any of the scalars appears in the table OR it is marked as a value
  // that needs to stay scalar then we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i]) || MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar. \n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  int MyLastIndex = getLastIndex(VL);
  BasicBlock *BB = cast<Instruction>(VL0)->getParent();

  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *Scalar = cast<Instruction>(VL[i]);
    DEBUG(dbgs() << "SLP: Checking users of " << *Scalar << ". \n");
    for (Value::use_iterator U = Scalar->use_begin(), UE = Scalar->use_end();
         U != UE; ++U) {
      DEBUG(dbgs() << "SLP: \tUser " << **U << ". \n");
      Instruction *User = dyn_cast<Instruction>(*U);
      if (!User) {
        DEBUG(dbgs() << "SLP: Gathering due to unknown user. \n");
        newTreeEntry(VL, false);
        return;
      }

      // We don't care if the user is in a different basic block.
      BasicBlock *UserBlock = User->getParent();
      if (UserBlock != BB) {
        DEBUG(dbgs() << "SLP: User from a different basic block "
              << *User << ". \n");
        continue;
      }

      // If this is a PHINode within this basic block then we can place the
      // extract wherever we want.
      if (isa<PHINode>(*User)) {
        DEBUG(dbgs() << "SLP: \tWe can schedule PHIs:" << *User << ". \n");
        continue;
      }

      // Check if this is a safe in-tree user.
      if (ScalarToTreeEntry.count(User)) {
        int Idx = ScalarToTreeEntry[User];
        int VecLocation = VectorizableTree[Idx].LastScalarIndex;
        if (VecLocation <= MyLastIndex) {
          DEBUG(dbgs() << "SLP: Gathering due to unschedulable vector. \n");
          newTreeEntry(VL, false);
          return;
        }
        DEBUG(dbgs() << "SLP: In-tree user (" << *User << ") at #" <<
              VecLocation << " vector value (" << *Scalar << ") at #"
              << MyLastIndex << ".\n");
        continue;
      }

      // This user is part of the reduction.
      if (RdxOps && RdxOps->count(User))
        continue;

      // Make sure that we can schedule this unknown user.
      BlockNumbering &BN = BlocksNumbers[BB];
      int UserIndex = BN.getIndex(User);
      if (UserIndex < MyLastIndex) {
        DEBUG(dbgs() << "SLP: Can't schedule extractelement for "
              << *User << ". \n");
        newTreeEntry(VL, false);
        return;
      }
    }
  }

  // Check that every instruction appears once in this bundle.
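  // A value occupying two lanes could not be mapped back to a unique lane
  // by ScalarToTreeEntry, so such bundles are gathered.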
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    for (unsigned j = i+1; j < e; ++j)
      if (VL[i] == VL[j]) {
        DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
        newTreeEntry(VL, false);
        return;
      }

  // Check that instructions in this bundle don't reference other instructions.
  // The runtime of this check is O(N * N-1 * uses(N)) and a typical N is 4.
  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    for (Value::use_iterator U = VL[i]->use_begin(), UE = VL[i]->use_end();
         U != UE; ++U) {
      for (unsigned j = 0; j < e; ++j) {
        if (i != j && *U == VL[j]) {
          DEBUG(dbgs() << "SLP: Intra-bundle dependencies!" << **U << ". \n");
          newTreeEntry(VL, false);
          return;
        }
      }
    }
  }

  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  unsigned Opcode = getSameOpcode(VL);

  // Check if it is safe to sink the loads or the stores.
  if (Opcode == Instruction::Load || Opcode == Instruction::Store) {
    Instruction *Last = getLastInstruction(VL);

    for (unsigned i = 0, e = VL.size(); i < e; ++i) {
      if (VL[i] == Last)
        continue;
      Value *Barrier = getSinkBarrier(cast<Instruction>(VL[i]), Last);
      if (Barrier) {
        DEBUG(dbgs() << "SLP: Can't sink " << *VL[i] << "\n down to " << *Last
              << "\n because of " << *Barrier << ". Gathering.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
  }

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = cast<PHINode>(VL0);

    // Check for terminator values (e.g. invoke).
    for (unsigned j = 0; j < VL.size(); ++j)
      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        TerminatorInst *Term =
            dyn_cast<TerminatorInst>(cast<PHINode>(VL[j])->getIncomingValue(i));
        if (Term) {
          DEBUG(dbgs() <<
                "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
          newTreeEntry(VL, false);
          return;
        }
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<PHINode>(VL[j])->getIncomingValue(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ExtractElement: {
    bool Reuse = CanReuseExtract(VL);
    if (Reuse) {
      DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
    }
    newTreeEntry(VL, Reuse);
    return;
  }
  case Instruction::Load: {
    // Check if the loads are consecutive or if we need to swizzle them.
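    // Consecutive means each load reads the element directly after its
    // predecessor, e.g. a[i], a[i+1], a[i+2], a[i+3]; isConsecutiveAccess
    // checks this with constant offsets and falls back to SCEV.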
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
      LoadInst *L = cast<LoadInst>(VL[i]);
      if (!L->isSimple() || !isConsecutiveAccess(VL[i], VL[i + 1])) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Need to swizzle loads.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of loads.\n");
    return;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();
    for (unsigned i = 0; i < VL.size(); ++i) {
      Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
      if (Ty != SrcTy || Ty->isAggregateType() || Ty->isVectorTy()) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of casts.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Check that all of the compares have the same predicate.
    CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
    Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
    for (unsigned i = 1, e = VL.size(); i < e; ++i) {
      CmpInst *Cmp = cast<CmpInst>(VL[i]);
      if (Cmp->getPredicate() != P0 ||
          Cmp->getOperand(0)->getType() != ComparedTy) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of compares.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of bin op.\n");

    // Sort operands of the instructions so that each side is more likely to
    // have the same opcode.
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
      ValueList Left, Right;
      reorderInputsAccordingToOpcode(VL, Left, Right);
      buildTree_rec(Left, Depth + 1);
      buildTree_rec(Right, Depth + 1);
      return;
    }

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::Store: {
    // Check if the stores are consecutive or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
      if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Non consecutive store.\n");
        return;
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of stores.\n");

    ValueList Operands;
    for (unsigned j = 0; j < VL.size(); ++j)
      Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));

    // We can ignore these values because we are sinking them down.
    MemBarrierIgnoreList.insert(VL.begin(), VL.end());
    buildTree_rec(Operands, Depth + 1);
    return;
  }
  default:
    newTreeEntry(VL, false);
    DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
    return;
  }
}

int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value*> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  if (E->NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isSplat(VL)) {
      return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    }
    return getGatherCost(E->Scalars);
  }

  assert(getSameOpcode(VL) && getSameType(VL) && getSameBlock(VL) &&
         "Invalid VL");
  Instruction *VL0 = cast<Instruction>(VL[0]);
  unsigned Opcode = VL0->getOpcode();
  switch (Opcode) {
  case Instruction::PHI: {
    return 0;
  }
  case Instruction::ExtractElement: {
    if (CanReuseExtract(VL))
      return 0;
    return getGatherCost(VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();

    // Calculate the cost of this instruction.
    int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
                                                       VL0->getType(), SrcTy);

    VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
    int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
    return VecCost - ScalarCost;
  }
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Calculate the cost of this instruction.
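    // Costs are reported as (vector cost) - (sum of the scalar costs), so a
    // negative result means the vector form is cheaper.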
    int ScalarCost = 0;
    int VecCost = 0;
    if (Opcode == Instruction::FCmp || Opcode == Instruction::ICmp ||
        Opcode == Instruction::Select) {
      VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
      ScalarCost = VecTy->getNumElements() *
          TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
      VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
    } else {
      // Certain instructions can be cheaper to vectorize if they have a
      // constant second vector operand.
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;

      // Check whether all second operands are constant.
      for (unsigned i = 0; i < VL.size(); ++i)
        if (!isa<ConstantInt>(cast<Instruction>(VL[i])->getOperand(1))) {
          Op2VK = TargetTransformInfo::OK_AnyValue;
          break;
        }

      ScalarCost =
          VecTy->getNumElements() *
          TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK);
      VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK);
    }
    return VecCost - ScalarCost;
  }
  case Instruction::Load: {
    // Cost of wide load - cost of scalar loads.
    int ScalarLdCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
    int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
    return VecLdCost - ScalarLdCost;
  }
  case Instruction::Store: {
    // We know that we can merge the stores. Calculate the cost.
    int ScalarStCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
    int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
    return VecStCost - ScalarStCost;
  }
  default:
    llvm_unreachable("Unknown instruction");
  }
}

bool BoUpSLP::isFullyVectorizableTinyTree() {
  DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
        VectorizableTree.size() << " is fully vectorizable.\n");

  // We only handle trees of height 2.
  if (VectorizableTree.size() != 2)
    return false;

  // Gathering cost would be too much for tiny trees.
  if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
    return false;

  return true;
}

int BoUpSLP::getTreeCost() {
  int Cost = 0;
  DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
        VectorizableTree.size() << ".\n");

  // We only vectorize tiny trees if they are fully vectorizable.
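  // A tree smaller than three entries is rejected unless it is a pair of
  // vectorizable bundles; a lone entry would spend more on gathering than
  // the vector form saves.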
  if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) {
    if (!VectorizableTree.size()) {
      assert(!ExternalUses.size() && "We should not have any external users");
    }
    return INT_MAX;
  }

  unsigned BundleWidth = VectorizableTree[0].Scalars.size();

  for (unsigned i = 0, e = VectorizableTree.size(); i != e; ++i) {
    int C = getEntryCost(&VectorizableTree[i]);
    DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
          << *VectorizableTree[i].Scalars[0] << ".\n");
    Cost += C;
  }

  int ExtractCost = 0;
  for (UserList::iterator I = ExternalUses.begin(), E = ExternalUses.end();
       I != E; ++I) {
    VectorType *VecTy = VectorType::get(I->Scalar->getType(), BundleWidth);
    ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                           I->Lane);
  }

  DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost << ".\n");
  return Cost + ExtractCost;
}

int BoUpSLP::getGatherCost(Type *Ty) {
  int Cost = 0;
  for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
    Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
  return Cost;
}

int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  return getGatherCost(VecTy);
}

AliasAnalysis::Location BoUpSLP::getLocation(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return AA->getLocation(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return AA->getLocation(LI);
  return AliasAnalysis::Location();
}

Value *BoUpSLP::getPointerOperand(Value *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return 0;
}

unsigned BoUpSLP::getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers of the same type.
  if (PtrA == PtrB || PtrA->getType() != PtrB->getType())
    return false;

  unsigned PtrBitWidth = DL->getPointerSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(PtrBitWidth, DL->getTypeStoreSize(Ty));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetB);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
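  // In that case the accesses are consecutive exactly when the offset delta
  // equals the store size of one element.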
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final
  // delta equal to the size.
  APInt BaseDelta = Size - OffsetDelta;

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE->getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE->getSCEV(PtrB);
  const SCEV *C = SE->getConstant(BaseDelta);
  const SCEV *X = SE->getAddExpr(PtrSCEVA, C);
  return X == PtrSCEVB;
}

Value *BoUpSLP::getSinkBarrier(Instruction *Src, Instruction *Dst) {
  assert(Src->getParent() == Dst->getParent() && "Not the same BB");
  BasicBlock::iterator I = Src, E = Dst;
  // Scan all of the instructions from SRC to DST and check if
  // the source may alias.
  for (++I; I != E; ++I) {
    // Ignore store instructions that are marked as 'ignore'.
    if (MemBarrierIgnoreList.count(I))
      continue;
    if (Src->mayWriteToMemory()) /* Write */ {
      if (!I->mayReadOrWriteMemory())
        continue;
    } else /* Read */ {
      if (!I->mayWriteToMemory())
        continue;
    }
    AliasAnalysis::Location A = getLocation(&*I);
    AliasAnalysis::Location B = getLocation(Src);

    if (!A.Ptr || !B.Ptr || AA->alias(A, B))
      return I;
  }
  return 0;
}

int BoUpSLP::getLastIndex(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && BlocksNumbers.count(BB) && "Invalid block");
  BlockNumbering &BN = BlocksNumbers[BB];

  int MaxIdx = BN.getIndex(BB->getFirstNonPHI());
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  return MaxIdx;
}

Instruction *BoUpSLP::getLastInstruction(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && BlocksNumbers.count(BB) && "Invalid block");
  BlockNumbering &BN = BlocksNumbers[BB];

  int MaxIdx = BN.getIndex(cast<Instruction>(VL[0]));
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  Instruction *I = BN.getInstruction(MaxIdx);
  assert(I && "bad location");
  return I;
}

void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
  Instruction *VL0 = cast<Instruction>(VL[0]);
  Instruction *LastInst = getLastInstruction(VL);
  BasicBlock::iterator NextInst = LastInst;
  ++NextInst;
  Builder.SetInsertPoint(VL0->getParent(), NextInst);
  Builder.SetCurrentDebugLocation(VL0->getDebugLoc());
}

Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
  Value *Vec = UndefValue::get(Ty);
  // Generate the 'InsertElement' instructions.
  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
    if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(Insrt);

      // Add to our 'need-to-extract' list.
      if (ScalarToTreeEntry.count(VL[i])) {
        int Idx = ScalarToTreeEntry[VL[i]];
        TreeEntry *E = &VectorizableTree[Idx];
        // Find which lane we need to extract.
        int FoundLane = -1;
        for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for?
          if (E->Scalars[Lane] == VL[i]) {
            FoundLane = Lane;
            break;
          }
        }
        assert(FoundLane >= 0 && "Could not find the correct lane");
        ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
      }
    }
  }

  return Vec;
}

Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
  SmallDenseMap<Value*, int>::const_iterator Entry
    = ScalarToTreeEntry.find(VL[0]);
  if (Entry != ScalarToTreeEntry.end()) {
    int Idx = Entry->second;
    const TreeEntry *En = &VectorizableTree[Idx];
    if (En->isSame(VL) && En->VectorizedValue)
      return En->VectorizedValue;
  }
  return 0;
}

Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    if (E->isSame(VL))
      return vectorizeTree(E);
  }

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  return Gather(VL, VecTy);
}

Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
  Type *ScalarTy = VL0->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());

  if (E->NeedToGather) {
    setInsertPointAfterBundle(E->Scalars);
    return Gather(E->Scalars, VecTy);
  }

  unsigned Opcode = VL0->getOpcode();
  assert(Opcode == getSameOpcode(E->Scalars) && "Invalid opcode");

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = cast<PHINode>(VL0);
    Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
    Builder.SetCurrentDebugLocation(PH->getDebugLoc());
    PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
    E->VectorizedValue = NewPhi;

    // PHINodes may have multiple entries from the same block. We want to
    // visit every block once.
    SmallSet<BasicBlock*, 4> VisitedBBs;

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      BasicBlock *IBB = PH->getIncomingBlock(i);

      if (!VisitedBBs.insert(IBB)) {
        NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
        continue;
      }

      // Prepare the operand vector.
      for (unsigned j = 0; j < E->Scalars.size(); ++j)
        Operands.push_back(cast<PHINode>(E->Scalars[j])->
                           getIncomingValueForBlock(IBB));

      Builder.SetInsertPoint(IBB->getTerminator());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      Value *Vec = vectorizeTree(Operands);
      NewPhi->addIncoming(Vec, IBB);
    }

    assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
           "Invalid number of incoming values");
    return NewPhi;
  }

  case Instruction::ExtractElement: {
    if (CanReuseExtract(E->Scalars)) {
      Value *V = VL0->getOperand(0);
      E->VectorizedValue = V;
      return V;
    }
    return Gather(E->Scalars, VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    ValueList INVL;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i)
      INVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));

    setInsertPointAfterBundle(E->Scalars);

    Value *InVec = vectorizeTree(INVL);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CastInst *CI = cast<CastInst>(VL0);
    Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::FCmp:
  case Instruction::ICmp: {
    ValueList LHSV, RHSV;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      LHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      RHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *L = vectorizeTree(LHSV);
    Value *R = vectorizeTree(RHSV);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
    Value *V;
    if (Opcode == Instruction::FCmp)
      V = Builder.CreateFCmp(P0, L, R);
    else
      V = Builder.CreateICmp(P0, L, R);

    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Select: {
    ValueList TrueVec, FalseVec, CondVec;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      CondVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      TrueVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
      FalseVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(2));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *Cond = vectorizeTree(CondVec);
    Value *True = vectorizeTree(TrueVec);
    Value *False = vectorizeTree(FalseVec);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    Value *V = Builder.CreateSelect(Cond, True, False);
    E->VectorizedValue = V;
    return V;
  }
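  // All of the binary operators below share one expansion; commutative
  // operations first reorder their operands to expose broadcasts.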
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    ValueList LHSVL, RHSVL;
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative())
      reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL);
    else
      for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
        LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
        RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
      }

    setInsertPointAfterBundle(E->Scalars);

    Value *LHS = vectorizeTree(LHSVL);
    Value *RHS = vectorizeTree(RHSVL);

    if (LHS == RHS && isa<Instruction>(LHS)) {
      assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order");
    }

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
    Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Load: {
    // Loads are inserted at the head of the tree because we don't want to
    // sink them all the way down past store instructions.
    setInsertPointAfterBundle(E->Scalars);

    LoadInst *LI = cast<LoadInst>(VL0);
    unsigned AS = LI->getPointerAddressSpace();

    Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));
    unsigned Alignment = LI->getAlignment();
    LI = Builder.CreateLoad(VecPtr);
    LI->setAlignment(Alignment);
    E->VectorizedValue = LI;
    return LI;
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(VL0);
    unsigned Alignment = SI->getAlignment();
    unsigned AS = SI->getPointerAddressSpace();

    ValueList ValueOp;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i)
      ValueOp.push_back(cast<StoreInst>(E->Scalars[i])->getValueOperand());

    setInsertPointAfterBundle(E->Scalars);

    Value *VecValue = vectorizeTree(ValueOp);
    Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));
    StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
    S->setAlignment(Alignment);
    E->VectorizedValue = S;
    return S;
  }
  default:
    llvm_unreachable("unknown inst");
  }
  return 0;
}

Value *BoUpSLP::vectorizeTree() {
  Builder.SetInsertPoint(F->getEntryBlock().begin());
  vectorizeTree(&VectorizableTree[0]);

  DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values.\n");

  // Extract all of the elements with the external uses.
  for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end();
       it != e; ++it) {
    Value *Scalar = it->Scalar;
    llvm::User *User = it->User;

    // Skip users that we already RAUWed. This happens when one instruction
    // has multiple uses of the same value.
    if (std::find(Scalar->use_begin(), Scalar->use_end(), User) ==
        Scalar->use_end())
      continue;
    assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");

    int Idx = ScalarToTreeEntry[Scalar];
    TreeEntry *E = &VectorizableTree[Idx];
    assert(!E->NeedToGather && "Extracting from a gather list");

    Value *Vec = E->VectorizedValue;
    assert(Vec && "Can't find vectorizable value");

    Value *Lane = Builder.getInt32(it->Lane);
    // Generate extracts for out-of-tree users.
    // Find the insertion point for the extractelement lane.
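    // Three cases: the vector value is a PHI (insert extracts after the PHI
    // nodes of its block), another instruction (insert right before the
    // user, or on the matching incoming edge when the user itself is a PHI),
    // or a non-instruction value (insert in the entry block).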
    if (PHINode *PN = dyn_cast<PHINode>(Vec)) {
      Builder.SetInsertPoint(PN->getParent()->getFirstInsertionPt());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      User->replaceUsesOfWith(Scalar, Ex);
    } else if (isa<Instruction>(Vec)) {
      if (PHINode *PH = dyn_cast<PHINode>(User)) {
        for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
          if (PH->getIncomingValue(i) == Scalar) {
            Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
            Value *Ex = Builder.CreateExtractElement(Vec, Lane);
            PH->setOperand(i, Ex);
          }
        }
      } else {
        Builder.SetInsertPoint(cast<Instruction>(User));
        Value *Ex = Builder.CreateExtractElement(Vec, Lane);
        User->replaceUsesOfWith(Scalar, Ex);
      }
    } else {
      Builder.SetInsertPoint(F->getEntryBlock().begin());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      User->replaceUsesOfWith(Scalar, Ex);
    }

    DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
  }

  // For each vectorized value:
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      assert(Entry->VectorizedValue && "Can't find vectorizable value");

      Type *Ty = Scalar->getType();
      if (!Ty->isVoidTy()) {
        for (Value::use_iterator User = Scalar->use_begin(),
             UE = Scalar->use_end(); User != UE; ++User) {
          DEBUG(dbgs() << "SLP: \tvalidating user:" << **User << ".\n");
          assert(!MustGather.count(*User) &&
                 "Replacing gathered value with undef");

          assert((ScalarToTreeEntry.count(*User) ||
                  // It is legal to replace the reduction users by undef.
                  (RdxOps && RdxOps->count(*User))) &&
                 "Replacing out-of-tree value with undef");
        }
        Value *Undef = UndefValue::get(Ty);
        Scalar->replaceAllUsesWith(Undef);
      }
      DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
      cast<Instruction>(Scalar)->eraseFromParent();
    }
  }

  for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it) {
    BlocksNumbers[it].forget();
  }
  Builder.ClearInsertionPoint();

  return VectorizableTree[0].VectorizedValue;
}

class DTCmp {
  const DominatorTree *DT;

public:
  DTCmp(const DominatorTree *DT) : DT(DT) {}
  bool operator()(const BasicBlock *A, const BasicBlock *B) const {
    return DT->properlyDominates(A, B);
  }
};

void BoUpSLP::optimizeGatherSequence() {
  DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
        << " gather sequence instructions.\n");
  // Keep a list of visited BBs to run CSE on. It is typically small.
  SmallPtrSet<BasicBlock *, 4> VisitedBBs;
  SmallVector<BasicBlock *, 4> CSEWorkList;
  // LICM InsertElementInst sequences.
  for (SetVector<Instruction *>::iterator it = GatherSeq.begin(),
       e = GatherSeq.end(); it != e; ++it) {
    InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it);

    if (!Insert)
      continue;

    if (VisitedBBs.insert(Insert->getParent()))
      CSEWorkList.push_back(Insert->getParent());

    // Check if this block is inside a loop.
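    // The insert can only be hoisted to the preheader when both the vector
    // being inserted into and the new element are defined outside the loop.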
    Loop *L = LI->getLoopFor(Insert->getParent());
    if (!L)
      continue;

    // Check if it has a preheader.
    BasicBlock *PreHeader = L->getLoopPreheader();
    if (!PreHeader)
      continue;

    // If the vector or the element that we insert into it are
    // instructions that are defined in this basic block then we can't
    // hoist this instruction.
    Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
    Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
    if (CurrVec && L->contains(CurrVec))
      continue;
    if (NewElem && L->contains(NewElem))
      continue;

    // We can hoist this instruction. Move it to the pre-header.
    Insert->moveBefore(PreHeader->getTerminator());
  }

  // Sort blocks by domination. This ensures we visit a block after all blocks
  // dominating it are visited.
  std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(), DTCmp(DT));

  // Perform O(N^2) search over the gather sequences and merge identical
  // instructions. TODO: We can further optimize this scan if we split the
  // instructions into different buckets based on the insert lane.
  SmallVector<Instruction *, 16> Visited;
  for (SmallVectorImpl<BasicBlock *>::iterator I = CSEWorkList.begin(),
       E = CSEWorkList.end(); I != E; ++I) {
    assert((I == CSEWorkList.begin() || !DT->dominates(*I, *llvm::prior(I))) &&
           "Worklist not sorted properly!");
    BasicBlock *BB = *I;
    // For all instructions in blocks containing gather sequences:
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
      Instruction *In = it++;
      if ((!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In)) ||
          !GatherSeq.count(In))
        continue;

      // Check if we can replace this instruction with any of the
      // visited instructions.
      for (SmallVectorImpl<Instruction *>::iterator v = Visited.begin(),
           ve = Visited.end(); v != ve; ++v) {
        if (In->isIdenticalTo(*v) &&
            DT->dominates((*v)->getParent(), In->getParent())) {
          In->replaceAllUsesWith(*v);
          In->eraseFromParent();
          In = 0;
          break;
        }
      }
      if (In) {
        assert(std::find(Visited.begin(), Visited.end(), In) == Visited.end());
        Visited.push_back(In);
      }
    }
  }
}

/// The SLPVectorizer Pass.
struct SLPVectorizer : public FunctionPass {
  typedef SmallVector<StoreInst *, 8> StoreList;
  typedef MapVector<Value *, StoreList> StoreListMap;

  /// Pass identification, replacement for typeid
  static char ID;

  explicit SLPVectorizer() : FunctionPass(ID) {
    initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
  }

  ScalarEvolution *SE;
  DataLayout *DL;
  TargetTransformInfo *TTI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;

  virtual bool runOnFunction(Function &F) {
    SE = &getAnalysis<ScalarEvolution>();
    DL = getAnalysisIfAvailable<DataLayout>();
    TTI = &getAnalysis<TargetTransformInfo>();
    AA = &getAnalysis<AliasAnalysis>();
    LI = &getAnalysis<LoopInfo>();
    DT = &getAnalysis<DominatorTree>();

    StoreRefs.clear();
    bool Changed = false;

    // If the target claims to have no vector registers don't attempt
    // vectorization.
    if (!TTI->getNumberOfRegisters(true))
      return false;

    // Must have DataLayout. We can't require it because some tests run w/o
    // triple.
    if (!DL)
      return false;

    // Don't vectorize when the attribute NoImplicitFloat is used.
    if (F.hasFnAttribute(Attribute::NoImplicitFloat))
      return false;

    DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");

    // Use the bottom-up SLP vectorizer to construct chains that start with
    // the store instructions.
    BoUpSLP R(&F, SE, DL, TTI, AA, LI, DT);

    // Scan the blocks in the function in post order.
    for (po_iterator<BasicBlock*> it = po_begin(&F.getEntryBlock()),
         e = po_end(&F.getEntryBlock()); it != e; ++it) {
      BasicBlock *BB = *it;

      // Vectorize trees that end at stores.
      if (unsigned count = collectStores(BB, R)) {
        (void)count;
        DEBUG(dbgs() << "SLP: Found " << count << " stores to vectorize.\n");
        Changed |= vectorizeStoreChains(R);
      }

      // Vectorize trees that end at reductions.
      Changed |= vectorizeChainsInBlock(BB, R);
    }

    if (Changed) {
      R.optimizeGatherSequence();
      DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
      DEBUG(verifyFunction(F));
    }
    return Changed;
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    FunctionPass::getAnalysisUsage(AU);
    AU.addRequired<ScalarEvolution>();
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<TargetTransformInfo>();
    AU.addRequired<LoopInfo>();
    AU.addRequired<DominatorTree>();
    AU.addPreserved<LoopInfo>();
    AU.addPreserved<DominatorTree>();
    AU.setPreservesCFG();
  }

private:

  /// \brief Collect memory references and sort them according to their base
  /// object. We sort the stores by their base objects to reduce the cost of
  /// the quadratic search on the stores. TODO: We can further reduce this
  /// cost if we flush the chain creation every time we run into a memory
  /// barrier.
  unsigned collectStores(BasicBlock *BB, BoUpSLP &R);

  /// \brief Try to vectorize a chain that starts at two arithmetic instrs.
  bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R);

  /// \brief Try to vectorize a list of operands.
  /// \returns true if a value was vectorized.
  bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R);

  /// \brief Try to vectorize a chain that may start at the operands of \p V.
  bool tryToVectorize(BinaryOperator *V, BoUpSLP &R);

  /// \brief Vectorize the stores that were collected in StoreRefs.
  bool vectorizeStoreChains(BoUpSLP &R);

  /// \brief Scan the basic block and look for patterns that are likely to
  /// start a vectorization chain.
  bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R);

  bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold,
                           BoUpSLP &R);

  bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold,
                       BoUpSLP &R);
private:
  StoreListMap StoreRefs;
};

/// \brief Check that the values in the given slice of the VL array still
/// exist in the WeakVH array.
/// Vectorization of part of the VL array may cause later values in the VL
/// array to become invalid. We track when this has happened in the WeakVH
/// array.
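/// For example (hypothetical scenario), vectorizing one slice of VL may
/// erase a scalar instruction that also appears in a later slice; its
/// WeakVH entry is then nulled out (or retargeted by RAUW) and no longer
/// compares equal to the original VL entry, which is what we test for here.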
static bool hasValueBeenRAUWed(ArrayRef<Value *> &VL,
                               SmallVectorImpl<WeakVH> &VH,
                               unsigned SliceBegin,
                               unsigned SliceSize) {
  for (unsigned i = SliceBegin; i < SliceBegin + SliceSize; ++i)
    if (VH[i] != VL[i])
      return true;

  return false;
}

bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
                                        int CostThreshold, BoUpSLP &R) {
  unsigned ChainLen = Chain.size();
  DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
        << "\n");
  Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType();
  unsigned Sz = DL->getTypeSizeInBits(StoreTy);
  unsigned VF = MinVecRegSize / Sz;

  if (!isPowerOf2_32(Sz) || VF < 2)
    return false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());

  bool Changed = false;
  // Look for profitable vectorizable trees at all offsets, starting at zero.
  for (unsigned i = 0, e = ChainLen; i < e; ++i) {
    if (i + VF > e)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
          << "\n");
    ArrayRef<Value *> Operands = Chain.slice(i, VF);

    R.buildTree(Operands);

    int Cost = R.getTreeCost();

    DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
    if (Cost < CostThreshold) {
      DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
                                    int costThreshold, BoUpSLP &R) {
  SetVector<Value *> Heads, Tails;
  SmallDenseMap<Value *, Value *> ConsecutiveChain;

  // We may run into multiple chains that merge into a single chain. We mark
  // the stores that we vectorized so that we don't visit the same store
  // twice.
  BoUpSLP::ValueSet VectorizedStores;
  bool Changed = false;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
    for (unsigned j = 0; j < e; ++j) {
      if (i == j)
        continue;

      if (R.isConsecutiveAccess(Stores[i], Stores[j])) {
        Tails.insert(Stores[j]);
        Heads.insert(Stores[i]);
        ConsecutiveChain[Stores[i]] = Stores[j];
      }
    }
  }

  // For stores that start but don't end a link in the chain:
  for (SetVector<Value *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and
    // try to vectorize it.
    BoUpSLP::ValueList Operands;
    Value *I = *it;
    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (VectorizedStores.count(I))
        break;
      Operands.push_back(I);
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    bool Vectorized = vectorizeStoreChain(Operands, costThreshold, R);

    // Mark the vectorized stores so that we don't vectorize them again.
    if (Vectorized)
      VectorizedStores.insert(Operands.begin(), Operands.end());
    Changed |= Vectorized;
  }

  return Changed;
}


unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
  unsigned count = 0;
  StoreRefs.clear();
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    StoreInst *SI = dyn_cast<StoreInst>(it);
    if (!SI)
      continue;

    // Don't touch volatile stores.
    if (!SI->isSimple())
      continue;

    // Check that the pointer points to scalars.
    Type *Ty = SI->getValueOperand()->getType();
    if (Ty->isAggregateType() || Ty->isVectorTy())
      return 0;

    // Find the base pointer.
    Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL);

    // Save the store locations.
    StoreRefs[Ptr].push_back(SI);
    count++;
  }
  return count;
}

bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
  if (!A || !B)
    return false;
  Value *VL[] = { A, B };
  return tryToVectorizeList(VL, R);
}

bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R) {
  if (VL.size() < 2)
    return false;

  DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size()
        << ".\n");

  // Check that all of the parts are scalar instructions of the same type.
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;

  unsigned Opcode0 = I0->getOpcode();

  Type *Ty0 = I0->getType();
  unsigned Sz = DL->getTypeSizeInBits(Ty0);
  unsigned VF = MinVecRegSize / Sz;

  for (int i = 0, e = VL.size(); i < e; ++i) {
    Type *Ty = VL[i]->getType();
    if (Ty->isAggregateType() || Ty->isVectorTy())
      return false;
    Instruction *Inst = dyn_cast<Instruction>(VL[i]);
    if (!Inst || Inst->getOpcode() != Opcode0)
      return false;
  }

  bool Changed = false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());

  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    unsigned OpsWidth = 0;

    if (i + VF > e)
      OpsWidth = e - i;
    else
      OpsWidth = VF;

    if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(VL, TrackValues, i, OpsWidth))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
          << "\n");
    ArrayRef<Value *> Ops = VL.slice(i, OpsWidth);

    R.buildTree(Ops);
    int Cost = R.getTreeCost();

    if (Cost < -SLPCostThreshold) {
      DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
  if (!V)
    return false;

  // Try to vectorize V.
  if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R))
    return true;

  BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0));
  BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1));
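  // Illustration (names hypothetical): for V = A + (b0 * b1), if the pair
  // (A, B) itself does not form a profitable tree, we may still succeed with
  // (A, b0) or (A, b1), effectively skipping over the single-use operand B.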
  // Try to skip B.
  if (B && B->hasOneUse()) {
    BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
    BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
    if (tryToVectorizePair(A, B0, R)) {
      B->moveBefore(V);
      return true;
    }
    if (tryToVectorizePair(A, B1, R)) {
      B->moveBefore(V);
      return true;
    }
  }

  // Try to skip A.
  if (A && A->hasOneUse()) {
    BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
    BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
    if (tryToVectorizePair(A0, B, R)) {
      A->moveBefore(V);
      return true;
    }
    if (tryToVectorizePair(A1, B, R)) {
      A->moveBefore(V);
      return true;
    }
  }
  return false;
}

/// \brief Generate a shuffle mask to be used in a reduction tree.
///
/// \param VecLen The length of the vector to be reduced.
/// \param NumEltsToRdx The number of elements that should be reduced in the
///        vector.
/// \param IsPairwise Whether the reduction is a pairwise or splitting
///        reduction. A pairwise reduction will generate a mask of
///        <0,2,...> or <1,3,..> while a splitting reduction will generate
///        <2,3,undef,undef> for a vector of 4 and NumElts = 2.
/// \param IsLeft True will generate a mask of even elements, odd otherwise.
static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
                                   bool IsPairwise, bool IsLeft,
                                   IRBuilder<> &Builder) {
  assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");

  SmallVector<Constant *, 32> ShuffleMask(
      VecLen, UndefValue::get(Builder.getInt32Ty()));

  if (IsPairwise)
    // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
  else
    // Move the upper half of the vector to the lower half.
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);

  return ConstantVector::get(ShuffleMask);
}


/// Model horizontal reductions.
///
/// A horizontal reduction is a tree of reduction operations (currently add
/// and fadd) that has operations that can be put into a vector as its
/// leaves. For example, this tree:
///
///  mul mul mul mul
///   \  /    \  /
///    +       +
///     \     /
///        +
/// This tree has "mul" as its reduced values and "+" as its reduction
/// operations. A reduction might be feeding into a store or a binary
/// operation feeding a phi.
///    ...
///    \  /
///     +
///     |
///  phi +=
///
///  Or:
///    ...
///    \  /
///     +
///     |
///   *p =
///
class HorizontalReduction {
  SmallPtrSet<Value *, 16> ReductionOps;
  SmallVector<Value *, 32> ReducedVals;

  BinaryOperator *ReductionRoot;
  PHINode *ReductionPHI;

  /// The opcode of the reduction.
  unsigned ReductionOpcode;
  /// The opcode of the values we perform a reduction on.
  unsigned ReducedValueOpcode;
  /// The width of one full horizontal reduction operation.
  unsigned ReduxWidth;
  /// Should we model this reduction as a pairwise reduction tree or a tree
  /// that splits the vector in halves and adds those halves.
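  /// E.g. for <a0,a1,a2,a3>, a pairwise step computes (a0+a1) and (a2+a3),
  /// while a splitting step adds the upper half <a2,a3> onto the lower half
  /// <a0,a1> element-wise and continues on the remaining half.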
  bool IsPairwiseReduction;

public:
  HorizontalReduction()
      : ReductionRoot(0), ReductionPHI(0), ReductionOpcode(0),
        ReducedValueOpcode(0), ReduxWidth(0), IsPairwiseReduction(false) {}

  /// \brief Try to find a reduction tree.
  bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B,
                                 DataLayout *DL) {
    assert((!Phi ||
            std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) &&
           "The phi needs to use the binary operator");

    // We could have an initial reduction that is not an add.
    //  r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted in the first '+'.
    if (Phi) {
      if (B->getOperand(0) == Phi) {
        Phi = 0;
        B = dyn_cast<BinaryOperator>(B->getOperand(1));
      } else if (B->getOperand(1) == Phi) {
        Phi = 0;
        B = dyn_cast<BinaryOperator>(B->getOperand(0));
      }
    }

    if (!B)
      return false;

    Type *Ty = B->getType();
    if (Ty->isVectorTy())
      return false;

    ReductionOpcode = B->getOpcode();
    ReducedValueOpcode = 0;
    ReduxWidth = MinVecRegSize / DL->getTypeSizeInBits(Ty);
    ReductionRoot = B;
    ReductionPHI = Phi;

    if (ReduxWidth < 4)
      return false;

    // We currently only support adds.
    if (ReductionOpcode != Instruction::Add &&
        ReductionOpcode != Instruction::FAdd)
      return false;

    // Post-order traverse the reduction tree starting at B. We only handle
    // true trees containing only binary operators.
    SmallVector<std::pair<BinaryOperator *, unsigned>, 32> Stack;
    Stack.push_back(std::make_pair(B, 0));
    while (!Stack.empty()) {
      BinaryOperator *TreeN = Stack.back().first;
      unsigned EdgeToVisit = Stack.back().second++;
      bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;

      // Only handle trees in the current basic block.
      if (TreeN->getParent() != B->getParent())
        return false;

      // Each tree node needs to have one user except for the ultimate
      // reduction.
      if (!TreeN->hasOneUse() && TreeN != B)
        return false;

      // Postorder visit.
      if (EdgeToVisit == 2 || IsReducedValue) {
        if (IsReducedValue) {
          // Make sure that the opcodes of the operations that we are going to
          // reduce match.
          if (!ReducedValueOpcode)
            ReducedValueOpcode = TreeN->getOpcode();
          else if (ReducedValueOpcode != TreeN->getOpcode())
            return false;
          ReducedVals.push_back(TreeN);
        } else {
          // We need to be able to reassociate the adds.
          if (!TreeN->isAssociative())
            return false;
          ReductionOps.insert(TreeN);
        }
        // Retract.
        Stack.pop_back();
        continue;
      }

      // Visit left or right.
      Value *NextV = TreeN->getOperand(EdgeToVisit);
      BinaryOperator *Next = dyn_cast<BinaryOperator>(NextV);
      if (Next)
        Stack.push_back(std::make_pair(Next, 0));
      else if (NextV != Phi)
        return false;
    }
    return true;
  }

  /// \brief Attempt to vectorize the tree found by
  /// matchAssociativeReduction.
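  ///
  /// The reduced values are vectorized in bundles of ReduxWidth: each bundle
  /// is folded to a scalar by emitReduction, the per-bundle scalars are
  /// combined with the reduction opcode, and any remaining scalar values are
  /// folded in at the end.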
  bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
    if (ReducedVals.empty())
      return false;

    unsigned NumReducedVals = ReducedVals.size();
    if (NumReducedVals < ReduxWidth)
      return false;

    Value *VectorizedTree = 0;
    IRBuilder<> Builder(ReductionRoot);
    FastMathFlags Unsafe;
    Unsafe.setUnsafeAlgebra();
    Builder.SetFastMathFlags(Unsafe);
    unsigned i = 0;

    for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
      ArrayRef<Value *> ValsToReduce(&ReducedVals[i], ReduxWidth);
      V.buildTree(ValsToReduce, &ReductionOps);

      // Estimate cost.
      int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]);
      if (Cost >= -SLPCostThreshold)
        break;

      DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
            << ". (HorRdx)\n");

      // Vectorize a tree.
      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
      Value *VectorizedRoot = V.vectorizeTree();

      // Emit a reduction.
      Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder);
      if (VectorizedTree) {
        Builder.SetCurrentDebugLocation(Loc);
        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
                                     ReducedSubTree, "bin.rdx");
      } else
        VectorizedTree = ReducedSubTree;
    }

    if (VectorizedTree) {
      // Finish the reduction.
      for (; i < NumReducedVals; ++i) {
        Builder.SetCurrentDebugLocation(
            cast<Instruction>(ReducedVals[i])->getDebugLoc());
        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
                                     ReducedVals[i]);
      }
      // Update users.
      if (ReductionPHI) {
        assert(ReductionRoot != NULL && "Need a reduction operation");
        ReductionRoot->setOperand(0, VectorizedTree);
        ReductionRoot->setOperand(1, ReductionPHI);
      } else
        ReductionRoot->replaceAllUsesWith(VectorizedTree);
    }
    return VectorizedTree != 0;
  }

private:

  /// \brief Calculate the cost of a reduction.
  int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) {
    Type *ScalarTy = FirstReducedVal->getType();
    Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);

    int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true);
    int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false);

    IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
    int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;

    int ScalarReduxCost =
        ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, VecTy);

    DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
          << " for reduction that starts with " << *FirstReducedVal
          << " (It is a "
          << (IsPairwiseReduction ? "pairwise" : "splitting")
          << " reduction)\n");

    return VecReduxCost - ScalarReduxCost;
  }

  static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L,
                            Value *R, const Twine &Name = "") {
    if (Opcode == Instruction::FAdd)
      return Builder.CreateFAdd(L, R, Name);
    return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name);
  }

  /// \brief Emit a horizontal reduction of the vectorized value.
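  ///
  /// For a splitting reduction of a <4 x i32> value %v this emits roughly
  /// (value names abbreviated; the real ones are rdx.shuf/bin.rdx):
  ///   %s1 = shufflevector <4 x i32> %v, <4 x i32> undef,
  ///                       <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  ///   %r1 = add <4 x i32> %v, %s1
  ///   %s2 = shufflevector <4 x i32> %r1, <4 x i32> undef,
  ///                       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  ///   %r2 = add <4 x i32> %r1, %s2
  /// and the scalar result is element 0 of %r2.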
  Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) {
    assert(VectorizedValue && "Need to have a vectorized tree node");
    Instruction *ValToReduce = dyn_cast<Instruction>(VectorizedValue);
    assert(isPowerOf2_32(ReduxWidth) &&
           "We only handle power-of-two reductions for now");

    Value *TmpVec = ValToReduce;
    for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
      if (IsPairwiseReduction) {
        Value *LeftMask =
            createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
        Value *RightMask =
            createRdxShuffleMask(ReduxWidth, i, true, false, Builder);

        Value *LeftShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l");
        Value *RightShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), RightMask,
            "rdx.shuf.r");
        TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf,
                             "bin.rdx");
      } else {
        Value *UpperHalf =
            createRdxShuffleMask(ReduxWidth, i, false, false, Builder);
        Value *Shuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf");
        TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf, "bin.rdx");
      }
    }

    // The result is in the first element of the vector.
    return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
  }
};

/// \brief Recognize construction of vectors like
///  %ra = insertelement <4 x float> undef, float %s0, i32 0
///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
///
/// Returns true if it matches.
///
static bool findBuildVector(InsertElementInst *IE,
                            SmallVectorImpl<Value *> &Ops) {
  if (!isa<UndefValue>(IE->getOperand(0)))
    return false;

  while (true) {
    Ops.push_back(IE->getOperand(1));

    if (IE->use_empty())
      return false;

    InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->use_back());
    if (!NextUse)
      return true;

    // If this isn't the final use, make sure the next insertelement is the
    // only use. It's OK if the final constructed vector is used multiple
    // times.
    if (!IE->hasOneUse())
      return false;

    IE = NextUse;
  }

  return false;
}

static bool PhiTypeSorterFunc(Value *V, Value *V2) {
  return V->getType() < V2->getType();
}

bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallSet<Value *, 16> VisitedInstrs;

  bool HaveVectorizedPhiNodes = true;
  while (HaveVectorizedPhiNodes) {
    HaveVectorizedPhiNodes = false;

    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (BasicBlock::iterator instr = BB->begin(), ie = BB->end(); instr != ie;
         ++instr) {
      PHINode *P = dyn_cast<PHINode>(instr);
      if (!P)
        break;

      if (!VisitedInstrs.count(P))
        Incoming.push_back(P);
    }

    // Sort by type.
    std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);

    // Try to vectorize elements based on their type.
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
         E = Incoming.end(); IncIt != E;) {

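      // Illustration (names hypothetical): for PHIs {i32 %p1, i32 %p2,
      // float %f1, float %f2}, the type-sorted list yields the homogeneous
      // runs [%p1, %p2] and [%f1, %f2], which are tried separately below.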
      // Look for the next elements with the same type.
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs ("
            << NumElts << ")\n");
      if (NumElts > 1 &&
          tryToVectorizeList(ArrayRef<Value *>(IncIt, NumElts), R)) {
        // Success: start over because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
    // We may go through BB multiple times so skip the ones we have checked.
    if (!VisitedInstrs.insert(it))
      continue;

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;
      Value *Rdx =
          (P->getIncomingBlock(0) == BB
               ? (P->getIncomingValue(0))
               : (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1) : 0));
      // Check if this is a binary operator.
      BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx);
      if (!BI)
        continue;

      // Try to match and vectorize a horizontal reduction.
      HorizontalReduction HorRdx;
      if (ShouldVectorizeHor &&
          HorRdx.matchAssociativeReduction(P, BI, DL) &&
          HorRdx.tryToReduce(R, TTI)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      Value *Inst = BI->getOperand(0);
      if (Inst == P)
        Inst = BI->getOperand(1);

      if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      continue;
    }

    // Try to vectorize horizontal reductions feeding into a store.
    if (ShouldStartVectorizeHorAtStore)
      if (StoreInst *SI = dyn_cast<StoreInst>(it))
        if (BinaryOperator *BinOp =
                dyn_cast<BinaryOperator>(SI->getValueOperand())) {
          HorizontalReduction HorRdx;
          if (((HorRdx.matchAssociativeReduction(0, BinOp, DL) &&
                HorRdx.tryToReduce(R, TTI)) ||
               tryToVectorize(BinOp, R))) {
            Changed = true;
            it = BB->begin();
            e = BB->end();
            continue;
          }
        }

    // Try to vectorize trees that start at compare instructions.
    if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
      if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
        Changed = true;
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        it = BB->begin();
        e = BB->end();
        continue;
      }

      for (int i = 0; i < 2; ++i) {
        if (BinaryOperator *BI = dyn_cast<BinaryOperator>(CI->getOperand(i))) {
          if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) {
            Changed = true;
            // We would like to start over since some instructions are deleted
            // and the iterator may become invalid.
            it = BB->begin();
            e = BB->end();
          }
        }
      }
      continue;
    }

    // Try to vectorize trees that start at insertelement instructions.
    if (InsertElementInst *IE = dyn_cast<InsertElementInst>(it)) {
      SmallVector<Value *, 8> Ops;
      if (!findBuildVector(IE, Ops))
        continue;

      if (tryToVectorizeList(Ops, R)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }

      continue;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = StoreRefs.begin(), e = StoreRefs.end();
       it != e; ++it) {
    if (it->second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
          << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      ArrayRef<StoreInst *> Chunk(&it->second[CI], Len);
      Changed |= vectorizeStores(Chunk, -SLPCostThreshold, R);
    }
  }
  return Changed;
}

} // end anonymous namespace

char SLPVectorizer::ID = 0;
static const char lv_name[] = "SLP Vectorizer";
INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

namespace llvm {
Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
}