//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <map>

using namespace llvm;

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));
namespace {

static const unsigned MinVecRegSize = 128;

static const unsigned RecursionMaxDepth = 12;

/// RAII pattern to save the insertion point of the IR builder.
class BuilderLocGuard {
public:
  BuilderLocGuard(IRBuilder<> &B) : Builder(B), Loc(B.GetInsertPoint()),
                                    DbgLoc(B.getCurrentDebugLocation()) {}
  ~BuilderLocGuard() {
    Builder.SetCurrentDebugLocation(DbgLoc);
    if (Loc)
      Builder.SetInsertPoint(Loc);
  }

private:
  // Prevent copying.
  BuilderLocGuard(const BuilderLocGuard &);
  BuilderLocGuard &operator=(const BuilderLocGuard &);
  IRBuilder<> &Builder;
  AssertingVH<Instruction> Loc;
  DebugLoc DbgLoc;
};

/// A helper class for numbering instructions in multiple blocks.
/// Numbers start at zero for each basic block.
struct BlockNumbering {

  BlockNumbering(BasicBlock *Bb) : BB(Bb), Valid(false) {}

  BlockNumbering() : BB(0), Valid(false) {}

  void numberInstructions() {
    unsigned Loc = 0;
    InstrIdx.clear();
    InstrVec.clear();
    // Number the instructions in the block.
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
      InstrIdx[it] = Loc++;
      InstrVec.push_back(it);
      assert(InstrVec[InstrIdx[it]] == it && "Invalid allocation");
    }
    Valid = true;
  }

  int getIndex(Instruction *I) {
    assert(I->getParent() == BB && "Invalid instruction");
    if (!Valid)
      numberInstructions();
    assert(InstrIdx.count(I) && "Unknown instruction");
    return InstrIdx[I];
  }

  Instruction *getInstruction(unsigned loc) {
    if (!Valid)
      numberInstructions();
    assert(InstrVec.size() > loc && "Invalid Index");
    return InstrVec[loc];
  }

  void forget() { Valid = false; }

private:
  /// The block we are numbering.
  BasicBlock *BB;
  /// Is the block numbered.
  bool Valid;
  /// Maps instructions to numbers and back.
  SmallDenseMap<Instruction *, int> InstrIdx;
  /// Maps integers to Instructions.
  SmallVector<Instruction *, 32> InstrVec;
};

/// \returns the parent basic block if all of the instructions in \p VL
/// are in the same block or null otherwise.
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return 0;

    if (BB != I->getParent())
      return 0;
  }
  return BB;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    if (!isa<Constant>(VL[i]))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode())
      return 0;
  }
  return Opcode;
}

/// \returns The type that all of the values in \p VL have or null if there
/// are different types.
static Type *getSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return 0;

  return Ty;
}

/// \returns True if the ExtractElement instructions in \p VL can be vectorized
/// to use the original vector.
static bool CanReuseExtract(ArrayRef<Value *> VL) {
  assert(Instruction::ExtractElement == getSameOpcode(VL) && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *VL0 = VL[0];
  ExtractElementInst *E0 = cast<ExtractElementInst>(VL0);
  Value *Vec = E0->getOperand(0);

  // We have to extract from the same vector type.
  unsigned NElts = Vec->getType()->getVectorNumElements();

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
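  // For example, the bundle:
  //   %e0 = extractelement <4 x float> %vec, i32 0
  //   %e1 = extractelement <4 x float> %vec, i32 1
  //   %e2 = extractelement <4 x float> %vec, i32 2
  //   %e3 = extractelement <4 x float> %vec, i32 3
  // extracts every lane of %vec in order, so the tree can use %vec directly.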
  ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1));
  if (!CI || CI->getZExtValue())
    return false;

  for (unsigned i = 1, e = VL.size(); i < e; ++i) {
    ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));

    if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec)
      return false;
  }

  return true;
}

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;

  BoUpSLP(Function *Func, ScalarEvolution *Se, DataLayout *Dl,
          TargetTransformInfo *Tti, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt)
      : F(Func), SE(Se), DL(Dl), TTI(Tti), AA(Aa), LI(Li), DT(Dt),
        Builder(Se->getContext()) {
    // Setup the block numbering utility for all of the blocks in the
    // function.
    for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it) {
      BasicBlock *BB = it;
      BlocksNumbers[BB] = BlockNumbering(BB);
    }
  }

  /// \brief Vectorize the tree that starts with the elements in \p VL.
  void vectorizeTree();

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots.
  void buildTree(ArrayRef<Value *> Roots);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    MemBarrierIgnoreList.clear();
  }

  /// \returns true if the memory operations A and B are consecutive.
  bool isConsecutiveAccess(Value *A, Value *B);

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This may happen in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL) const;

  /// \brief Take the pointer operand from the Load/Store instruction.
  /// \returns NULL if this is not a valid Load/Store instruction.
  static Value *getPointerOperand(Value *I);

  /// \brief Take the address space operand from the Load/Store instruction.
  /// \returns -1 if this is not a valid Load/Store instruction.
  static unsigned getAddressSpaceOperand(Value *I);

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \returns the AA location that is being accessed by the instruction.
  AliasAnalysis::Location getLocation(Instruction *I);

  /// \brief Checks if it is possible to sink an instruction from
  /// \p Src to \p Dst.
  /// \returns the pointer to the barrier instruction if we can't sink.
  Value *getSinkBarrier(Instruction *Src, Instruction *Dst);

  /// \returns the index of the last instruction in the BB from \p VL.
  int getLastIndex(ArrayRef<Value *> VL);

  /// \returns the last Instruction in the bundle \p VL.
  Instruction *getLastInstruction(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  struct TreeEntry {
    TreeEntry() : Scalars(), VectorizedValue(0), LastScalarIndex(0),
                  NeedToGather(0) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      for (int i = 0, e = VL.size(); i != e; ++i)
        if (VL[i] != Scalars[i])
          return false;
      return true;
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// The index in the basic block of the last scalar.
    int LastScalarIndex;

    /// Do we need to gather this sequence?
    bool NeedToGather;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) {
    VectorizableTree.push_back(TreeEntry());
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      Last->LastScalarIndex = getLastIndex(VL);
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      Last->LastScalarIndex = 0;
      MustGather.insert(VL.begin(), VL.end());
    }
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value *, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L)
        : Scalar(S), User(U), Lane(L) {}
    // Which scalar in our function.
    Value *Scalar;
    // Which user that uses the scalar.
    llvm::User *User;
    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User).
  UserList ExternalUses;

  /// A list of instructions to ignore while sinking
  /// memory instructions. This map must be reset between runs of getCost.
  ValueSet MemBarrierIgnoreList;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;

  /// Numbers instructions in different blocks.
  DenseMap<BasicBlock *, BlockNumbering> BlocksNumbers;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  DataLayout *DL;
  TargetTransformInfo *TTI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;
};

void BoUpSLP::buildTree(ArrayRef<Value *> Roots) {
  deleteTree();
  if (!getSameType(Roots))
    return;
  buildTree_rec(Roots, 0);

  // Collect the values that we need to extract from the tree.
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      for (Value::use_iterator User = Scalar->use_begin(),
                               UE = Scalar->use_end();
           User != UE; ++User) {
        DEBUG(dbgs() << "SLP: Checking user:" << **User << ".\n");

        bool Gathered = MustGather.count(*User);

        // Skip in-tree scalars that become vectors.
        if (ScalarToTreeEntry.count(*User) && !Gathered) {
          DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << **User
                       << ".\n");
          int Idx = ScalarToTreeEntry[*User];
          (void)Idx;
          assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
          continue;
        }

        if (!isa<Instruction>(*User))
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << **User << " from lane "
                     << Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, *User, Lane));
      }
    }
  }
}

void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
  bool SameTy = getSameType(VL);
  (void)SameTy;
  assert(SameTy && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false);
    return;
  }

  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false);
      return;
    }

  // If all of the operands are identical or constant we have a simple
  // solution.
  if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) ||
      !getSameOpcode(VL)) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Check if this is a duplicate of another entry.
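  // Two branches of the use-def tree can converge on the same bundle (a
  // "diamond"). If the scalars match the existing entry lane-for-lane we
  // reuse that entry; a partial overlap forces a gather instead.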
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i]
                   << ") is already in tree.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // If any of the scalars appears in the table OR it is marked as a value that
  // needs to stay scalar then we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i]) || MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar. \n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  int MyLastIndex = getLastIndex(VL);
  BasicBlock *BB = cast<Instruction>(VL0)->getParent();

  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    Instruction *Scalar = cast<Instruction>(VL[i]);
    DEBUG(dbgs() << "SLP: Checking users of " << *Scalar << ". \n");
    for (Value::use_iterator U = Scalar->use_begin(), UE = Scalar->use_end();
         U != UE; ++U) {
      DEBUG(dbgs() << "SLP: \tUser " << **U << ". \n");
      Instruction *User = dyn_cast<Instruction>(*U);
      if (!User) {
        DEBUG(dbgs() << "SLP: Gathering due to unknown user. \n");
        newTreeEntry(VL, false);
        return;
      }

      // We don't care if the user is in a different basic block.
      BasicBlock *UserBlock = User->getParent();
      if (UserBlock != BB) {
        DEBUG(dbgs() << "SLP: User from a different basic block " << *User
                     << ". \n");
        continue;
      }

      // If this is a PHINode within this basic block then we can place the
      // extract wherever we want.
      if (isa<PHINode>(*User)) {
        DEBUG(dbgs() << "SLP: \tWe can schedule PHIs:" << *User << ". \n");
        continue;
      }

      // Check if this is a safe in-tree user.
      if (ScalarToTreeEntry.count(User)) {
        int Idx = ScalarToTreeEntry[User];
        int VecLocation = VectorizableTree[Idx].LastScalarIndex;
        if (VecLocation <= MyLastIndex) {
          DEBUG(dbgs() << "SLP: Gathering due to unschedulable vector. \n");
          newTreeEntry(VL, false);
          return;
        }
        DEBUG(dbgs() << "SLP: In-tree user (" << *User << ") at #"
                     << VecLocation << " vector value (" << *Scalar << ") at #"
                     << MyLastIndex << ".\n");
        continue;
      }

      // Make sure that we can schedule this unknown user.
      BlockNumbering &BN = BlocksNumbers[BB];
      int UserIndex = BN.getIndex(User);
      if (UserIndex < MyLastIndex) {
        DEBUG(dbgs() << "SLP: Can't schedule extractelement for " << *User
                     << ". \n");
        newTreeEntry(VL, false);
        return;
      }
    }
  }

  // Check that every instruction appears once in this bundle.
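  // For example, the bundle {%x, %y, %x, %z} cannot be vectorized because %x
  // would have to occupy two lanes of the same vector value.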
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    for (unsigned j = i + 1; j < e; ++j)
      if (VL[i] == VL[j]) {
        DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
        newTreeEntry(VL, false);
        return;
      }

  // Check that instructions in this bundle don't reference other instructions
  // in the bundle. The runtime of this check is O(N * N-1 * uses(N)) and a
  // typical N is 4.
  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    for (Value::use_iterator U = VL[i]->use_begin(), UE = VL[i]->use_end();
         U != UE; ++U) {
      for (unsigned j = 0; j < e; ++j) {
        if (i != j && *U == VL[j]) {
          DEBUG(dbgs() << "SLP: Intra-bundle dependencies!" << **U << ". \n");
          newTreeEntry(VL, false);
          return;
        }
      }
    }
  }

  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  unsigned Opcode = getSameOpcode(VL);

  // Check if it is safe to sink the loads or the stores.
  if (Opcode == Instruction::Load || Opcode == Instruction::Store) {
    Instruction *Last = getLastInstruction(VL);

    for (unsigned i = 0, e = VL.size(); i < e; ++i) {
      if (VL[i] == Last)
        continue;
      Value *Barrier = getSinkBarrier(cast<Instruction>(VL[i]), Last);
      if (Barrier) {
        DEBUG(dbgs() << "SLP: Can't sink " << *VL[i] << "\n down to " << *Last
                     << "\n because of " << *Barrier << ". Gathering.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
  }

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = dyn_cast<PHINode>(VL0);
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<PHINode>(VL[j])->getIncomingValue(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ExtractElement: {
    bool Reuse = CanReuseExtract(VL);
    if (Reuse) {
      DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
    }
    newTreeEntry(VL, Reuse);
    return;
  }
  case Instruction::Load: {
    // Check if the loads are consecutive or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
      if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Need to swizzle loads.\n");
        return;
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of loads.\n");
    return;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();
    for (unsigned i = 0; i < VL.size(); ++i) {
      Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
      if (Ty != SrcTy || Ty->isAggregateType() || Ty->isVectorTy()) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of casts.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Check that all of the compares have the same predicate.
    CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
    Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
    for (unsigned i = 1, e = VL.size(); i < e; ++i) {
      CmpInst *Cmp = cast<CmpInst>(VL[i]);
      if (Cmp->getPredicate() != P0 ||
          Cmp->getOperand(0)->getType() != ComparedTy) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of compares.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of bin op.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::Store: {
    // Check if the stores are consecutive or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
      if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Non consecutive store.\n");
        return;
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of stores.\n");

    ValueList Operands;
    for (unsigned j = 0; j < VL.size(); ++j)
      Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));

    // We can ignore these values because we are sinking them down.
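    // The scalar stores in the bundle will all be replaced by one wide store
    // emitted after the last of them, so they must not be treated as barriers
    // when getSinkBarrier checks whether the others can sink to that point.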
    MemBarrierIgnoreList.insert(VL.begin(), VL.end());
    buildTree_rec(Operands, Depth + 1);
    return;
  }
  default:
    newTreeEntry(VL, false);
    DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
    return;
  }
}

int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value *> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  if (E->NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isSplat(VL)) {
      return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    }
    return getGatherCost(E->Scalars);
  }

  assert(getSameOpcode(VL) && getSameType(VL) && getSameBlock(VL) &&
         "Invalid VL");
  Instruction *VL0 = cast<Instruction>(VL[0]);
  unsigned Opcode = VL0->getOpcode();
  switch (Opcode) {
  case Instruction::PHI: {
    return 0;
  }
  case Instruction::ExtractElement: {
    if (CanReuseExtract(VL))
      return 0;
    return getGatherCost(VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();

    // Calculate the cost of this instruction.
    int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
                                                       VL0->getType(), SrcTy);

    VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
    int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
    return VecCost - ScalarCost;
  }
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Calculate the cost of this instruction.
    int ScalarCost = 0;
    int VecCost = 0;
    if (Opcode == Instruction::FCmp || Opcode == Instruction::ICmp ||
        Opcode == Instruction::Select) {
      VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
      ScalarCost =
          VecTy->getNumElements() *
          TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
      VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
    } else {
      ScalarCost = VecTy->getNumElements() *
                   TTI->getArithmeticInstrCost(Opcode, ScalarTy);
      VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy);
    }
    return VecCost - ScalarCost;
  }
  case Instruction::Load: {
    // Cost of wide load - cost of scalar loads.
    int ScalarLdCost = VecTy->getNumElements() *
                       TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
    int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
    return VecLdCost - ScalarLdCost;
  }
  case Instruction::Store: {
    // We know that we can merge the stores. Calculate the cost.
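    // As with loads: cost of one wide store minus the sum of the original
    // scalar stores. A negative result means the wide store is cheaper.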
    int ScalarStCost =
        VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
    int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
    return VecStCost - ScalarStCost;
  }
  default:
    llvm_unreachable("Unknown instruction");
  }
}

int BoUpSLP::getTreeCost() {
  int Cost = 0;
  DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
               << VectorizableTree.size() << ".\n");

  // Don't vectorize tiny trees. Small load/store chains or consecutive stores
  // of constants will be vectorized in SelectionDAG in MergeConsecutiveStores.
  // The SelectionDAG vectorizer can only handle pairs (trees of height = 2).
  if (VectorizableTree.size() < 3) {
    if (!VectorizableTree.size()) {
      assert(!ExternalUses.size() && "We should not have any external users");
    }
    return 0;
  }

  unsigned BundleWidth = VectorizableTree[0].Scalars.size();

  for (unsigned i = 0, e = VectorizableTree.size(); i != e; ++i) {
    int C = getEntryCost(&VectorizableTree[i]);
    DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
                 << *VectorizableTree[i].Scalars[0] << " .\n");
    Cost += C;
  }

  int ExtractCost = 0;
  for (UserList::iterator I = ExternalUses.begin(), E = ExternalUses.end();
       I != E; ++I) {
    VectorType *VecTy = VectorType::get(I->Scalar->getType(), BundleWidth);
    ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                           I->Lane);
  }

  DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost << ".\n");
  return Cost + ExtractCost;
}

int BoUpSLP::getGatherCost(Type *Ty) {
  int Cost = 0;
  for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
    Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
  return Cost;
}

int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  return getGatherCost(VecTy);
}

AliasAnalysis::Location BoUpSLP::getLocation(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return AA->getLocation(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return AA->getLocation(LI);
  return AliasAnalysis::Location();
}

Value *BoUpSLP::getPointerOperand(Value *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  return 0;
}

unsigned BoUpSLP::getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B) {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers of the same type.
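  // A and B are consecutive when B accesses the address immediately after A,
  // e.g. (with A the first store and B the second):
  //   store float %x, float* %p
  //   %q = getelementptr float* %p, i64 1
  //   store float %y, float* %q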
  if (PtrA == PtrB || PtrA->getType() != PtrB->getType())
    return false;

  unsigned PtrBitWidth = DL->getPointerSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(PtrBitWidth, DL->getTypeStoreSize(Ty));

  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetB);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the size.
  APInt BaseDelta = Size - OffsetDelta;

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE->getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE->getSCEV(PtrB);
  const SCEV *C = SE->getConstant(BaseDelta);
  const SCEV *X = SE->getAddExpr(PtrSCEVA, C);
  return X == PtrSCEVB;
}

Value *BoUpSLP::getSinkBarrier(Instruction *Src, Instruction *Dst) {
  assert(Src->getParent() == Dst->getParent() && "Not the same BB");
  BasicBlock::iterator I = Src, E = Dst;
  /// Scan all of the instructions from Src to Dst and check if
  /// the source may alias.
  for (++I; I != E; ++I) {
    // Ignore store instructions that are marked as 'ignore'.
    if (MemBarrierIgnoreList.count(I))
      continue;
    if (Src->mayWriteToMemory()) /* Write */ {
      if (!I->mayReadOrWriteMemory())
        continue;
    } else /* Read */ {
      if (!I->mayWriteToMemory())
        continue;
    }
    AliasAnalysis::Location A = getLocation(&*I);
    AliasAnalysis::Location B = getLocation(Src);

    if (!A.Ptr || !B.Ptr || AA->alias(A, B))
      return I;
  }
  return 0;
}

int BoUpSLP::getLastIndex(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && BlocksNumbers.count(BB) && "Invalid block");
  BlockNumbering &BN = BlocksNumbers[BB];

  int MaxIdx = BN.getIndex(BB->getFirstNonPHI());
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  return MaxIdx;
}

Instruction *BoUpSLP::getLastInstruction(ArrayRef<Value *> VL) {
  BasicBlock *BB = cast<Instruction>(VL[0])->getParent();
  assert(BB == getSameBlock(VL) && BlocksNumbers.count(BB) && "Invalid block");
  BlockNumbering &BN = BlocksNumbers[BB];

  int MaxIdx = BN.getIndex(cast<Instruction>(VL[0]));
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    MaxIdx = std::max(MaxIdx, BN.getIndex(cast<Instruction>(VL[i])));
  Instruction *I = BN.getInstruction(MaxIdx);
  assert(I && "bad location");
  return I;
}

void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
  Instruction *VL0 = cast<Instruction>(VL[0]);
  Instruction *LastInst = getLastInstruction(VL);
  BasicBlock::iterator NextInst = LastInst;
  ++NextInst;
  Builder.SetInsertPoint(VL0->getParent(), NextInst);
  Builder.SetCurrentDebugLocation(VL0->getDebugLoc());
}

Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
  Value *Vec = UndefValue::get(Ty);
  // Generate the 'InsertElement' instruction.
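  // A gather of the bundle {%a, %b, %c, %d} becomes a chain of inserts:
  //   %v0 = insertelement <4 x float> undef, float %a, i32 0
  //   %v1 = insertelement <4 x float> %v0, float %b, i32 1
  //   %v2 = insertelement <4 x float> %v1, float %c, i32 2
  //   %v3 = insertelement <4 x float> %v2, float %d, i32 3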
  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
    if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(Insrt);

      // Add to our 'need-to-extract' list.
      if (ScalarToTreeEntry.count(VL[i])) {
        int Idx = ScalarToTreeEntry[VL[i]];
        TreeEntry *E = &VectorizableTree[Idx];
        // Find which lane we need to extract.
        int FoundLane = -1;
        for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for?
          if (E->Scalars[Lane] == VL[i]) {
            FoundLane = Lane;
            break;
          }
        }
        assert(FoundLane >= 0 && "Could not find the correct lane");
        ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
      }
    }
  }

  return Vec;
}

Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
  SmallDenseMap<Value *, int>::const_iterator Entry =
      ScalarToTreeEntry.find(VL[0]);
  if (Entry != ScalarToTreeEntry.end()) {
    int Idx = Entry->second;
    const TreeEntry *En = &VectorizableTree[Idx];
    if (En->isSame(VL) && En->VectorizedValue)
      return En->VectorizedValue;
  }
  return 0;
}

Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    if (E->isSame(VL))
      return vectorizeTree(E);
  }

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  return Gather(VL, VecTy);
}

Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  BuilderLocGuard Guard(Builder);

  if (E->VectorizedValue) {
    DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
  Type *ScalarTy = VL0->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());

  if (E->NeedToGather) {
    setInsertPointAfterBundle(E->Scalars);
    return Gather(E->Scalars, VecTy);
  }

  unsigned Opcode = VL0->getOpcode();
  assert(Opcode == getSameOpcode(E->Scalars) && "Invalid opcode");

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = dyn_cast<PHINode>(VL0);
    Builder.SetInsertPoint(PH->getParent()->getFirstInsertionPt());
    Builder.SetCurrentDebugLocation(PH->getDebugLoc());
    PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
    E->VectorizedValue = NewPhi;

    // PHINodes may have multiple entries from the same block. We want to
    // visit every block once.
    SmallSet<BasicBlock *, 4> VisitedBBs;

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      BasicBlock *IBB = PH->getIncomingBlock(i);

      if (!VisitedBBs.insert(IBB)) {
        NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
        continue;
      }

      // Prepare the operand vector.
      for (unsigned j = 0; j < E->Scalars.size(); ++j)
        Operands.push_back(cast<PHINode>(E->Scalars[j])->
                           getIncomingValueForBlock(IBB));

      Builder.SetInsertPoint(IBB->getTerminator());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      Value *Vec = vectorizeTree(Operands);
      NewPhi->addIncoming(Vec, IBB);
    }

    assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
           "Invalid number of incoming values");
    return NewPhi;
  }

  case Instruction::ExtractElement: {
    if (CanReuseExtract(E->Scalars)) {
      Value *V = VL0->getOperand(0);
      E->VectorizedValue = V;
      return V;
    }
    return Gather(E->Scalars, VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    ValueList INVL;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i)
      INVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));

    setInsertPointAfterBundle(E->Scalars);

    Value *InVec = vectorizeTree(INVL);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CastInst *CI = dyn_cast<CastInst>(VL0);
    Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::FCmp:
  case Instruction::ICmp: {
    ValueList LHSV, RHSV;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      LHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      RHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *L = vectorizeTree(LHSV);
    Value *R = vectorizeTree(RHSV);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
    Value *V;
    if (Opcode == Instruction::FCmp)
      V = Builder.CreateFCmp(P0, L, R);
    else
      V = Builder.CreateICmp(P0, L, R);

    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Select: {
    ValueList TrueVec, FalseVec, CondVec;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      CondVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      TrueVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
      FalseVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(2));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *Cond = vectorizeTree(CondVec);
    Value *True = vectorizeTree(TrueVec);
    Value *False = vectorizeTree(FalseVec);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    Value *V = Builder.CreateSelect(Cond, True, False);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    ValueList LHSVL, RHSVL;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
      LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
      RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *LHS = vectorizeTree(LHSVL);
    Value *RHS = vectorizeTree(RHSVL);

    if (LHS == RHS && isa<Instruction>(LHS)) {
      assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order");
    }

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
    Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::Load: {
    // Loads are inserted at the head of the tree because we don't want to
    // sink them all the way down past store instructions.
    setInsertPointAfterBundle(E->Scalars);

    LoadInst *LI = cast<LoadInst>(VL0);
    Value *VecPtr =
        Builder.CreateBitCast(LI->getPointerOperand(), VecTy->getPointerTo());
    unsigned Alignment = LI->getAlignment();
    LI = Builder.CreateLoad(VecPtr);
    LI->setAlignment(Alignment);
    E->VectorizedValue = LI;
    return LI;
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(VL0);
    unsigned Alignment = SI->getAlignment();

    ValueList ValueOp;
    for (int i = 0, e = E->Scalars.size(); i < e; ++i)
      ValueOp.push_back(cast<StoreInst>(E->Scalars[i])->getValueOperand());

    setInsertPointAfterBundle(E->Scalars);

    Value *VecValue = vectorizeTree(ValueOp);
    Value *VecPtr =
        Builder.CreateBitCast(SI->getPointerOperand(), VecTy->getPointerTo());
    StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
    S->setAlignment(Alignment);
    E->VectorizedValue = S;
    return S;
  }
  default:
    llvm_unreachable("unknown inst");
  }
  return 0;
}

void BoUpSLP::vectorizeTree() {
  Builder.SetInsertPoint(F->getEntryBlock().begin());
  vectorizeTree(&VectorizableTree[0]);

  DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values .\n");

  // Extract all of the elements with the external uses.
  for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end();
       it != e; ++it) {
    Value *Scalar = it->Scalar;
    llvm::User *User = it->User;

    // Skip users that we already RAUWed. This happens when one instruction
    // has multiple uses of the same value.
    if (std::find(Scalar->use_begin(), Scalar->use_end(), User) ==
        Scalar->use_end())
      continue;
    assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");

    int Idx = ScalarToTreeEntry[Scalar];
    TreeEntry *E = &VectorizableTree[Idx];
    assert(!E->NeedToGather && "Extracting from a gather list");

    Value *Vec = E->VectorizedValue;
    assert(Vec && "Can't find vectorizable value");

    Value *Lane = Builder.getInt32(it->Lane);
    // Generate extracts for out-of-tree users.
    // Find the insertion point for the extractelement lane.
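    // Three cases follow: the vector value is a PHI (extract right after the
    // PHIs of its block), a regular instruction (extract at the external
    // user, or at the incoming-block terminator when that user is a PHI), or
    // neither (extract at the function entry).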
    if (PHINode *PN = dyn_cast<PHINode>(Vec)) {
      Builder.SetInsertPoint(PN->getParent()->getFirstInsertionPt());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      User->replaceUsesOfWith(Scalar, Ex);
    } else if (isa<Instruction>(Vec)) {
      if (PHINode *PH = dyn_cast<PHINode>(User)) {
        for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
          if (PH->getIncomingValue(i) == Scalar) {
            Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
            Value *Ex = Builder.CreateExtractElement(Vec, Lane);
            PH->setOperand(i, Ex);
          }
        }
      } else {
        Builder.SetInsertPoint(cast<Instruction>(User));
        Value *Ex = Builder.CreateExtractElement(Vec, Lane);
        User->replaceUsesOfWith(Scalar, Ex);
      }
    } else {
      Builder.SetInsertPoint(F->getEntryBlock().begin());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      User->replaceUsesOfWith(Scalar, Ex);
    }

    DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
  }

  // For each vectorized value:
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      assert(Entry->VectorizedValue && "Can't find vectorizable value");

      Type *Ty = Scalar->getType();
      if (!Ty->isVoidTy()) {
        for (Value::use_iterator User = Scalar->use_begin(),
                                 UE = Scalar->use_end();
             User != UE; ++User) {
          DEBUG(dbgs() << "SLP: \tvalidating user:" << **User << ".\n");
          assert(!MustGather.count(*User) &&
                 "Replacing gathered value with undef");
          assert(ScalarToTreeEntry.count(*User) &&
                 "Replacing out-of-tree value with undef");
        }
        Value *Undef = UndefValue::get(Ty);
        Scalar->replaceAllUsesWith(Undef);
      }
      DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
      cast<Instruction>(Scalar)->eraseFromParent();
    }
  }

  for (Function::iterator it = F->begin(), e = F->end(); it != e; ++it) {
    BlocksNumbers[it].forget();
  }
  Builder.ClearInsertionPoint();
}

void BoUpSLP::optimizeGatherSequence() {
  DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
               << " gather sequence instructions.\n");
  // LICM InsertElementInst sequences.
  for (SetVector<Instruction *>::iterator it = GatherSeq.begin(),
                                          e = GatherSeq.end();
       it != e; ++it) {
    InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it);

    if (!Insert)
      continue;

    // Check if this block is inside a loop.
    Loop *L = LI->getLoopFor(Insert->getParent());
    if (!L)
      continue;

    // Check if it has a preheader.
    BasicBlock *PreHeader = L->getLoopPreheader();
    if (!PreHeader)
      continue;

    // If the vector or the element that we insert into it are
    // instructions that are defined inside the loop then we can't
    // hoist this instruction.
    Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
    Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
    if (CurrVec && L->contains(CurrVec))
      continue;
    if (NewElem && L->contains(NewElem))
      continue;

    // We can hoist this instruction. Move it to the pre-header.
    Insert->moveBefore(PreHeader->getTerminator());
  }

  // Perform O(N^2) search over the gather sequences and merge identical
  // instructions. TODO: We can further optimize this scan if we split the
  // instructions into different buckets based on the insert lane.
  SmallPtrSet<Instruction *, 16> Visited;
  SmallVector<Instruction *, 16> ToRemove;
  ReversePostOrderTraversal<Function *> RPOT(F);
  for (ReversePostOrderTraversal<Function *>::rpo_iterator I = RPOT.begin(),
                                                           E = RPOT.end();
       I != E; ++I) {
    BasicBlock *BB = *I;
    // For all instructions in the function:
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
      Instruction *In = it;
      if ((!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In)) ||
          !GatherSeq.count(In))
        continue;

      // Check if we can replace this instruction with any of the
      // visited instructions.
      for (SmallPtrSet<Instruction *, 16>::iterator v = Visited.begin(),
                                                    ve = Visited.end();
           v != ve; ++v) {
        if (In->isIdenticalTo(*v) &&
            DT->dominates((*v)->getParent(), In->getParent())) {
          In->replaceAllUsesWith(*v);
          ToRemove.push_back(In);
          In = 0;
          break;
        }
      }
      if (In)
        Visited.insert(In);
    }
  }

  // Erase all of the instructions that we RAUWed.
  for (SmallVectorImpl<Instruction *>::iterator v = ToRemove.begin(),
                                                ve = ToRemove.end();
       v != ve; ++v) {
    assert((*v)->getNumUses() == 0 && "Can't remove instructions with uses");
    (*v)->eraseFromParent();
  }
}

/// The SLPVectorizer Pass.
struct SLPVectorizer : public FunctionPass {
  typedef SmallVector<StoreInst *, 8> StoreList;
  typedef MapVector<Value *, StoreList> StoreListMap;

  /// Pass identification, replacement for typeid
  static char ID;

  explicit SLPVectorizer() : FunctionPass(ID) {
    initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
  }

  ScalarEvolution *SE;
  DataLayout *DL;
  TargetTransformInfo *TTI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;

  virtual bool runOnFunction(Function &F) {
    SE = &getAnalysis<ScalarEvolution>();
    DL = getAnalysisIfAvailable<DataLayout>();
    TTI = &getAnalysis<TargetTransformInfo>();
    AA = &getAnalysis<AliasAnalysis>();
    LI = &getAnalysis<LoopInfo>();
    DT = &getAnalysis<DominatorTree>();

    StoreRefs.clear();
    bool Changed = false;

    // Must have DataLayout. We can't require it because some tests run w/o
    // triple.
    if (!DL)
      return false;

    // Don't vectorize when the attribute NoImplicitFloat is used.
    if (F.hasFnAttribute(Attribute::NoImplicitFloat))
      return false;

    DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");

    // Use the bottom up SLP vectorizer to construct chains that start with
    // the store instructions.
    BoUpSLP R(&F, SE, DL, TTI, AA, LI, DT);

    // Scan the blocks in the function in post order.
    for (po_iterator<BasicBlock *> it = po_begin(&F.getEntryBlock()),
                                   e = po_end(&F.getEntryBlock());
         it != e; ++it) {
      BasicBlock *BB = *it;

      // Vectorize trees that end at stores.
      if (unsigned count = collectStores(BB, R)) {
        (void)count;
        DEBUG(dbgs() << "SLP: Found " << count << " stores to vectorize.\n");
        Changed |= vectorizeStoreChains(R);
      }

      // Vectorize trees that end at reductions.
      Changed |= vectorizeChainsInBlock(BB, R);
    }

    if (Changed) {
      R.optimizeGatherSequence();
      DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
      DEBUG(verifyFunction(F));
    }
    return Changed;
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    FunctionPass::getAnalysisUsage(AU);
    AU.addRequired<ScalarEvolution>();
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<TargetTransformInfo>();
    AU.addRequired<LoopInfo>();
    AU.addRequired<DominatorTree>();
    AU.addPreserved<LoopInfo>();
    AU.addPreserved<DominatorTree>();
    AU.setPreservesCFG();
  }

private:

  /// \brief Collect memory references and sort them according to their base
  /// object. We sort the stores by their base objects to reduce the cost of
  /// the quadratic search on the stores. TODO: We can further reduce this cost
  /// if we flush the chain creation every time we run into a memory barrier.
  unsigned collectStores(BasicBlock *BB, BoUpSLP &R);

  /// \brief Try to vectorize a chain that starts at two arithmetic instrs.
  bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R);

  /// \brief Try to vectorize a list of operands.
  /// \returns true if a value was vectorized.
  bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R);

  /// \brief Try to vectorize a chain that may start at the operands of \p V.
  bool tryToVectorize(BinaryOperator *V, BoUpSLP &R);

  /// \brief Vectorize the stores that were collected in StoreRefs.
  bool vectorizeStoreChains(BoUpSLP &R);

  /// \brief Scan the basic block and look for patterns that are likely to
  /// start a vectorization chain.
  bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R);

  bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold,
                           BoUpSLP &R);

  bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold,
                       BoUpSLP &R);

private:
  StoreListMap StoreRefs;
};

bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
                                        int CostThreshold, BoUpSLP &R) {
  unsigned ChainLen = Chain.size();
  DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
               << "\n");
  Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType();
  unsigned Sz = DL->getTypeSizeInBits(StoreTy);
  unsigned VF = MinVecRegSize / Sz;

  if (!isPowerOf2_32(Sz) || VF < 2)
    return false;

  bool Changed = false;
  // Look for profitable vectorizable trees at all offsets, starting at zero.
  for (unsigned i = 0, e = ChainLen; i < e; ++i) {
    if (i + VF > e)
      break;
    DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
                 << "\n");
    ArrayRef<Value *> Operands = Chain.slice(i, VF);

    R.buildTree(Operands);

    int Cost = R.getTreeCost();

    DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
    if (Cost < CostThreshold) {
      DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
                                    int costThreshold, BoUpSLP &R) {
  SetVector<Value *> Heads, Tails;
  SmallDenseMap<Value *, Value *> ConsecutiveChain;

  // We may run into multiple chains that merge into a single chain. We mark
  // the stores that we vectorized so that we don't visit the same store
  // twice.
  BoUpSLP::ValueSet VectorizedStores;
  bool Changed = false;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
    for (unsigned j = 0; j < e; ++j) {
      if (i == j)
        continue;

      if (R.isConsecutiveAccess(Stores[i], Stores[j])) {
        Tails.insert(Stores[j]);
        Heads.insert(Stores[i]);
        ConsecutiveChain[Stores[i]] = Stores[j];
      }
    }
  }

  // For stores that start but don't end a link in the chain:
  for (SetVector<Value *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and
    // try to vectorize it.
    BoUpSLP::ValueList Operands;
    Value *I = *it;
    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (VectorizedStores.count(I))
        break;
      Operands.push_back(I);
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    bool Vectorized = vectorizeStoreChain(Operands, costThreshold, R);

    // Mark the vectorized stores so that we don't vectorize them again.
    if (Vectorized)
      VectorizedStores.insert(Operands.begin(), Operands.end());
    Changed |= Vectorized;
  }

  return Changed;
}

unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
  unsigned count = 0;
  StoreRefs.clear();
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    StoreInst *SI = dyn_cast<StoreInst>(it);
    if (!SI)
      continue;

    // Check that the pointer points to scalars.
    Type *Ty = SI->getValueOperand()->getType();
    if (Ty->isAggregateType() || Ty->isVectorTy())
      return 0;

    // Find the base of the GEP.
    Value *Ptr = SI->getPointerOperand();
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
      Ptr = GEP->getPointerOperand();

    // Save the store locations.
    StoreRefs[Ptr].push_back(SI);
    count++;
  }
  return count;
}

bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
  if (!A || !B)
    return false;
  Value *VL[] = { A, B };
  return tryToVectorizeList(VL, R);
}

bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R) {
  if (VL.size() < 2)
    return false;

  DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size()
               << ".\n");

  // Check that all of the parts are scalar instructions of the same type.
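  // For example, a list of four isomorphic scalar operations (illustrative
  // IR only; with 32-bit elements and the 128-bit MinVecRegSize, VF is 4):
  //   %a0 = add i32 %x0, %y0
  //   %a1 = add i32 %x1, %y1
  //   %a2 = add i32 %x2, %y2
  //   %a3 = add i32 %x3, %y3
  // may become a single <4 x i32> add if the cost model approves.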
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;

  unsigned Opcode0 = I0->getOpcode();

  Type *Ty0 = I0->getType();
  unsigned Sz = DL->getTypeSizeInBits(Ty0);
  unsigned VF = MinVecRegSize / Sz;

  for (int i = 0, e = VL.size(); i < e; ++i) {
    Type *Ty = VL[i]->getType();
    if (Ty->isAggregateType() || Ty->isVectorTy())
      return false;
    Instruction *Inst = dyn_cast<Instruction>(VL[i]);
    if (!Inst || Inst->getOpcode() != Opcode0)
      return false;
  }

  bool Changed = false;

  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    unsigned OpsWidth = 0;

    if (i + VF > e)
      OpsWidth = e - i;
    else
      OpsWidth = VF;

    if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
      break;

    DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations.\n");
    ArrayRef<Value *> Ops = VL.slice(i, OpsWidth);

    R.buildTree(Ops);
    int Cost = R.getTreeCost();

    if (Cost < -SLPCostThreshold) {
      DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
  if (!V)
    return false;

  // Try to vectorize V.
  if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R))
    return true;

  BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0));
  BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1));
  // Try to skip B.
  if (B && B->hasOneUse()) {
    BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
    BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
    if (tryToVectorizePair(A, B0, R)) {
      B->moveBefore(V);
      return true;
    }
    if (tryToVectorizePair(A, B1, R)) {
      B->moveBefore(V);
      return true;
    }
  }

  // Try to skip A.
  if (A && A->hasOneUse()) {
    BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
    BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
    if (tryToVectorizePair(A0, B, R)) {
      A->moveBefore(V);
      return true;
    }
    if (tryToVectorizePair(A1, B, R)) {
      A->moveBefore(V);
      return true;
    }
  }
  return false;
}

/// \brief Recognize construction of vectors like
///   %ra = insertelement <4 x float> undef, float %s0, i32 0
///   %rb = insertelement <4 x float> %ra, float %s1, i32 1
///   %rc = insertelement <4 x float> %rb, float %s2, i32 2
///   %rd = insertelement <4 x float> %rc, float %s3, i32 3
///
/// \returns true if it matches.
static bool findBuildVector(InsertElementInst *IE,
                            SmallVectorImpl<Value *> &Ops) {
  if (!isa<UndefValue>(IE->getOperand(0)))
    return false;

  while (true) {
    Ops.push_back(IE->getOperand(1));

    // A dead insertelement chain cannot be a build vector.
    if (IE->use_empty())
      return false;

    InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->use_back());
    if (!NextUse)
      return true;

    // If this isn't the final use, make sure the next insertelement is the
    // only use. It's OK if the final constructed vector is used multiple
    // times.
    if (!IE->hasOneUse())
      return false;

    IE = NextUse;
  }

  return false;
}

bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallSet<Instruction *, 16> VisitedInstrs;

  // Collect the incoming values from the PHIs.
  for (BasicBlock::iterator instr = BB->begin(), ie = BB->end(); instr != ie;
       ++instr) {
    PHINode *P = dyn_cast<PHINode>(instr);

    if (!P)
      break;

    // We may go through BB multiple times so skip the ones we have checked.
    if (!VisitedInstrs.insert(instr))
      continue;

    // Stop constructing the list when we reach a different type.
    if (Incoming.size() && P->getType() != Incoming[0]->getType()) {
      if (tryToVectorizeList(Incoming, R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        instr = BB->begin();
        ie = BB->end();
      }

      Incoming.clear();
    }

    Incoming.push_back(P);
  }

  if (Incoming.size() > 1)
    Changed |= tryToVectorizeList(Incoming, R);

  VisitedInstrs.clear();

  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // We may go through BB multiple times so skip the ones we have checked.
    if (!VisitedInstrs.insert(it))
      continue;

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;
      Value *Rdx =
          (P->getIncomingBlock(0) == BB
               ? (P->getIncomingValue(0))
               : (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1) : 0));
      // Check if this is a Binary Operator.
      BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx);
      if (!BI)
        continue;

      Value *Inst = BI->getOperand(0);
      if (Inst == P)
        Inst = BI->getOperand(1);

      if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }

    // Try to vectorize trees that start at compare instructions.
    if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
      if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
        Changed = true;
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        it = BB->begin();
        e = BB->end();
        continue;
      }

      for (int i = 0; i < 2; ++i) {
        if (BinaryOperator *BI = dyn_cast<BinaryOperator>(CI->getOperand(i))) {
          if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) {
            Changed = true;
            // We would like to start over since some instructions are deleted
            // and the iterator may become invalid.
            it = BB->begin();
            e = BB->end();
          }
        }
      }
      continue;
    }

    // Try to vectorize trees that start at insertelement instructions.
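    // When the inserted scalars are themselves isomorphic operations
    // (illustrative IR only):
    //   %s0 = fadd float %a0, %b0
    //   %s1 = fadd float %a1, %b1
    //   %s2 = fadd float %a2, %b2
    //   %s3 = fadd float %a3, %b3
    // followed by the insertelement chain matched by findBuildVector above,
    // tryToVectorizeList may replace the four scalar fadds with a single
    // <4 x float> fadd.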
    if (InsertElementInst *IE = dyn_cast<InsertElementInst>(it)) {
      SmallVector<Value *, 8> Ops;
      if (!findBuildVector(IE, Ops))
        continue;

      if (tryToVectorizeList(Ops, R)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }

      continue;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = StoreRefs.begin(), e = StoreRefs.end();
       it != e; ++it) {
    if (it->second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                 << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      ArrayRef<StoreInst *> Chunk(&it->second[CI], Len);
      Changed |= vectorizeStores(Chunk, -SLPCostThreshold, R);
    }
  }
  return Changed;
}

} // end anonymous namespace

char SLPVectorizer::ID = 0;
static const char lv_name[] = "SLP Vectorizer";
INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

namespace llvm {
Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
}
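// Usage sketch (illustrative, not part of the pass itself): the pass is
// normally scheduled through the legacy pass manager; `M` below is assumed
// to be a loaded llvm::Module.
//
//   PassManager PM;
//   PM.add(createSLPVectorizerPass());
//   PM.run(M);
//
// From the command line the pass can be exercised with:
//   opt -slp-vectorizer -S input.ll
// and its "SLP: ..." debug traces enabled with -debug-only=SLP (in asserts
// builds).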