//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Analysis/VectorUtils.h"
#include <algorithm>
#include <map>
#include <memory>

using namespace llvm;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
    cl::desc("Limit the size of the SLP scheduling region per block"));

namespace {

// FIXME: Set this via cl::opt to allow overriding.
static const unsigned MinVecRegSize = 128;

static const unsigned RecursionMaxDepth = 12;

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// \brief Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_fp128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns the parent basic block if all of the instructions in \p VL
/// are in the same block or null otherwise.
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return nullptr;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return nullptr;

    if (BB != I->getParent())
      return nullptr;
  }
  return BB;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    if (!isa<Constant>(VL[i]))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

///\returns Opcode that can be clubbed with \p Op to create an alternate
/// sequence which can later be merged as a ShuffleVector instruction.
static unsigned getAltOpcode(unsigned Op) {
  switch (Op) {
  case Instruction::FAdd:
    return Instruction::FSub;
  case Instruction::FSub:
    return Instruction::FAdd;
  case Instruction::Add:
    return Instruction::Sub;
  case Instruction::Sub:
    return Instruction::Add;
  default:
    return 0;
  }
}

///\returns bool representing if Opcode \p Op can be part
/// of an alternate sequence which can later be merged as
/// a ShuffleVector instruction.
static bool canCombineAsAltInst(unsigned Op) {
  return Op == Instruction::FAdd || Op == Instruction::FSub ||
         Op == Instruction::Sub || Op == Instruction::Add;
}

/// \returns ShuffleVector instruction if instructions in \p VL form an
/// alternating fadd/fsub, fsub/fadd, add/sub or sub/add sequence
/// (i.e. opcodes of fadd, fsub, fadd, fsub, ...).
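/// For illustration (a hypothetical bundle, not taken from a test case),
/// a scalar sequence such as
///   %a0 = fadd float %x0, %y0
///   %s1 = fsub float %x1, %y1
///   %a2 = fadd float %x2, %y2
///   %s3 = fsub float %x3, %y3
/// alternates fadd/fsub and can therefore be emitted as one vector fadd,
/// one vector fsub and a shufflevector that selects the even lanes from the
/// first result and the odd lanes from the second.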
static unsigned isAltInst(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  unsigned Opcode = I0->getOpcode();
  unsigned AltOpcode = getAltOpcode(Opcode);
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || I->getOpcode() != ((i & 1) ? AltOpcode : Opcode))
      return 0;
  }
  return Instruction::ShuffleVector;
}

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode()) {
      if (canCombineAsAltInst(Opcode) && i == 1)
        return isAltInst(VL);
      return 0;
    }
  }
  return Opcode;
}

/// Get the intersection (logical and) of all of the potential IR flags
/// of each scalar operation (VL) that will be converted into a vector (I).
/// Flag set: NSW, NUW, exact, and all of fast-math.
static void propagateIRFlags(Value *I, ArrayRef<Value *> VL) {
  if (auto *VecOp = dyn_cast<BinaryOperator>(I)) {
    if (auto *Intersection = dyn_cast<BinaryOperator>(VL[0])) {
      // Intersection is initialized to the 0th scalar,
      // so start counting from index '1'.
      for (int i = 1, e = VL.size(); i < e; ++i) {
        if (auto *Scalar = dyn_cast<BinaryOperator>(VL[i]))
          Intersection->andIRFlags(Scalar);
      }
      VecOp->copyIRFlags(Intersection);
    }
  }
}

/// \returns \p I after propagating metadata from \p VL.
static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (unsigned i = 0, n = Metadata.size(); i != n; ++i) {
    unsigned Kind = Metadata[i].first;
    MDNode *MD = Metadata[i].second;

    for (int i = 1, e = VL.size(); MD && i != e; i++) {
      Instruction *I = cast<Instruction>(VL[i]);
      MDNode *IMD = I->getMetadata(Kind);

      switch (Kind) {
      default:
        MD = nullptr; // Remove unknown metadata
        break;
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_nontemporal:
        MD = MDNode::intersect(MD, IMD);
        break;
      }
    }
    I->setMetadata(Kind, MD);
  }
  return I;
}

/// \returns The type that all of the values in \p VL have or null if there
/// are different types.
static Type* getSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return nullptr;

  return Ty;
}

/// \returns True if the ExtractElement instructions in VL can be vectorized
/// to use the original vector.
static bool CanReuseExtract(ArrayRef<Value *> VL) {
  assert(Instruction::ExtractElement == getSameOpcode(VL) && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *VL0 = VL[0];
  ExtractElementInst *E0 = cast<ExtractElementInst>(VL0);
  Value *Vec = E0->getOperand(0);

  // We have to extract from the same vector type.
  unsigned NElts = Vec->getType()->getVectorNumElements();

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
  ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1));
  if (!CI || CI->getZExtValue())
    return false;

  for (unsigned i = 1, e = VL.size(); i < e; ++i) {
    ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));

    if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec)
      return false;
  }

  return true;
}

/// \returns True if an in-tree use also needs an extract. This refers to
/// a possible scalar operand in a vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {

  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
    if (hasVectorInstrinsicScalarOpd(ID, 1)) {
      return (CI->getArgOperand(1) == Scalar);
    }
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC)
      : NumLoadsWantToKeepOrder(0), NumLoadsWantToChangeOrder(0), F(Func),
        SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt),
        Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
  }

  /// \brief Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  int getSpillCost();

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
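  /// (Purely illustrative numbers: a returned cost of -4 means the vector
  /// form of the tree is estimated to be four cost units cheaper than
  /// keeping all of the scalars.)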
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumLoadsWantToKeepOrder = 0;
    NumLoadsWantToChangeOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
  }

  /// \returns true if the memory operations A and B are consecutive.
  bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL);

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// \returns true if it is beneficial to reverse the vector order.
  bool shouldReorder() const {
    return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
  }

private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This may happen in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL) const;

  /// \brief Take the pointer operand from the Load/Store instruction.
  /// \returns NULL if this is not a valid Load/Store instruction.
  static Value *getPointerOperand(Value *I);

  /// \brief Take the address space operand from the Load/Store instruction.
  /// \returns -1 if this is not a valid Load/Store instruction.
  static unsigned getAddressSpaceOperand(Value *I);

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even if the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  /// \brief Reorder commutative operands in an alt shuffle if doing so results
  /// in vectorized code.
  void reorderAltShuffleOperands(ArrayRef<Value *> VL,
                                 SmallVectorImpl<Value *> &Left,
                                 SmallVectorImpl<Value *> &Right);

  /// \brief Reorder commutative operands to get a better probability of
  /// generating vectorized code.
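  /// For illustration (hypothetical scalars, not from a test case), given the
  /// bundle { add %a, %load1 ; add %load2, %b } the commutative operands can
  /// be reordered so that %load1 and %load2 end up on the same side, making
  /// that operand list a consecutive-load bundle that can itself be
  /// vectorized.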
  void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                      SmallVectorImpl<Value *> &Left,
                                      SmallVectorImpl<Value *> &Right);

  struct TreeEntry {
    TreeEntry() : Scalars(), VectorizedValue(nullptr),
                  NeedToGather(0) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// Do we need to gather this sequence?
    bool NeedToGather;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) {
    VectorizableTree.emplace_back();
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      MustGather.insert(VL.begin(), VL.end());
    }
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value*, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L)
        : Scalar(S), User(U), Lane(L) {}
    // Which scalar in our function.
    Value *Scalar;
    // The user that uses the scalar.
    llvm::User *User;
    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// Checks if two instructions may access the same memory.
  ///
  /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
  /// is invariant in the calling loop.
  bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
                 Instruction *Inst2) {

    // First check if the result is already in the cache.
    AliasCacheKey key = std::make_pair(Inst1, Inst2);
    Optional<bool> &result = AliasCache[key];
    if (result.hasValue()) {
      return result.getValue();
    }
    MemoryLocation Loc2 = getLocation(Inst2, AA);
    bool aliased = true;
    if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
      // Do the alias check.
      aliased = AA->alias(Loc1, Loc2);
    }
    // Store the result in the cache.
    result = aliased;
    return aliased;
  }

  typedef std::pair<Instruction *, Instruction *> AliasCacheKey;

  /// Cache for alias results.
  /// TODO: consider moving this to the AliasAnalysis itself.
  DenseMap<AliasCacheKey, Optional<bool>> AliasCache;

  /// Removes an instruction from its block and eventually deletes it.
  /// It's like Instruction::eraseFromParent() except that the actual deletion
  /// is delayed until BoUpSLP is destructed.
  /// This is required to ensure that there are no incorrect collisions in the
  /// AliasCache, which can happen if a new instruction is allocated at the
  /// same address as a previously deleted instruction.
  void eraseInstruction(Instruction *I) {
    I->removeFromParent();
    I->dropAllReferences();
    DeletedInstructions.push_back(std::unique_ptr<Instruction>(I));
  }

  /// Temporary store for deleted instructions. Instructions will be deleted
  /// eventually when the BoUpSLP is destructed.
  SmallVector<std::unique_ptr<Instruction>, 8> DeletedInstructions;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User).
  UserList ExternalUses;

  /// Values used only by @llvm.assume calls.
  SmallPtrSet<const Value *, 32> EphValues;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;
  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;

  /// Contains all scheduling relevant data for an instruction.
  /// A ScheduleData either represents a single instruction or a member of an
  /// instruction bundle (= a group of instructions which is combined into a
  /// vector instruction).
  struct ScheduleData {

    // The initial value for the dependency counters. It means that the
    // dependencies are not calculated yet.
    enum { InvalidDeps = -1 };

    ScheduleData()
        : Inst(nullptr), FirstInBundle(nullptr), NextInBundle(nullptr),
          NextLoadStore(nullptr), SchedulingRegionID(0), SchedulingPriority(0),
          Dependencies(InvalidDeps), UnscheduledDeps(InvalidDeps),
          UnscheduledDepsInBundle(InvalidDeps), IsScheduled(false) {}

    void init(int BlockSchedulingRegionID) {
      FirstInBundle = this;
      NextInBundle = nullptr;
      NextLoadStore = nullptr;
      IsScheduled = false;
      SchedulingRegionID = BlockSchedulingRegionID;
      UnscheduledDepsInBundle = UnscheduledDeps;
      clearDependencies();
    }

    /// Returns true if the dependency information has been calculated.
    bool hasValidDependencies() const { return Dependencies != InvalidDeps; }

    /// Returns true for single instructions and for bundle representatives
    /// (= the head of a bundle).
    bool isSchedulingEntity() const { return FirstInBundle == this; }

    /// Returns true if it represents an instruction bundle and not only a
    /// single instruction.
    bool isPartOfBundle() const {
      return NextInBundle != nullptr || FirstInBundle != this;
    }

    /// Returns true if it is ready for scheduling, i.e. it has no more
    /// unscheduled dependent instructions/bundles.
    bool isReady() const {
      assert(isSchedulingEntity() &&
             "can't consider non-scheduling entity for ready list");
      return UnscheduledDepsInBundle == 0 && !IsScheduled;
    }

    /// Modifies the number of unscheduled dependencies, also updating it for
    /// the whole bundle.
    int incrementUnscheduledDeps(int Incr) {
      UnscheduledDeps += Incr;
      return FirstInBundle->UnscheduledDepsInBundle += Incr;
    }

    /// Sets the number of unscheduled dependencies to the number of
    /// dependencies.
    void resetUnscheduledDeps() {
      incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
    }

    /// Clears all dependency information.
    void clearDependencies() {
      Dependencies = InvalidDeps;
      resetUnscheduledDeps();
      MemoryDependencies.clear();
    }

    void dump(raw_ostream &os) const {
      if (!isSchedulingEntity()) {
        os << "/ " << *Inst;
      } else if (NextInBundle) {
        os << '[' << *Inst;
        ScheduleData *SD = NextInBundle;
        while (SD) {
          os << ';' << *SD->Inst;
          SD = SD->NextInBundle;
        }
        os << ']';
      } else {
        os << *Inst;
      }
    }

    Instruction *Inst;

    /// Points to the head in an instruction bundle (and always to this for
    /// single instructions).
    ScheduleData *FirstInBundle;

    /// Singly linked list of all instructions in a bundle. Null if it is a
    /// single instruction.
    ScheduleData *NextInBundle;

    /// Singly linked list of all memory instructions (e.g. load, store, call)
    /// in the block - until the end of the scheduling region.
    ScheduleData *NextLoadStore;

    /// The dependent memory instructions.
    /// This list is derived on demand in calculateDependencies().
    SmallVector<ScheduleData *, 4> MemoryDependencies;

    /// This ScheduleData is in the current scheduling region if this matches
    /// the current SchedulingRegionID of BlockScheduling.
    int SchedulingRegionID;

    /// Used for getting a "good" final ordering of instructions.
    int SchedulingPriority;

    /// The number of dependencies. Consists of the number of users of the
    /// instruction plus the number of dependent memory instructions (if any).
    /// This value is calculated on demand.
    /// If InvalidDeps, the number of dependencies is not calculated yet.
    int Dependencies;

    /// The number of dependencies minus the number of dependencies of scheduled
    /// instructions. As soon as this is zero, the instruction/bundle gets ready
    /// for scheduling.
    /// Note that this is negative as long as Dependencies is not calculated.
    int UnscheduledDeps;

    /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for
    /// single instructions.
    int UnscheduledDepsInBundle;

    /// True if this instruction is scheduled (or considered as scheduled in the
    /// dry-run).
    bool IsScheduled;
  };

#ifndef NDEBUG
  friend raw_ostream &operator<<(raw_ostream &os,
                                 const BoUpSLP::ScheduleData &SD);
#endif

  /// Contains all scheduling data for a basic block.
  ///
  struct BlockScheduling {

    BlockScheduling(BasicBlock *BB)
        : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize),
          ScheduleStart(nullptr), ScheduleEnd(nullptr),
          FirstLoadStoreInRegion(nullptr), LastLoadStoreInRegion(nullptr),
          ScheduleRegionSize(0),
          ScheduleRegionSizeLimit(ScheduleRegionSizeBudget),
          // Make sure that the initial SchedulingRegionID is greater than the
          // initial SchedulingRegionID in ScheduleData (which is 0).
          SchedulingRegionID(1) {}

    void clear() {
      ReadyInsts.clear();
      ScheduleStart = nullptr;
      ScheduleEnd = nullptr;
      FirstLoadStoreInRegion = nullptr;
      LastLoadStoreInRegion = nullptr;

      // Reduce the maximum schedule region size by the size of the
      // previous scheduling run.
      ScheduleRegionSizeLimit -= ScheduleRegionSize;
      if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
        ScheduleRegionSizeLimit = MinScheduleRegionSize;
      ScheduleRegionSize = 0;

      // Make a new scheduling region, i.e. all existing ScheduleData is not
      // in the new region yet.
      ++SchedulingRegionID;
    }

    ScheduleData *getScheduleData(Value *V) {
      ScheduleData *SD = ScheduleDataMap[V];
      if (SD && SD->SchedulingRegionID == SchedulingRegionID)
        return SD;
      return nullptr;
    }

    bool isInSchedulingRegion(ScheduleData *SD) {
      return SD->SchedulingRegionID == SchedulingRegionID;
    }

    /// Marks an instruction as scheduled and puts all dependent ready
    /// instructions into the ready-list.
    template <typename ReadyListType>
    void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
      SD->IsScheduled = true;
      DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");

      ScheduleData *BundleMember = SD;
      while (BundleMember) {
        // Handle the def-use chain dependencies.
        for (Use &U : BundleMember->Inst->operands()) {
          ScheduleData *OpDef = getScheduleData(U.get());
          if (OpDef && OpDef->hasValidDependencies() &&
              OpDef->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = OpDef->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (def): " << *DepBundle << "\n");
          }
        }
        // Handle the memory dependencies.
        for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
          if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (mem): " << *DepBundle << "\n");
          }
        }
        BundleMember = BundleMember->NextInBundle;
      }
    }

    /// Put all instructions into the ReadyList which are ready for scheduling.
    template <typename ReadyListType>
    void initialFillReadyList(ReadyListType &ReadyList) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
        ScheduleData *SD = getScheduleData(I);
        if (SD->isSchedulingEntity() && SD->isReady()) {
          ReadyList.insert(SD);
          DEBUG(dbgs() << "SLP: initially in ready list: " << *I << "\n");
        }
      }
    }

    /// Checks if a bundle of instructions can be scheduled, i.e. has no
    /// cyclic dependencies. This is only a dry-run, no instructions are
    /// actually moved at this stage.
    bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP);

    /// Un-bundles a group of instructions.
    void cancelScheduling(ArrayRef<Value *> VL);

    /// Extends the scheduling region so that V is inside the region.
    /// \returns true if the region size is within the limit.
    bool extendSchedulingRegion(Value *V);

    /// Initialize the ScheduleData structures for new instructions in the
    /// scheduling region.
    void initScheduleData(Instruction *FromI, Instruction *ToI,
                          ScheduleData *PrevLoadStore,
                          ScheduleData *NextLoadStore);

    /// Updates the dependency information of a bundle and of all instructions/
    /// bundles which depend on the original bundle.
    void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
                               BoUpSLP *SLP);

    /// Sets all instructions in the scheduling region to un-scheduled.
    void resetSchedule();

    BasicBlock *BB;

    /// Simple memory allocation for ScheduleData.
    std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;

    /// The size of a ScheduleData array in ScheduleDataChunks.
    int ChunkSize;

    /// The allocator position in the current chunk, which is the last entry
    /// of ScheduleDataChunks.
    int ChunkPos;

    /// Attaches ScheduleData to Instruction.
    /// Note that the mapping survives during all vectorization iterations, i.e.
    /// ScheduleData structures are recycled.
    DenseMap<Value *, ScheduleData *> ScheduleDataMap;

    struct ReadyList : SmallVector<ScheduleData *, 8> {
      void insert(ScheduleData *SD) { push_back(SD); }
    };

    /// The ready-list for scheduling (only used for the dry-run).
    ReadyList ReadyInsts;

    /// The first instruction of the scheduling region.
    Instruction *ScheduleStart;

    /// The first instruction _after_ the scheduling region.
    Instruction *ScheduleEnd;

    /// The first memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *FirstLoadStoreInRegion;

    /// The last memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *LastLoadStoreInRegion;

    /// The current size of the scheduling region.
    int ScheduleRegionSize;

    /// The maximum size allowed for the scheduling region.
    int ScheduleRegionSizeLimit;

    /// The ID of the scheduling region. For a new vectorization iteration this
    /// is incremented, which "removes" all ScheduleData from the region.
    int SchedulingRegionID;
  };

  /// Attaches the BlockScheduling structures to basic blocks.
  MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;

  /// Performs the "real" scheduling. Done before vectorization is actually
  /// performed in a basic block.
  void scheduleBlock(BlockScheduling *BS);

  /// List of users to ignore during scheduling and that don't need extracting.
  ArrayRef<Value *> UserIgnoreList;

  // Number of load-bundles, which contain consecutive loads.
  int NumLoadsWantToKeepOrder;

  // Number of load-bundles of size 2, which are consecutive loads if reversed.
  int NumLoadsWantToChangeOrder;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;
};

#ifndef NDEBUG
raw_ostream &operator<<(raw_ostream &os, const BoUpSLP::ScheduleData &SD) {
  SD.dump(os);
  return os;
}
#endif

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ArrayRef<Value *> UserIgnoreLst) {
  deleteTree();
  UserIgnoreList = UserIgnoreLst;
  if (!getSameType(Roots))
    return;
  buildTree_rec(Roots, 0);

  // Collect the values that we need to extract from the tree.
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      for (User *U : Scalar->users()) {
        DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        Instruction *UserInst = dyn_cast<Instruction>(U);
        if (!UserInst)
          continue;

        // Skip in-tree scalars that become vectors.
        if (ScalarToTreeEntry.count(U)) {
          int Idx = ScalarToTreeEntry[U];
          TreeEntry *UseEntry = &VectorizableTree[Idx];
          Value *UseScalar = UseEntry->Scalars[0];
          // Some in-tree scalars will remain as scalar in vectorized
          // instructions. If that is the case, the one in Lane 0 will
          // be used.
          if (UseScalar != U ||
              !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
            DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
                         << ".\n");
            assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
            continue;
          }
        }

        // Ignore users in the user ignore list.
        if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UserInst) !=
            UserIgnoreList.end())
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
      }
    }
  }
}


void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
  bool SameTy = getSameType(VL); (void)SameTy;
  bool isAltShuffle = false;
  assert(SameTy && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false);
    return;
  }

  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false);
      return;
    }
  unsigned Opcode = getSameOpcode(VL);

  // Check that this shuffle vector refers to the alternate
  // sequence of opcodes.
  if (Opcode == Instruction::ShuffleVector) {
    Instruction *I0 = dyn_cast<Instruction>(VL[0]);
    unsigned Op = I0->getOpcode();
    if (Op != Instruction::ShuffleVector)
      isAltShuffle = true;
  }

  // If all of the operands are identical or constant we have a simple solution.
  if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) || !Opcode) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Don't vectorize ephemeral values.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (EphValues.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is ephemeral.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // Check if this is a duplicate of another entry.
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is already in tree.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // If any of the scalars is marked as a value that needs to stay scalar then
  // we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  BasicBlock *BB = cast<Instruction>(VL0)->getParent();

  if (!DT->isReachableFromEntry(BB)) {
    // Don't go into unreachable blocks. They may contain instructions with
    // dependency cycles which confuse the final scheduling.
    DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
    newTreeEntry(VL, false);
    return;
  }

  // Check that every instruction appears once in this bundle.
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    for (unsigned j = i+1; j < e; ++j)
      if (VL[i] == VL[j]) {
        DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
        newTreeEntry(VL, false);
        return;
      }

  auto &BSRef = BlocksSchedules[BB];
  if (!BSRef) {
    BSRef = llvm::make_unique<BlockScheduling>(BB);
  }
  BlockScheduling &BS = *BSRef.get();

  if (!BS.tryScheduleBundle(VL, this)) {
    DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
    assert((!BS.getScheduleData(VL[0]) ||
            !BS.getScheduleData(VL[0])->isPartOfBundle()) &&
           "tryScheduleBundle should cancelScheduling on failure");
    newTreeEntry(VL, false);
    return;
  }
  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = dyn_cast<PHINode>(VL0);

    // Check for terminator values (e.g. invoke).
    for (unsigned j = 0; j < VL.size(); ++j)
      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        TerminatorInst *Term = dyn_cast<TerminatorInst>(
            cast<PHINode>(VL[j])->getIncomingValueForBlock(
                PH->getIncomingBlock(i)));
        if (Term) {
          DEBUG(dbgs()
                << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          return;
        }
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<PHINode>(VL[j])->getIncomingValueForBlock(
            PH->getIncomingBlock(i)));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ExtractElement: {
    bool Reuse = CanReuseExtract(VL);
    if (Reuse) {
      DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
    } else {
      BS.cancelScheduling(VL);
    }
    newTreeEntry(VL, Reuse);
    return;
  }
  case Instruction::Load: {
    // Check that a vectorized load would load the same memory as a scalar
    // load.
    // For example, we don't want to vectorize loads that are smaller than
    // 8 bits.
    // Even though we have a packed struct {<i2, i2, i2, i2>} LLVM treats
    // loading/storing it as an i8 struct. If we vectorize loads/stores from
    // such a struct we read/write packed bits disagreeing with the
    // unvectorized version.
    const DataLayout &DL = F->getParent()->getDataLayout();
    Type *ScalarTy = VL[0]->getType();

    if (DL.getTypeSizeInBits(ScalarTy) !=
        DL.getTypeAllocSizeInBits(ScalarTy)) {
      BS.cancelScheduling(VL);
      newTreeEntry(VL, false);
      DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
      return;
    }
    // Check if the loads are consecutive or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
      LoadInst *L = cast<LoadInst>(VL[i]);
      if (!L->isSimple()) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
        return;
      }

      if (!isConsecutiveAccess(VL[i], VL[i + 1], DL)) {
        if (VL.size() == 2 && isConsecutiveAccess(VL[1], VL[0], DL)) {
          ++NumLoadsWantToChangeOrder;
        }
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
        return;
      }
    }
    ++NumLoadsWantToKeepOrder;
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of loads.\n");
    return;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();
    for (unsigned i = 0; i < VL.size(); ++i) {
      Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
      if (Ty != SrcTy || !isValidElementType(Ty)) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of casts.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Check that all of the compares have the same predicate.
    CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
    Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
    for (unsigned i = 1, e = VL.size(); i < e; ++i) {
      CmpInst *Cmp = cast<CmpInst>(VL[i]);
      if (Cmp->getPredicate() != P0 ||
          Cmp->getOperand(0)->getType() != ComparedTy) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of compares.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of bin op.\n");

    // Sort operands of the instructions so that each side is more likely to
    // have the same opcode.
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
      ValueList Left, Right;
      reorderInputsAccordingToOpcode(VL, Left, Right);
      buildTree_rec(Left, Depth + 1);
      buildTree_rec(Right, Depth + 1);
      return;
    }

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth+1);
    }
    return;
  }
  case Instruction::GetElementPtr: {
    // We don't combine GEPs with complicated (nested) indexing.
    for (unsigned j = 0; j < VL.size(); ++j) {
      if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
        DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        return;
      }
    }

    // We can't combine several GEPs into one vector if they operate on
    // different types.
    Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
    for (unsigned j = 0; j < VL.size(); ++j) {
      Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
      if (Ty0 != CurTy) {
        DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        return;
      }
    }

    // We don't combine GEPs with non-constant indexes.
    for (unsigned j = 0; j < VL.size(); ++j) {
      auto Op = cast<Instruction>(VL[j])->getOperand(1);
      if (!isa<ConstantInt>(Op)) {
        DEBUG(
            dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
    for (unsigned i = 0, e = 2; i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::Store: {
    const DataLayout &DL = F->getParent()->getDataLayout();
    // Check if the stores are consecutive or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
      if (!isConsecutiveAccess(VL[i], VL[i + 1], DL)) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
        return;
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of stores.\n");

    ValueList Operands;
    for (unsigned j = 0; j < VL.size(); ++j)
      Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));

    buildTree_rec(Operands, Depth + 1);
    return;
  }
  case Instruction::Call: {
    // Check if the calls are all to the same vectorizable intrinsic.
    CallInst *CI = cast<CallInst>(VL[0]);
    // Check if this is an Intrinsic call or something that can be
    // represented by an intrinsic call.
    Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
    if (!isTriviallyVectorizable(ID)) {
      BS.cancelScheduling(VL);
      newTreeEntry(VL, false);
      DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
      return;
    }
    Function *Int = CI->getCalledFunction();
    Value *A1I = nullptr;
    if (hasVectorInstrinsicScalarOpd(ID, 1))
      A1I = CI->getArgOperand(1);
    for (unsigned i = 1, e = VL.size(); i != e; ++i) {
      CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
      if (!CI2 || CI2->getCalledFunction() != Int ||
          getIntrinsicIDForCall(CI2, TLI) != ID) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
                     << "\n");
        return;
      }
      // ctlz, cttz and powi are special intrinsics whose second argument
      // should be the same in order for them to be vectorized.
      if (hasVectorInstrinsicScalarOpd(ID, 1)) {
        Value *A1J = CI2->getArgOperand(1);
        if (A1I != A1J) {
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
                       << " argument " << A1I << "!=" << A1J
                       << "\n");
          return;
        }
      }
    }

    newTreeEntry(VL, true);
    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j) {
        CallInst *CI2 = dyn_cast<CallInst>(VL[j]);
        Operands.push_back(CI2->getArgOperand(i));
      }
      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ShuffleVector: {
    // If this is not an alternate sequence of opcodes like add-sub
    // then do not vectorize this instruction.
    if (!isAltShuffle) {
      BS.cancelScheduling(VL);
      newTreeEntry(VL, false);
      DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
      return;
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");

    // Reorder operands if reordering would enable vectorization.
    if (isa<BinaryOperator>(VL0)) {
      ValueList Left, Right;
      reorderAltShuffleOperands(VL, Left, Right);
      buildTree_rec(Left, Depth + 1);
      buildTree_rec(Right, Depth + 1);
      return;
    }

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  default:
    BS.cancelScheduling(VL);
    newTreeEntry(VL, false);
    DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
    return;
  }
}

int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value*> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  if (E->NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isSplat(VL)) {
      return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    }
    return getGatherCost(E->Scalars);
  }
  unsigned Opcode = getSameOpcode(VL);
  assert(Opcode && getSameType(VL) && getSameBlock(VL) && "Invalid VL");
  Instruction *VL0 = cast<Instruction>(VL[0]);
  switch (Opcode) {
  case Instruction::PHI: {
    return 0;
  }
  case Instruction::ExtractElement: {
    if (CanReuseExtract(VL)) {
      int DeadCost = 0;
      for (unsigned i = 0, e = VL.size(); i < e; ++i) {
        ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
        if (E->hasOneUse())
          // Take credit for instruction that will become dead.
          DeadCost +=
              TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
      }
      return -DeadCost;
    }
    return getGatherCost(VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();

    // Calculate the cost of this instruction.
    int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
                                                       VL0->getType(), SrcTy);

    VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
    int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
    return VecCost - ScalarCost;
  }
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Calculate the cost of this instruction.
    int ScalarCost = 0;
    int VecCost = 0;
    if (Opcode == Instruction::FCmp || Opcode == Instruction::ICmp ||
        Opcode == Instruction::Select) {
      VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
      ScalarCost = VecTy->getNumElements() *
          TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
      VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
    } else {
      // Certain instructions can be cheaper to vectorize if they have a
      // constant second vector operand.
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;
      TargetTransformInfo::OperandValueProperties Op1VP =
          TargetTransformInfo::OP_None;
      TargetTransformInfo::OperandValueProperties Op2VP =
          TargetTransformInfo::OP_None;

      // If all operands are exactly the same ConstantInt then set the
      // operand kind to OK_UniformConstantValue.
      // If instead not all operands are constants, then set the operand kind
      // to OK_AnyValue. If all operands are constants but not the same,
      // then set the operand kind to OK_NonUniformConstantValue.
      ConstantInt *CInt = nullptr;
      for (unsigned i = 0; i < VL.size(); ++i) {
        const Instruction *I = cast<Instruction>(VL[i]);
        if (!isa<ConstantInt>(I->getOperand(1))) {
          Op2VK = TargetTransformInfo::OK_AnyValue;
          break;
        }
        if (i == 0) {
          CInt = cast<ConstantInt>(I->getOperand(1));
          continue;
        }
        if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
            CInt != cast<ConstantInt>(I->getOperand(1)))
          Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      }
      // FIXME: Currently cost model modification for division by a power
      // of 2 is handled only for X86. Add support for other targets.
      if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt &&
          CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;

      ScalarCost = VecTy->getNumElements() *
          TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK,
                                      Op1VP, Op2VP);
      VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK,
                                            Op1VP, Op2VP);
    }
    return VecCost - ScalarCost;
  }
  case Instruction::GetElementPtr: {
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_UniformConstantValue;

    int ScalarCost =
        VecTy->getNumElements() *
        TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
    int VecCost =
        TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);

    return VecCost - ScalarCost;
  }
  case Instruction::Load: {
    // Cost of wide load - cost of scalar loads.
    int ScalarLdCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
    int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
    return VecLdCost - ScalarLdCost;
  }
  case Instruction::Store: {
    // We know that we can merge the stores. Calculate the cost.
    int ScalarStCost = VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
    int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
    return VecStCost - ScalarStCost;
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);

    // Calculate the cost of the scalar and vector calls.
    SmallVector<Type*, 4> ScalarTys, VecTys;
    for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op) {
      ScalarTys.push_back(CI->getArgOperand(op)->getType());
      VecTys.push_back(VectorType::get(CI->getArgOperand(op)->getType(),
                                       VecTy->getNumElements()));
    }

    int ScalarCallCost = VecTy->getNumElements() *
        TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys);

    int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys);

    DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
          << " (" << VecCallCost << "-" << ScalarCallCost << ")"
          << " for " << *CI << "\n");

    return VecCallCost - ScalarCallCost;
  }
  case Instruction::ShuffleVector: {
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_AnyValue;
    int ScalarCost = 0;
    int VecCost = 0;
    for (unsigned i = 0; i < VL.size(); ++i) {
      Instruction *I = cast<Instruction>(VL[i]);
      if (!I)
        break;
      ScalarCost +=
          TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
    }
    // VecCost is equal to sum of the cost of creating 2 vectors
    // and the cost of creating shuffle.
1671 Instruction *I0 = cast<Instruction>(VL[0]); 1672 VecCost = 1673 TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK); 1674 Instruction *I1 = cast<Instruction>(VL[1]); 1675 VecCost += 1676 TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK); 1677 VecCost += 1678 TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0); 1679 return VecCost - ScalarCost; 1680 } 1681 default: 1682 llvm_unreachable("Unknown instruction"); 1683 } 1684 } 1685 1686 bool BoUpSLP::isFullyVectorizableTinyTree() { 1687 DEBUG(dbgs() << "SLP: Check whether the tree with height " << 1688 VectorizableTree.size() << " is fully vectorizable .\n"); 1689 1690 // We only handle trees of height 2. 1691 if (VectorizableTree.size() != 2) 1692 return false; 1693 1694 // Handle splat and all-constants stores. 1695 if (!VectorizableTree[0].NeedToGather && 1696 (allConstant(VectorizableTree[1].Scalars) || 1697 isSplat(VectorizableTree[1].Scalars))) 1698 return true; 1699 1700 // Gathering cost would be too much for tiny trees. 1701 if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather) 1702 return false; 1703 1704 return true; 1705 } 1706 1707 int BoUpSLP::getSpillCost() { 1708 // Walk from the bottom of the tree to the top, tracking which values are 1709 // live. When we see a call instruction that is not part of our tree, 1710 // query TTI to see if there is a cost to keeping values live over it 1711 // (for example, if spills and fills are required). 1712 unsigned BundleWidth = VectorizableTree.front().Scalars.size(); 1713 int Cost = 0; 1714 1715 SmallPtrSet<Instruction*, 4> LiveValues; 1716 Instruction *PrevInst = nullptr; 1717 1718 for (unsigned N = 0; N < VectorizableTree.size(); ++N) { 1719 Instruction *Inst = dyn_cast<Instruction>(VectorizableTree[N].Scalars[0]); 1720 if (!Inst) 1721 continue; 1722 1723 if (!PrevInst) { 1724 PrevInst = Inst; 1725 continue; 1726 } 1727 1728 DEBUG( 1729 dbgs() << "SLP: #LV: " << LiveValues.size(); 1730 for (auto *X : LiveValues) 1731 dbgs() << " " << X->getName(); 1732 dbgs() << ", Looking at "; 1733 Inst->dump(); 1734 ); 1735 1736 // Update LiveValues. 1737 LiveValues.erase(PrevInst); 1738 for (auto &J : PrevInst->operands()) { 1739 if (isa<Instruction>(&*J) && ScalarToTreeEntry.count(&*J)) 1740 LiveValues.insert(cast<Instruction>(&*J)); 1741 } 1742 1743 // Now find the sequence of instructions between PrevInst and Inst. 1744 BasicBlock::reverse_iterator InstIt(Inst->getIterator()), 1745 PrevInstIt(PrevInst->getIterator()); 1746 --PrevInstIt; 1747 while (InstIt != PrevInstIt) { 1748 if (PrevInstIt == PrevInst->getParent()->rend()) { 1749 PrevInstIt = Inst->getParent()->rbegin(); 1750 continue; 1751 } 1752 1753 if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) { 1754 SmallVector<Type*, 4> V; 1755 for (auto *II : LiveValues) 1756 V.push_back(VectorType::get(II->getType(), BundleWidth)); 1757 Cost += TTI->getCostOfKeepingLiveOverCall(V); 1758 } 1759 1760 ++PrevInstIt; 1761 } 1762 1763 PrevInst = Inst; 1764 } 1765 1766 DEBUG(dbgs() << "SLP: SpillCost=" << Cost << "\n"); 1767 return Cost; 1768 } 1769 1770 int BoUpSLP::getTreeCost() { 1771 int Cost = 0; 1772 DEBUG(dbgs() << "SLP: Calculating cost for tree of size " << 1773 VectorizableTree.size() << ".\n"); 1774 1775 // We only vectorize tiny trees if it is fully vectorizable. 
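  // For example, a height-2 tree of consecutive stores fed by a splat, by
  // all-constant values, or by another fully vectorizable bundle is still
  // accepted; anything else below height three is rejected by returning
  // INT_MAX so the caller never treats it as profitable.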
1776 if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) { 1777 if (VectorizableTree.empty()) { 1778 assert(!ExternalUses.size() && "We should not have any external users"); 1779 } 1780 return INT_MAX; 1781 } 1782 1783 unsigned BundleWidth = VectorizableTree[0].Scalars.size(); 1784 1785 for (TreeEntry &TE : VectorizableTree) { 1786 int C = getEntryCost(&TE); 1787 DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with " 1788 << TE.Scalars[0] << " .\n"); 1789 Cost += C; 1790 } 1791 1792 SmallSet<Value *, 16> ExtractCostCalculated; 1793 int ExtractCost = 0; 1794 for (ExternalUser &EU : ExternalUses) { 1795 // We only add extract cost once for the same scalar. 1796 if (!ExtractCostCalculated.insert(EU.Scalar).second) 1797 continue; 1798 1799 // Uses by ephemeral values are free (because the ephemeral value will be 1800 // removed prior to code generation, and so the extraction will be 1801 // removed as well). 1802 if (EphValues.count(EU.User)) 1803 continue; 1804 1805 VectorType *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth); 1806 ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, 1807 EU.Lane); 1808 } 1809 1810 Cost += getSpillCost(); 1811 1812 DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost<< ".\n"); 1813 return Cost + ExtractCost; 1814 } 1815 1816 int BoUpSLP::getGatherCost(Type *Ty) { 1817 int Cost = 0; 1818 for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i) 1819 Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i); 1820 return Cost; 1821 } 1822 1823 int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) { 1824 // Find the type of the operands in VL. 1825 Type *ScalarTy = VL[0]->getType(); 1826 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 1827 ScalarTy = SI->getValueOperand()->getType(); 1828 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 1829 // Find the cost of inserting/extracting values from the vector. 1830 return getGatherCost(VecTy); 1831 } 1832 1833 Value *BoUpSLP::getPointerOperand(Value *I) { 1834 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 1835 return LI->getPointerOperand(); 1836 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 1837 return SI->getPointerOperand(); 1838 return nullptr; 1839 } 1840 1841 unsigned BoUpSLP::getAddressSpaceOperand(Value *I) { 1842 if (LoadInst *L = dyn_cast<LoadInst>(I)) 1843 return L->getPointerAddressSpace(); 1844 if (StoreInst *S = dyn_cast<StoreInst>(I)) 1845 return S->getPointerAddressSpace(); 1846 return -1; 1847 } 1848 1849 bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL) { 1850 Value *PtrA = getPointerOperand(A); 1851 Value *PtrB = getPointerOperand(B); 1852 unsigned ASA = getAddressSpaceOperand(A); 1853 unsigned ASB = getAddressSpaceOperand(B); 1854 1855 // Check that the address spaces match and that the pointers are valid. 1856 if (!PtrA || !PtrB || (ASA != ASB)) 1857 return false; 1858 1859 // Make sure that A and B are different pointers of the same type. 
1860 if (PtrA == PtrB || PtrA->getType() != PtrB->getType()) 1861 return false; 1862 1863 unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA); 1864 Type *Ty = cast<PointerType>(PtrA->getType())->getElementType(); 1865 APInt Size(PtrBitWidth, DL.getTypeStoreSize(Ty)); 1866 1867 APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0); 1868 PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA); 1869 PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB); 1870 1871 APInt OffsetDelta = OffsetB - OffsetA; 1872 1873 // Check if they are based on the same pointer. That makes the offsets 1874 // sufficient. 1875 if (PtrA == PtrB) 1876 return OffsetDelta == Size; 1877 1878 // Compute the necessary base pointer delta to have the necessary final delta 1879 // equal to the size. 1880 APInt BaseDelta = Size - OffsetDelta; 1881 1882 // Otherwise compute the distance with SCEV between the base pointers. 1883 const SCEV *PtrSCEVA = SE->getSCEV(PtrA); 1884 const SCEV *PtrSCEVB = SE->getSCEV(PtrB); 1885 const SCEV *C = SE->getConstant(BaseDelta); 1886 const SCEV *X = SE->getAddExpr(PtrSCEVA, C); 1887 return X == PtrSCEVB; 1888 } 1889 1890 // Reorder commutative operations in alternate shuffle if the resulting vectors 1891 // are consecutive loads. This would allow us to vectorize the tree. 1892 // If we have something like- 1893 // load a[0] - load b[0] 1894 // load b[1] + load a[1] 1895 // load a[2] - load b[2] 1896 // load a[3] + load b[3] 1897 // Reordering the second load b[1] load a[1] would allow us to vectorize this 1898 // code. 1899 void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL, 1900 SmallVectorImpl<Value *> &Left, 1901 SmallVectorImpl<Value *> &Right) { 1902 const DataLayout &DL = F->getParent()->getDataLayout(); 1903 1904 // Push left and right operands of binary operation into Left and Right 1905 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 1906 Left.push_back(cast<Instruction>(VL[i])->getOperand(0)); 1907 Right.push_back(cast<Instruction>(VL[i])->getOperand(1)); 1908 } 1909 1910 // Reorder if we have a commutative operation and consecutive access 1911 // are on either side of the alternate instructions. 1912 for (unsigned j = 0; j < VL.size() - 1; ++j) { 1913 if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) { 1914 if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) { 1915 Instruction *VL1 = cast<Instruction>(VL[j]); 1916 Instruction *VL2 = cast<Instruction>(VL[j + 1]); 1917 if (isConsecutiveAccess(L, L1, DL) && VL1->isCommutative()) { 1918 std::swap(Left[j], Right[j]); 1919 continue; 1920 } else if (isConsecutiveAccess(L, L1, DL) && VL2->isCommutative()) { 1921 std::swap(Left[j + 1], Right[j + 1]); 1922 continue; 1923 } 1924 // else unchanged 1925 } 1926 } 1927 if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) { 1928 if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) { 1929 Instruction *VL1 = cast<Instruction>(VL[j]); 1930 Instruction *VL2 = cast<Instruction>(VL[j + 1]); 1931 if (isConsecutiveAccess(L, L1, DL) && VL1->isCommutative()) { 1932 std::swap(Left[j], Right[j]); 1933 continue; 1934 } else if (isConsecutiveAccess(L, L1, DL) && VL2->isCommutative()) { 1935 std::swap(Left[j + 1], Right[j + 1]); 1936 continue; 1937 } 1938 // else unchanged 1939 } 1940 } 1941 } 1942 } 1943 1944 // Return true if I should be commuted before adding it's left and right 1945 // operands to the arrays Left and Right. 
//
// The vectorizer tries either to have all elements on one side be
// instructions with the same opcode, to enable further vectorization, or to
// have a splat on one side, to lower the vectorization cost.
static bool shouldReorderOperands(int i, Instruction &I,
                                  SmallVectorImpl<Value *> &Left,
                                  SmallVectorImpl<Value *> &Right,
                                  bool AllSameOpcodeLeft,
                                  bool AllSameOpcodeRight, bool SplatLeft,
                                  bool SplatRight) {
  Value *VLeft = I.getOperand(0);
  Value *VRight = I.getOperand(1);
  // If we have "SplatRight", try to see if commuting is needed to preserve it.
  if (SplatRight) {
    if (VRight == Right[i - 1])
      // Preserve SplatRight
      return false;
    if (VLeft == Right[i - 1]) {
      // Commuting would preserve SplatRight, but we don't want to break
      // SplatLeft either, i.e. preserve the original order if possible.
      // (FIXME: why do we care?)
      if (SplatLeft && VLeft == Left[i - 1])
        return false;
      return true;
    }
  }
  // Symmetrically handle the SplatLeft case.
  if (SplatLeft) {
    if (VLeft == Left[i - 1])
      // Preserve SplatLeft
      return false;
    if (VRight == Left[i - 1])
      return true;
  }

  Instruction *ILeft = dyn_cast<Instruction>(VLeft);
  Instruction *IRight = dyn_cast<Instruction>(VRight);

  // If we have "AllSameOpcodeRight", check whether the left operand would
  // preserve it while the right one would not; in that case we want to
  // commute.
  if (AllSameOpcodeRight) {
    unsigned RightPrevOpcode = cast<Instruction>(Right[i - 1])->getOpcode();
    if (IRight && RightPrevOpcode == IRight->getOpcode())
      // Do not commute, a match on the right preserves AllSameOpcodeRight.
      return false;
    if (ILeft && RightPrevOpcode == ILeft->getOpcode()) {
      // We have a match and may want to commute, but first check if there is
      // not also a match on the existing operands on the Left to preserve
      // AllSameOpcodeLeft, i.e. preserve the original order if possible.
      // (FIXME: why do we care?)
      if (AllSameOpcodeLeft && ILeft &&
          cast<Instruction>(Left[i - 1])->getOpcode() == ILeft->getOpcode())
        return false;
      return true;
    }
  }
  // Symmetrically handle the AllSameOpcodeLeft case.
  if (AllSameOpcodeLeft) {
    unsigned LeftPrevOpcode = cast<Instruction>(Left[i - 1])->getOpcode();
    if (ILeft && LeftPrevOpcode == ILeft->getOpcode())
      return false;
    if (IRight && LeftPrevOpcode == IRight->getOpcode())
      return true;
  }
  return false;
}

void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                             SmallVectorImpl<Value *> &Left,
                                             SmallVectorImpl<Value *> &Right) {

  if (VL.size()) {
    // Peel the first iteration out of the loop since there's nothing
    // interesting to do anyway and it simplifies the checks in the loop.
    auto VLeft = cast<Instruction>(VL[0])->getOperand(0);
    auto VRight = cast<Instruction>(VL[0])->getOperand(1);
    if (!isa<Instruction>(VRight) && isa<Instruction>(VLeft))
      // Favor having an instruction on the right. FIXME: why?
      std::swap(VLeft, VRight);
    Left.push_back(VLeft);
    Right.push_back(VRight);
  }

  // Keep track of whether we have instructions with all the same opcode on
  // one side.
  bool AllSameOpcodeLeft = isa<Instruction>(Left[0]);
  bool AllSameOpcodeRight = isa<Instruction>(Right[0]);
  // Keep track of whether one side holds a single repeated value (broadcast).
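  // For example, for the bundle {a + x, b + x, c + x, d + x} keeping x on one
  // side produces a splat operand that the backend can materialize with a
  // single broadcast.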
2033 bool SplatLeft = true; 2034 bool SplatRight = true; 2035 2036 for (unsigned i = 1, e = VL.size(); i != e; ++i) { 2037 Instruction *I = cast<Instruction>(VL[i]); 2038 assert(I->isCommutative() && "Can only process commutative instruction"); 2039 // Commute to favor either a splat or maximizing having the same opcodes on 2040 // one side. 2041 if (shouldReorderOperands(i, *I, Left, Right, AllSameOpcodeLeft, 2042 AllSameOpcodeRight, SplatLeft, SplatRight)) { 2043 Left.push_back(I->getOperand(1)); 2044 Right.push_back(I->getOperand(0)); 2045 } else { 2046 Left.push_back(I->getOperand(0)); 2047 Right.push_back(I->getOperand(1)); 2048 } 2049 // Update Splat* and AllSameOpcode* after the insertion. 2050 SplatRight = SplatRight && (Right[i - 1] == Right[i]); 2051 SplatLeft = SplatLeft && (Left[i - 1] == Left[i]); 2052 AllSameOpcodeLeft = AllSameOpcodeLeft && isa<Instruction>(Left[i]) && 2053 (cast<Instruction>(Left[i - 1])->getOpcode() == 2054 cast<Instruction>(Left[i])->getOpcode()); 2055 AllSameOpcodeRight = AllSameOpcodeRight && isa<Instruction>(Right[i]) && 2056 (cast<Instruction>(Right[i - 1])->getOpcode() == 2057 cast<Instruction>(Right[i])->getOpcode()); 2058 } 2059 2060 // If one operand end up being broadcast, return this operand order. 2061 if (SplatRight || SplatLeft) 2062 return; 2063 2064 const DataLayout &DL = F->getParent()->getDataLayout(); 2065 2066 // Finally check if we can get longer vectorizable chain by reordering 2067 // without breaking the good operand order detected above. 2068 // E.g. If we have something like- 2069 // load a[0] load b[0] 2070 // load b[1] load a[1] 2071 // load a[2] load b[2] 2072 // load a[3] load b[3] 2073 // Reordering the second load b[1] load a[1] would allow us to vectorize 2074 // this code and we still retain AllSameOpcode property. 2075 // FIXME: This load reordering might break AllSameOpcode in some rare cases 2076 // such as- 2077 // add a[0],c[0] load b[0] 2078 // add a[1],c[2] load b[1] 2079 // b[2] load b[2] 2080 // add a[3],c[3] load b[3] 2081 for (unsigned j = 0; j < VL.size() - 1; ++j) { 2082 if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) { 2083 if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) { 2084 if (isConsecutiveAccess(L, L1, DL)) { 2085 std::swap(Left[j + 1], Right[j + 1]); 2086 continue; 2087 } 2088 } 2089 } 2090 if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) { 2091 if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) { 2092 if (isConsecutiveAccess(L, L1, DL)) { 2093 std::swap(Left[j + 1], Right[j + 1]); 2094 continue; 2095 } 2096 } 2097 } 2098 // else unchanged 2099 } 2100 } 2101 2102 void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) { 2103 Instruction *VL0 = cast<Instruction>(VL[0]); 2104 BasicBlock::iterator NextInst(VL0); 2105 ++NextInst; 2106 Builder.SetInsertPoint(VL0->getParent(), NextInst); 2107 Builder.SetCurrentDebugLocation(VL0->getDebugLoc()); 2108 } 2109 2110 Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) { 2111 Value *Vec = UndefValue::get(Ty); 2112 // Generate the 'InsertElement' instruction. 2113 for (unsigned i = 0; i < Ty->getNumElements(); ++i) { 2114 Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i)); 2115 if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) { 2116 GatherSeq.insert(Insrt); 2117 CSEBlocks.insert(Insrt->getParent()); 2118 2119 // Add to our 'need-to-extract' list. 2120 if (ScalarToTreeEntry.count(VL[i])) { 2121 int Idx = ScalarToTreeEntry[VL[i]]; 2122 TreeEntry *E = &VectorizableTree[Idx]; 2123 // Find which lane we need to extract. 
2124 int FoundLane = -1; 2125 for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) { 2126 // Is this the lane of the scalar that we are looking for ? 2127 if (E->Scalars[Lane] == VL[i]) { 2128 FoundLane = Lane; 2129 break; 2130 } 2131 } 2132 assert(FoundLane >= 0 && "Could not find the correct lane"); 2133 ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane)); 2134 } 2135 } 2136 } 2137 2138 return Vec; 2139 } 2140 2141 Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const { 2142 SmallDenseMap<Value*, int>::const_iterator Entry 2143 = ScalarToTreeEntry.find(VL[0]); 2144 if (Entry != ScalarToTreeEntry.end()) { 2145 int Idx = Entry->second; 2146 const TreeEntry *En = &VectorizableTree[Idx]; 2147 if (En->isSame(VL) && En->VectorizedValue) 2148 return En->VectorizedValue; 2149 } 2150 return nullptr; 2151 } 2152 2153 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) { 2154 if (ScalarToTreeEntry.count(VL[0])) { 2155 int Idx = ScalarToTreeEntry[VL[0]]; 2156 TreeEntry *E = &VectorizableTree[Idx]; 2157 if (E->isSame(VL)) 2158 return vectorizeTree(E); 2159 } 2160 2161 Type *ScalarTy = VL[0]->getType(); 2162 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 2163 ScalarTy = SI->getValueOperand()->getType(); 2164 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 2165 2166 return Gather(VL, VecTy); 2167 } 2168 2169 Value *BoUpSLP::vectorizeTree(TreeEntry *E) { 2170 IRBuilder<>::InsertPointGuard Guard(Builder); 2171 2172 if (E->VectorizedValue) { 2173 DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); 2174 return E->VectorizedValue; 2175 } 2176 2177 Instruction *VL0 = cast<Instruction>(E->Scalars[0]); 2178 Type *ScalarTy = VL0->getType(); 2179 if (StoreInst *SI = dyn_cast<StoreInst>(VL0)) 2180 ScalarTy = SI->getValueOperand()->getType(); 2181 VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size()); 2182 2183 if (E->NeedToGather) { 2184 setInsertPointAfterBundle(E->Scalars); 2185 return Gather(E->Scalars, VecTy); 2186 } 2187 2188 const DataLayout &DL = F->getParent()->getDataLayout(); 2189 unsigned Opcode = getSameOpcode(E->Scalars); 2190 2191 switch (Opcode) { 2192 case Instruction::PHI: { 2193 PHINode *PH = dyn_cast<PHINode>(VL0); 2194 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI()); 2195 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 2196 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 2197 E->VectorizedValue = NewPhi; 2198 2199 // PHINodes may have multiple entries from the same block. We want to 2200 // visit every block once. 2201 SmallSet<BasicBlock*, 4> VisitedBBs; 2202 2203 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 2204 ValueList Operands; 2205 BasicBlock *IBB = PH->getIncomingBlock(i); 2206 2207 if (!VisitedBBs.insert(IBB).second) { 2208 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 2209 continue; 2210 } 2211 2212 // Prepare the operand vector. 
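      // For a bundle of PHIs {p0, p1, p2, p3} the operand bundle for incoming
      // block IBB is {p0.in(IBB), p1.in(IBB), p2.in(IBB), p3.in(IBB)}; it is
      // vectorized at the end of IBB and the resulting vector becomes the new
      // PHI's incoming value for that block.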
2213 for (Value *V : E->Scalars) 2214 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(IBB)); 2215 2216 Builder.SetInsertPoint(IBB->getTerminator()); 2217 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 2218 Value *Vec = vectorizeTree(Operands); 2219 NewPhi->addIncoming(Vec, IBB); 2220 } 2221 2222 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 2223 "Invalid number of incoming values"); 2224 return NewPhi; 2225 } 2226 2227 case Instruction::ExtractElement: { 2228 if (CanReuseExtract(E->Scalars)) { 2229 Value *V = VL0->getOperand(0); 2230 E->VectorizedValue = V; 2231 return V; 2232 } 2233 return Gather(E->Scalars, VecTy); 2234 } 2235 case Instruction::ZExt: 2236 case Instruction::SExt: 2237 case Instruction::FPToUI: 2238 case Instruction::FPToSI: 2239 case Instruction::FPExt: 2240 case Instruction::PtrToInt: 2241 case Instruction::IntToPtr: 2242 case Instruction::SIToFP: 2243 case Instruction::UIToFP: 2244 case Instruction::Trunc: 2245 case Instruction::FPTrunc: 2246 case Instruction::BitCast: { 2247 ValueList INVL; 2248 for (Value *V : E->Scalars) 2249 INVL.push_back(cast<Instruction>(V)->getOperand(0)); 2250 2251 setInsertPointAfterBundle(E->Scalars); 2252 2253 Value *InVec = vectorizeTree(INVL); 2254 2255 if (Value *V = alreadyVectorized(E->Scalars)) 2256 return V; 2257 2258 CastInst *CI = dyn_cast<CastInst>(VL0); 2259 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy); 2260 E->VectorizedValue = V; 2261 ++NumVectorInstructions; 2262 return V; 2263 } 2264 case Instruction::FCmp: 2265 case Instruction::ICmp: { 2266 ValueList LHSV, RHSV; 2267 for (Value *V : E->Scalars) { 2268 LHSV.push_back(cast<Instruction>(V)->getOperand(0)); 2269 RHSV.push_back(cast<Instruction>(V)->getOperand(1)); 2270 } 2271 2272 setInsertPointAfterBundle(E->Scalars); 2273 2274 Value *L = vectorizeTree(LHSV); 2275 Value *R = vectorizeTree(RHSV); 2276 2277 if (Value *V = alreadyVectorized(E->Scalars)) 2278 return V; 2279 2280 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 2281 Value *V; 2282 if (Opcode == Instruction::FCmp) 2283 V = Builder.CreateFCmp(P0, L, R); 2284 else 2285 V = Builder.CreateICmp(P0, L, R); 2286 2287 E->VectorizedValue = V; 2288 ++NumVectorInstructions; 2289 return V; 2290 } 2291 case Instruction::Select: { 2292 ValueList TrueVec, FalseVec, CondVec; 2293 for (Value *V : E->Scalars) { 2294 CondVec.push_back(cast<Instruction>(V)->getOperand(0)); 2295 TrueVec.push_back(cast<Instruction>(V)->getOperand(1)); 2296 FalseVec.push_back(cast<Instruction>(V)->getOperand(2)); 2297 } 2298 2299 setInsertPointAfterBundle(E->Scalars); 2300 2301 Value *Cond = vectorizeTree(CondVec); 2302 Value *True = vectorizeTree(TrueVec); 2303 Value *False = vectorizeTree(FalseVec); 2304 2305 if (Value *V = alreadyVectorized(E->Scalars)) 2306 return V; 2307 2308 Value *V = Builder.CreateSelect(Cond, True, False); 2309 E->VectorizedValue = V; 2310 ++NumVectorInstructions; 2311 return V; 2312 } 2313 case Instruction::Add: 2314 case Instruction::FAdd: 2315 case Instruction::Sub: 2316 case Instruction::FSub: 2317 case Instruction::Mul: 2318 case Instruction::FMul: 2319 case Instruction::UDiv: 2320 case Instruction::SDiv: 2321 case Instruction::FDiv: 2322 case Instruction::URem: 2323 case Instruction::SRem: 2324 case Instruction::FRem: 2325 case Instruction::Shl: 2326 case Instruction::LShr: 2327 case Instruction::AShr: 2328 case Instruction::And: 2329 case Instruction::Or: 2330 case Instruction::Xor: { 2331 ValueList LHSVL, RHSVL; 2332 if (isa<BinaryOperator>(VL0) && 
VL0->isCommutative()) 2333 reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL); 2334 else 2335 for (Value *V : E->Scalars) { 2336 LHSVL.push_back(cast<Instruction>(V)->getOperand(0)); 2337 RHSVL.push_back(cast<Instruction>(V)->getOperand(1)); 2338 } 2339 2340 setInsertPointAfterBundle(E->Scalars); 2341 2342 Value *LHS = vectorizeTree(LHSVL); 2343 Value *RHS = vectorizeTree(RHSVL); 2344 2345 if (LHS == RHS && isa<Instruction>(LHS)) { 2346 assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order"); 2347 } 2348 2349 if (Value *V = alreadyVectorized(E->Scalars)) 2350 return V; 2351 2352 BinaryOperator *BinOp = cast<BinaryOperator>(VL0); 2353 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS); 2354 E->VectorizedValue = V; 2355 propagateIRFlags(E->VectorizedValue, E->Scalars); 2356 ++NumVectorInstructions; 2357 2358 if (Instruction *I = dyn_cast<Instruction>(V)) 2359 return propagateMetadata(I, E->Scalars); 2360 2361 return V; 2362 } 2363 case Instruction::Load: { 2364 // Loads are inserted at the head of the tree because we don't want to 2365 // sink them all the way down past store instructions. 2366 setInsertPointAfterBundle(E->Scalars); 2367 2368 LoadInst *LI = cast<LoadInst>(VL0); 2369 Type *ScalarLoadTy = LI->getType(); 2370 unsigned AS = LI->getPointerAddressSpace(); 2371 2372 Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(), 2373 VecTy->getPointerTo(AS)); 2374 2375 // The pointer operand uses an in-tree scalar so we add the new BitCast to 2376 // ExternalUses list to make sure that an extract will be generated in the 2377 // future. 2378 if (ScalarToTreeEntry.count(LI->getPointerOperand())) 2379 ExternalUses.push_back( 2380 ExternalUser(LI->getPointerOperand(), cast<User>(VecPtr), 0)); 2381 2382 unsigned Alignment = LI->getAlignment(); 2383 LI = Builder.CreateLoad(VecPtr); 2384 if (!Alignment) { 2385 Alignment = DL.getABITypeAlignment(ScalarLoadTy); 2386 } 2387 LI->setAlignment(Alignment); 2388 E->VectorizedValue = LI; 2389 ++NumVectorInstructions; 2390 return propagateMetadata(LI, E->Scalars); 2391 } 2392 case Instruction::Store: { 2393 StoreInst *SI = cast<StoreInst>(VL0); 2394 unsigned Alignment = SI->getAlignment(); 2395 unsigned AS = SI->getPointerAddressSpace(); 2396 2397 ValueList ValueOp; 2398 for (Value *V : E->Scalars) 2399 ValueOp.push_back(cast<StoreInst>(V)->getValueOperand()); 2400 2401 setInsertPointAfterBundle(E->Scalars); 2402 2403 Value *VecValue = vectorizeTree(ValueOp); 2404 Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(), 2405 VecTy->getPointerTo(AS)); 2406 StoreInst *S = Builder.CreateStore(VecValue, VecPtr); 2407 2408 // The pointer operand uses an in-tree scalar so we add the new BitCast to 2409 // ExternalUses list to make sure that an extract will be generated in the 2410 // future. 
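    // Without this entry the scalar pointer would keep being used by the
    // bitcast after its own bundle is vectorized, and would then be replaced
    // by undef when the scalar is erased; recording the use forces an
    // extractelement to be generated and substituted instead.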
2411 if (ScalarToTreeEntry.count(SI->getPointerOperand())) 2412 ExternalUses.push_back( 2413 ExternalUser(SI->getPointerOperand(), cast<User>(VecPtr), 0)); 2414 2415 if (!Alignment) { 2416 Alignment = DL.getABITypeAlignment(SI->getValueOperand()->getType()); 2417 } 2418 S->setAlignment(Alignment); 2419 E->VectorizedValue = S; 2420 ++NumVectorInstructions; 2421 return propagateMetadata(S, E->Scalars); 2422 } 2423 case Instruction::GetElementPtr: { 2424 setInsertPointAfterBundle(E->Scalars); 2425 2426 ValueList Op0VL; 2427 for (Value *V : E->Scalars) 2428 Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0)); 2429 2430 Value *Op0 = vectorizeTree(Op0VL); 2431 2432 std::vector<Value *> OpVecs; 2433 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e; 2434 ++j) { 2435 ValueList OpVL; 2436 for (Value *V : E->Scalars) 2437 OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j)); 2438 2439 Value *OpVec = vectorizeTree(OpVL); 2440 OpVecs.push_back(OpVec); 2441 } 2442 2443 Value *V = Builder.CreateGEP( 2444 cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs); 2445 E->VectorizedValue = V; 2446 ++NumVectorInstructions; 2447 2448 if (Instruction *I = dyn_cast<Instruction>(V)) 2449 return propagateMetadata(I, E->Scalars); 2450 2451 return V; 2452 } 2453 case Instruction::Call: { 2454 CallInst *CI = cast<CallInst>(VL0); 2455 setInsertPointAfterBundle(E->Scalars); 2456 Function *FI; 2457 Intrinsic::ID IID = Intrinsic::not_intrinsic; 2458 Value *ScalarArg = nullptr; 2459 if (CI && (FI = CI->getCalledFunction())) { 2460 IID = FI->getIntrinsicID(); 2461 } 2462 std::vector<Value *> OpVecs; 2463 for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) { 2464 ValueList OpVL; 2465 // ctlz,cttz and powi are special intrinsics whose second argument is 2466 // a scalar. This argument should not be vectorized. 2467 if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) { 2468 CallInst *CEI = cast<CallInst>(E->Scalars[0]); 2469 ScalarArg = CEI->getArgOperand(j); 2470 OpVecs.push_back(CEI->getArgOperand(j)); 2471 continue; 2472 } 2473 for (Value *V : E->Scalars) { 2474 CallInst *CEI = cast<CallInst>(V); 2475 OpVL.push_back(CEI->getArgOperand(j)); 2476 } 2477 2478 Value *OpVec = vectorizeTree(OpVL); 2479 DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); 2480 OpVecs.push_back(OpVec); 2481 } 2482 2483 Module *M = F->getParent(); 2484 Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI); 2485 Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) }; 2486 Function *CF = Intrinsic::getDeclaration(M, ID, Tys); 2487 Value *V = Builder.CreateCall(CF, OpVecs); 2488 2489 // The scalar argument uses an in-tree scalar so we add the new vectorized 2490 // call to ExternalUses list to make sure that an extract will be 2491 // generated in the future. 
2492 if (ScalarArg && ScalarToTreeEntry.count(ScalarArg)) 2493 ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0)); 2494 2495 E->VectorizedValue = V; 2496 ++NumVectorInstructions; 2497 return V; 2498 } 2499 case Instruction::ShuffleVector: { 2500 ValueList LHSVL, RHSVL; 2501 assert(isa<BinaryOperator>(VL0) && "Invalid Shuffle Vector Operand"); 2502 reorderAltShuffleOperands(E->Scalars, LHSVL, RHSVL); 2503 setInsertPointAfterBundle(E->Scalars); 2504 2505 Value *LHS = vectorizeTree(LHSVL); 2506 Value *RHS = vectorizeTree(RHSVL); 2507 2508 if (Value *V = alreadyVectorized(E->Scalars)) 2509 return V; 2510 2511 // Create a vector of LHS op1 RHS 2512 BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0); 2513 Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS); 2514 2515 // Create a vector of LHS op2 RHS 2516 Instruction *VL1 = cast<Instruction>(E->Scalars[1]); 2517 BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1); 2518 Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS); 2519 2520 // Create shuffle to take alternate operations from the vector. 2521 // Also, gather up odd and even scalar ops to propagate IR flags to 2522 // each vector operation. 2523 ValueList OddScalars, EvenScalars; 2524 unsigned e = E->Scalars.size(); 2525 SmallVector<Constant *, 8> Mask(e); 2526 for (unsigned i = 0; i < e; ++i) { 2527 if (i & 1) { 2528 Mask[i] = Builder.getInt32(e + i); 2529 OddScalars.push_back(E->Scalars[i]); 2530 } else { 2531 Mask[i] = Builder.getInt32(i); 2532 EvenScalars.push_back(E->Scalars[i]); 2533 } 2534 } 2535 2536 Value *ShuffleMask = ConstantVector::get(Mask); 2537 propagateIRFlags(V0, EvenScalars); 2538 propagateIRFlags(V1, OddScalars); 2539 2540 Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask); 2541 E->VectorizedValue = V; 2542 ++NumVectorInstructions; 2543 if (Instruction *I = dyn_cast<Instruction>(V)) 2544 return propagateMetadata(I, E->Scalars); 2545 2546 return V; 2547 } 2548 default: 2549 llvm_unreachable("unknown inst"); 2550 } 2551 return nullptr; 2552 } 2553 2554 Value *BoUpSLP::vectorizeTree() { 2555 2556 // All blocks must be scheduled before any instructions are inserted. 2557 for (auto &BSIter : BlocksSchedules) { 2558 scheduleBlock(BSIter.second.get()); 2559 } 2560 2561 Builder.SetInsertPoint(&F->getEntryBlock().front()); 2562 vectorizeTree(&VectorizableTree[0]); 2563 2564 DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values .\n"); 2565 2566 // Extract all of the elements with the external uses. 2567 for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end(); 2568 it != e; ++it) { 2569 Value *Scalar = it->Scalar; 2570 llvm::User *User = it->User; 2571 2572 // Skip users that we already RAUW. This happens when one instruction 2573 // has multiple uses of the same value. 2574 if (std::find(Scalar->user_begin(), Scalar->user_end(), User) == 2575 Scalar->user_end()) 2576 continue; 2577 assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar"); 2578 2579 int Idx = ScalarToTreeEntry[Scalar]; 2580 TreeEntry *E = &VectorizableTree[Idx]; 2581 assert(!E->NeedToGather && "Extracting from a gather list"); 2582 2583 Value *Vec = E->VectorizedValue; 2584 assert(Vec && "Can't find vectorizable value"); 2585 2586 Value *Lane = Builder.getInt32(it->Lane); 2587 // Generate extracts for out-of-tree users. 2588 // Find the insertion point for the extractelement lane. 
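    // A PHI user gets the extract in the terminator of the matching incoming
    // block, any other instruction gets it immediately before itself, and if
    // the vectorized value is not an instruction (e.g. a vector function
    // argument reused through the CanReuseExtract path) the extract is placed
    // at the start of the entry block.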
2589 if (isa<Instruction>(Vec)){ 2590 if (PHINode *PH = dyn_cast<PHINode>(User)) { 2591 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { 2592 if (PH->getIncomingValue(i) == Scalar) { 2593 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); 2594 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 2595 CSEBlocks.insert(PH->getIncomingBlock(i)); 2596 PH->setOperand(i, Ex); 2597 } 2598 } 2599 } else { 2600 Builder.SetInsertPoint(cast<Instruction>(User)); 2601 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 2602 CSEBlocks.insert(cast<Instruction>(User)->getParent()); 2603 User->replaceUsesOfWith(Scalar, Ex); 2604 } 2605 } else { 2606 Builder.SetInsertPoint(&F->getEntryBlock().front()); 2607 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 2608 CSEBlocks.insert(&F->getEntryBlock()); 2609 User->replaceUsesOfWith(Scalar, Ex); 2610 } 2611 2612 DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 2613 } 2614 2615 // For each vectorized value: 2616 for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) { 2617 TreeEntry *Entry = &VectorizableTree[EIdx]; 2618 2619 // For each lane: 2620 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 2621 Value *Scalar = Entry->Scalars[Lane]; 2622 // No need to handle users of gathered values. 2623 if (Entry->NeedToGather) 2624 continue; 2625 2626 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 2627 2628 Type *Ty = Scalar->getType(); 2629 if (!Ty->isVoidTy()) { 2630 #ifndef NDEBUG 2631 for (User *U : Scalar->users()) { 2632 DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 2633 2634 assert((ScalarToTreeEntry.count(U) || 2635 // It is legal to replace users in the ignorelist by undef. 2636 (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), U) != 2637 UserIgnoreList.end())) && 2638 "Replacing out-of-tree value with undef"); 2639 } 2640 #endif 2641 Value *Undef = UndefValue::get(Ty); 2642 Scalar->replaceAllUsesWith(Undef); 2643 } 2644 DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 2645 eraseInstruction(cast<Instruction>(Scalar)); 2646 } 2647 } 2648 2649 Builder.ClearInsertionPoint(); 2650 2651 return VectorizableTree[0].VectorizedValue; 2652 } 2653 2654 void BoUpSLP::optimizeGatherSequence() { 2655 DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size() 2656 << " gather sequences instructions.\n"); 2657 // LICM InsertElementInst sequences. 2658 for (SetVector<Instruction *>::iterator it = GatherSeq.begin(), 2659 e = GatherSeq.end(); it != e; ++it) { 2660 InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it); 2661 2662 if (!Insert) 2663 continue; 2664 2665 // Check if this block is inside a loop. 2666 Loop *L = LI->getLoopFor(Insert->getParent()); 2667 if (!L) 2668 continue; 2669 2670 // Check if it has a preheader. 2671 BasicBlock *PreHeader = L->getLoopPreheader(); 2672 if (!PreHeader) 2673 continue; 2674 2675 // If the vector or the element that we insert into it are 2676 // instructions that are defined in this basic block then we can't 2677 // hoist this instruction. 2678 Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0)); 2679 Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1)); 2680 if (CurrVec && L->contains(CurrVec)) 2681 continue; 2682 if (NewElem && L->contains(NewElem)) 2683 continue; 2684 2685 // We can hoist this instruction. Move it to the pre-header. 2686 Insert->moveBefore(PreHeader->getTerminator()); 2687 } 2688 2689 // Make a list of all reachable blocks in our CSE queue. 
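  // Identical insertelement chains are common when several tree entries
  // gather the same scalars; visiting blocks in dominator order below lets
  // the copy in a dominating block act as the canonical one, and later
  // duplicates are simply RAUWed to it and erased.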
2690 SmallVector<const DomTreeNode *, 8> CSEWorkList; 2691 CSEWorkList.reserve(CSEBlocks.size()); 2692 for (BasicBlock *BB : CSEBlocks) 2693 if (DomTreeNode *N = DT->getNode(BB)) { 2694 assert(DT->isReachableFromEntry(N)); 2695 CSEWorkList.push_back(N); 2696 } 2697 2698 // Sort blocks by domination. This ensures we visit a block after all blocks 2699 // dominating it are visited. 2700 std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(), 2701 [this](const DomTreeNode *A, const DomTreeNode *B) { 2702 return DT->properlyDominates(A, B); 2703 }); 2704 2705 // Perform O(N^2) search over the gather sequences and merge identical 2706 // instructions. TODO: We can further optimize this scan if we split the 2707 // instructions into different buckets based on the insert lane. 2708 SmallVector<Instruction *, 16> Visited; 2709 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 2710 assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 2711 "Worklist not sorted properly!"); 2712 BasicBlock *BB = (*I)->getBlock(); 2713 // For all instructions in blocks containing gather sequences: 2714 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) { 2715 Instruction *In = &*it++; 2716 if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In)) 2717 continue; 2718 2719 // Check if we can replace this instruction with any of the 2720 // visited instructions. 2721 for (SmallVectorImpl<Instruction *>::iterator v = Visited.begin(), 2722 ve = Visited.end(); 2723 v != ve; ++v) { 2724 if (In->isIdenticalTo(*v) && 2725 DT->dominates((*v)->getParent(), In->getParent())) { 2726 In->replaceAllUsesWith(*v); 2727 eraseInstruction(In); 2728 In = nullptr; 2729 break; 2730 } 2731 } 2732 if (In) { 2733 assert(std::find(Visited.begin(), Visited.end(), In) == Visited.end()); 2734 Visited.push_back(In); 2735 } 2736 } 2737 } 2738 CSEBlocks.clear(); 2739 GatherSeq.clear(); 2740 } 2741 2742 // Groups the instructions to a bundle (which is then a single scheduling entity) 2743 // and schedules instructions until the bundle gets ready. 2744 bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, 2745 BoUpSLP *SLP) { 2746 if (isa<PHINode>(VL[0])) 2747 return true; 2748 2749 // Initialize the instruction bundle. 2750 Instruction *OldScheduleEnd = ScheduleEnd; 2751 ScheduleData *PrevInBundle = nullptr; 2752 ScheduleData *Bundle = nullptr; 2753 bool ReSchedule = false; 2754 DEBUG(dbgs() << "SLP: bundle: " << *VL[0] << "\n"); 2755 2756 // Make sure that the scheduling region contains all 2757 // instructions of the bundle. 2758 for (Value *V : VL) { 2759 if (!extendSchedulingRegion(V)) 2760 return false; 2761 } 2762 2763 for (Value *V : VL) { 2764 ScheduleData *BundleMember = getScheduleData(V); 2765 assert(BundleMember && 2766 "no ScheduleData for bundle member (maybe not in same basic block)"); 2767 if (BundleMember->IsScheduled) { 2768 // A bundle member was scheduled as single instruction before and now 2769 // needs to be scheduled as part of the bundle. We just get rid of the 2770 // existing schedule. 
2771 DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember 2772 << " was already scheduled\n"); 2773 ReSchedule = true; 2774 } 2775 assert(BundleMember->isSchedulingEntity() && 2776 "bundle member already part of other bundle"); 2777 if (PrevInBundle) { 2778 PrevInBundle->NextInBundle = BundleMember; 2779 } else { 2780 Bundle = BundleMember; 2781 } 2782 BundleMember->UnscheduledDepsInBundle = 0; 2783 Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps; 2784 2785 // Group the instructions to a bundle. 2786 BundleMember->FirstInBundle = Bundle; 2787 PrevInBundle = BundleMember; 2788 } 2789 if (ScheduleEnd != OldScheduleEnd) { 2790 // The scheduling region got new instructions at the lower end (or it is a 2791 // new region for the first bundle). This makes it necessary to 2792 // recalculate all dependencies. 2793 // It is seldom that this needs to be done a second time after adding the 2794 // initial bundle to the region. 2795 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 2796 ScheduleData *SD = getScheduleData(I); 2797 SD->clearDependencies(); 2798 } 2799 ReSchedule = true; 2800 } 2801 if (ReSchedule) { 2802 resetSchedule(); 2803 initialFillReadyList(ReadyInsts); 2804 } 2805 2806 DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block " 2807 << BB->getName() << "\n"); 2808 2809 calculateDependencies(Bundle, true, SLP); 2810 2811 // Now try to schedule the new bundle. As soon as the bundle is "ready" it 2812 // means that there are no cyclic dependencies and we can schedule it. 2813 // Note that's important that we don't "schedule" the bundle yet (see 2814 // cancelScheduling). 2815 while (!Bundle->isReady() && !ReadyInsts.empty()) { 2816 2817 ScheduleData *pickedSD = ReadyInsts.back(); 2818 ReadyInsts.pop_back(); 2819 2820 if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) { 2821 schedule(pickedSD, ReadyInsts); 2822 } 2823 } 2824 if (!Bundle->isReady()) { 2825 cancelScheduling(VL); 2826 return false; 2827 } 2828 return true; 2829 } 2830 2831 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL) { 2832 if (isa<PHINode>(VL[0])) 2833 return; 2834 2835 ScheduleData *Bundle = getScheduleData(VL[0]); 2836 DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); 2837 assert(!Bundle->IsScheduled && 2838 "Can't cancel bundle which is already scheduled"); 2839 assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() && 2840 "tried to unbundle something which is not a bundle"); 2841 2842 // Un-bundle: make single instructions out of the bundle. 2843 ScheduleData *BundleMember = Bundle; 2844 while (BundleMember) { 2845 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); 2846 BundleMember->FirstInBundle = BundleMember; 2847 ScheduleData *Next = BundleMember->NextInBundle; 2848 BundleMember->NextInBundle = nullptr; 2849 BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps; 2850 if (BundleMember->UnscheduledDepsInBundle == 0) { 2851 ReadyInsts.insert(BundleMember); 2852 } 2853 BundleMember = Next; 2854 } 2855 } 2856 2857 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V) { 2858 if (getScheduleData(V)) 2859 return true; 2860 Instruction *I = dyn_cast<Instruction>(V); 2861 assert(I && "bundle member must be an instruction"); 2862 assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled"); 2863 if (!ScheduleStart) { 2864 // It's the first instruction in the new region. 
2865 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 2866 ScheduleStart = I; 2867 ScheduleEnd = I->getNextNode(); 2868 assert(ScheduleEnd && "tried to vectorize a TerminatorInst?"); 2869 DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 2870 return true; 2871 } 2872 // Search up and down at the same time, because we don't know if the new 2873 // instruction is above or below the existing scheduling region. 2874 BasicBlock::reverse_iterator UpIter(ScheduleStart->getIterator()); 2875 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 2876 BasicBlock::iterator DownIter(ScheduleEnd); 2877 BasicBlock::iterator LowerEnd = BB->end(); 2878 for (;;) { 2879 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 2880 DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 2881 return false; 2882 } 2883 2884 if (UpIter != UpperEnd) { 2885 if (&*UpIter == I) { 2886 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 2887 ScheduleStart = I; 2888 DEBUG(dbgs() << "SLP: extend schedule region start to " << *I << "\n"); 2889 return true; 2890 } 2891 UpIter++; 2892 } 2893 if (DownIter != LowerEnd) { 2894 if (&*DownIter == I) { 2895 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 2896 nullptr); 2897 ScheduleEnd = I->getNextNode(); 2898 assert(ScheduleEnd && "tried to vectorize a TerminatorInst?"); 2899 DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); 2900 return true; 2901 } 2902 DownIter++; 2903 } 2904 assert((UpIter != UpperEnd || DownIter != LowerEnd) && 2905 "instruction not found in block"); 2906 } 2907 return true; 2908 } 2909 2910 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 2911 Instruction *ToI, 2912 ScheduleData *PrevLoadStore, 2913 ScheduleData *NextLoadStore) { 2914 ScheduleData *CurrentLoadStore = PrevLoadStore; 2915 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 2916 ScheduleData *SD = ScheduleDataMap[I]; 2917 if (!SD) { 2918 // Allocate a new ScheduleData for the instruction. 2919 if (ChunkPos >= ChunkSize) { 2920 ScheduleDataChunks.push_back( 2921 llvm::make_unique<ScheduleData[]>(ChunkSize)); 2922 ChunkPos = 0; 2923 } 2924 SD = &(ScheduleDataChunks.back()[ChunkPos++]); 2925 ScheduleDataMap[I] = SD; 2926 SD->Inst = I; 2927 } 2928 assert(!isInSchedulingRegion(SD) && 2929 "new ScheduleData already in scheduling region"); 2930 SD->init(SchedulingRegionID); 2931 2932 if (I->mayReadOrWriteMemory()) { 2933 // Update the linked list of memory accessing instructions. 
2934 if (CurrentLoadStore) { 2935 CurrentLoadStore->NextLoadStore = SD; 2936 } else { 2937 FirstLoadStoreInRegion = SD; 2938 } 2939 CurrentLoadStore = SD; 2940 } 2941 } 2942 if (NextLoadStore) { 2943 if (CurrentLoadStore) 2944 CurrentLoadStore->NextLoadStore = NextLoadStore; 2945 } else { 2946 LastLoadStoreInRegion = CurrentLoadStore; 2947 } 2948 } 2949 2950 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, 2951 bool InsertInReadyList, 2952 BoUpSLP *SLP) { 2953 assert(SD->isSchedulingEntity()); 2954 2955 SmallVector<ScheduleData *, 10> WorkList; 2956 WorkList.push_back(SD); 2957 2958 while (!WorkList.empty()) { 2959 ScheduleData *SD = WorkList.back(); 2960 WorkList.pop_back(); 2961 2962 ScheduleData *BundleMember = SD; 2963 while (BundleMember) { 2964 assert(isInSchedulingRegion(BundleMember)); 2965 if (!BundleMember->hasValidDependencies()) { 2966 2967 DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n"); 2968 BundleMember->Dependencies = 0; 2969 BundleMember->resetUnscheduledDeps(); 2970 2971 // Handle def-use chain dependencies. 2972 for (User *U : BundleMember->Inst->users()) { 2973 if (isa<Instruction>(U)) { 2974 ScheduleData *UseSD = getScheduleData(U); 2975 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 2976 BundleMember->Dependencies++; 2977 ScheduleData *DestBundle = UseSD->FirstInBundle; 2978 if (!DestBundle->IsScheduled) { 2979 BundleMember->incrementUnscheduledDeps(1); 2980 } 2981 if (!DestBundle->hasValidDependencies()) { 2982 WorkList.push_back(DestBundle); 2983 } 2984 } 2985 } else { 2986 // I'm not sure if this can ever happen. But we need to be safe. 2987 // This lets the instruction/bundle never be scheduled and 2988 // eventually disable vectorization. 2989 BundleMember->Dependencies++; 2990 BundleMember->incrementUnscheduledDeps(1); 2991 } 2992 } 2993 2994 // Handle the memory dependencies. 2995 ScheduleData *DepDest = BundleMember->NextLoadStore; 2996 if (DepDest) { 2997 Instruction *SrcInst = BundleMember->Inst; 2998 MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA); 2999 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory(); 3000 unsigned numAliased = 0; 3001 unsigned DistToSrc = 1; 3002 3003 while (DepDest) { 3004 assert(isInSchedulingRegion(DepDest)); 3005 3006 // We have two limits to reduce the complexity: 3007 // 1) AliasedCheckLimit: It's a small limit to reduce calls to 3008 // SLP->isAliased (which is the expensive part in this loop). 3009 // 2) MaxMemDepDistance: It's for very large blocks and it aborts 3010 // the whole loop (even if the loop is fast, it's quadratic). 3011 // It's important for the loop break condition (see below) to 3012 // check this limit even between two read-only instructions. 3013 if (DistToSrc >= MaxMemDepDistance || 3014 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) && 3015 (numAliased >= AliasedCheckLimit || 3016 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) { 3017 3018 // We increment the counter only if the locations are aliased 3019 // (instead of counting all alias checks). This gives a better 3020 // balance between reduced runtime and accurate dependencies. 
3021 numAliased++; 3022 3023 DepDest->MemoryDependencies.push_back(BundleMember); 3024 BundleMember->Dependencies++; 3025 ScheduleData *DestBundle = DepDest->FirstInBundle; 3026 if (!DestBundle->IsScheduled) { 3027 BundleMember->incrementUnscheduledDeps(1); 3028 } 3029 if (!DestBundle->hasValidDependencies()) { 3030 WorkList.push_back(DestBundle); 3031 } 3032 } 3033 DepDest = DepDest->NextLoadStore; 3034 3035 // Example, explaining the loop break condition: Let's assume our 3036 // starting instruction is i0 and MaxMemDepDistance = 3. 3037 // 3038 // +--------v--v--v 3039 // i0,i1,i2,i3,i4,i5,i6,i7,i8 3040 // +--------^--^--^ 3041 // 3042 // MaxMemDepDistance let us stop alias-checking at i3 and we add 3043 // dependencies from i0 to i3,i4,.. (even if they are not aliased). 3044 // Previously we already added dependencies from i3 to i6,i7,i8 3045 // (because of MaxMemDepDistance). As we added a dependency from 3046 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 3047 // and we can abort this loop at i6. 3048 if (DistToSrc >= 2 * MaxMemDepDistance) 3049 break; 3050 DistToSrc++; 3051 } 3052 } 3053 } 3054 BundleMember = BundleMember->NextInBundle; 3055 } 3056 if (InsertInReadyList && SD->isReady()) { 3057 ReadyInsts.push_back(SD); 3058 DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n"); 3059 } 3060 } 3061 } 3062 3063 void BoUpSLP::BlockScheduling::resetSchedule() { 3064 assert(ScheduleStart && 3065 "tried to reset schedule on block which has not been scheduled"); 3066 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3067 ScheduleData *SD = getScheduleData(I); 3068 assert(isInSchedulingRegion(SD)); 3069 SD->IsScheduled = false; 3070 SD->resetUnscheduledDeps(); 3071 } 3072 ReadyInsts.clear(); 3073 } 3074 3075 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 3076 3077 if (!BS->ScheduleStart) 3078 return; 3079 3080 DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 3081 3082 BS->resetSchedule(); 3083 3084 // For the real scheduling we use a more sophisticated ready-list: it is 3085 // sorted by the original instruction location. This lets the final schedule 3086 // be as close as possible to the original instruction order. 3087 struct ScheduleDataCompare { 3088 bool operator()(ScheduleData *SD1, ScheduleData *SD2) { 3089 return SD2->SchedulingPriority < SD1->SchedulingPriority; 3090 } 3091 }; 3092 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 3093 3094 // Ensure that all dependency data is updated and fill the ready-list with 3095 // initial instructions. 3096 int Idx = 0; 3097 int NumToSchedule = 0; 3098 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 3099 I = I->getNextNode()) { 3100 ScheduleData *SD = BS->getScheduleData(I); 3101 assert( 3102 SD->isPartOfBundle() == (ScalarToTreeEntry.count(SD->Inst) != 0) && 3103 "scheduler and vectorizer have different opinion on what is a bundle"); 3104 SD->FirstInBundle->SchedulingPriority = Idx++; 3105 if (SD->isSchedulingEntity()) { 3106 BS->calculateDependencies(SD, false, this); 3107 NumToSchedule++; 3108 } 3109 } 3110 BS->initialFillReadyList(ReadyInsts); 3111 3112 Instruction *LastScheduledInst = BS->ScheduleEnd; 3113 3114 // Do the "real" scheduling. 3115 while (!ReadyInsts.empty()) { 3116 ScheduleData *picked = *ReadyInsts.begin(); 3117 ReadyInsts.erase(ReadyInsts.begin()); 3118 3119 // Move the scheduled instruction(s) to their dedicated places, if not 3120 // there yet. 
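    // Each member of the picked bundle is spliced to sit next to the
    // previously placed instruction, so after this loop the whole bundle
    // forms one contiguous run inside the scheduling region.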
3121 ScheduleData *BundleMember = picked; 3122 while (BundleMember) { 3123 Instruction *pickedInst = BundleMember->Inst; 3124 if (LastScheduledInst->getNextNode() != pickedInst) { 3125 BS->BB->getInstList().remove(pickedInst); 3126 BS->BB->getInstList().insert(LastScheduledInst->getIterator(), 3127 pickedInst); 3128 } 3129 LastScheduledInst = pickedInst; 3130 BundleMember = BundleMember->NextInBundle; 3131 } 3132 3133 BS->schedule(picked, ReadyInsts); 3134 NumToSchedule--; 3135 } 3136 assert(NumToSchedule == 0 && "could not schedule all instructions"); 3137 3138 // Avoid duplicate scheduling of the block. 3139 BS->ScheduleStart = nullptr; 3140 } 3141 3142 /// The SLPVectorizer Pass. 3143 struct SLPVectorizer : public FunctionPass { 3144 typedef SmallVector<StoreInst *, 8> StoreList; 3145 typedef MapVector<Value *, StoreList> StoreListMap; 3146 3147 /// Pass identification, replacement for typeid 3148 static char ID; 3149 3150 explicit SLPVectorizer() : FunctionPass(ID) { 3151 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry()); 3152 } 3153 3154 ScalarEvolution *SE; 3155 TargetTransformInfo *TTI; 3156 TargetLibraryInfo *TLI; 3157 AliasAnalysis *AA; 3158 LoopInfo *LI; 3159 DominatorTree *DT; 3160 AssumptionCache *AC; 3161 3162 bool runOnFunction(Function &F) override { 3163 if (skipOptnoneFunction(F)) 3164 return false; 3165 3166 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 3167 TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 3168 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 3169 TLI = TLIP ? &TLIP->getTLI() : nullptr; 3170 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 3171 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 3172 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 3173 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 3174 3175 StoreRefs.clear(); 3176 bool Changed = false; 3177 3178 // If the target claims to have no vector registers don't attempt 3179 // vectorization. 3180 if (!TTI->getNumberOfRegisters(true)) 3181 return false; 3182 3183 // Use the vector register size specified by the target unless overridden 3184 // by a command-line option. 3185 // TODO: It would be better to limit the vectorization factor based on 3186 // data type rather than just register size. For example, x86 AVX has 3187 // 256-bit registers, but it does not support integer operations 3188 // at that width (that requires AVX2). 3189 if (MaxVectorRegSizeOption.getNumOccurrences()) 3190 MaxVecRegSize = MaxVectorRegSizeOption; 3191 else 3192 MaxVecRegSize = TTI->getRegisterBitWidth(true); 3193 3194 // Don't vectorize when the attribute NoImplicitFloat is used. 3195 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 3196 return false; 3197 3198 DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 3199 3200 // Use the bottom up slp vectorizer to construct chains that start with 3201 // store instructions. 3202 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC); 3203 3204 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 3205 // delete instructions. 3206 3207 // Scan the blocks in the function in post order. 3208 for (auto BB : post_order(&F.getEntryBlock())) { 3209 // Vectorize trees that end at stores. 3210 if (unsigned count = collectStores(BB, R)) { 3211 (void)count; 3212 DEBUG(dbgs() << "SLP: Found " << count << " stores to vectorize.\n"); 3213 Changed |= vectorizeStoreChains(R); 3214 } 3215 3216 // Vectorize trees that end at reductions. 
3217 Changed |= vectorizeChainsInBlock(BB, R); 3218 } 3219 3220 if (Changed) { 3221 R.optimizeGatherSequence(); 3222 DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 3223 DEBUG(verifyFunction(F)); 3224 } 3225 return Changed; 3226 } 3227 3228 void getAnalysisUsage(AnalysisUsage &AU) const override { 3229 FunctionPass::getAnalysisUsage(AU); 3230 AU.addRequired<AssumptionCacheTracker>(); 3231 AU.addRequired<ScalarEvolutionWrapperPass>(); 3232 AU.addRequired<AAResultsWrapperPass>(); 3233 AU.addRequired<TargetTransformInfoWrapperPass>(); 3234 AU.addRequired<LoopInfoWrapperPass>(); 3235 AU.addRequired<DominatorTreeWrapperPass>(); 3236 AU.addPreserved<LoopInfoWrapperPass>(); 3237 AU.addPreserved<DominatorTreeWrapperPass>(); 3238 AU.addPreserved<AAResultsWrapperPass>(); 3239 AU.addPreserved<GlobalsAAWrapperPass>(); 3240 AU.setPreservesCFG(); 3241 } 3242 3243 private: 3244 3245 /// \brief Collect memory references and sort them according to their base 3246 /// object. We sort the stores to their base objects to reduce the cost of the 3247 /// quadratic search on the stores. TODO: We can further reduce this cost 3248 /// if we flush the chain creation every time we run into a memory barrier. 3249 unsigned collectStores(BasicBlock *BB, BoUpSLP &R); 3250 3251 /// \brief Try to vectorize a chain that starts at two arithmetic instrs. 3252 bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R); 3253 3254 /// \brief Try to vectorize a list of operands. 3255 /// \@param BuildVector A list of users to ignore for the purpose of 3256 /// scheduling and that don't need extracting. 3257 /// \returns true if a value was vectorized. 3258 bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 3259 ArrayRef<Value *> BuildVector = None, 3260 bool allowReorder = false); 3261 3262 /// \brief Try to vectorize a chain that may start at the operands of \V; 3263 bool tryToVectorize(BinaryOperator *V, BoUpSLP &R); 3264 3265 /// \brief Vectorize the stores that were collected in StoreRefs. 3266 bool vectorizeStoreChains(BoUpSLP &R); 3267 3268 /// \brief Scan the basic block and look for patterns that are likely to start 3269 /// a vectorization chain. 3270 bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R); 3271 3272 bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold, 3273 BoUpSLP &R, unsigned VecRegSize); 3274 3275 bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold, 3276 BoUpSLP &R); 3277 private: 3278 StoreListMap StoreRefs; 3279 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. 3280 }; 3281 3282 /// \brief Check that the Values in the slice in VL array are still existent in 3283 /// the WeakVH array. 3284 /// Vectorization of part of the VL array may cause later values in the VL array 3285 /// to become invalid. We track when this has happened in the WeakVH array. 
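/// For example, if vectorizing an earlier slice erased one of the scalars,
/// its WeakVH slot becomes null and a later slice containing it no longer
/// compares equal, so the caller skips that slice.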
static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, ArrayRef<WeakVH> VH,
                               unsigned SliceBegin, unsigned SliceSize) {
  VL = VL.slice(SliceBegin, SliceSize);
  VH = VH.slice(SliceBegin, SliceSize);
  return !std::equal(VL.begin(), VL.end(), VH.begin());
}

bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
                                        int CostThreshold, BoUpSLP &R,
                                        unsigned VecRegSize) {
  unsigned ChainLen = Chain.size();
  DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
               << "\n");
  Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType();
  auto &DL = cast<StoreInst>(Chain[0])->getModule()->getDataLayout();
  unsigned Sz = DL.getTypeSizeInBits(StoreTy);
  unsigned VF = VecRegSize / Sz;

  if (!isPowerOf2_32(Sz) || VF < 2)
    return false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());

  bool Changed = false;
  // Look for profitable vectorizable trees at all offsets, starting at zero.
  for (unsigned i = 0, e = ChainLen; i < e; ++i) {
    if (i + VF > e)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
                 << "\n");
    ArrayRef<Value *> Operands = Chain.slice(i, VF);

    R.buildTree(Operands);

    int Cost = R.getTreeCost();

    DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
    if (Cost < CostThreshold) {
      DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
                                    int costThreshold, BoUpSLP &R) {
  SetVector<StoreInst *> Heads, Tails;
  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;

  // We may run into multiple chains that merge into a single chain. We mark
  // the stores that we vectorized so that we don't visit the same store twice.
  BoUpSLP::ValueSet VectorizedStores;
  bool Changed = false;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  SmallVector<unsigned, 16> IndexQueue;
  for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
    const DataLayout &DL = Stores[i]->getModule()->getDataLayout();
    IndexQueue.clear();
    // If a store has multiple consecutive store candidates, search the Stores
    // array according to the sequence: from i+1 to e, then from i-1 to 0.
    // This is because pairing with an immediately succeeding or preceding
    // candidate usually creates the best chance to find an SLP vectorization
    // opportunity.
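    // For example, with i == 2 and e == 6 the candidates are visited in the
    // order 3, 4, 5, 1, 0.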
    unsigned j = 0;
    for (j = i + 1; j < e; ++j)
      IndexQueue.push_back(j);
    for (j = i; j > 0; --j)
      IndexQueue.push_back(j - 1);

    for (auto &k : IndexQueue) {
      if (R.isConsecutiveAccess(Stores[i], Stores[k], DL)) {
        Tails.insert(Stores[k]);
        Heads.insert(Stores[i]);
        ConsecutiveChain[Stores[i]] = Stores[k];
        break;
      }
    }
  }

  // For stores that start but don't end a link in the chain:
  for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and try
    // to vectorize it.
    BoUpSLP::ValueList Operands;
    StoreInst *I = *it;
    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (VectorizedStores.count(I))
        break;
      Operands.push_back(I);
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    // FIXME: Is division-by-2 the correct step? Should we assert that the
    // register size is a power-of-2?
    for (unsigned Size = MaxVecRegSize; Size >= MinVecRegSize; Size /= 2) {
      if (vectorizeStoreChain(Operands, costThreshold, R, Size)) {
        // Mark the vectorized stores so that we don't vectorize them again.
        VectorizedStores.insert(Operands.begin(), Operands.end());
        Changed = true;
        break;
      }
    }
  }

  return Changed;
}

unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
  unsigned count = 0;
  StoreRefs.clear();
  const DataLayout &DL = BB->getModule()->getDataLayout();
  for (Instruction &I : *BB) {
    StoreInst *SI = dyn_cast<StoreInst>(&I);
    if (!SI)
      continue;

    // Don't touch volatile stores.
    if (!SI->isSimple())
      continue;

    // Check that the pointer points to scalars.
    Type *Ty = SI->getValueOperand()->getType();
    if (!isValidElementType(Ty))
      continue;

    // Find the base pointer.
    Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL);

    // Save the store locations.
    StoreRefs[Ptr].push_back(SI);
    count++;
  }
  return count;
}

bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
  if (!A || !B)
    return false;
  Value *VL[] = { A, B };
  return tryToVectorizeList(VL, R, None, true);
}

bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
                                       ArrayRef<Value *> BuildVector,
                                       bool allowReorder) {
  if (VL.size() < 2)
    return false;

  DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size()
               << ".\n");

  // Check that all of the parts are scalar instructions of the same type.
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;

  unsigned Opcode0 = I0->getOpcode();
  const DataLayout &DL = I0->getModule()->getDataLayout();

  Type *Ty0 = I0->getType();
  unsigned Sz = DL.getTypeSizeInBits(Ty0);
  // FIXME: Register size should be a parameter to this function, so we can
  // try different vectorization factors.
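  // For example, with MinVecRegSize == 128 and 32-bit elements this yields
  // VF == 4.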
  unsigned VF = MinVecRegSize / Sz;

  for (Value *V : VL) {
    Type *Ty = V->getType();
    if (!isValidElementType(Ty))
      return false;
    Instruction *Inst = dyn_cast<Instruction>(V);
    if (!Inst || Inst->getOpcode() != Opcode0)
      return false;
  }

  bool Changed = false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());

  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    unsigned OpsWidth = 0;

    if (i + VF > e)
      OpsWidth = e - i;
    else
      OpsWidth = VF;

    if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(VL, TrackValues, i, OpsWidth))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
                 << "\n");
    ArrayRef<Value *> Ops = VL.slice(i, OpsWidth);

    ArrayRef<Value *> BuildVectorSlice;
    if (!BuildVector.empty())
      BuildVectorSlice = BuildVector.slice(i, OpsWidth);

    R.buildTree(Ops, BuildVectorSlice);
    // TODO: check if we can allow reordering also for other cases than
    // tryToVectorizePair().
    if (allowReorder && R.shouldReorder()) {
      assert(Ops.size() == 2);
      assert(BuildVectorSlice.empty());
      Value *ReorderedOps[] = { Ops[1], Ops[0] };
      R.buildTree(ReorderedOps, None);
    }
    int Cost = R.getTreeCost();

    if (Cost < -SLPCostThreshold) {
      DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
      Value *VectorizedRoot = R.vectorizeTree();

      // Reconstruct the build vector by extracting the vectorized root. This
      // way we handle the case where some elements of the vector are
      // undefined.
      //  (return (insertelt <4 x i32> (insertelt undef (opd0) 0) (opd1) 2))
      if (!BuildVectorSlice.empty()) {
        // The insert point is the last build vector instruction. The
        // vectorized root will precede it. This guarantees that we get an
        // instruction. The vectorized tree could have been constant folded.
        Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back());
        unsigned VecIdx = 0;
        for (auto &V : BuildVectorSlice) {
          IRBuilder<true, NoFolder> Builder(
              InsertAfter->getParent(), ++BasicBlock::iterator(InsertAfter));
          InsertElementInst *IE = cast<InsertElementInst>(V);
          Instruction *Extract = cast<Instruction>(
              Builder.CreateExtractElement(VectorizedRoot,
                                           Builder.getInt32(VecIdx++)));
          IE->setOperand(1, Extract);
          IE->removeFromParent();
          IE->insertAfter(Extract);
          InsertAfter = IE;
        }
      }
      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
  if (!V)
    return false;

  // Try to vectorize V.
  if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R))
    return true;

  BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0));
  BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1));
  // Try to skip B.
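  // That is, for V == A op (B0 op B1), try the pairs (A, B0) and (A, B1).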
  if (B && B->hasOneUse()) {
    BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
    BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
    if (tryToVectorizePair(A, B0, R)) {
      return true;
    }
    if (tryToVectorizePair(A, B1, R)) {
      return true;
    }
  }

  // Try to skip A.
  if (A && A->hasOneUse()) {
    BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
    BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
    if (tryToVectorizePair(A0, B, R)) {
      return true;
    }
    if (tryToVectorizePair(A1, B, R)) {
      return true;
    }
  }
  return false;
}

/// \brief Generate a shuffle mask to be used in a reduction tree.
///
/// \param VecLen The length of the vector to be reduced.
/// \param NumEltsToRdx The number of elements that should be reduced in the
///        vector.
/// \param IsPairwise Whether the reduction is a pairwise or splitting
///        reduction. A pairwise reduction will generate a mask of
///        <0,2,...> or <1,3,..> while a splitting reduction will generate
///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
/// \param IsLeft True will generate a mask of even elements, odd otherwise.
static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
                                   bool IsPairwise, bool IsLeft,
                                   IRBuilder<> &Builder) {
  assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");

  SmallVector<Constant *, 32> ShuffleMask(
      VecLen, UndefValue::get(Builder.getInt32Ty()));

  if (IsPairwise)
    // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
  else
    // Move the upper half of the vector to the lower half.
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);

  return ConstantVector::get(ShuffleMask);
}

/// Model horizontal reductions.
///
/// A horizontal reduction is a tree of reduction operations (currently add and
/// fadd) that has operations that can be put into a vector as its leaves.
/// For example, this tree:
///
///  mul mul mul mul
///   \  /    \  /
///    +       +
///     \     /
///        +
/// This tree has "mul" as its reduced values and "+" as its reduction
/// operations. A reduction might be feeding into a store or a binary operation
/// feeding a phi.
///    ...
///    \  /
///     +
///     |
///  phi +=
///
///  Or:
///    ...
///    \  /
///     +
///     |
///  *p =
///
class HorizontalReduction {
  SmallVector<Value *, 16> ReductionOps;
  SmallVector<Value *, 32> ReducedVals;

  BinaryOperator *ReductionRoot;
  PHINode *ReductionPHI;

  /// The opcode of the reduction.
  unsigned ReductionOpcode;
  /// The opcode of the values we perform a reduction on.
  unsigned ReducedValueOpcode;
  /// Should we model this reduction as a pairwise reduction tree or a tree
  /// that splits the vector in halves and adds those halves.
  bool IsPairwiseReduction;

public:
  /// The width of one full horizontal reduction operation.
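  /// For example, a reduction of 32-bit floats in a 128-bit register has a
  /// width of 4.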
  unsigned ReduxWidth;

  HorizontalReduction()
      : ReductionRoot(nullptr), ReductionPHI(nullptr), ReductionOpcode(0),
        ReducedValueOpcode(0), IsPairwiseReduction(false), ReduxWidth(0) {}

  /// \brief Try to find a reduction tree.
  bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B) {
    assert((!Phi ||
            std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) &&
           "This phi needs to use the binary operator");

    // We could have an initial reduction that is not an add.
    //  r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted in the first '+'.
    if (Phi) {
      if (B->getOperand(0) == Phi) {
        Phi = nullptr;
        B = dyn_cast<BinaryOperator>(B->getOperand(1));
      } else if (B->getOperand(1) == Phi) {
        Phi = nullptr;
        B = dyn_cast<BinaryOperator>(B->getOperand(0));
      }
    }

    if (!B)
      return false;

    Type *Ty = B->getType();
    if (!isValidElementType(Ty))
      return false;

    const DataLayout &DL = B->getModule()->getDataLayout();
    ReductionOpcode = B->getOpcode();
    ReducedValueOpcode = 0;
    // FIXME: Register size should be a parameter to this function, so we can
    // try different vectorization factors.
    ReduxWidth = MinVecRegSize / DL.getTypeSizeInBits(Ty);
    ReductionRoot = B;
    ReductionPHI = Phi;

    if (ReduxWidth < 4)
      return false;

    // We currently only support adds.
    if (ReductionOpcode != Instruction::Add &&
        ReductionOpcode != Instruction::FAdd)
      return false;

    // Post-order traverse the reduction tree starting at B. We only handle
    // true trees containing only binary operators or selects.
    SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
    Stack.push_back(std::make_pair(B, 0));
    while (!Stack.empty()) {
      Instruction *TreeN = Stack.back().first;
      unsigned EdgeToVisit = Stack.back().second++;
      bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;

      // Only handle trees in the current basic block.
      if (TreeN->getParent() != B->getParent())
        return false;

      // Each tree node needs to have one user except for the ultimate
      // reduction.
      if (!TreeN->hasOneUse() && TreeN != B)
        return false;

      // Post-order visit.
      if (EdgeToVisit == 2 || IsReducedValue) {
        if (IsReducedValue) {
          // Make sure that the opcodes of the operations that we are going to
          // reduce match.
          if (!ReducedValueOpcode)
            ReducedValueOpcode = TreeN->getOpcode();
          else if (ReducedValueOpcode != TreeN->getOpcode())
            return false;
          ReducedVals.push_back(TreeN);
        } else {
          // We need to be able to reassociate the adds.
          if (!TreeN->isAssociative())
            return false;
          ReductionOps.push_back(TreeN);
        }
        // Retract.
        Stack.pop_back();
        continue;
      }

      // Visit left or right.
      Value *NextV = TreeN->getOperand(EdgeToVisit);
      // We currently only allow BinaryOperator's and SelectInst's as reduction
      // values in our tree.
      if (isa<BinaryOperator>(NextV) || isa<SelectInst>(NextV))
        Stack.push_back(std::make_pair(cast<Instruction>(NextV), 0));
      else if (NextV != Phi)
        return false;
    }
    return true;
  }

  /// \brief Attempt to vectorize the tree found by
  /// matchAssociativeReduction.
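  /// For example, with eight matched reduced values and ReduxWidth == 4, two
  /// <4 x ...> subtrees are vectorized and their horizontal sums are combined
  /// with a single scalar reduction operation.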
  bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
    if (ReducedVals.empty())
      return false;

    unsigned NumReducedVals = ReducedVals.size();
    if (NumReducedVals < ReduxWidth)
      return false;

    Value *VectorizedTree = nullptr;
    IRBuilder<> Builder(ReductionRoot);
    FastMathFlags Unsafe;
    Unsafe.setUnsafeAlgebra();
    Builder.setFastMathFlags(Unsafe);
    unsigned i = 0;

    for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
      V.buildTree(makeArrayRef(&ReducedVals[i], ReduxWidth), ReductionOps);

      // Estimate cost.
      int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]);
      if (Cost >= -SLPCostThreshold)
        break;

      DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
                   << ". (HorRdx)\n");

      // Vectorize a tree.
      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
      Value *VectorizedRoot = V.vectorizeTree();

      // Emit a reduction.
      Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder);
      if (VectorizedTree) {
        Builder.SetCurrentDebugLocation(Loc);
        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
                                     ReducedSubTree, "bin.rdx");
      } else
        VectorizedTree = ReducedSubTree;
    }

    if (VectorizedTree) {
      // Finish the reduction.
      for (; i < NumReducedVals; ++i) {
        Builder.SetCurrentDebugLocation(
            cast<Instruction>(ReducedVals[i])->getDebugLoc());
        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
                                     ReducedVals[i]);
      }
      // Update users.
      if (ReductionPHI) {
        assert(ReductionRoot && "Need a reduction operation");
        ReductionRoot->setOperand(0, VectorizedTree);
        ReductionRoot->setOperand(1, ReductionPHI);
      } else
        ReductionRoot->replaceAllUsesWith(VectorizedTree);
    }
    return VectorizedTree != nullptr;
  }

  unsigned numReductionValues() const {
    return ReducedVals.size();
  }

private:
  /// \brief Calculate the cost of a reduction.
  int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) {
    Type *ScalarTy = FirstReducedVal->getType();
    Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);

    int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true);
    int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false);

    IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
    int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;

    int ScalarReduxCost =
        ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, VecTy);

    DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
                 << " for reduction that starts with " << *FirstReducedVal
                 << " (It is a "
                 << (IsPairwiseReduction ? "pairwise" : "splitting")
                 << " reduction)\n");

    return VecReduxCost - ScalarReduxCost;
  }

  static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L,
                            Value *R, const Twine &Name = "") {
    if (Opcode == Instruction::FAdd)
      return Builder.CreateFAdd(L, R, Name);
    return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name);
  }

  /// \brief Emit a horizontal reduction of the vectorized value.
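  /// For example, a splitting fadd reduction of a <4 x float> value %v
  /// conceptually becomes (types and exact value names omitted):
  ///   %rdx.shuf  = shufflevector %v, undef, <2, 3, undef, undef>
  ///   %bin.rdx   = fadd %v, %rdx.shuf
  ///   %rdx.shuf1 = shufflevector %bin.rdx, undef, <1, undef, undef, undef>
  ///   %bin.rdx1  = fadd %bin.rdx, %rdx.shuf1
  ///   %result    = extractelement %bin.rdx1, i32 0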
  Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) {
    assert(VectorizedValue && "Need to have a vectorized tree node");
    assert(isPowerOf2_32(ReduxWidth) &&
           "We only handle power-of-two reductions for now");

    Value *TmpVec = VectorizedValue;
    for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
      if (IsPairwiseReduction) {
        Value *LeftMask =
            createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
        Value *RightMask =
            createRdxShuffleMask(ReduxWidth, i, true, false, Builder);

        Value *LeftShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l");
        Value *RightShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), RightMask,
            "rdx.shuf.r");
        TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf,
                             "bin.rdx");
      } else {
        Value *UpperHalf =
            createRdxShuffleMask(ReduxWidth, i, false, false, Builder);
        Value *Shuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf");
        TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf, "bin.rdx");
      }
    }

    // The result is in the first element of the vector.
    return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
  }
};

/// \brief Recognize construction of vectors like
///  %ra = insertelement <4 x float> undef, float %s0, i32 0
///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
///
/// Returns true if it matches.
///
static bool findBuildVector(InsertElementInst *FirstInsertElem,
                            SmallVectorImpl<Value *> &BuildVector,
                            SmallVectorImpl<Value *> &BuildVectorOpds) {
  if (!isa<UndefValue>(FirstInsertElem->getOperand(0)))
    return false;

  InsertElementInst *IE = FirstInsertElem;
  while (true) {
    BuildVector.push_back(IE);
    BuildVectorOpds.push_back(IE->getOperand(1));

    if (IE->use_empty())
      return false;

    InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back());
    if (!NextUse)
      return true;

    // If this isn't the final use, make sure the next insertelement is the
    // only use. It's OK if the final constructed vector is used multiple
    // times.
    if (!IE->hasOneUse())
      return false;

    IE = NextUse;
  }

  return false;
}

static bool PhiTypeSorterFunc(Value *V, Value *V2) {
  return V->getType() < V2->getType();
}

/// \brief Try and get a reduction value from a phi node.
///
/// Given a phi node \p P in a block \p ParentBB, consider possible reductions
/// if they come from either \p ParentBB or a containing loop latch.
///
/// \returns A candidate reduction value if possible, or \code nullptr \endcode
/// if not possible.
static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
                                BasicBlock *ParentBB, LoopInfo *LI) {
  // There are situations where the reduction value is not dominated by the
  // reduction phi. Vectorizing such cases has been reported to cause
  // miscompiles. See PR25787.
  auto DominatedReduxValue = [&](Value *R) {
    return (
        dyn_cast<Instruction>(R) &&
        DT->dominates(P->getParent(), dyn_cast<Instruction>(R)->getParent()));
  };

  Value *Rdx = nullptr;

  // Return the incoming value if it comes from the same BB as the phi node.
  if (P->getIncomingBlock(0) == ParentBB) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == ParentBB) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  // Otherwise, check whether we have a loop latch to look at.
  Loop *BBL = LI->getLoopFor(ParentBB);
  if (!BBL)
    return nullptr;
  BasicBlock *BBLatch = BBL->getLoopLatch();
  if (!BBLatch)
    return nullptr;

  // There is a loop latch, return the incoming value if it comes from
  // that. This reduction pattern occasionally turns up.
  if (P->getIncomingBlock(0) == BBLatch) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == BBLatch) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  return nullptr;
}

/// \brief Attempt to reduce a horizontal reduction.
/// If it is legal to match a horizontal reduction feeding
/// the phi node P with reduction operators BI, then check if it
/// can be done.
/// \returns true if a horizontal reduction was matched and reduced.
/// \returns false if a horizontal reduction was not matched.
static bool canMatchHorizontalReduction(PHINode *P, BinaryOperator *BI,
                                        BoUpSLP &R, TargetTransformInfo *TTI) {
  if (!ShouldVectorizeHor)
    return false;

  HorizontalReduction HorRdx;
  if (!HorRdx.matchAssociativeReduction(P, BI))
    return false;

  // If there is a sufficient number of reduction values, reduce
  // to a nearby power-of-2. We can safely generate oversized
  // vectors and rely on the backend to split them to legal sizes.
  HorRdx.ReduxWidth =
      std::max((uint64_t)4, PowerOf2Floor(HorRdx.numReductionValues()));

  return HorRdx.tryToReduce(R, TTI);
}

bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallSet<Value *, 16> VisitedInstrs;

  bool HaveVectorizedPhiNodes = true;
  while (HaveVectorizedPhiNodes) {
    HaveVectorizedPhiNodes = false;

    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (Instruction &I : *BB) {
      PHINode *P = dyn_cast<PHINode>(&I);
      if (!P)
        break;

      if (!VisitedInstrs.count(P))
        Incoming.push_back(P);
    }

    // Sort by type.
    std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);

    // Try to vectorize elements based on their type.
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
                                           E = Incoming.end();
         IncIt != E;) {

      // Look for the next elements with the same type.
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs ("
                   << NumElts << ")\n");
      if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R)) {
        // Success. Start over because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
    // We may go through BB multiple times, so skip the ones we have already
    // checked.
    if (!VisitedInstrs.insert(&*it).second)
      continue;

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;

      Value *Rdx = getReductionValue(DT, P, BB, LI);

      // Check if this is a Binary Operator.
      BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx);
      if (!BI)
        continue;

      // Try to match and vectorize a horizontal reduction.
      if (canMatchHorizontalReduction(P, BI, R, TTI)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      Value *Inst = BI->getOperand(0);
      if (Inst == P)
        Inst = BI->getOperand(1);

      if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      continue;
    }

    if (ShouldStartVectorizeHorAtStore)
      if (StoreInst *SI = dyn_cast<StoreInst>(it))
        if (BinaryOperator *BinOp =
                dyn_cast<BinaryOperator>(SI->getValueOperand())) {
          if (canMatchHorizontalReduction(nullptr, BinOp, R, TTI) ||
              tryToVectorize(BinOp, R)) {
            Changed = true;
            it = BB->begin();
            e = BB->end();
            continue;
          }
        }

    // Try to vectorize horizontal reductions feeding into a return.
    if (ReturnInst *RI = dyn_cast<ReturnInst>(it))
      if (RI->getNumOperands() != 0)
        if (BinaryOperator *BinOp =
                dyn_cast<BinaryOperator>(RI->getOperand(0))) {
          DEBUG(dbgs() << "SLP: Found a return to vectorize.\n");
          if (tryToVectorizePair(BinOp->getOperand(0),
                                 BinOp->getOperand(1), R)) {
            Changed = true;
            it = BB->begin();
            e = BB->end();
            continue;
          }
        }

    // Try to vectorize trees that start at compare instructions.
    if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
      if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
        Changed = true;
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        it = BB->begin();
        e = BB->end();
        continue;
      }

      for (int i = 0; i < 2; ++i) {
        if (BinaryOperator *BI = dyn_cast<BinaryOperator>(CI->getOperand(i))) {
          if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) {
            Changed = true;
            // We would like to start over since some instructions are deleted
            // and the iterator may become invalid.
            it = BB->begin();
            e = BB->end();
            break;
          }
        }
      }
      continue;
    }

    // Try to vectorize trees that start at insertelement instructions.
    if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) {
      SmallVector<Value *, 16> BuildVector;
      SmallVector<Value *, 16> BuildVectorOpds;
      if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds))
        continue;

      // Vectorize starting with the build vector operands ignoring the
      // BuildVector instructions for the purpose of scheduling and user
      // extraction.
      if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }

      continue;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = StoreRefs.begin(), e = StoreRefs.end();
       it != e; ++it) {
    if (it->second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                 << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
    // TODO: The limit of 16 inhibits greater vectorization factors.
    // For example, AVX2 supports v32i8. Increasing this limit, however,
    // may cause a significant compile-time increase.
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len),
                                 -SLPCostThreshold, R);
    }
  }
  return Changed;
}

} // end anonymous namespace

char SLPVectorizer::ID = 0;
static const char lv_name[] = "SLP Vectorizer";
INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

namespace llvm {
Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
}
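// The pass is registered under SV_NAME, so it can be exercised on its own
// with, e.g.:
//   opt -slp-vectorizer -S input.ll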