//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <memory>

using namespace llvm;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));
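// Illustrative usage (assumption, not from the original source): the cl::opt
// flags above and below are ordinary 'opt' command-line options, so a run
// such as
//   opt -slp-vectorizer -slp-threshold=-5 -slp-max-reg-size=256 in.ll -S
// would, under that assumption, exercise this pass with a relaxed cost
// threshold and a 256-bit maximum register size.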
/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
    cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

namespace {

// FIXME: Set this via cl::opt to allow overriding.
static const unsigned RecursionMaxDepth = 12;

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// \brief Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_fp128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns the parent basic block if all of the instructions in \p VL
/// are in the same block or null otherwise.
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return nullptr;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return nullptr;

    if (BB != I->getParent())
      return nullptr;
  }
  return BB;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    if (!isa<Constant>(VL[i]))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

///\returns Opcode that can be clubbed with \p Op to create an alternate
/// sequence which can later be merged as a ShuffleVector instruction.
static unsigned getAltOpcode(unsigned Op) {
  switch (Op) {
  case Instruction::FAdd:
    return Instruction::FSub;
  case Instruction::FSub:
    return Instruction::FAdd;
  case Instruction::Add:
    return Instruction::Sub;
  case Instruction::Sub:
    return Instruction::Add;
  default:
    return 0;
  }
}
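// Illustrative example (not from the original source): for a bundle
// VL = {fadd, fsub, fadd, fsub}, even lanes carry the base opcode and odd
// lanes carry its getAltOpcode() counterpart, so isAltInst() below accepts
// the bundle and reports it as Instruction::ShuffleVector.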
///\returns bool representing if Opcode \p Op can be part
/// of an alternate sequence which can later be merged as
/// a ShuffleVector instruction.
static bool canCombineAsAltInst(unsigned Op) {
  return Op == Instruction::FAdd || Op == Instruction::FSub ||
         Op == Instruction::Sub || Op == Instruction::Add;
}

/// \returns Instruction::ShuffleVector if the instructions in \p VL form an
/// alternating fadd/fsub, fsub/fadd, add/sub, or sub/add sequence
/// (e.g. opcodes of fadd, fsub, fadd, fsub, ...), or 0 otherwise.
static unsigned isAltInst(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  unsigned Opcode = I0->getOpcode();
  unsigned AltOpcode = getAltOpcode(Opcode);
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || I->getOpcode() != ((i & 1) ? AltOpcode : Opcode))
      return 0;
  }
  return Instruction::ShuffleVector;
}

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode()) {
      if (canCombineAsAltInst(Opcode) && i == 1)
        return isAltInst(VL);
      return 0;
    }
  }
  return Opcode;
}

/// Get the intersection (logical and) of all of the potential IR flags
/// of each scalar operation (VL) that will be converted into a vector (I).
/// Flag set: NSW, NUW, exact, and all of fast-math.
static void propagateIRFlags(Value *I, ArrayRef<Value *> VL) {
  if (auto *VecOp = dyn_cast<BinaryOperator>(I)) {
    if (auto *Intersection = dyn_cast<BinaryOperator>(VL[0])) {
      // Intersection is initialized to the 0th scalar,
      // so start counting from index '1'.
      for (int i = 1, e = VL.size(); i < e; ++i) {
        if (auto *Scalar = dyn_cast<BinaryOperator>(VL[i]))
          Intersection->andIRFlags(Scalar);
      }
      VecOp->copyIRFlags(Intersection);
    }
  }
}

/// \returns \p I after propagating metadata from \p VL.
static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (unsigned i = 0, n = Metadata.size(); i != n; ++i) {
    unsigned Kind = Metadata[i].first;
    MDNode *MD = Metadata[i].second;

    // Use a distinct loop variable here; the original shadowed both the
    // outer index and the parameter \p I, which setMetadata below relies on.
    for (int j = 1, e = VL.size(); MD && j != e; j++) {
      Instruction *IJ = cast<Instruction>(VL[j]);
      MDNode *IMD = IJ->getMetadata(Kind);

      switch (Kind) {
      default:
        MD = nullptr; // Remove unknown metadata
        break;
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_nontemporal:
        MD = MDNode::intersect(MD, IMD);
        break;
      }
    }
    I->setMetadata(Kind, MD);
  }
  return I;
}
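// Illustrative example (not from the original source): when scalar loads are
// merged into one vector load, propagateMetadata() above intersects their
// metadata lane by lane; e.g. a !nontemporal hint carried by only one scalar
// is dropped from the vector instruction, and any metadata kind not handled
// in the switch is discarded entirely.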
/// \returns The type that all of the values in \p VL have or null if there
/// are different types.
static Type *getSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return nullptr;

  return Ty;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static bool matchExtractIndex(Instruction *E, unsigned Idx, unsigned Opcode) {
  assert(Opcode == Instruction::ExtractElement ||
         Opcode == Instruction::ExtractValue);
  if (Opcode == Instruction::ExtractElement) {
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    return CI && CI->getZExtValue() == Idx;
  } else {
    ExtractValueInst *EI = cast<ExtractValueInst>(E);
    return EI->getNumIndices() == 1 && *EI->idx_begin() == Idx;
  }
}

/// \returns True if an in-tree use also needs an extract. This refers to a
/// possible scalar operand in a vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    if (hasVectorInstrinsicScalarOpd(ID, 1)) {
      return (CI->getArgOperand(1) == Scalar);
    }
  } // Fall through to the default case.
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}
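// Illustrative driver sketch (assumption, not from the original source; the
// real pass wires this up elsewhere in the file). A client builds the tree,
// queries the cost, and only vectorizes when profitable:
//   BoUpSLP R(F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL);
//   R.buildTree(Bundle);               // e.g. a chain of consecutive stores
//   if (R.getTreeCost() < -SLPCostThreshold)
//     R.vectorizeTree();               // negative cost means profitable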
/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL)
      : NumLoadsWantToKeepOrder(0), NumLoadsWantToChangeOrder(0), F(Func),
        SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC), DB(DB),
        DL(DL), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize = TTI->getRegisterBitWidth(true);

    MinVecRegSize = MinVectorRegSizeOption;
  }

  /// \brief Vectorize the tree built by buildTree().
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  int getSpillCost();

  /// \returns the vectorization cost of the tree built by buildTree().
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumLoadsWantToKeepOrder = 0;
    NumLoadsWantToChangeOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
  }

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// \returns true if it is beneficial to reverse the vector order.
  bool shouldReorder() const {
    return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
  }

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  /// \brief Check if ArrayType or StructType is isomorphic to some VectorType.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);

  /// \returns True if the ExtractElement/ExtractValue instructions in VL can
  /// be vectorized to use the original vector (or aggregate "bitcast" to a
  /// vector).
  bool canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const;

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);
  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This may happen in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL) const;

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even when the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  /// \brief Reorder commutative operands in an alt shuffle if they result in
  /// vectorized code.
  void reorderAltShuffleOperands(ArrayRef<Value *> VL,
                                 SmallVectorImpl<Value *> &Left,
                                 SmallVectorImpl<Value *> &Right);

  /// \brief Reorder commutative operands to get a better probability of
  /// generating vectorized code.
  void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                      SmallVectorImpl<Value *> &Left,
                                      SmallVectorImpl<Value *> &Right);

  struct TreeEntry {
    TreeEntry() : Scalars(), VectorizedValue(nullptr), NeedToGather(false) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// Do we need to gather this sequence?
    bool NeedToGather;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) {
    VectorizableTree.emplace_back();
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      MustGather.insert(VL.begin(), VL.end());
    }
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value *, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L)
        : Scalar(S), User(U), Lane(L) {}
    // Which scalar in our function.
    Value *Scalar;
    // Which user that uses the scalar.
    llvm::User *User;
    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;
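  // Illustrative example (not from the original source): if scalar %a sits in
  // lane 2 of a vectorized bundle but is also used by an instruction outside
  // the tree, buildTree() records ExternalUser(%a, that user, 2) in
  // ExternalUses so vectorizeTree() can later emit an extractelement at
  // index 2 to feed the remaining scalar user.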
  /// Checks if two instructions may access the same memory.
  ///
  /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
  /// is invariant in the calling loop.
  bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
                 Instruction *Inst2) {
    // First check if the result is already in the cache.
    AliasCacheKey key = std::make_pair(Inst1, Inst2);
    Optional<bool> &result = AliasCache[key];
    if (result.hasValue()) {
      return result.getValue();
    }
    MemoryLocation Loc2 = getLocation(Inst2, AA);
    bool aliased = true;
    if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
      // Do the alias check.
      aliased = AA->alias(Loc1, Loc2);
    }
    // Store the result in the cache.
    result = aliased;
    return aliased;
  }

  typedef std::pair<Instruction *, Instruction *> AliasCacheKey;

  /// Cache for alias results.
  /// TODO: consider moving this to the AliasAnalysis itself.
  DenseMap<AliasCacheKey, Optional<bool>> AliasCache;

  /// Removes an instruction from its block and eventually deletes it.
  /// It's like Instruction::eraseFromParent() except that the actual deletion
  /// is delayed until BoUpSLP is destructed.
  /// This is required to ensure that there are no incorrect collisions in the
  /// AliasCache, which can happen if a new instruction is allocated at the
  /// same address as a previously deleted instruction.
  void eraseInstruction(Instruction *I) {
    I->removeFromParent();
    I->dropAllReferences();
    DeletedInstructions.push_back(std::unique_ptr<Instruction>(I));
  }

  /// Temporary store for deleted instructions. Instructions will be deleted
  /// eventually when the BoUpSLP is destructed.
  SmallVector<std::unique_ptr<Instruction>, 8> DeletedInstructions;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User).
  UserList ExternalUses;

  /// Values used only by @llvm.assume calls.
  SmallPtrSet<const Value *, 32> EphValues;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;
  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;

  /// Contains all scheduling relevant data for an instruction.
  /// A ScheduleData either represents a single instruction or a member of an
  /// instruction bundle (= a group of instructions which is combined into a
  /// vector instruction).
  struct ScheduleData {

    // The initial value for the dependency counters. It means that the
    // dependencies are not calculated yet.
    enum { InvalidDeps = -1 };

    ScheduleData()
        : Inst(nullptr), FirstInBundle(nullptr), NextInBundle(nullptr),
          NextLoadStore(nullptr), SchedulingRegionID(0), SchedulingPriority(0),
          Dependencies(InvalidDeps), UnscheduledDeps(InvalidDeps),
          UnscheduledDepsInBundle(InvalidDeps), IsScheduled(false) {}

    void init(int BlockSchedulingRegionID) {
      FirstInBundle = this;
      NextInBundle = nullptr;
      NextLoadStore = nullptr;
      IsScheduled = false;
      SchedulingRegionID = BlockSchedulingRegionID;
      UnscheduledDepsInBundle = UnscheduledDeps;
      clearDependencies();
    }

    /// Returns true if the dependency information has been calculated.
    bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
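    // Illustration (assumption, not from the original source): a bundle of
    // three instructions {I1, I2, I3} forms a singly linked list,
    //   I1: FirstInBundle = I1, NextInBundle = I2   <- the bundle head
    //   I2: FirstInBundle = I1, NextInBundle = I3
    //   I3: FirstInBundle = I1, NextInBundle = null
    // so only I1 is a scheduling entity in the sense checked below.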
    /// Returns true for single instructions and for bundle representatives
    /// (= the head of a bundle).
    bool isSchedulingEntity() const { return FirstInBundle == this; }

    /// Returns true if it represents an instruction bundle and not only a
    /// single instruction.
    bool isPartOfBundle() const {
      return NextInBundle != nullptr || FirstInBundle != this;
    }

    /// Returns true if it is ready for scheduling, i.e. it has no more
    /// unscheduled dependent instructions/bundles.
    bool isReady() const {
      assert(isSchedulingEntity() &&
             "can't consider non-scheduling entity for ready list");
      return UnscheduledDepsInBundle == 0 && !IsScheduled;
    }

    /// Modifies the number of unscheduled dependencies, also updating it for
    /// the whole bundle.
    int incrementUnscheduledDeps(int Incr) {
      UnscheduledDeps += Incr;
      return FirstInBundle->UnscheduledDepsInBundle += Incr;
    }

    /// Sets the number of unscheduled dependencies to the number of
    /// dependencies.
    void resetUnscheduledDeps() {
      incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
    }

    /// Clears all dependency information.
    void clearDependencies() {
      Dependencies = InvalidDeps;
      resetUnscheduledDeps();
      MemoryDependencies.clear();
    }

    void dump(raw_ostream &os) const {
      if (!isSchedulingEntity()) {
        os << "/ " << *Inst;
      } else if (NextInBundle) {
        os << '[' << *Inst;
        ScheduleData *SD = NextInBundle;
        while (SD) {
          os << ';' << *SD->Inst;
          SD = SD->NextInBundle;
        }
        os << ']';
      } else {
        os << *Inst;
      }
    }

    Instruction *Inst;

    /// Points to the head in an instruction bundle (and always to this for
    /// single instructions).
    ScheduleData *FirstInBundle;

    /// Singly linked list of all instructions in a bundle. Null if it is a
    /// single instruction.
    ScheduleData *NextInBundle;

    /// Singly linked list of all memory instructions (e.g. load, store, call)
    /// in the block - until the end of the scheduling region.
    ScheduleData *NextLoadStore;

    /// The dependent memory instructions.
    /// This list is derived on demand in calculateDependencies().
    SmallVector<ScheduleData *, 4> MemoryDependencies;

    /// This ScheduleData is in the current scheduling region if this matches
    /// the current SchedulingRegionID of BlockScheduling.
    int SchedulingRegionID;

    /// Used for getting a "good" final ordering of instructions.
    int SchedulingPriority;

    /// The number of dependencies. It consists of the number of users of the
    /// instruction plus the number of dependent memory instructions (if any).
    /// This value is calculated on demand.
    /// If InvalidDeps, the number of dependencies is not calculated yet.
    int Dependencies;

    /// The number of dependencies minus the number of dependencies of
    /// scheduled instructions. As soon as this is zero, the
    /// instruction/bundle gets ready for scheduling.
    /// Note that this is negative as long as Dependencies is not calculated.
    int UnscheduledDeps;

    /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for
    /// single instructions.
    int UnscheduledDepsInBundle;

    /// True if this instruction is scheduled (or considered as scheduled in
    /// the dry-run).
    bool IsScheduled;
  };

#ifndef NDEBUG
  friend raw_ostream &operator<<(raw_ostream &os,
                                 const BoUpSLP::ScheduleData &SD);
#endif
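  // Worked example (assumption, not from the original source): scheduling is
  // bottom-up, so Dependencies counts an instruction's users plus its memory
  // dependencies. If %x has two users inside the region, %x starts with
  // UnscheduledDeps = 2; schedule() below decrements that count once per
  // scheduled user, and at zero the bundle containing %x becomes ready.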
  /// Contains all scheduling data for a basic block.
  struct BlockScheduling {

    BlockScheduling(BasicBlock *BB)
        : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize),
          ScheduleStart(nullptr), ScheduleEnd(nullptr),
          FirstLoadStoreInRegion(nullptr), LastLoadStoreInRegion(nullptr),
          ScheduleRegionSize(0),
          ScheduleRegionSizeLimit(ScheduleRegionSizeBudget),
          // Make sure that the initial SchedulingRegionID is greater than the
          // initial SchedulingRegionID in ScheduleData (which is 0).
          SchedulingRegionID(1) {}

    void clear() {
      ReadyInsts.clear();
      ScheduleStart = nullptr;
      ScheduleEnd = nullptr;
      FirstLoadStoreInRegion = nullptr;
      LastLoadStoreInRegion = nullptr;

      // Reduce the maximum schedule region size by the size of the
      // previous scheduling run.
      ScheduleRegionSizeLimit -= ScheduleRegionSize;
      if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
        ScheduleRegionSizeLimit = MinScheduleRegionSize;
      ScheduleRegionSize = 0;

      // Make a new scheduling region, i.e. all existing ScheduleData is not
      // in the new region yet.
      ++SchedulingRegionID;
    }

    ScheduleData *getScheduleData(Value *V) {
      ScheduleData *SD = ScheduleDataMap[V];
      if (SD && SD->SchedulingRegionID == SchedulingRegionID)
        return SD;
      return nullptr;
    }

    bool isInSchedulingRegion(ScheduleData *SD) {
      return SD->SchedulingRegionID == SchedulingRegionID;
    }

    /// Marks an instruction as scheduled and puts all dependent ready
    /// instructions into the ready-list.
    template <typename ReadyListType>
    void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
      SD->IsScheduled = true;
      DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");

      ScheduleData *BundleMember = SD;
      while (BundleMember) {
        // Handle the def-use chain dependencies.
        for (Use &U : BundleMember->Inst->operands()) {
          ScheduleData *OpDef = getScheduleData(U.get());
          if (OpDef && OpDef->hasValidDependencies() &&
              OpDef->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = OpDef->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (def): " << *DepBundle << "\n");
          }
        }
        // Handle the memory dependencies.
        for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
          if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (mem): " << *DepBundle << "\n");
          }
        }
        BundleMember = BundleMember->NextInBundle;
      }
    }
    /// Put all instructions into the ReadyList which are ready for scheduling.
    template <typename ReadyListType>
    void initialFillReadyList(ReadyListType &ReadyList) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
        ScheduleData *SD = getScheduleData(I);
        if (SD->isSchedulingEntity() && SD->isReady()) {
          ReadyList.insert(SD);
          DEBUG(dbgs() << "SLP: initially in ready list: " << *I << "\n");
        }
      }
    }

    /// Checks if a bundle of instructions can be scheduled, i.e. has no
    /// cyclic dependencies. This is only a dry-run, no instructions are
    /// actually moved at this stage.
    bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP);

    /// Un-bundles a group of instructions.
    void cancelScheduling(ArrayRef<Value *> VL);

    /// Extends the scheduling region so that V is inside the region.
    /// \returns true if the region size is within the limit.
    bool extendSchedulingRegion(Value *V);

    /// Initialize the ScheduleData structures for new instructions in the
    /// scheduling region.
    void initScheduleData(Instruction *FromI, Instruction *ToI,
                          ScheduleData *PrevLoadStore,
                          ScheduleData *NextLoadStore);

    /// Updates the dependency information of a bundle and of all
    /// instructions/bundles which depend on the original bundle.
    void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
                               BoUpSLP *SLP);

    /// Sets all instructions in the scheduling region to un-scheduled.
    void resetSchedule();

    BasicBlock *BB;

    /// Simple memory allocation for ScheduleData.
    std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;

    /// The size of a ScheduleData array in ScheduleDataChunks.
    int ChunkSize;

    /// The allocator position in the current chunk, which is the last entry
    /// of ScheduleDataChunks.
    int ChunkPos;

    /// Attaches ScheduleData to Instruction.
    /// Note that the mapping survives during all vectorization iterations, i.e.
    /// ScheduleData structures are recycled.
    DenseMap<Value *, ScheduleData *> ScheduleDataMap;

    struct ReadyList : SmallVector<ScheduleData *, 8> {
      void insert(ScheduleData *SD) { push_back(SD); }
    };

    /// The ready-list for scheduling (only used for the dry-run).
    ReadyList ReadyInsts;

    /// The first instruction of the scheduling region.
    Instruction *ScheduleStart;

    /// The first instruction _after_ the scheduling region.
    Instruction *ScheduleEnd;

    /// The first memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *FirstLoadStoreInRegion;

    /// The last memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *LastLoadStoreInRegion;

    /// The current size of the scheduling region.
    int ScheduleRegionSize;

    /// The maximum size allowed for the scheduling region.
    int ScheduleRegionSizeLimit;

    /// The ID of the scheduling region. For a new vectorization iteration this
    /// is incremented which "removes" all ScheduleData from the region.
    int SchedulingRegionID;
  };

  /// Attaches the BlockScheduling structures to basic blocks.
  MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;

  /// Performs the "real" scheduling. Done before vectorization is actually
  /// performed in a basic block.
  void scheduleBlock(BlockScheduling *BS);
  /// List of users to ignore during scheduling and that don't need extracting.
  ArrayRef<Value *> UserIgnoreList;

  // Number of load-bundles, which contain consecutive loads.
  int NumLoadsWantToKeepOrder;

  // Number of load-bundles of size 2, which are consecutive loads if reversed.
  int NumLoadsWantToChangeOrder;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  AssumptionCache *AC;
  DemandedBits *DB;
  const DataLayout *DL;
  unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
  unsigned MinVecRegSize; // Set by cl::opt (default: 128).

  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;

  /// A map of scalar integer values to the smallest bit width with which they
  /// can legally be represented.
  MapVector<Value *, uint64_t> MinBWs;
};

#ifndef NDEBUG
raw_ostream &operator<<(raw_ostream &os, const BoUpSLP::ScheduleData &SD) {
  SD.dump(os);
  return os;
}
#endif

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ArrayRef<Value *> UserIgnoreLst) {
  deleteTree();
  UserIgnoreList = UserIgnoreLst;
  if (!getSameType(Roots))
    return;
  buildTree_rec(Roots, 0);

  // Collect the values that we need to extract from the tree.
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      for (User *U : Scalar->users()) {
        DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        Instruction *UserInst = dyn_cast<Instruction>(U);
        if (!UserInst)
          continue;

        // Skip in-tree scalars that become vectors
        if (ScalarToTreeEntry.count(U)) {
          int Idx = ScalarToTreeEntry[U];
          TreeEntry *UseEntry = &VectorizableTree[Idx];
          Value *UseScalar = UseEntry->Scalars[0];
          // Some in-tree scalars will remain as scalar in vectorized
          // instructions. If that is the case, the one in Lane 0 will
          // be used.
          if (UseScalar != U ||
              !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
            DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
                         << ".\n");
            assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
            continue;
          }
        }

        // Ignore users in the user ignore list.
        if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(),
                      UserInst) != UserIgnoreList.end())
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane "
                     << Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
      }
    }
  }
}

void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
  bool SameTy = getSameType(VL); (void)SameTy;
  bool isAltShuffle = false;
  assert(SameTy && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false);
    return;
  }
  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false);
      return;
    }
  unsigned Opcode = getSameOpcode(VL);

  // Check that this shuffle vector refers to the alternate
  // sequence of opcodes.
  if (Opcode == Instruction::ShuffleVector) {
    Instruction *I0 = dyn_cast<Instruction>(VL[0]);
    unsigned Op = I0->getOpcode();
    if (Op != Instruction::ShuffleVector)
      isAltShuffle = true;
  }

  // If all of the operands are identical or constant, we have a simple
  // solution.
  if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) || !Opcode) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Don't vectorize ephemeral values.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (EphValues.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i]
                   << ") is ephemeral.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // Check if this is a duplicate of another entry.
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i]
                   << ") is already in tree.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // If any of the scalars is marked as a value that needs to stay scalar then
  // we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  BasicBlock *BB = VL0->getParent();

  if (!DT->isReachableFromEntry(BB)) {
    // Don't go into unreachable blocks. They may contain instructions with
    // dependency cycles which confuse the final scheduling.
    DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
    newTreeEntry(VL, false);
    return;
  }
  // Check that every instruction appears once in this bundle.
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    for (unsigned j = i + 1; j < e; ++j)
      if (VL[i] == VL[j]) {
        DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
        newTreeEntry(VL, false);
        return;
      }

  auto &BSRef = BlocksSchedules[BB];
  if (!BSRef) {
    BSRef = llvm::make_unique<BlockScheduling>(BB);
  }
  BlockScheduling &BS = *BSRef.get();

  if (!BS.tryScheduleBundle(VL, this)) {
    DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
    assert((!BS.getScheduleData(VL[0]) ||
            !BS.getScheduleData(VL[0])->isPartOfBundle()) &&
           "tryScheduleBundle should cancelScheduling on failure");
    newTreeEntry(VL, false);
    return;
  }
  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = dyn_cast<PHINode>(VL0);

    // Check for terminator values (e.g. invoke).
    for (unsigned j = 0; j < VL.size(); ++j)
      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        TerminatorInst *Term = dyn_cast<TerminatorInst>(
            cast<PHINode>(VL[j])->getIncomingValueForBlock(
                PH->getIncomingBlock(i)));
        if (Term) {
          DEBUG(dbgs()
                << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          return;
        }
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<PHINode>(VL[j])->getIncomingValueForBlock(
            PH->getIncomingBlock(i)));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ExtractValue:
  case Instruction::ExtractElement: {
    bool Reuse = canReuseExtract(VL, Opcode);
    if (Reuse) {
      DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
    } else {
      BS.cancelScheduling(VL);
    }
    newTreeEntry(VL, Reuse);
    return;
  }
  case Instruction::Load: {
    // Check that a vectorized load would load the same memory as a scalar
    // load.
    // For example, we don't want to vectorize loads that are smaller than
    // 8 bits.
    // Even though we have a packed struct {<i2, i2, i2, i2>}, LLVM treats
    // loading/storing it as an i8 struct. If we vectorize loads/stores from
    // such a struct, we would read/write packed bits disagreeing with the
    // unvectorized version.
    Type *ScalarTy = VL[0]->getType();

    if (DL->getTypeSizeInBits(ScalarTy) !=
        DL->getTypeAllocSizeInBits(ScalarTy)) {
      BS.cancelScheduling(VL);
      newTreeEntry(VL, false);
      DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
      return;
    }
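    // Illustrative example (not from the original source): an i2 has a type
    // size of 2 bits but an alloc size of 8 bits, so the check above sends
    // i2 loads down the gather path; a packed <N x i2> load would read bits
    // that the byte-sized scalar loads never touch.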
    // Check if the loads are consecutive, or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
      LoadInst *L = cast<LoadInst>(VL[i]);
      if (!L->isSimple()) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
        return;
      }

      if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
        if (VL.size() == 2 && isConsecutiveAccess(VL[1], VL[0], *DL, *SE)) {
          ++NumLoadsWantToChangeOrder;
        }
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
        return;
      }
    }
    ++NumLoadsWantToKeepOrder;
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of loads.\n");
    return;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();
    for (unsigned i = 0; i < VL.size(); ++i) {
      Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
      if (Ty != SrcTy || !isValidElementType(Ty)) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
        return;
      }
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of casts.\n");

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    // Check that all of the compares have the same predicate.
    CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
    Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
    for (unsigned i = 1, e = VL.size(); i < e; ++i) {
      CmpInst *Cmp = cast<CmpInst>(VL[i]);
      if (Cmp->getPredicate() != P0 ||
          Cmp->getOperand(0)->getType() != ComparedTy) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of compares.\n");
    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of bin op.\n");

    // Sort operands of the instructions so that each side is more likely to
    // have the same opcode.
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
      ValueList Left, Right;
      reorderInputsAccordingToOpcode(VL, Left, Right);
      buildTree_rec(Left, Depth + 1);
      buildTree_rec(Right, Depth + 1);
      return;
    }

    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::GetElementPtr: {
    // We don't combine GEPs with complicated (nested) indexing.
    for (unsigned j = 0; j < VL.size(); ++j) {
      if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
        DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        return;
      }
    }

    // We can't combine several GEPs into one vector if they operate on
    // different types.
    Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
    for (unsigned j = 0; j < VL.size(); ++j) {
      Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
      if (Ty0 != CurTy) {
        DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        return;
      }
    }

    // We don't combine GEPs with non-constant indexes.
    for (unsigned j = 0; j < VL.size(); ++j) {
      auto Op = cast<Instruction>(VL[j])->getOperand(1);
      if (!isa<ConstantInt>(Op)) {
        DEBUG(
            dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        return;
      }
    }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
    for (unsigned i = 0, e = 2; i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
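  // Illustrative example (not from the original source): the checks above
  // admit a bundle like
  //   %g0 = getelementptr i32, i32* %p, i64 0
  //   %g1 = getelementptr i32, i32* %p, i64 1
  // (one constant index each, same source type), while nested or
  // non-constant indexing falls back to gathering.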
  case Instruction::Store: {
    // Check if the stores are consecutive, or if we need to swizzle them.
    for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
      if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
        return;
      }

    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a vector of stores.\n");

    ValueList Operands;
    for (unsigned j = 0; j < VL.size(); ++j)
      Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));

    buildTree_rec(Operands, Depth + 1);
    return;
  }
  case Instruction::Call: {
    // Check if the calls are all to the same vectorizable intrinsic.
    CallInst *CI = cast<CallInst>(VL[0]);
    // Check if this is an Intrinsic call or something that can be
    // represented by an intrinsic call.
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    if (!isTriviallyVectorizable(ID)) {
      BS.cancelScheduling(VL);
      newTreeEntry(VL, false);
      DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
      return;
    }
    Function *Int = CI->getCalledFunction();
    Value *A1I = nullptr;
    if (hasVectorInstrinsicScalarOpd(ID, 1))
      A1I = CI->getArgOperand(1);
    for (unsigned i = 1, e = VL.size(); i != e; ++i) {
      CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
      if (!CI2 || CI2->getCalledFunction() != Int ||
          getVectorIntrinsicIDForCall(CI2, TLI) != ID) {
        BS.cancelScheduling(VL);
        newTreeEntry(VL, false);
        DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
                     << "\n");
        return;
      }
      // ctlz, cttz and powi are special intrinsics whose second argument
      // must be the same in order for them to be vectorized.
      if (hasVectorInstrinsicScalarOpd(ID, 1)) {
        Value *A1J = CI2->getArgOperand(1);
        if (A1I != A1J) {
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
                       << " argument " << A1I << "!=" << A1J << "\n");
          return;
        }
      }
    }

    newTreeEntry(VL, true);
    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j) {
        CallInst *CI2 = dyn_cast<CallInst>(VL[j]);
        Operands.push_back(CI2->getArgOperand(i));
      }
      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  case Instruction::ShuffleVector: {
    // If this is not an alternate sequence of opcodes like add-sub
    // then do not vectorize this instruction.
    if (!isAltShuffle) {
      BS.cancelScheduling(VL);
      newTreeEntry(VL, false);
      DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
      return;
    }
    newTreeEntry(VL, true);
    DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");

    // Reorder operands if reordering would enable vectorization.
    if (isa<BinaryOperator>(VL0)) {
      ValueList Left, Right;
      reorderAltShuffleOperands(VL, Left, Right);
      buildTree_rec(Left, Depth + 1);
      buildTree_rec(Right, Depth + 1);
      return;
    }
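    // Illustrative example (assumption, not from the original source): an
    // alternating bundle {a0+b0, a1-b1, a2+b2, a3-b3} is later emitted as
    //   %add = fadd <4 x float> %a, %b
    //   %sub = fsub <4 x float> %a, %b
    //   %r   = shufflevector %add, %sub, <i32 0, i32 5, i32 2, i32 7>
    // taking even lanes from the add and odd lanes from the sub.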
    for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
      ValueList Operands;
      // Prepare the operand vector.
      for (unsigned j = 0; j < VL.size(); ++j)
        Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));

      buildTree_rec(Operands, Depth + 1);
    }
    return;
  }
  default:
    BS.cancelScheduling(VL);
    newTreeEntry(VL, false);
    DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
    return;
  }
}

unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
  unsigned N;
  Type *EltTy;
  auto *ST = dyn_cast<StructType>(T);
  if (ST) {
    N = ST->getNumElements();
    EltTy = *ST->element_begin();
  } else {
    N = cast<ArrayType>(T)->getNumElements();
    EltTy = cast<ArrayType>(T)->getElementType();
  }
  if (!isValidElementType(EltTy))
    return 0;
  uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N));
  if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize ||
      VTSize != DL.getTypeStoreSizeInBits(T))
    return 0;
  if (ST) {
    // Check that struct is homogeneous.
    for (const auto *Ty : ST->elements())
      if (Ty != EltTy)
        return 0;
  }
  return N;
}

bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const {
  assert(Opcode == Instruction::ExtractElement ||
         Opcode == Instruction::ExtractValue);
  assert(Opcode == getSameOpcode(VL) && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *VL0 = VL[0];
  Instruction *E0 = cast<Instruction>(VL0);
  Value *Vec = E0->getOperand(0);

  // We have to extract from a vector/aggregate with the same number of
  // elements.
  unsigned NElts;
  if (Opcode == Instruction::ExtractValue) {
    const DataLayout &DL = E0->getModule()->getDataLayout();
    NElts = canMapToVector(Vec->getType(), DL);
    if (!NElts)
      return false;
    // Check if load can be rewritten as load of vector.
    LoadInst *LI = dyn_cast<LoadInst>(Vec);
    if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
      return false;
  } else {
    NElts = Vec->getType()->getVectorNumElements();
  }

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
  if (!matchExtractIndex(E0, 0, Opcode))
    return false;

  for (unsigned i = 1, e = VL.size(); i < e; ++i) {
    Instruction *E = cast<Instruction>(VL[i]);
    if (!matchExtractIndex(E, i, Opcode))
      return false;
    if (E->getOperand(0) != Vec)
      return false;
  }

  return true;
}

int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value *> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
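  // Note (added for illustration, not in the original source): every case
  // below returns VecCost - ScalarCost, so a negative entry cost means the
  // vector form is expected to be cheaper than its scalar expansion.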
  // If we have computed a smaller type for the expression, update VecTy so
  // that the costs will be accurate.
  if (MinBWs.count(VL[0]))
    VecTy = VectorType::get(IntegerType::get(F->getContext(), MinBWs[VL[0]]),
                            VL.size());

  if (E->NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isSplat(VL)) {
      return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    }
    return getGatherCost(E->Scalars);
  }
  unsigned Opcode = getSameOpcode(VL);
  assert(Opcode && getSameType(VL) && getSameBlock(VL) && "Invalid VL");
  Instruction *VL0 = cast<Instruction>(VL[0]);
  switch (Opcode) {
  case Instruction::PHI: {
    return 0;
  }
  case Instruction::ExtractValue:
  case Instruction::ExtractElement: {
    if (canReuseExtract(VL, Opcode)) {
      int DeadCost = 0;
      for (unsigned i = 0, e = VL.size(); i < e; ++i) {
        Instruction *E = cast<Instruction>(VL[i]);
        if (E->hasOneUse())
          // Take credit for instruction that will become dead.
          DeadCost +=
              TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
      }
      return -DeadCost;
    }
    return getGatherCost(VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();

    // Calculate the cost of this instruction.
    int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
                                                       VL0->getType(), SrcTy);

    VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
    int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
    return VecCost - ScalarCost;
  }
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Calculate the cost of this instruction.
    int ScalarCost = 0;
    int VecCost = 0;
    if (Opcode == Instruction::FCmp || Opcode == Instruction::ICmp ||
        Opcode == Instruction::Select) {
      VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
      ScalarCost =
          VecTy->getNumElements() *
          TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
      VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
    } else {
      // Certain instructions can be cheaper to vectorize if they have a
      // constant second vector operand.
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;
      TargetTransformInfo::OperandValueProperties Op1VP =
          TargetTransformInfo::OP_None;
      TargetTransformInfo::OperandValueProperties Op2VP =
          TargetTransformInfo::OP_None;

      // If all operands are exactly the same ConstantInt then set the
      // operand kind to OK_UniformConstantValue.
      // If instead not all operands are constants, then set the operand kind
      // to OK_AnyValue. If all operands are constants but not the same,
      // then set the operand kind to OK_NonUniformConstantValue.
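      // Illustrative example (not from the original source): for the bundle
      // {x << 2, y << 2, z << 2, w << 2} every second operand is the same
      // ConstantInt, so Op2VK stays OK_UniformConstantValue, and since 2 is
      // a power of two Op2VP is upgraded to OP_PowerOf2 below.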
      // If instead not all operands are constants, then set the operand kind
      // to OK_AnyValue. If all operands are constants but not the same,
      // then set the operand kind to OK_NonUniformConstantValue.
      ConstantInt *CInt = nullptr;
      for (unsigned i = 0; i < VL.size(); ++i) {
        const Instruction *I = cast<Instruction>(VL[i]);
        if (!isa<ConstantInt>(I->getOperand(1))) {
          Op2VK = TargetTransformInfo::OK_AnyValue;
          break;
        }
        if (i == 0) {
          CInt = cast<ConstantInt>(I->getOperand(1));
          continue;
        }
        if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
            CInt != cast<ConstantInt>(I->getOperand(1)))
          Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      }
      // FIXME: Currently the cost-model modification for division by a
      // power of 2 is only handled for X86 and AArch64. Add support for
      // other targets.
      if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt &&
          CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;

      ScalarCost = VecTy->getNumElements() *
                   TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK,
                                               Op1VP, Op2VP);
      VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK,
                                            Op1VP, Op2VP);
    }
    return VecCost - ScalarCost;
  }
  case Instruction::GetElementPtr: {
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_UniformConstantValue;

    int ScalarCost =
        VecTy->getNumElements() *
        TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
    int VecCost =
        TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);

    return VecCost - ScalarCost;
  }
  case Instruction::Load: {
    // Cost of wide load - cost of scalar loads.
    int ScalarLdCost = VecTy->getNumElements() *
                       TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
    int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
    return VecLdCost - ScalarLdCost;
  }
  case Instruction::Store: {
    // We know that we can merge the stores. Calculate the cost.
    int ScalarStCost =
        VecTy->getNumElements() *
        TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
    int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
    return VecStCost - ScalarStCost;
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

    // Calculate the cost of the scalar and vector calls.
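    // The scalar cost is the intrinsic's cost on the scalar argument types
    // multiplied by the bundle width; the vector cost queries the same
    // intrinsic with every argument type widened to VecTy's element count.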
    SmallVector<Type *, 4> ScalarTys, VecTys;
    for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op) {
      ScalarTys.push_back(CI->getArgOperand(op)->getType());
      VecTys.push_back(VectorType::get(CI->getArgOperand(op)->getType(),
                                       VecTy->getNumElements()));
    }

    FastMathFlags FMF;
    if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
      FMF = FPMO->getFastMathFlags();

    int ScalarCallCost =
        VecTy->getNumElements() *
        TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF);

    int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys, FMF);

    DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
                 << " (" << VecCallCost << "-" << ScalarCallCost << ")"
                 << " for " << *CI << "\n");

    return VecCallCost - ScalarCallCost;
  }
  case Instruction::ShuffleVector: {
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_AnyValue;
    int ScalarCost = 0;
    int VecCost = 0;
    for (unsigned i = 0; i < VL.size(); ++i) {
      Instruction *I = cast<Instruction>(VL[i]);
      if (!I)
        break;
      ScalarCost +=
          TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
    }
    // VecCost is equal to the sum of the cost of creating 2 vectors
    // and the cost of creating the shuffle.
    Instruction *I0 = cast<Instruction>(VL[0]);
    VecCost =
        TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK);
    Instruction *I1 = cast<Instruction>(VL[1]);
    VecCost +=
        TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK);
    VecCost +=
        TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0);
    return VecCost - ScalarCost;
  }
  default:
    llvm_unreachable("Unknown instruction");
  }
}

bool BoUpSLP::isFullyVectorizableTinyTree() {
  DEBUG(dbgs() << "SLP: Check whether the tree with height "
               << VectorizableTree.size() << " is fully vectorizable.\n");

  // We only handle trees of height 2.
  if (VectorizableTree.size() != 2)
    return false;

  // Handle splat and all-constants stores.
  if (!VectorizableTree[0].NeedToGather &&
      (allConstant(VectorizableTree[1].Scalars) ||
       isSplat(VectorizableTree[1].Scalars)))
    return true;

  // Gathering cost would be too much for tiny trees.
  if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
    return false;

  return true;
}

int BoUpSLP::getSpillCost() {
  // Walk from the bottom of the tree to the top, tracking which values are
  // live. When we see a call instruction that is not part of our tree,
  // query TTI to see if there is a cost to keeping values live over it
  // (for example, if spills and fills are required).
  unsigned BundleWidth = VectorizableTree.front().Scalars.size();
  int Cost = 0;

  SmallPtrSet<Instruction *, 4> LiveValues;
  Instruction *PrevInst = nullptr;

  for (unsigned N = 0; N < VectorizableTree.size(); ++N) {
    Instruction *Inst = dyn_cast<Instruction>(VectorizableTree[N].Scalars[0]);
    if (!Inst)
      continue;

    if (!PrevInst) {
      PrevInst = Inst;
      continue;
    }

    // Update LiveValues.
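    // PrevInst's own definition has now been reached, so it drops out of the
    // live set; its in-tree instruction operands become live, since they are
    // defined further up the tree.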
    LiveValues.erase(PrevInst);
    for (auto &J : PrevInst->operands()) {
      if (isa<Instruction>(&*J) && ScalarToTreeEntry.count(&*J))
        LiveValues.insert(cast<Instruction>(&*J));
    }

    DEBUG(
        dbgs() << "SLP: #LV: " << LiveValues.size();
        for (auto *X : LiveValues)
          dbgs() << " " << X->getName();
        dbgs() << ", Looking at ";
        Inst->dump();
    );

    // Now find the sequence of instructions between PrevInst and Inst.
    BasicBlock::reverse_iterator InstIt(Inst->getIterator()),
        PrevInstIt(PrevInst->getIterator());
    --PrevInstIt;
    while (InstIt != PrevInstIt) {
      if (PrevInstIt == PrevInst->getParent()->rend()) {
        PrevInstIt = Inst->getParent()->rbegin();
        continue;
      }

      if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) {
        SmallVector<Type *, 4> V;
        for (auto *II : LiveValues)
          V.push_back(VectorType::get(II->getType(), BundleWidth));
        Cost += TTI->getCostOfKeepingLiveOverCall(V);
      }

      ++PrevInstIt;
    }

    PrevInst = Inst;
  }

  return Cost;
}

int BoUpSLP::getTreeCost() {
  int Cost = 0;
  DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
               << VectorizableTree.size() << ".\n");

  // We only vectorize tiny trees if they are fully vectorizable.
  if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) {
    if (VectorizableTree.empty()) {
      assert(!ExternalUses.size() && "We should not have any external users");
    }
    return INT_MAX;
  }

  unsigned BundleWidth = VectorizableTree[0].Scalars.size();

  for (TreeEntry &TE : VectorizableTree) {
    int C = getEntryCost(&TE);
    DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
                 << *TE.Scalars[0] << ".\n");
    Cost += C;
  }

  SmallSet<Value *, 16> ExtractCostCalculated;
  int ExtractCost = 0;
  for (ExternalUser &EU : ExternalUses) {
    // We only add extract cost once for the same scalar.
    if (!ExtractCostCalculated.insert(EU.Scalar).second)
      continue;

    // Uses by ephemeral values are free (because the ephemeral value will be
    // removed prior to code generation, and so the extraction will be
    // removed as well).
    if (EphValues.count(EU.User))
      continue;

    // If we plan to rewrite the tree in a smaller type, we will need to sign
    // extend the extracted value back to the original type. Here, we account
    // for the extract and the added cost of the sign extend if needed.
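    // For instance, extracting lane EU.Lane from a tree demoted to <N x i8>
    // whose original scalar type is i32 is modeled as an extractelement plus
    // a sext to i32; getExtractWithExtendCost lets targets that can fold the
    // two operations report a combined cost.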
    auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth);
    auto *ScalarRoot = VectorizableTree[0].Scalars[0];
    if (MinBWs.count(ScalarRoot)) {
      auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot]);
      VecTy = VectorType::get(MinTy, BundleWidth);
      ExtractCost += TTI->getExtractWithExtendCost(
          Instruction::SExt, EU.Scalar->getType(), VecTy, EU.Lane);
    } else {
      ExtractCost +=
          TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
    }
  }

  int SpillCost = getSpillCost();
  Cost += SpillCost + ExtractCost;

  DEBUG(dbgs() << "SLP: Spill Cost = " << SpillCost << ".\n"
               << "SLP: Extract Cost = " << ExtractCost << ".\n"
               << "SLP: Total Cost = " << Cost << ".\n");
  return Cost;
}

int BoUpSLP::getGatherCost(Type *Ty) {
  int Cost = 0;
  for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
    Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
  return Cost;
}

int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  return getGatherCost(VecTy);
}

// Reorder commutative operations in an alternate shuffle if the resulting
// vectors are consecutive loads. This would allow us to vectorize the tree.
// If we have something like:
//   load a[0] - load b[0]
//   load b[1] + load a[1]
//   load a[2] - load b[2]
//   load a[3] + load b[3]
// Reordering the second pair (load b[1], load a[1]) would allow us to
// vectorize this code.
void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL,
                                        SmallVectorImpl<Value *> &Left,
                                        SmallVectorImpl<Value *> &Right) {
  // Push the left and right operands of each binary operation into Left and
  // Right.
  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    Left.push_back(cast<Instruction>(VL[i])->getOperand(0));
    Right.push_back(cast<Instruction>(VL[i])->getOperand(1));
  }

  // Reorder if we have a commutative operation and consecutive accesses
  // are on either side of the alternate instructions.
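  // Scan adjacent lanes: if the load feeding one side of lane j is
  // consecutive with the load feeding the opposite side of lane j + 1,
  // swapping operands lines the loads up into a single vectorizable run.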
  for (unsigned j = 0; j < VL.size() - 1; ++j) {
    if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
        Instruction *VL1 = cast<Instruction>(VL[j]);
        Instruction *VL2 = cast<Instruction>(VL[j + 1]);
        if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j], Right[j]);
          continue;
        } else if (VL2->isCommutative() &&
                   isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
        // else unchanged
      }
    }
    if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
        Instruction *VL1 = cast<Instruction>(VL[j]);
        Instruction *VL2 = cast<Instruction>(VL[j + 1]);
        if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j], Right[j]);
          continue;
        } else if (VL2->isCommutative() &&
                   isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
        // else unchanged
      }
    }
  }
}

// Return true if I should be commuted before adding its left and right
// operands to the arrays Left and Right.
//
// The vectorizer is trying either to have all elements on one side be
// instructions with the same opcode, which enables further vectorization, or
// to have a splat, which lowers the vectorization cost.
static bool shouldReorderOperands(int i, Instruction &I,
                                  SmallVectorImpl<Value *> &Left,
                                  SmallVectorImpl<Value *> &Right,
                                  bool AllSameOpcodeLeft,
                                  bool AllSameOpcodeRight, bool SplatLeft,
                                  bool SplatRight) {
  Value *VLeft = I.getOperand(0);
  Value *VRight = I.getOperand(1);
  // If we have "SplatRight", try to see if commuting is needed to preserve it.
  if (SplatRight) {
    if (VRight == Right[i - 1])
      // Preserve SplatRight.
      return false;
    if (VLeft == Right[i - 1]) {
      // Commuting would preserve SplatRight, but we don't want to break
      // SplatLeft either, i.e. preserve the original order if possible.
      // (FIXME: why do we care?)
      if (SplatLeft && VLeft == Left[i - 1])
        return false;
      return true;
    }
  }
  // Symmetrically handle Right side.
  if (SplatLeft) {
    if (VLeft == Left[i - 1])
      // Preserve SplatLeft.
      return false;
    if (VRight == Left[i - 1])
      return true;
  }

  Instruction *ILeft = dyn_cast<Instruction>(VLeft);
  Instruction *IRight = dyn_cast<Instruction>(VRight);

  // If we have "AllSameOpcodeRight", try to see if the left operand preserves
  // it and the right operand does not; in that case we want to commute.
  if (AllSameOpcodeRight) {
    unsigned RightPrevOpcode = cast<Instruction>(Right[i - 1])->getOpcode();
    if (IRight && RightPrevOpcode == IRight->getOpcode())
      // Do not commute; a match on the right preserves AllSameOpcodeRight.
      return false;
    if (ILeft && RightPrevOpcode == ILeft->getOpcode()) {
      // We have a match and may want to commute, but first check if there is
      // not also a match on the existing operands on the Left to preserve
      // AllSameOpcodeLeft, i.e. preserve the original order if possible.
      // (FIXME: why do we care?)
      if (AllSameOpcodeLeft && ILeft &&
          cast<Instruction>(Left[i - 1])->getOpcode() == ILeft->getOpcode())
        return false;
      return true;
    }
  }
  // Symmetrically handle Left side.
  if (AllSameOpcodeLeft) {
    unsigned LeftPrevOpcode = cast<Instruction>(Left[i - 1])->getOpcode();
    if (ILeft && LeftPrevOpcode == ILeft->getOpcode())
      return false;
    if (IRight && LeftPrevOpcode == IRight->getOpcode())
      return true;
  }
  return false;
}

void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                             SmallVectorImpl<Value *> &Left,
                                             SmallVectorImpl<Value *> &Right) {

  if (VL.size()) {
    // Peel the first iteration out of the loop since there's nothing
    // interesting to do anyway and it simplifies the checks in the loop.
    auto VLeft = cast<Instruction>(VL[0])->getOperand(0);
    auto VRight = cast<Instruction>(VL[0])->getOperand(1);
    if (!isa<Instruction>(VRight) && isa<Instruction>(VLeft))
      // Favor having an instruction to the right. FIXME: why?
      std::swap(VLeft, VRight);
    Left.push_back(VLeft);
    Right.push_back(VRight);
  }

  // Keep track of whether we have instructions with all the same opcode on
  // one side.
  bool AllSameOpcodeLeft = isa<Instruction>(Left[0]);
  bool AllSameOpcodeRight = isa<Instruction>(Right[0]);
  // Keep track of whether we have one side with all the same value
  // (broadcast).
  bool SplatLeft = true;
  bool SplatRight = true;

  for (unsigned i = 1, e = VL.size(); i != e; ++i) {
    Instruction *I = cast<Instruction>(VL[i]);
    assert(I->isCommutative() && "Can only process commutative instruction");
    // Commute to favor either a splat or maximizing having the same opcodes
    // on one side.
    if (shouldReorderOperands(i, *I, Left, Right, AllSameOpcodeLeft,
                              AllSameOpcodeRight, SplatLeft, SplatRight)) {
      Left.push_back(I->getOperand(1));
      Right.push_back(I->getOperand(0));
    } else {
      Left.push_back(I->getOperand(0));
      Right.push_back(I->getOperand(1));
    }
    // Update Splat* and AllSameOpcode* after the insertion.
    SplatRight = SplatRight && (Right[i - 1] == Right[i]);
    SplatLeft = SplatLeft && (Left[i - 1] == Left[i]);
    AllSameOpcodeLeft = AllSameOpcodeLeft && isa<Instruction>(Left[i]) &&
                        (cast<Instruction>(Left[i - 1])->getOpcode() ==
                         cast<Instruction>(Left[i])->getOpcode());
    AllSameOpcodeRight = AllSameOpcodeRight && isa<Instruction>(Right[i]) &&
                         (cast<Instruction>(Right[i - 1])->getOpcode() ==
                          cast<Instruction>(Right[i])->getOpcode());
  }

  // If one operand ends up being a broadcast, return this operand order.
  if (SplatRight || SplatLeft)
    return;

  // Finally check if we can get a longer vectorizable chain by reordering
  // without breaking the good operand order detected above.
  // E.g. if we have something like:
  //   load a[0]  load b[0]
  //   load b[1]  load a[1]
  //   load a[2]  load b[2]
  //   load a[3]  load b[3]
  // Reordering the second pair (load b[1], load a[1]) would allow us to
  // vectorize this code, and we still retain the AllSameOpcode property.
  // FIXME: This load reordering might break AllSameOpcode in some rare cases
  // such as:
  //   add a[0],c[0]  load b[0]
  //   add a[1],c[2]  load b[1]
  //   b[2]           load b[2]
  //   add a[3],c[3]  load b[3]
  for (unsigned j = 0; j < VL.size() - 1; ++j) {
    if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
        if (isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
      }
    }
    if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
      if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
        if (isConsecutiveAccess(L, L1, *DL, *SE)) {
          std::swap(Left[j + 1], Right[j + 1]);
          continue;
        }
      }
    }
    // else unchanged
  }
}

void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
  Instruction *VL0 = cast<Instruction>(VL[0]);
  BasicBlock::iterator NextInst(VL0);
  ++NextInst;
  Builder.SetInsertPoint(VL0->getParent(), NextInst);
  Builder.SetCurrentDebugLocation(VL0->getDebugLoc());
}

Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
  Value *Vec = UndefValue::get(Ty);
  // Generate the 'InsertElement' instructions.
  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
    if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(Insrt);
      CSEBlocks.insert(Insrt->getParent());

      // Add to our 'need-to-extract' list.
      if (ScalarToTreeEntry.count(VL[i])) {
        int Idx = ScalarToTreeEntry[VL[i]];
        TreeEntry *E = &VectorizableTree[Idx];
        // Find which lane we need to extract.
        int FoundLane = -1;
        for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for?
          if (E->Scalars[Lane] == VL[i]) {
            FoundLane = Lane;
            break;
          }
        }
        assert(FoundLane >= 0 && "Could not find the correct lane");
        ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
      }
    }
  }

  return Vec;
}

Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
  SmallDenseMap<Value *, int>::const_iterator Entry =
      ScalarToTreeEntry.find(VL[0]);
  if (Entry != ScalarToTreeEntry.end()) {
    int Idx = Entry->second;
    const TreeEntry *En = &VectorizableTree[Idx];
    if (En->isSame(VL) && En->VectorizedValue)
      return En->VectorizedValue;
  }
  return nullptr;
}

Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    if (E->isSame(VL))
      return vectorizeTree(E);
  }

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  return Gather(VL, VecTy);
}

Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
  Type *ScalarTy = VL0->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());

  if (E->NeedToGather) {
    setInsertPointAfterBundle(E->Scalars);
    return Gather(E->Scalars, VecTy);
  }

  unsigned Opcode = getSameOpcode(E->Scalars);

  switch (Opcode) {
  case Instruction::PHI: {
    PHINode *PH = dyn_cast<PHINode>(VL0);
    Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
    Builder.SetCurrentDebugLocation(PH->getDebugLoc());
    PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
    E->VectorizedValue = NewPhi;

    // PHINodes may have multiple entries from the same block. We want to
    // visit every block once.
    SmallSet<BasicBlock *, 4> VisitedBBs;

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      BasicBlock *IBB = PH->getIncomingBlock(i);

      if (!VisitedBBs.insert(IBB).second) {
        NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
        continue;
      }

      // Prepare the operand vector.
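      // For this predecessor, collect the scalar incoming value of every PHI
      // in the bundle; together these form the vector PHI's incoming value
      // for IBB.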
      for (Value *V : E->Scalars)
        Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(IBB));

      Builder.SetInsertPoint(IBB->getTerminator());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      Value *Vec = vectorizeTree(Operands);
      NewPhi->addIncoming(Vec, IBB);
    }

    assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
           "Invalid number of incoming values");
    return NewPhi;
  }

  case Instruction::ExtractElement: {
    if (canReuseExtract(E->Scalars, Instruction::ExtractElement)) {
      Value *V = VL0->getOperand(0);
      E->VectorizedValue = V;
      return V;
    }
    return Gather(E->Scalars, VecTy);
  }
  case Instruction::ExtractValue: {
    if (canReuseExtract(E->Scalars, Instruction::ExtractValue)) {
      LoadInst *LI = cast<LoadInst>(VL0->getOperand(0));
      Builder.SetInsertPoint(LI);
      PointerType *PtrTy =
          PointerType::get(VecTy, LI->getPointerAddressSpace());
      Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
      LoadInst *V = Builder.CreateAlignedLoad(Ptr, LI->getAlignment());
      E->VectorizedValue = V;
      return propagateMetadata(V, E->Scalars);
    }
    return Gather(E->Scalars, VecTy);
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    ValueList INVL;
    for (Value *V : E->Scalars)
      INVL.push_back(cast<Instruction>(V)->getOperand(0));

    setInsertPointAfterBundle(E->Scalars);

    Value *InVec = vectorizeTree(INVL);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CastInst *CI = dyn_cast<CastInst>(VL0);
    Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::FCmp:
  case Instruction::ICmp: {
    ValueList LHSV, RHSV;
    for (Value *V : E->Scalars) {
      LHSV.push_back(cast<Instruction>(V)->getOperand(0));
      RHSV.push_back(cast<Instruction>(V)->getOperand(1));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *L = vectorizeTree(LHSV);
    Value *R = vectorizeTree(RHSV);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
    Value *V;
    if (Opcode == Instruction::FCmp)
      V = Builder.CreateFCmp(P0, L, R);
    else
      V = Builder.CreateICmp(P0, L, R);

    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::Select: {
    ValueList TrueVec, FalseVec, CondVec;
    for (Value *V : E->Scalars) {
      CondVec.push_back(cast<Instruction>(V)->getOperand(0));
      TrueVec.push_back(cast<Instruction>(V)->getOperand(1));
      FalseVec.push_back(cast<Instruction>(V)->getOperand(2));
    }

    setInsertPointAfterBundle(E->Scalars);

    Value *Cond = vectorizeTree(CondVec);
    Value *True = vectorizeTree(TrueVec);
    Value *False = vectorizeTree(FalseVec);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    Value *V = Builder.CreateSelect(Cond, True, False);
    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    ValueList LHSVL, RHSVL;
    if (isa<BinaryOperator>(VL0) && VL0->isCommutative())
      reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL);
    else
      for (Value *V : E->Scalars) {
        LHSVL.push_back(cast<Instruction>(V)->getOperand(0));
        RHSVL.push_back(cast<Instruction>(V)->getOperand(1));
      }

    setInsertPointAfterBundle(E->Scalars);

    Value *LHS = vectorizeTree(LHSVL);
    Value *RHS = vectorizeTree(RHSVL);

    if (LHS == RHS && isa<Instruction>(LHS)) {
      assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order");
    }

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
    Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
    E->VectorizedValue = V;
    propagateIRFlags(E->VectorizedValue, E->Scalars);
    ++NumVectorInstructions;

    if (Instruction *I = dyn_cast<Instruction>(V))
      return propagateMetadata(I, E->Scalars);

    return V;
  }
  case Instruction::Load: {
    // Loads are inserted at the head of the tree because we don't want to
    // sink them all the way down past store instructions.
    setInsertPointAfterBundle(E->Scalars);

    LoadInst *LI = cast<LoadInst>(VL0);
    Type *ScalarLoadTy = LI->getType();
    unsigned AS = LI->getPointerAddressSpace();

    Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));

    // The pointer operand uses an in-tree scalar, so we add the new BitCast
    // to the ExternalUses list to make sure that an extract will be
    // generated in the future.
    if (ScalarToTreeEntry.count(LI->getPointerOperand()))
      ExternalUses.push_back(
          ExternalUser(LI->getPointerOperand(), cast<User>(VecPtr), 0));

    unsigned Alignment = LI->getAlignment();
    LI = Builder.CreateLoad(VecPtr);
    if (!Alignment) {
      Alignment = DL->getABITypeAlignment(ScalarLoadTy);
    }
    LI->setAlignment(Alignment);
    E->VectorizedValue = LI;
    ++NumVectorInstructions;
    return propagateMetadata(LI, E->Scalars);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(VL0);
    unsigned Alignment = SI->getAlignment();
    unsigned AS = SI->getPointerAddressSpace();

    ValueList ValueOp;
    for (Value *V : E->Scalars)
      ValueOp.push_back(cast<StoreInst>(V)->getValueOperand());

    setInsertPointAfterBundle(E->Scalars);

    Value *VecValue = vectorizeTree(ValueOp);
    Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
                                          VecTy->getPointerTo(AS));
    StoreInst *S = Builder.CreateStore(VecValue, VecPtr);

    // The pointer operand uses an in-tree scalar, so we add the new BitCast
    // to the ExternalUses list to make sure that an extract will be
    // generated in the future.
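    // Without this entry, the scalar pointer would be replaced with undef
    // and erased when its own tree entry is vectorized, leaving the new
    // BitCast with a dead operand.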
    if (ScalarToTreeEntry.count(SI->getPointerOperand()))
      ExternalUses.push_back(
          ExternalUser(SI->getPointerOperand(), cast<User>(VecPtr), 0));

    if (!Alignment) {
      Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType());
    }
    S->setAlignment(Alignment);
    E->VectorizedValue = S;
    ++NumVectorInstructions;
    return propagateMetadata(S, E->Scalars);
  }
  case Instruction::GetElementPtr: {
    setInsertPointAfterBundle(E->Scalars);

    ValueList Op0VL;
    for (Value *V : E->Scalars)
      Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0));

    Value *Op0 = vectorizeTree(Op0VL);

    std::vector<Value *> OpVecs;
    for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
         ++j) {
      ValueList OpVL;
      for (Value *V : E->Scalars)
        OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j));

      Value *OpVec = vectorizeTree(OpVL);
      OpVecs.push_back(OpVec);
    }

    Value *V = Builder.CreateGEP(
        cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs);
    E->VectorizedValue = V;
    ++NumVectorInstructions;

    if (Instruction *I = dyn_cast<Instruction>(V))
      return propagateMetadata(I, E->Scalars);

    return V;
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    setInsertPointAfterBundle(E->Scalars);
    Function *FI;
    Intrinsic::ID IID = Intrinsic::not_intrinsic;
    Value *ScalarArg = nullptr;
    if (CI && (FI = CI->getCalledFunction())) {
      IID = FI->getIntrinsicID();
    }
    std::vector<Value *> OpVecs;
    for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
      ValueList OpVL;
      // ctlz, cttz, and powi are special intrinsics whose second argument is
      // a scalar. This argument should not be vectorized.
      if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) {
        CallInst *CEI = cast<CallInst>(E->Scalars[0]);
        ScalarArg = CEI->getArgOperand(j);
        OpVecs.push_back(CEI->getArgOperand(j));
        continue;
      }
      for (Value *V : E->Scalars) {
        CallInst *CEI = cast<CallInst>(V);
        OpVL.push_back(CEI->getArgOperand(j));
      }

      Value *OpVec = vectorizeTree(OpVL);
      DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
      OpVecs.push_back(OpVec);
    }

    Module *M = F->getParent();
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    Type *Tys[] = {VectorType::get(CI->getType(), E->Scalars.size())};
    Function *CF = Intrinsic::getDeclaration(M, ID, Tys);
    Value *V = Builder.CreateCall(CF, OpVecs);

    // The scalar argument uses an in-tree scalar, so we add the new
    // vectorized call to the ExternalUses list to make sure that an extract
    // will be generated in the future.
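    // This covers intrinsics such as powi, whose scalar second argument was
    // deliberately left unvectorized above: if that argument is itself an
    // in-tree scalar, the entry ensures an extract regenerates it for the
    // vector call.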
    if (ScalarArg && ScalarToTreeEntry.count(ScalarArg))
      ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));

    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::ShuffleVector: {
    ValueList LHSVL, RHSVL;
    assert(isa<BinaryOperator>(VL0) && "Invalid Shuffle Vector Operand");
    reorderAltShuffleOperands(E->Scalars, LHSVL, RHSVL);
    setInsertPointAfterBundle(E->Scalars);

    Value *LHS = vectorizeTree(LHSVL);
    Value *RHS = vectorizeTree(RHSVL);

    if (Value *V = alreadyVectorized(E->Scalars))
      return V;

    // Create a vector of LHS op1 RHS.
    BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0);
    Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS);

    // Create a vector of LHS op2 RHS.
    Instruction *VL1 = cast<Instruction>(E->Scalars[1]);
    BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1);
    Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS);

    // Create a shuffle to take the alternate operations from each vector.
    // Also, gather up the odd and even scalar ops to propagate IR flags to
    // each vector operation.
    ValueList OddScalars, EvenScalars;
    unsigned e = E->Scalars.size();
    SmallVector<Constant *, 8> Mask(e);
    for (unsigned i = 0; i < e; ++i) {
      if (i & 1) {
        Mask[i] = Builder.getInt32(e + i);
        OddScalars.push_back(E->Scalars[i]);
      } else {
        Mask[i] = Builder.getInt32(i);
        EvenScalars.push_back(E->Scalars[i]);
      }
    }

    Value *ShuffleMask = ConstantVector::get(Mask);
    propagateIRFlags(V0, EvenScalars);
    propagateIRFlags(V1, OddScalars);

    Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
    E->VectorizedValue = V;
    ++NumVectorInstructions;
    if (Instruction *I = dyn_cast<Instruction>(V))
      return propagateMetadata(I, E->Scalars);

    return V;
  }
  default:
    llvm_unreachable("unknown inst");
  }
  return nullptr;
}

Value *BoUpSLP::vectorizeTree() {

  // All blocks must be scheduled before any instructions are inserted.
  for (auto &BSIter : BlocksSchedules) {
    scheduleBlock(BSIter.second.get());
  }

  Builder.SetInsertPoint(&F->getEntryBlock().front());
  auto *VectorRoot = vectorizeTree(&VectorizableTree[0]);

  // If the vectorized tree can be rewritten in a smaller type, we truncate
  // the vectorized root. InstCombine will then rewrite the entire expression.
  // We sign extend the extracted values below.
  auto *ScalarRoot = VectorizableTree[0].Scalars[0];
  if (MinBWs.count(ScalarRoot)) {
    if (auto *I = dyn_cast<Instruction>(VectorRoot))
      Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
    auto BundleWidth = VectorizableTree[0].Scalars.size();
    auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot]);
    auto *VecTy = VectorType::get(MinTy, BundleWidth);
    auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
    VectorizableTree[0].VectorizedValue = Trunc;
  }

  DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values.\n");

  // Extract all of the elements with the external uses.
  for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end();
       it != e; ++it) {
    Value *Scalar = it->Scalar;
    llvm::User *User = it->User;

    // Skip users that we already RAUW'd. This happens when one instruction
    // has multiple uses of the same value.
    if (std::find(Scalar->user_begin(), Scalar->user_end(), User) ==
        Scalar->user_end())
      continue;
    assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");

    int Idx = ScalarToTreeEntry[Scalar];
    TreeEntry *E = &VectorizableTree[Idx];
    assert(!E->NeedToGather && "Extracting from a gather list");

    Value *Vec = E->VectorizedValue;
    assert(Vec && "Can't find vectorizable value");

    Value *Lane = Builder.getInt32(it->Lane);
    // Generate extracts for out-of-tree users.
    // Find the insertion point for the extractelement lane.
    if (auto *VecI = dyn_cast<Instruction>(Vec)) {
      if (PHINode *PH = dyn_cast<PHINode>(User)) {
        for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
          if (PH->getIncomingValue(i) == Scalar) {
            TerminatorInst *IncomingTerminator =
                PH->getIncomingBlock(i)->getTerminator();
            if (isa<CatchSwitchInst>(IncomingTerminator)) {
              Builder.SetInsertPoint(VecI->getParent(),
                                     std::next(VecI->getIterator()));
            } else {
              Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
            }
            Value *Ex = Builder.CreateExtractElement(Vec, Lane);
            if (MinBWs.count(ScalarRoot))
              Ex = Builder.CreateSExt(Ex, Scalar->getType());
            CSEBlocks.insert(PH->getIncomingBlock(i));
            PH->setOperand(i, Ex);
          }
        }
      } else {
        Builder.SetInsertPoint(cast<Instruction>(User));
        Value *Ex = Builder.CreateExtractElement(Vec, Lane);
        if (MinBWs.count(ScalarRoot))
          Ex = Builder.CreateSExt(Ex, Scalar->getType());
        CSEBlocks.insert(cast<Instruction>(User)->getParent());
        User->replaceUsesOfWith(Scalar, Ex);
      }
    } else {
      Builder.SetInsertPoint(&F->getEntryBlock().front());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      if (MinBWs.count(ScalarRoot))
        Ex = Builder.CreateSExt(Ex, Scalar->getType());
      CSEBlocks.insert(&F->getEntryBlock());
      User->replaceUsesOfWith(Scalar, Ex);
    }

    DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
  }

  // For each vectorized value:
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];
      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      assert(Entry->VectorizedValue && "Can't find vectorizable value");

      Type *Ty = Scalar->getType();
      if (!Ty->isVoidTy()) {
#ifndef NDEBUG
        for (User *U : Scalar->users()) {
          DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");

          assert((ScalarToTreeEntry.count(U) ||
                  // It is legal to replace users in the ignorelist by undef.
                  (std::find(UserIgnoreList.begin(), UserIgnoreList.end(),
                             U) != UserIgnoreList.end())) &&
                 "Replacing out-of-tree value with undef");
        }
#endif
        Value *Undef = UndefValue::get(Ty);
        Scalar->replaceAllUsesWith(Undef);
      }
      DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
      eraseInstruction(cast<Instruction>(Scalar));
    }
  }

  Builder.ClearInsertionPoint();

  return VectorizableTree[0].VectorizedValue;
}

void BoUpSLP::optimizeGatherSequence() {
  DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
               << " gather sequence instructions.\n");
  // LICM InsertElementInst sequences.
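  // An insertelement whose inserted vector and element are both defined
  // outside the loop is loop-invariant, so it can be hoisted into the
  // preheader and executed once instead of on every iteration.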
  for (SetVector<Instruction *>::iterator it = GatherSeq.begin(),
                                          e = GatherSeq.end();
       it != e; ++it) {
    InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it);

    if (!Insert)
      continue;

    // Check if this block is inside a loop.
    Loop *L = LI->getLoopFor(Insert->getParent());
    if (!L)
      continue;

    // Check if it has a preheader.
    BasicBlock *PreHeader = L->getLoopPreheader();
    if (!PreHeader)
      continue;

    // If the vector or the element that we insert into it are
    // instructions that are defined in this basic block then we can't
    // hoist this instruction.
    Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
    Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
    if (CurrVec && L->contains(CurrVec))
      continue;
    if (NewElem && L->contains(NewElem))
      continue;

    // We can hoist this instruction. Move it to the pre-header.
    Insert->moveBefore(PreHeader->getTerminator());
  }

  // Make a list of all reachable blocks in our CSE queue.
  SmallVector<const DomTreeNode *, 8> CSEWorkList;
  CSEWorkList.reserve(CSEBlocks.size());
  for (BasicBlock *BB : CSEBlocks)
    if (DomTreeNode *N = DT->getNode(BB)) {
      assert(DT->isReachableFromEntry(N));
      CSEWorkList.push_back(N);
    }

  // Sort blocks by domination. This ensures we visit a block after all
  // blocks dominating it are visited.
  std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(),
                   [this](const DomTreeNode *A, const DomTreeNode *B) {
                     return DT->properlyDominates(A, B);
                   });

  // Perform an O(N^2) search over the gather sequences and merge identical
  // instructions. TODO: We can further optimize this scan if we split the
  // instructions into different buckets based on the insert lane.
  SmallVector<Instruction *, 16> Visited;
  for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
    assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
           "Worklist not sorted properly!");
    BasicBlock *BB = (*I)->getBlock();
    // For all instructions in blocks containing gather sequences:
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
      Instruction *In = &*it++;
      if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
        continue;

      // Check if we can replace this instruction with any of the
      // visited instructions.
      for (SmallVectorImpl<Instruction *>::iterator v = Visited.begin(),
                                                    ve = Visited.end();
           v != ve; ++v) {
        if (In->isIdenticalTo(*v) &&
            DT->dominates((*v)->getParent(), In->getParent())) {
          In->replaceAllUsesWith(*v);
          eraseInstruction(In);
          In = nullptr;
          break;
        }
      }
      if (In) {
        assert(std::find(Visited.begin(), Visited.end(), In) == Visited.end());
        Visited.push_back(In);
      }
    }
  }
  CSEBlocks.clear();
  GatherSeq.clear();
}

// Groups the instructions of a bundle into a single scheduling entity and
// schedules instructions until the bundle gets ready.
bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL,
                                                 BoUpSLP *SLP) {
  if (isa<PHINode>(VL[0]))
    return true;

  // Initialize the instruction bundle.
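  // A bundle ties the scalars of one tree entry into a single scheduling
  // entity: its members are linked through NextInBundle and share a
  // FirstInBundle, and the bundle only becomes ready once the dependencies
  // of every member are satisfied.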
  Instruction *OldScheduleEnd = ScheduleEnd;
  ScheduleData *PrevInBundle = nullptr;
  ScheduleData *Bundle = nullptr;
  bool ReSchedule = false;
  DEBUG(dbgs() << "SLP: bundle: " << *VL[0] << "\n");

  // Make sure that the scheduling region contains all
  // instructions of the bundle.
  for (Value *V : VL) {
    if (!extendSchedulingRegion(V))
      return false;
  }

  for (Value *V : VL) {
    ScheduleData *BundleMember = getScheduleData(V);
    assert(BundleMember &&
           "no ScheduleData for bundle member (maybe not in same basic block)");
    if (BundleMember->IsScheduled) {
      // A bundle member was scheduled as a single instruction before and now
      // needs to be scheduled as part of the bundle. We just get rid of the
      // existing schedule.
      DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
                   << " was already scheduled\n");
      ReSchedule = true;
    }
    assert(BundleMember->isSchedulingEntity() &&
           "bundle member already part of other bundle");
    if (PrevInBundle) {
      PrevInBundle->NextInBundle = BundleMember;
    } else {
      Bundle = BundleMember;
    }
    BundleMember->UnscheduledDepsInBundle = 0;
    Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;

    // Group the instructions to a bundle.
    BundleMember->FirstInBundle = Bundle;
    PrevInBundle = BundleMember;
  }
  if (ScheduleEnd != OldScheduleEnd) {
    // The scheduling region got new instructions at the lower end (or it is
    // a new region for the first bundle). This makes it necessary to
    // recalculate all dependencies.
    // It is seldom that this needs to be done a second time after adding the
    // initial bundle to the region.
    for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
      ScheduleData *SD = getScheduleData(I);
      SD->clearDependencies();
    }
    ReSchedule = true;
  }
  if (ReSchedule) {
    resetSchedule();
    initialFillReadyList(ReadyInsts);
  }

  DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
               << BB->getName() << "\n");

  calculateDependencies(Bundle, true, SLP);

  // Now try to schedule the new bundle. As soon as the bundle is "ready" it
  // means that there are no cyclic dependencies and we can schedule it.
  // Note that it's important that we don't "schedule" the bundle yet (see
  // cancelScheduling).
  while (!Bundle->isReady() && !ReadyInsts.empty()) {

    ScheduleData *pickedSD = ReadyInsts.back();
    ReadyInsts.pop_back();

    if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) {
      schedule(pickedSD, ReadyInsts);
    }
  }
  if (!Bundle->isReady()) {
    cancelScheduling(VL);
    return false;
  }
  return true;
}

void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL) {
  if (isa<PHINode>(VL[0]))
    return;

  ScheduleData *Bundle = getScheduleData(VL[0]);
  DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
  assert(!Bundle->IsScheduled &&
         "Can't cancel bundle which is already scheduled");
  assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
         "tried to unbundle something which is not a bundle");

  // Un-bundle: make single instructions out of the bundle.
  ScheduleData *BundleMember = Bundle;
  while (BundleMember) {
    assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
    BundleMember->FirstInBundle = BundleMember;
    ScheduleData *Next = BundleMember->NextInBundle;
    BundleMember->NextInBundle = nullptr;
    BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
    if (BundleMember->UnscheduledDepsInBundle == 0) {
      ReadyInsts.insert(BundleMember);
    }
    BundleMember = Next;
  }
}

bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V) {
  if (getScheduleData(V))
    return true;
  Instruction *I = dyn_cast<Instruction>(V);
  assert(I && "bundle member must be an instruction");
  assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
  if (!ScheduleStart) {
    // It's the first instruction in the new region.
    initScheduleData(I, I->getNextNode(), nullptr, nullptr);
    ScheduleStart = I;
    ScheduleEnd = I->getNextNode();
    assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
    DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
    return true;
  }
  // Search up and down at the same time, because we don't know if the new
  // instruction is above or below the existing scheduling region.
  BasicBlock::reverse_iterator UpIter(ScheduleStart->getIterator());
  BasicBlock::reverse_iterator UpperEnd = BB->rend();
  BasicBlock::iterator DownIter(ScheduleEnd);
  BasicBlock::iterator LowerEnd = BB->end();
  for (;;) {
    if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
      DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
      return false;
    }

    if (UpIter != UpperEnd) {
      if (&*UpIter == I) {
        initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
        ScheduleStart = I;
        DEBUG(dbgs() << "SLP: extend schedule region start to " << *I << "\n");
        return true;
      }
      UpIter++;
    }
    if (DownIter != LowerEnd) {
      if (&*DownIter == I) {
        initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
                         nullptr);
        ScheduleEnd = I->getNextNode();
        assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
        DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n");
        return true;
      }
      DownIter++;
    }
    assert((UpIter != UpperEnd || DownIter != LowerEnd) &&
           "instruction not found in block");
  }
  return true;
}

void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
                                                Instruction *ToI,
                                                ScheduleData *PrevLoadStore,
                                                ScheduleData *NextLoadStore) {
  ScheduleData *CurrentLoadStore = PrevLoadStore;
  for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
    ScheduleData *SD = ScheduleDataMap[I];
    if (!SD) {
      // Allocate a new ScheduleData for the instruction.
      if (ChunkPos >= ChunkSize) {
        ScheduleDataChunks.push_back(
            llvm::make_unique<ScheduleData[]>(ChunkSize));
        ChunkPos = 0;
      }
      SD = &(ScheduleDataChunks.back()[ChunkPos++]);
      ScheduleDataMap[I] = SD;
      SD->Inst = I;
    }
    assert(!isInSchedulingRegion(SD) &&
           "new ScheduleData already in scheduling region");
    SD->init(SchedulingRegionID);

    if (I->mayReadOrWriteMemory()) {
      // Update the linked list of memory accessing instructions.
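      // Memory-accessing instructions are chained through NextLoadStore so
      // that the memory-dependence walk in calculateDependencies can skip
      // over everything else in the region.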
      if (CurrentLoadStore) {
        CurrentLoadStore->NextLoadStore = SD;
      } else {
        FirstLoadStoreInRegion = SD;
      }
      CurrentLoadStore = SD;
    }
  }
  if (NextLoadStore) {
    if (CurrentLoadStore)
      CurrentLoadStore->NextLoadStore = NextLoadStore;
  } else {
    LastLoadStoreInRegion = CurrentLoadStore;
  }
}

void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
                                                     bool InsertInReadyList,
                                                     BoUpSLP *SLP) {
  assert(SD->isSchedulingEntity());

  SmallVector<ScheduleData *, 10> WorkList;
  WorkList.push_back(SD);

  while (!WorkList.empty()) {
    ScheduleData *SD = WorkList.back();
    WorkList.pop_back();

    ScheduleData *BundleMember = SD;
    while (BundleMember) {
      assert(isInSchedulingRegion(BundleMember));
      if (!BundleMember->hasValidDependencies()) {

        DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n");
        BundleMember->Dependencies = 0;
        BundleMember->resetUnscheduledDeps();

        // Handle def-use chain dependencies.
        for (User *U : BundleMember->Inst->users()) {
          if (isa<Instruction>(U)) {
            ScheduleData *UseSD = getScheduleData(U);
            if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
              BundleMember->Dependencies++;
              ScheduleData *DestBundle = UseSD->FirstInBundle;
              if (!DestBundle->IsScheduled) {
                BundleMember->incrementUnscheduledDeps(1);
              }
              if (!DestBundle->hasValidDependencies()) {
                WorkList.push_back(DestBundle);
              }
            }
          } else {
            // We are not sure whether this can ever happen, but we need to
            // be safe. This keeps the instruction/bundle from ever becoming
            // ready, which eventually disables vectorization.
            BundleMember->Dependencies++;
            BundleMember->incrementUnscheduledDeps(1);
          }
        }

        // Handle the memory dependencies.
        ScheduleData *DepDest = BundleMember->NextLoadStore;
        if (DepDest) {
          Instruction *SrcInst = BundleMember->Inst;
          MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
          bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
          unsigned numAliased = 0;
          unsigned DistToSrc = 1;

          while (DepDest) {
            assert(isInSchedulingRegion(DepDest));

            // We have two limits to reduce the complexity:
            // 1) AliasedCheckLimit: It's a small limit to reduce calls to
            //    SLP->isAliased (which is the expensive part in this loop).
            // 2) MaxMemDepDistance: It's for very large blocks and it aborts
            //    the whole loop (even if the loop is fast, it's quadratic).
            //    It's important for the loop break condition (see below) to
            //    check this limit even between two read-only instructions.
            if (DistToSrc >= MaxMemDepDistance ||
                ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
                 (numAliased >= AliasedCheckLimit ||
                  SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {

              // We increment the counter only if the locations are aliased
              // (instead of counting all alias checks). This gives a better
              // balance between reduced runtime and accurate dependencies.
              numAliased++;

              DepDest->MemoryDependencies.push_back(BundleMember);
              BundleMember->Dependencies++;
              ScheduleData *DestBundle = DepDest->FirstInBundle;
              if (!DestBundle->IsScheduled) {
                BundleMember->incrementUnscheduledDeps(1);
              }
              if (!DestBundle->hasValidDependencies()) {
                WorkList.push_back(DestBundle);
              }
            }
            DepDest = DepDest->NextLoadStore;

            // Example, explaining the loop break condition: Let's assume our
            // starting instruction is i0 and MaxMemDepDistance = 3.
            //
            //                      +--------v--v--v
            //             i0,i1,i2,i3,i4,i5,i6,i7,i8
            //             +--------^--^--^
            //
            // MaxMemDepDistance lets us stop alias-checking at i3 and we add
            // dependencies from i0 to i3,i4,.. (even if they are not
            // aliased). Previously we already added dependencies from i3 to
            // i6,i7,i8 (because of MaxMemDepDistance). As we added a
            // dependency from i0 to i3, we have transitive dependencies from
            // i0 to i6,i7,i8 and we can abort this loop at i6.
            if (DistToSrc >= 2 * MaxMemDepDistance)
              break;
            DistToSrc++;
          }
        }
      }
      BundleMember = BundleMember->NextInBundle;
    }
    if (InsertInReadyList && SD->isReady()) {
      ReadyInsts.push_back(SD);
      DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n");
    }
  }
}

void BoUpSLP::BlockScheduling::resetSchedule() {
  assert(ScheduleStart &&
         "tried to reset schedule on block which has not been scheduled");
  for (Instruction *I = ScheduleStart; I != ScheduleEnd;
       I = I->getNextNode()) {
    ScheduleData *SD = getScheduleData(I);
    assert(isInSchedulingRegion(SD));
    SD->IsScheduled = false;
    SD->resetUnscheduledDeps();
  }
  ReadyInsts.clear();
}

void BoUpSLP::scheduleBlock(BlockScheduling *BS) {

  if (!BS->ScheduleStart)
    return;

  DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");

  BS->resetSchedule();

  // For the real scheduling we use a more sophisticated ready-list: it is
  // sorted by the original instruction location. This lets the final schedule
  // be as close as possible to the original instruction order.
  struct ScheduleDataCompare {
    bool operator()(ScheduleData *SD1, ScheduleData *SD2) {
      return SD2->SchedulingPriority < SD1->SchedulingPriority;
    }
  };
  std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;

  // Ensure that all dependency data is updated and fill the ready-list with
  // initial instructions.
  int Idx = 0;
  int NumToSchedule = 0;
  for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
       I = I->getNextNode()) {
    ScheduleData *SD = BS->getScheduleData(I);
    assert(
        SD->isPartOfBundle() == (ScalarToTreeEntry.count(SD->Inst) != 0) &&
        "scheduler and vectorizer have different opinion on what is a bundle");
    SD->FirstInBundle->SchedulingPriority = Idx++;
    if (SD->isSchedulingEntity()) {
      BS->calculateDependencies(SD, false, this);
      NumToSchedule++;
    }
  }
  BS->initialFillReadyList(ReadyInsts);

  Instruction *LastScheduledInst = BS->ScheduleEnd;

  // Do the "real" scheduling.
  while (!ReadyInsts.empty()) {
    ScheduleData *picked = *ReadyInsts.begin();
    ReadyInsts.erase(ReadyInsts.begin());

    // Move the scheduled instruction(s) to their dedicated places, if not
    // there yet.
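    // The members of the picked bundle are spliced in directly after the
    // last scheduled instruction, so bundle members end up adjacent and in
    // bundle order.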
    ScheduleData *BundleMember = picked;
    while (BundleMember) {
      Instruction *pickedInst = BundleMember->Inst;
      if (LastScheduledInst->getNextNode() != pickedInst) {
        BS->BB->getInstList().remove(pickedInst);
        BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
                                     pickedInst);
      }
      LastScheduledInst = pickedInst;
      BundleMember = BundleMember->NextInBundle;
    }

    BS->schedule(picked, ReadyInsts);
    NumToSchedule--;
  }
  assert(NumToSchedule == 0 && "could not schedule all instructions");

  // Avoid duplicate scheduling of the block.
  BS->ScheduleStart = nullptr;
}

unsigned BoUpSLP::getVectorElementSize(Value *V) {
  // If V is a store, just return the width of the stored value without
  // traversing the expression tree. This is the common case.
  if (auto *Store = dyn_cast<StoreInst>(V))
    return DL->getTypeSizeInBits(Store->getValueOperand()->getType());

  // If V is not a store, we can traverse the expression tree to find loads
  // that feed it. The type of the loaded value may indicate a more suitable
  // width than V's type. We want to base the vector element size on the width
  // of memory operations where possible.
  SmallVector<Instruction *, 16> Worklist;
  SmallPtrSet<Instruction *, 16> Visited;
  if (auto *I = dyn_cast<Instruction>(V))
    Worklist.push_back(I);

  // Traverse the expression tree in bottom-up order looking for loads. If we
  // encounter an instruction we don't yet handle, we give up.
  auto MaxWidth = 0u;
  auto FoundUnknownInst = false;
  while (!Worklist.empty() && !FoundUnknownInst) {
    auto *I = Worklist.pop_back_val();
    Visited.insert(I);

    // We should only be looking at scalar instructions here. If the current
    // instruction has a vector type, give up.
    auto *Ty = I->getType();
    if (isa<VectorType>(Ty))
      FoundUnknownInst = true;

    // If the current instruction is a load, update MaxWidth to reflect the
    // width of the loaded value.
    else if (isa<LoadInst>(I))
      MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty));

    // Otherwise, we need to visit the operands of the instruction. We only
    // handle the interesting cases from buildTree here. If an operand is an
    // instruction we haven't yet visited, we add it to the worklist.
    else if (isa<PHINode>(I) || isa<CastInst>(I) ||
             isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
             isa<SelectInst>(I) || isa<BinaryOperator>(I)) {
      for (Use &U : I->operands())
        if (auto *J = dyn_cast<Instruction>(U.get()))
          if (!Visited.count(J))
            Worklist.push_back(J);
    }

    // If we don't yet handle the instruction, give up.
    else
      FoundUnknownInst = true;
  }

  // If we didn't encounter a memory access in the expression tree, or if we
  // gave up for some reason, just return the width of V.
  if (!MaxWidth || FoundUnknownInst)
    return DL->getTypeSizeInBits(V->getType());

  // Otherwise, return the maximum width we found.
  return MaxWidth;
}

// Determine if a value V in a vectorizable expression Expr can be demoted to
// a smaller type with a truncation. We collect the values that will be
// demoted in ToDemote and additional roots that require investigating in
// Roots.
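// For example, an (add (zext i8 %a to i32), (zext i8 %b to i32)) whose
// result is only truncated back to i8 can be performed in i8; the add and
// both zexts land in ToDemote. A truncation's own operand is recorded in
// Roots, since it may head another demotable sub-expression.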
static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
                                  SmallVectorImpl<Value *> &ToDemote,
                                  SmallVectorImpl<Value *> &Roots) {

  // We can always demote constants.
  if (isa<Constant>(V)) {
    ToDemote.push_back(V);
    return true;
  }

  // If the value is not an instruction in the expression with only one use, it
  // cannot be demoted.
  auto *I = dyn_cast<Instruction>(V);
  if (!I || !I->hasOneUse() || !Expr.count(I))
    return false;

  switch (I->getOpcode()) {

  // We can always demote truncations and extensions. Since truncations can
  // seed additional demotion, we save the truncated value.
  case Instruction::Trunc:
    Roots.push_back(I->getOperand(0));
    // Fall through.
  case Instruction::ZExt:
  case Instruction::SExt:
    break;

  // We can demote certain binary operations if we can demote both of their
  // operands.
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
        !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
      return false;
    break;

  // We can demote selects if we can demote their true and false values.
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
        !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
      return false;
    break;
  }

  // We can demote phis if we can demote all their incoming operands. Note that
  // we don't need to worry about cycles since we ensure single use above.
  case Instruction::PHI: {
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
        return false;
    break;
  }

  // Otherwise, conservatively give up.
  default:
    return false;
  }

  // Record the value that we can demote.
  ToDemote.push_back(V);
  return true;
}

void BoUpSLP::computeMinimumValueSizes() {
  // If there are no external uses, the expression tree must be rooted by a
  // store. We can't demote in-memory values, so there is nothing to do here.
  if (ExternalUses.empty())
    return;

  // We only attempt to truncate integer expressions.
  auto &TreeRoot = VectorizableTree[0].Scalars;
  auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
  if (!TreeRootIT)
    return;

  // If the expression is not rooted by a store, these roots should have
  // external uses. We will rely on InstCombine to rewrite the expression in
  // the narrower type. However, InstCombine only rewrites single-use values.
  // This means that if a tree entry other than a root is used externally, it
  // must have multiple uses and InstCombine will not rewrite it. The code
  // below ensures that only the roots are used externally.
  SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
  for (auto &EU : ExternalUses)
    if (!Expr.erase(EU.Scalar))
      return;
  if (!Expr.empty())
    return;

  // Collect the scalar values of the vectorizable expression. We will use this
  // context to determine which values can be demoted. If we see a truncation,
  // we mark it as seeding another demotion.
  for (auto &Entry : VectorizableTree)
    Expr.insert(Entry.Scalars.begin(), Entry.Scalars.end());

  // Ensure the roots of the vectorizable tree don't form a cycle. They must
  // have a single external user that is not in the vectorizable tree.
  for (auto *Root : TreeRoot)
    if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
      return;

  // Conservatively determine if we can actually truncate the roots of the
  // expression. Collect the values that can be demoted in ToDemote and
  // additional roots that require investigating in Roots.
  SmallVector<Value *, 32> ToDemote;
  SmallVector<Value *, 4> Roots;
  for (auto *Root : TreeRoot)
    if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
      return;

  // The maximum bit width required to represent all the values that can be
  // demoted without loss of precision. It would be safe to truncate the roots
  // of the expression to this width.
  auto MaxBitWidth = 8u;

  // We first check if all the bits of the roots are demanded. If they're not,
  // we can truncate the roots to this narrower type.
  for (auto *Root : TreeRoot) {
    auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
    MaxBitWidth = std::max<unsigned>(
        Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
  }

  // If all the bits of the roots are demanded, we can try a little harder to
  // compute a narrower type. This can happen, for example, if the roots are
  // getelementptr indices. InstCombine promotes these indices to the pointer
  // width. Thus, all their bits are technically demanded even though the
  // address computation might be vectorized in a smaller type.
  //
  // We start by looking at each entry that can be demoted. We compute the
  // maximum bit width required to store the scalar by using ValueTracking to
  // compute the number of high-order bits we can truncate.
  if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType())) {
    MaxBitWidth = 8u;
    for (auto *Scalar : ToDemote) {
      auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, 0, DT);
      auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
      MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
    }
  }

  // Round MaxBitWidth up to the next power-of-two.
  if (!isPowerOf2_64(MaxBitWidth))
    MaxBitWidth = NextPowerOf2(MaxBitWidth);

  // If the maximum bit width we compute is less than the width of the roots'
  // type, we can proceed with the narrowing. Otherwise, do nothing.
  if (MaxBitWidth >= TreeRootIT->getBitWidth())
    return;

  // If we can truncate the root, we must collect additional values that might
  // be demoted as a result. That is, those seeded by truncations we will
  // modify.
  while (!Roots.empty())
    collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);

  // Finally, map the values we can demote to the maximum bit width we
  // computed.
  for (auto *Scalar : ToDemote)
    MinBWs[Scalar] = MaxBitWidth;
}

/// The SLPVectorizer Pass.
struct SLPVectorizer : public FunctionPass {
  typedef SmallVector<StoreInst *, 8> StoreList;
  typedef MapVector<Value *, StoreList> StoreListMap;
  typedef SmallVector<WeakVH, 8> WeakVHList;
  typedef MapVector<Value *, WeakVHList> WeakVHListMap;

  /// Pass identification, replacement for typeid
  static char ID;

  explicit SLPVectorizer() : FunctionPass(ID) {
    initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
  }

  ScalarEvolution *SE;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  AssumptionCache *AC;
  DemandedBits *DB;
  const DataLayout *DL;

  bool doInitialization(Module &M) override {
    DL = &M.getDataLayout();
    return false;
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    TLI = TLIP ? &TLIP->getTLI() : nullptr;
    AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();

    Stores.clear();
    GEPs.clear();
    bool Changed = false;

    // If the target claims to have no vector registers, don't attempt
    // vectorization.
    if (!TTI->getNumberOfRegisters(true))
      return false;

    // Don't vectorize when the attribute NoImplicitFloat is used.
    if (F.hasFnAttribute(Attribute::NoImplicitFloat))
      return false;

    DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");

    // Use the bottom-up SLP vectorizer to construct chains that start with
    // store instructions.
    BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL);

    // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
    // delete instructions.

    // Scan the blocks in the function in post order.
    for (auto BB : post_order(&F.getEntryBlock())) {
      collectSeedInstructions(BB);

      // Vectorize trees that end at stores.
      if (!Stores.empty()) {
        DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
                     << " underlying objects.\n");
        Changed |= vectorizeStoreChains(R);
      }

      // Vectorize trees that end at reductions.
      Changed |= vectorizeChainsInBlock(BB, R);

      // Vectorize the index computations of getelementptr instructions. This
      // is primarily intended to catch gather-like idioms ending at
      // non-consecutive loads.
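      // (The gather-like form this targets is shown in the comment inside
      // vectorizeGEPIndices() below.)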
      if (!GEPs.empty()) {
        DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
                     << " underlying objects.\n");
        Changed |= vectorizeGEPIndices(BB, R);
      }
    }

    if (Changed) {
      R.optimizeGatherSequence();
      DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
      DEBUG(verifyFunction(F));
    }
    return Changed;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    FunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }

private:
  /// \brief Collect store and getelementptr instructions and organize them
  /// according to the underlying object of their pointer operands. We sort the
  /// instructions by their underlying objects to reduce the cost of
  /// consecutive access queries.
  ///
  /// TODO: We can further reduce this cost if we flush the chain creation
  /// every time we run into a memory barrier.
  void collectSeedInstructions(BasicBlock *BB);

  /// \brief Try to vectorize a chain that starts at two arithmetic instrs.
  bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R);

  /// \brief Try to vectorize a list of operands.
  /// \param BuildVector A list of users to ignore for the purpose of
  ///                    scheduling and that don't need extracting.
  /// \returns true if a value was vectorized.
  bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
                          ArrayRef<Value *> BuildVector = None,
                          bool allowReorder = false);

  /// \brief Try to vectorize a chain that may start at the operands of \p V.
  bool tryToVectorize(BinaryOperator *V, BoUpSLP &R);

  /// \brief Vectorize the store instructions collected in Stores.
  bool vectorizeStoreChains(BoUpSLP &R);

  /// \brief Vectorize the index computations of the getelementptr instructions
  /// collected in GEPs.
  bool vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R);

  /// \brief Scan the basic block and look for patterns that are likely to
  /// start a vectorization chain.
  bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R);

  bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold,
                           BoUpSLP &R, unsigned VecRegSize);

  bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold,
                       BoUpSLP &R);

  /// The store instructions in a basic block organized by base pointer.
  StoreListMap Stores;

  /// The getelementptr instructions in a basic block organized by base
  /// pointer.
  WeakVHListMap GEPs;
};

/// \brief Check that the Values in the slice in VL array are still existent in
/// the WeakVH array.
/// Vectorization of part of the VL array may cause later values in the VL
/// array to become invalid. We track when this has happened in the WeakVH
/// array.
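/// For example, if the slice VL[i, i+VF) was vectorized on an earlier
/// iteration, its scalars may have been erased or replaced; the corresponding
/// WeakVH entries then no longer compare equal to the original values.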
static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, ArrayRef<WeakVH> VH,
                               unsigned SliceBegin, unsigned SliceSize) {
  VL = VL.slice(SliceBegin, SliceSize);
  VH = VH.slice(SliceBegin, SliceSize);
  return !std::equal(VL.begin(), VL.end(), VH.begin());
}

bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
                                        int CostThreshold, BoUpSLP &R,
                                        unsigned VecRegSize) {
  unsigned ChainLen = Chain.size();
  DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
               << "\n");
  unsigned Sz = R.getVectorElementSize(Chain[0]);
  unsigned VF = VecRegSize / Sz;

  if (!isPowerOf2_32(Sz) || VF < 2)
    return false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());

  bool Changed = false;
  // Look for profitable vectorizable trees at all offsets, starting at zero.
  for (unsigned i = 0, e = ChainLen; i < e; ++i) {
    if (i + VF > e)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
                 << "\n");
    ArrayRef<Value *> Operands = Chain.slice(i, VF);

    R.buildTree(Operands);
    R.computeMinimumValueSizes();

    int Cost = R.getTreeCost();

    DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
    if (Cost < CostThreshold) {
      DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
                                    int costThreshold, BoUpSLP &R) {
  SetVector<StoreInst *> Heads, Tails;
  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;

  // We may run into multiple chains that merge into a single chain. We mark
  // the stores that we vectorized so that we don't visit the same store twice.
  BoUpSLP::ValueSet VectorizedStores;
  bool Changed = false;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  SmallVector<unsigned, 16> IndexQueue;
  for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
    IndexQueue.clear();
    // If a store has multiple consecutive store candidates, search the Stores
    // array according to the sequence: from i+1 to e, then from i-1 to 0.
    // This is because pairing with the immediately succeeding or preceding
    // candidate usually creates the best chance to find an SLP vectorization
    // opportunity.
    unsigned j = 0;
    for (j = i + 1; j < e; ++j)
      IndexQueue.push_back(j);
    for (j = i; j > 0; --j)
      IndexQueue.push_back(j - 1);

    for (auto &k : IndexQueue) {
      if (isConsecutiveAccess(Stores[i], Stores[k], *DL, *SE)) {
        Tails.insert(Stores[k]);
        Heads.insert(Stores[i]);
        ConsecutiveChain[Stores[i]] = Stores[k];
        break;
      }
    }
  }

  // For stores that start but don't end a link in the chain:
  for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and try
    // to vectorize it.
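    // For example (illustrative), stores to p[0], p[1], p[2], p[3] yield
    // ConsecutiveChain entries S0->S1, S1->S2, S2->S3; following them from
    // the head S0 collects Operands = [S0, S1, S2, S3] below.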
    BoUpSLP::ValueList Operands;
    StoreInst *I = *it;
    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (VectorizedStores.count(I))
        break;
      Operands.push_back(I);
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    // FIXME: Is division-by-2 the correct step? Should we assert that the
    // register size is a power-of-2?
    for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize();
         Size /= 2) {
      if (vectorizeStoreChain(Operands, costThreshold, R, Size)) {
        // Mark the vectorized stores so that we don't vectorize them again.
        VectorizedStores.insert(Operands.begin(), Operands.end());
        Changed = true;
        break;
      }
    }
  }

  return Changed;
}

void SLPVectorizer::collectSeedInstructions(BasicBlock *BB) {

  // Initialize the collections. We will make a single pass over the block.
  Stores.clear();
  GEPs.clear();

  // Visit the store and getelementptr instructions in BB and organize them in
  // Stores and GEPs according to the underlying objects of their pointer
  // operands.
  for (Instruction &I : *BB) {

    // Ignore store instructions that are volatile or have a pointer operand
    // that doesn't point to a scalar type.
    if (auto *SI = dyn_cast<StoreInst>(&I)) {
      if (!SI->isSimple())
        continue;
      if (!isValidElementType(SI->getValueOperand()->getType()))
        continue;
      Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
    }

    // Ignore getelementptr instructions that have more than one index, a
    // constant index, or a pointer operand that doesn't point to a scalar
    // type.
    else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
      auto Idx = GEP->idx_begin()->get();
      if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
        continue;
      if (!isValidElementType(Idx->getType()))
        continue;
      GEPs[GetUnderlyingObject(GEP->getPointerOperand(), *DL)].push_back(GEP);
    }
  }
}

bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
  if (!A || !B)
    return false;
  Value *VL[] = { A, B };
  return tryToVectorizeList(VL, R, None, true);
}

bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
                                       ArrayRef<Value *> BuildVector,
                                       bool allowReorder) {
  if (VL.size() < 2)
    return false;

  DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size()
               << ".\n");

  // Check that all of the parts are scalar instructions of the same type.
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;

  unsigned Opcode0 = I0->getOpcode();

  // FIXME: Register size should be a parameter to this function, so we can
  // try different vectorization factors.
  unsigned Sz = R.getVectorElementSize(I0);
  unsigned VF = R.getMinVecRegSize() / Sz;

  for (Value *V : VL) {
    Type *Ty = V->getType();
    if (!isValidElementType(Ty))
      return false;
    Instruction *Inst = dyn_cast<Instruction>(V);
    if (!Inst || Inst->getOpcode() != Opcode0)
      return false;
  }

  bool Changed = false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());

  for (unsigned i = 0, e = VL.size(); i < e; ++i) {
    unsigned OpsWidth = 0;

    if (i + VF > e)
      OpsWidth = e - i;
    else
      OpsWidth = VF;

    if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(VL, TrackValues, i, OpsWidth))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
                 << "\n");
    ArrayRef<Value *> Ops = VL.slice(i, OpsWidth);

    ArrayRef<Value *> BuildVectorSlice;
    if (!BuildVector.empty())
      BuildVectorSlice = BuildVector.slice(i, OpsWidth);

    R.buildTree(Ops, BuildVectorSlice);
    // TODO: check if we can allow reordering also for other cases than
    // tryToVectorizePair()
    if (allowReorder && R.shouldReorder()) {
      assert(Ops.size() == 2);
      assert(BuildVectorSlice.empty());
      Value *ReorderedOps[] = { Ops[1], Ops[0] };
      R.buildTree(ReorderedOps, None);
    }
    R.computeMinimumValueSizes();
    int Cost = R.getTreeCost();

    if (Cost < -SLPCostThreshold) {
      DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
      Value *VectorizedRoot = R.vectorizeTree();

      // Reconstruct the build vector by extracting the vectorized root. This
      // way we handle the case where some elements of the vector are
      // undefined.
      //  (return (insertelt <4 x i32> (insertelt undef (opd0) 0) (opd1) 2))
      if (!BuildVectorSlice.empty()) {
        // The insert point is the last build vector instruction. The
        // vectorized root will precede it. This guarantees that we get an
        // instruction. The vectorized tree could have been constant folded.
        Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back());
        unsigned VecIdx = 0;
        for (auto &V : BuildVectorSlice) {
          IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
                                      ++BasicBlock::iterator(InsertAfter));
          Instruction *I = cast<Instruction>(V);
          assert(isa<InsertElementInst>(I) || isa<InsertValueInst>(I));
          Instruction *Extract =
              cast<Instruction>(Builder.CreateExtractElement(
                  VectorizedRoot, Builder.getInt32(VecIdx++)));
          I->setOperand(1, Extract);
          I->removeFromParent();
          I->insertAfter(Extract);
          InsertAfter = I;
        }
      }
      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
  if (!V)
    return false;

  // Try to vectorize V.
  if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R))
    return true;

  BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0));
  BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1));
  // Try to skip B.
  if (B && B->hasOneUse()) {
    BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
    BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
    if (tryToVectorizePair(A, B0, R)) {
      return true;
    }
    if (tryToVectorizePair(A, B1, R)) {
      return true;
    }
  }

  // Try to skip A.
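  // E.g. for V = (a0 + a1) + B, also try the pairs (a0, B) and (a1, B).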
  if (A && A->hasOneUse()) {
    BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
    BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
    if (tryToVectorizePair(A0, B, R)) {
      return true;
    }
    if (tryToVectorizePair(A1, B, R)) {
      return true;
    }
  }
  return false;
}

/// \brief Generate a shuffle mask to be used in a reduction tree.
///
/// \param VecLen The length of the vector to be reduced.
/// \param NumEltsToRdx The number of elements that should be reduced in the
///        vector.
/// \param IsPairwise Whether the reduction is a pairwise or splitting
///        reduction. A pairwise reduction will generate a mask of
///        <0,2,...> or <1,3,..> while a splitting reduction will generate
///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
/// \param IsLeft True will generate a mask of even elements, odd otherwise.
static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
                                   bool IsPairwise, bool IsLeft,
                                   IRBuilder<> &Builder) {
  assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");

  SmallVector<Constant *, 32> ShuffleMask(
      VecLen, UndefValue::get(Builder.getInt32Ty()));

  if (IsPairwise)
    // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
  else
    // Move the upper half of the vector to the lower half.
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);

  return ConstantVector::get(ShuffleMask);
}

/// Model horizontal reductions.
///
/// A horizontal reduction is a tree of reduction operations (currently add and
/// fadd) that has operations that can be put into a vector as its leaf.
/// For example, this tree:
///
/// mul mul mul mul
///  \  /    \  /
///   +       +
///    \     /
///       +
/// This tree has "mul" as its reduced values and "+" as its reduction
/// operations. A reduction might be feeding into a store or a binary operation
/// feeding a phi.
///    ...
///    \  /
///     +
///     |
///  phi +=
///
///  Or:
///    ...
///    \  /
///     +
///     |
///   *p =
///
class HorizontalReduction {
  SmallVector<Value *, 16> ReductionOps;
  SmallVector<Value *, 32> ReducedVals;

  BinaryOperator *ReductionRoot;
  PHINode *ReductionPHI;

  /// The opcode of the reduction.
  unsigned ReductionOpcode;
  /// The opcode of the values we perform a reduction on.
  unsigned ReducedValueOpcode;
  /// Should we model this reduction as a pairwise reduction tree or a tree
  /// that splits the vector in halves and adds those halves.
  bool IsPairwiseReduction;

public:
  /// The width of one full horizontal reduction operation.
  unsigned ReduxWidth;

  /// Minimal width of available vector registers. It's used to determine
  /// ReduxWidth.
  unsigned MinVecRegSize;

  HorizontalReduction(unsigned MinVecRegSize)
      : ReductionRoot(nullptr), ReductionPHI(nullptr), ReductionOpcode(0),
        ReducedValueOpcode(0), IsPairwiseReduction(false), ReduxWidth(0),
        MinVecRegSize(MinVecRegSize) {}

  /// \brief Try to find a reduction tree.
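  ///
  /// For example, for
  ///   sum += a[0] + a[1] + a[2] + a[3]
  /// the chain of adds is matched as the reduction operations and the four
  /// loads as the reduced values.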
  bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B) {
    assert((!Phi ||
            std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) &&
           "This phi needs to use the binary operator");

    // We could have an initial reduction that is not an add.
    //  r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted in the first '+'.
    if (Phi) {
      if (B->getOperand(0) == Phi) {
        Phi = nullptr;
        B = dyn_cast<BinaryOperator>(B->getOperand(1));
      } else if (B->getOperand(1) == Phi) {
        Phi = nullptr;
        B = dyn_cast<BinaryOperator>(B->getOperand(0));
      }
    }

    if (!B)
      return false;

    Type *Ty = B->getType();
    if (!isValidElementType(Ty))
      return false;

    const DataLayout &DL = B->getModule()->getDataLayout();
    ReductionOpcode = B->getOpcode();
    ReducedValueOpcode = 0;
    // FIXME: Register size should be a parameter to this function, so we can
    // try different vectorization factors.
    ReduxWidth = MinVecRegSize / DL.getTypeSizeInBits(Ty);
    ReductionRoot = B;
    ReductionPHI = Phi;

    if (ReduxWidth < 4)
      return false;

    // We currently only support adds.
    if (ReductionOpcode != Instruction::Add &&
        ReductionOpcode != Instruction::FAdd)
      return false;

    // Post order traverse the reduction tree starting at B. We only handle
    // true trees containing only binary operators or selects.
    SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
    Stack.push_back(std::make_pair(B, 0));
    while (!Stack.empty()) {
      Instruction *TreeN = Stack.back().first;
      unsigned EdgeToVisit = Stack.back().second++;
      bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;

      // Only handle trees in the current basic block.
      if (TreeN->getParent() != B->getParent())
        return false;

      // Each tree node needs to have one user except for the ultimate
      // reduction.
      if (!TreeN->hasOneUse() && TreeN != B)
        return false;

      // Postorder visit.
      if (EdgeToVisit == 2 || IsReducedValue) {
        if (IsReducedValue) {
          // Make sure that the opcodes of the operations that we are going to
          // reduce match.
          if (!ReducedValueOpcode)
            ReducedValueOpcode = TreeN->getOpcode();
          else if (ReducedValueOpcode != TreeN->getOpcode())
            return false;
          ReducedVals.push_back(TreeN);
        } else {
          // We need to be able to reassociate the adds.
          if (!TreeN->isAssociative())
            return false;
          ReductionOps.push_back(TreeN);
        }
        // Retract.
        Stack.pop_back();
        continue;
      }

      // Visit left or right.
      Value *NextV = TreeN->getOperand(EdgeToVisit);
      // We currently only allow BinaryOperator's and SelectInst's as reduction
      // values in our tree.
      if (isa<BinaryOperator>(NextV) || isa<SelectInst>(NextV))
        Stack.push_back(std::make_pair(cast<Instruction>(NextV), 0));
      else if (NextV != Phi)
        return false;
    }
    return true;
  }

  /// \brief Attempt to vectorize the tree found by
  /// matchAssociativeReduction.
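  ///
  /// Vectorizes ReduxWidth reduced values at a time; any leftover reduced
  /// values are folded into the result with scalar reduction operations.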
  bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
    if (ReducedVals.empty())
      return false;

    unsigned NumReducedVals = ReducedVals.size();
    if (NumReducedVals < ReduxWidth)
      return false;

    Value *VectorizedTree = nullptr;
    IRBuilder<> Builder(ReductionRoot);
    FastMathFlags Unsafe;
    Unsafe.setUnsafeAlgebra();
    Builder.setFastMathFlags(Unsafe);
    unsigned i = 0;

    for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
      V.buildTree(makeArrayRef(&ReducedVals[i], ReduxWidth), ReductionOps);
      V.computeMinimumValueSizes();

      // Estimate cost.
      int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]);
      if (Cost >= -SLPCostThreshold)
        break;

      DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
                   << ". (HorRdx)\n");

      // Vectorize a tree.
      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
      Value *VectorizedRoot = V.vectorizeTree();

      // Emit a reduction.
      Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder);
      if (VectorizedTree) {
        Builder.SetCurrentDebugLocation(Loc);
        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
                                     ReducedSubTree, "bin.rdx");
      } else
        VectorizedTree = ReducedSubTree;
    }

    if (VectorizedTree) {
      // Finish the reduction.
      for (; i < NumReducedVals; ++i) {
        Builder.SetCurrentDebugLocation(
            cast<Instruction>(ReducedVals[i])->getDebugLoc());
        VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
                                     ReducedVals[i]);
      }
      // Update users.
      if (ReductionPHI) {
        assert(ReductionRoot && "Need a reduction operation");
        ReductionRoot->setOperand(0, VectorizedTree);
        ReductionRoot->setOperand(1, ReductionPHI);
      } else
        ReductionRoot->replaceAllUsesWith(VectorizedTree);
    }
    return VectorizedTree != nullptr;
  }

  unsigned numReductionValues() const {
    return ReducedVals.size();
  }

private:
  /// \brief Calculate the cost of a reduction.
  int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) {
    Type *ScalarTy = FirstReducedVal->getType();
    Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);

    int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true);
    int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false);

    IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
    int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;

    int ScalarReduxCost =
        ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, VecTy);

    DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
                 << " for reduction that starts with " << *FirstReducedVal
                 << " (It is a "
                 << (IsPairwiseReduction ? "pairwise" : "splitting")
                 << " reduction)\n");

    return VecReduxCost - ScalarReduxCost;
  }

  static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L,
                            Value *R, const Twine &Name = "") {
    if (Opcode == Instruction::FAdd)
      return Builder.CreateFAdd(L, R, Name);
    return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name);
  }

  /// \brief Emit a horizontal reduction of the vectorized value.
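  ///
  /// As an illustrative sketch (names invented for this comment), a splitting
  /// reduction of a <4 x i32> value %v emits:
  ///   %rdx.shuf = shufflevector %v, undef, <2, 3, undef, undef>
  ///   %bin.rdx = add <4 x i32> %v, %rdx.shuf
  ///   %rdx.shuf.1 = shufflevector %bin.rdx, undef, <1, undef, undef, undef>
  ///   %bin.rdx.1 = add <4 x i32> %bin.rdx, %rdx.shuf.1
  ///   %res = extractelement <4 x i32> %bin.rdx.1, i32 0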
  Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) {
    assert(VectorizedValue && "Need to have a vectorized tree node");
    assert(isPowerOf2_32(ReduxWidth) &&
           "We only handle power-of-two reductions for now");

    Value *TmpVec = VectorizedValue;
    for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
      if (IsPairwiseReduction) {
        Value *LeftMask =
            createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
        Value *RightMask =
            createRdxShuffleMask(ReduxWidth, i, true, false, Builder);

        Value *LeftShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l");
        Value *RightShuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), RightMask,
            "rdx.shuf.r");
        TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf,
                             "bin.rdx");
      } else {
        Value *UpperHalf =
            createRdxShuffleMask(ReduxWidth, i, false, false, Builder);
        Value *Shuf = Builder.CreateShuffleVector(
            TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf");
        TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf, "bin.rdx");
      }
    }

    // The result is in the first element of the vector.
    return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
  }
};

/// \brief Recognize construction of vectors like
///  %ra = insertelement <4 x float> undef, float %s0, i32 0
///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
///
/// Returns true if it matches
///
static bool findBuildVector(InsertElementInst *FirstInsertElem,
                            SmallVectorImpl<Value *> &BuildVector,
                            SmallVectorImpl<Value *> &BuildVectorOpds) {
  if (!isa<UndefValue>(FirstInsertElem->getOperand(0)))
    return false;

  InsertElementInst *IE = FirstInsertElem;
  while (true) {
    BuildVector.push_back(IE);
    BuildVectorOpds.push_back(IE->getOperand(1));

    if (IE->use_empty())
      return false;

    InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back());
    if (!NextUse)
      return true;

    // If this isn't the final use, make sure the next insertelement is the
    // only use. It's OK if the final constructed vector is used multiple
    // times.
    if (!IE->hasOneUse())
      return false;

    IE = NextUse;
  }

  return false;
}

/// \brief Like findBuildVector, but looks backwards for construction of
/// aggregate.
///
/// \return true if it matches.
static bool findBuildAggregate(InsertValueInst *IV,
                               SmallVectorImpl<Value *> &BuildVector,
                               SmallVectorImpl<Value *> &BuildVectorOpds) {
  if (!IV->hasOneUse())
    return false;
  Value *V = IV->getAggregateOperand();
  if (!isa<UndefValue>(V)) {
    InsertValueInst *I = dyn_cast<InsertValueInst>(V);
    if (!I || !findBuildAggregate(I, BuildVector, BuildVectorOpds))
      return false;
  }
  BuildVector.push_back(IV);
  BuildVectorOpds.push_back(IV->getInsertedValueOperand());
  return true;
}

static bool PhiTypeSorterFunc(Value *V, Value *V2) {
  return V->getType() < V2->getType();
}

/// \brief Try and get a reduction value from a phi node.
4310 /// 4311 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 4312 /// if they come from either \p ParentBB or a containing loop latch. 4313 /// 4314 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 4315 /// if not possible. 4316 static Value *getReductionValue(const DominatorTree *DT, PHINode *P, 4317 BasicBlock *ParentBB, LoopInfo *LI) { 4318 // There are situations where the reduction value is not dominated by the 4319 // reduction phi. Vectorizing such cases has been reported to cause 4320 // miscompiles. See PR25787. 4321 auto DominatedReduxValue = [&](Value *R) { 4322 return ( 4323 dyn_cast<Instruction>(R) && 4324 DT->dominates(P->getParent(), dyn_cast<Instruction>(R)->getParent())); 4325 }; 4326 4327 Value *Rdx = nullptr; 4328 4329 // Return the incoming value if it comes from the same BB as the phi node. 4330 if (P->getIncomingBlock(0) == ParentBB) { 4331 Rdx = P->getIncomingValue(0); 4332 } else if (P->getIncomingBlock(1) == ParentBB) { 4333 Rdx = P->getIncomingValue(1); 4334 } 4335 4336 if (Rdx && DominatedReduxValue(Rdx)) 4337 return Rdx; 4338 4339 // Otherwise, check whether we have a loop latch to look at. 4340 Loop *BBL = LI->getLoopFor(ParentBB); 4341 if (!BBL) 4342 return nullptr; 4343 BasicBlock *BBLatch = BBL->getLoopLatch(); 4344 if (!BBLatch) 4345 return nullptr; 4346 4347 // There is a loop latch, return the incoming value if it comes from 4348 // that. This reduction pattern occassionaly turns up. 4349 if (P->getIncomingBlock(0) == BBLatch) { 4350 Rdx = P->getIncomingValue(0); 4351 } else if (P->getIncomingBlock(1) == BBLatch) { 4352 Rdx = P->getIncomingValue(1); 4353 } 4354 4355 if (Rdx && DominatedReduxValue(Rdx)) 4356 return Rdx; 4357 4358 return nullptr; 4359 } 4360 4361 /// \brief Attempt to reduce a horizontal reduction. 4362 /// If it is legal to match a horizontal reduction feeding 4363 /// the phi node P with reduction operators BI, then check if it 4364 /// can be done. 4365 /// \returns true if a horizontal reduction was matched and reduced. 4366 /// \returns false if a horizontal reduction was not matched. 4367 static bool canMatchHorizontalReduction(PHINode *P, BinaryOperator *BI, 4368 BoUpSLP &R, TargetTransformInfo *TTI, 4369 unsigned MinRegSize) { 4370 if (!ShouldVectorizeHor) 4371 return false; 4372 4373 HorizontalReduction HorRdx(MinRegSize); 4374 if (!HorRdx.matchAssociativeReduction(P, BI)) 4375 return false; 4376 4377 // If there is a sufficient number of reduction values, reduce 4378 // to a nearby power-of-2. Can safely generate oversized 4379 // vectors and rely on the backend to split them to legal sizes. 4380 HorRdx.ReduxWidth = 4381 std::max((uint64_t)4, PowerOf2Floor(HorRdx.numReductionValues())); 4382 4383 return HorRdx.tryToReduce(R, TTI); 4384 } 4385 4386 bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { 4387 bool Changed = false; 4388 SmallVector<Value *, 4> Incoming; 4389 SmallSet<Value *, 16> VisitedInstrs; 4390 4391 bool HaveVectorizedPhiNodes = true; 4392 while (HaveVectorizedPhiNodes) { 4393 HaveVectorizedPhiNodes = false; 4394 4395 // Collect the incoming values from the PHIs. 4396 Incoming.clear(); 4397 for (Instruction &I : *BB) { 4398 PHINode *P = dyn_cast<PHINode>(&I); 4399 if (!P) 4400 break; 4401 4402 if (!VisitedInstrs.count(P)) 4403 Incoming.push_back(P); 4404 } 4405 4406 // Sort by type. 4407 std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc); 4408 4409 // Try to vectorize elements base on their type. 
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
                                           E = Incoming.end();
         IncIt != E;) {

      // Look for the next elements with the same type.
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts
                   << ")\n");
      if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R)) {
        // Success. Start over because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
    // We may go through BB multiple times so skip the instructions we have
    // already checked.
    if (!VisitedInstrs.insert(&*it).second)
      continue;

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;

      Value *Rdx = getReductionValue(DT, P, BB, LI);

      // Check if this is a Binary Operator.
      BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx);
      if (!BI)
        continue;

      // Try to match and vectorize a horizontal reduction.
      if (canMatchHorizontalReduction(P, BI, R, TTI, R.getMinVecRegSize())) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      Value *Inst = BI->getOperand(0);
      if (Inst == P)
        Inst = BI->getOperand(1);

      if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }

      continue;
    }

    if (ShouldStartVectorizeHorAtStore)
      if (StoreInst *SI = dyn_cast<StoreInst>(it))
        if (BinaryOperator *BinOp =
                dyn_cast<BinaryOperator>(SI->getValueOperand())) {
          if (canMatchHorizontalReduction(nullptr, BinOp, R, TTI,
                                          R.getMinVecRegSize()) ||
              tryToVectorize(BinOp, R)) {
            Changed = true;
            it = BB->begin();
            e = BB->end();
            continue;
          }
        }

    // Try to vectorize horizontal reductions feeding into a return.
    if (ReturnInst *RI = dyn_cast<ReturnInst>(it))
      if (RI->getNumOperands() != 0)
        if (BinaryOperator *BinOp =
                dyn_cast<BinaryOperator>(RI->getOperand(0))) {
          DEBUG(dbgs() << "SLP: Found a return to vectorize.\n");
          if (tryToVectorizePair(BinOp->getOperand(0),
                                 BinOp->getOperand(1), R)) {
            Changed = true;
            it = BB->begin();
            e = BB->end();
            continue;
          }
        }

    // Try to vectorize trees that start at compare instructions.
    if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
      if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
        Changed = true;
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        it = BB->begin();
        e = BB->end();
        continue;
      }

      for (int i = 0; i < 2; ++i) {
        if (BinaryOperator *BI = dyn_cast<BinaryOperator>(CI->getOperand(i))) {
          if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) {
            Changed = true;
            // We would like to start over since some instructions are deleted
            // and the iterator may become invalid.
            it = BB->begin();
            e = BB->end();
            break;
          }
        }
      }
      continue;
    }

    // Try to vectorize trees that start at insertelement instructions.
    if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) {
      SmallVector<Value *, 16> BuildVector;
      SmallVector<Value *, 16> BuildVectorOpds;
      if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds))
        continue;

      // Vectorize starting with the build vector operands ignoring the
      // BuildVector instructions for the purpose of scheduling and user
      // extraction.
      if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }

      continue;
    }

    // Try to vectorize trees that start at insertvalue instructions feeding
    // into a store.
    if (StoreInst *SI = dyn_cast<StoreInst>(it)) {
      if (InsertValueInst *LastInsertValue =
              dyn_cast<InsertValueInst>(SI->getValueOperand())) {
        const DataLayout &DL = BB->getModule()->getDataLayout();
        if (R.canMapToVector(SI->getValueOperand()->getType(), DL)) {
          SmallVector<Value *, 16> BuildVector;
          SmallVector<Value *, 16> BuildVectorOpds;
          if (!findBuildAggregate(LastInsertValue, BuildVector,
                                  BuildVectorOpds))
            continue;

          DEBUG(dbgs() << "SLP: store of array mappable to vector: " << *SI
                       << "\n");
          if (tryToVectorizeList(BuildVectorOpds, R, BuildVector, false)) {
            Changed = true;
            it = BB->begin();
            e = BB->end();
          }
          continue;
        }
      }
    }
  }

  return Changed;
}

bool SLPVectorizer::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
  auto Changed = false;
  for (auto &Entry : GEPs) {

    // If the getelementptr list has fewer than two elements, there's nothing
    // to do.
    if (Entry.second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
                 << Entry.second.size() << ".\n");

    // We process the getelementptr list in chunks of 16 (like we do for
    // stores) to minimize compile-time.
    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += 16) {
      auto Len = std::min<unsigned>(BE - BI, 16);
      auto GEPList = makeArrayRef(&Entry.second[BI], Len);

      // Initialize a set of candidate getelementptrs. Note that we use a
      // SetVector here to preserve program order. If the index computations
      // are vectorizable and begin with loads, we want to minimize the chance
      // of having to reorder them later.
      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

      // Some of the candidates may have already been vectorized after we
      // initially collected them. If so, the WeakVHs will have nullified the
      // values, so remove them from the set of candidates.
      Candidates.remove(nullptr);

      // Remove from the set of candidates all pairs of getelementptrs with
      // constant differences.
      // Such getelementptrs are likely not good candidates for vectorization
      // in a bottom-up phase since one can be computed from the other. We
      // also ensure all candidate getelementptr indices are unique.
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
        auto *GEPI = cast<GetElementPtrInst>(GEPList[I]);
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = cast<GetElementPtrInst>(GEPList[J]);
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPList[I]);
            Candidates.remove(GEPList[J]);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPList[J]);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;

      // Add the single, non-constant index of each candidate to the bundle.
      // We ensured the indices met these constraints when we originally
      // collected the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try and vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      //   ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}

bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e;
       ++it) {
    if (it->second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                 << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
    // TODO: The limit of 16 inhibits greater vectorization factors.
    //       For example, AVX2 supports v32i8. Increasing this limit, however,
    //       may cause a significant compile-time increase.
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len),
                                 -SLPCostThreshold, R);
    }
  }
  return Changed;
}

} // end anonymous namespace

char SLPVectorizer::ID = 0;
static const char lv_name[] = "SLP Vectorizer";
INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

namespace llvm {
Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
}