//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

cl::opt<bool>
    llvm::RunSLPVectorization("vectorize-slp", cl::init(false), cl::Hidden,
                              cl::desc("Run the SLP vectorization passes"));

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
    ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                       cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
    MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
                           cl::desc("Attempt to vectorize for this register size in bits"));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
    ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
                             cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

// The maximum depth that the look-ahead score heuristic will explore.
// The higher this value, the higher the compilation time overhead.
static cl::opt<int> LookAheadMaxDepth(
    "slp-max-look-ahead-depth", cl::init(2), cl::Hidden,
    cl::desc("The maximum look-ahead depth for operand reordering scores"));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important things to filter here are types which are invalid in
/// LLVM vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return false;

    if (BB != I->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (Value *i : VL)
    if (!isa<Constant>(i))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns True if \p I is commutative, handles CmpInst as well as Instruction.
static bool isCommutative(Instruction *I) {
  if (auto *IC = dyn_cast<CmpInst>(I))
    return IC->isCommutative();
  return I->isCommutative();
}

/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> undef, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> undef, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> undef, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul
/// TODO: Can we split off and reuse the shuffle mask detection from
/// TargetTransformInfo::getInstructionThroughput?
static Optional<TargetTransformInfo::ShuffleKind>
isShuffle(ArrayRef<Value *> VL) {
  auto *EI0 = cast<ExtractElementInst>(VL[0]);
  unsigned Size = EI0->getVectorOperandType()->getVectorNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, Select, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    auto *EI = cast<ExtractElementInst>(VL[I]);
    auto *Vec = EI->getVectorOperand();
    // All vector operands must have the same number of vector elements.
    if (Vec->getType()->getVectorNumElements() != Size)
      return None;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size))
      continue;
    unsigned IntIdx = Idx->getValue().getZExtValue();
    // We can extractelement from undef vector.
    if (isa<UndefValue>(Vec))
      continue;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
    if (!Vec1 || Vec1 == Vec)
      Vec1 = Vec;
    else if (!Vec2 || Vec2 == Vec)
      Vec2 = Vec;
    else
      return None;
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector, otherwise
  // we have permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
  bool isAltShuffle() const { return getOpcode() != getAltOpcode(); }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace

/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}

/// \returns analysis of the Instructions in \p VL described in
/// InstructionsState, i.e. the opcode with which we suppose the whole list
/// could be vectorized, even if its structure is diverse.
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex = 0) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
  // TODO - generalize to support all operators (types, calls etc.).
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
    if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
      if (InstOpcode == Opcode || InstOpcode == AltOpcode)
        continue;
      if (Opcode == AltOpcode) {
        AltOpcode = InstOpcode;
        AltIndex = Cnt;
        continue;
      }
    } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
      Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
      Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        if (InstOpcode == Opcode || InstOpcode == AltOpcode)
          continue;
        if (Opcode == AltOpcode) {
          AltOpcode = InstOpcode;
          AltIndex = Cnt;
          continue;
        }
      }
    } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
      continue;
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }

  return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
                           cast<Instruction>(VL[AltIndex]));
}

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static Optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return None;
    return CI->getZExtValue();
  }
  ExtractValueInst *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return None;
  return *EI->idx_begin();
}

/// \returns True if in-tree use also needs extract. This refers to
/// possible scalar operand in vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
      if (hasVectorInstrinsicScalarOpd(ID, i))
        return (CI->getArgOperand(i) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

namespace llvm {

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
  struct TreeEntry;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC),
        DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize = TTI->getRegisterBitWidth(true);

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractvalue instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  int getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst, taking
  /// into account (and updating it, if required) the list of externally used
  /// values stored in \p ExternallyUsedValues.
  void buildTree(ArrayRef<Value *> Roots,
                 ExtraValueToDebugLocsMap &ExternallyUsedValues,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumOpsWantToKeepOrder.clear();
    NumOpsWantToKeepOriginalOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// \returns The best order of instructions for vectorization.
  Optional<ArrayRef<unsigned>> bestOrder() const {
    auto I = std::max_element(
        NumOpsWantToKeepOrder.begin(), NumOpsWantToKeepOrder.end(),
        [](const decltype(NumOpsWantToKeepOrder)::value_type &D1,
           const decltype(NumOpsWantToKeepOrder)::value_type &D2) {
          return D1.second < D2.second;
        });
    if (I == NumOpsWantToKeepOrder.end() ||
        I->getSecond() <= NumOpsWantToKeepOriginalOrder)
      return None;

    return makeArrayRef(I->getFirst());
  }

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V) const;

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  /// Check if ArrayType or StructType is isomorphic to some VectorType.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }

  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
  };

  /// A helper data structure to hold the operands of a vector of instructions.
  /// This supports a fixed vector length for all operand vectors.
  class VLOperands {
    /// For each operand we need (i) the value, and (ii) the opcode that it
    /// would be attached to if the expression was in a left-linearized form.
    /// This is required to avoid illegal operand reordering.
    /// For example:
    /// \verbatim
    ///                         0 Op1
    ///                         |/
    /// Op1 Op2   Linearized    + Op2
    ///   \ /     ---------->   |/
    ///    -                    -
    ///
    /// Op1 - Op2            (0 + Op1) - Op2
    /// \endverbatim
    ///
    /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
    ///
    /// Another way to think of this is to track all the operations across the
    /// path from the operand all the way to the root of the tree and to
    /// calculate the operation that corresponds to this path. For example, the
    /// path from Op2 to the root crosses the RHS of the '-', therefore the
    /// corresponding operation is a '-' (which matches the one in the
    /// linearized tree, as shown above).
    ///
    /// For lack of a better term, we refer to this operation as Accumulated
    /// Path Operation (APO).
    struct OperandData {
      OperandData() = default;
      OperandData(Value *V, bool APO, bool IsUsed)
          : V(V), APO(APO), IsUsed(IsUsed) {}
      /// The operand value.
      Value *V = nullptr;
      /// TreeEntries only allow a single opcode, or an alternate sequence of
      /// them (e.g., +, -). Therefore, we can safely use a boolean value for
      /// the APO. It is set to 'true' if 'V' is attached to an inverse
      /// operation in the left-linearized form (e.g., Sub/Div), and 'false'
      /// otherwise (e.g., Add/Mul).
      bool APO = false;
      /// Helper data for the reordering function.
      bool IsUsed = false;
    };

    /// During operand reordering, we are trying to select the operand at lane
    /// that matches best with the operand at the neighboring lane. Our
    /// selection is based on the type of value we are looking for. For example,
    /// if the neighboring lane has a load, we need to look for a load that is
    /// accessing a consecutive address. These strategies are summarized in the
    /// 'ReorderingMode' enumerator.
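    /// For instance (this illustrative note is not in the upstream comment):
    /// if the operand at the first visited lane is a load, the mode for that
    /// operand index is set to Load, and candidates in the remaining lanes are
    /// scored by how well they continue a consecutive-load pattern (see
    /// getBestOperand() and getLookAheadScore()).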
    enum class ReorderingMode {
      Load,     ///< Matching loads to consecutive memory addresses
      Opcode,   ///< Matching instructions based on opcode (same or alternate)
      Constant, ///< Matching constants
      Splat,    ///< Matching the same instruction multiple times (broadcast)
      Failed,   ///< We failed to create a vectorizable group
    };

    using OperandDataVec = SmallVector<OperandData, 2>;

    /// A vector of operand vectors.
    SmallVector<OperandDataVec, 4> OpsVec;

    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;

    /// \returns the operand data at \p OpIdx and \p Lane.
    OperandData &getData(unsigned OpIdx, unsigned Lane) {
      return OpsVec[OpIdx][Lane];
    }

    /// \returns the operand data at \p OpIdx and \p Lane. Const version.
    const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
      return OpsVec[OpIdx][Lane];
    }

    /// Clears the used flag for all entries.
    void clearUsed() {
      for (unsigned OpIdx = 0, NumOperands = getNumOperands();
           OpIdx != NumOperands; ++OpIdx)
        for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
             ++Lane)
          OpsVec[OpIdx][Lane].IsUsed = false;
    }

    /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
    void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
      std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
    }

    // The hard-coded scores listed here are not very important. When computing
    // the scores of matching one sub-tree with another, we are basically
    // counting the number of values that are matching. So even if all scores
    // are set to 1, we would still get a decent matching result.
    // However, sometimes we have to break ties. For example we may have to
    // choose between matching loads vs matching opcodes. This is what these
    // scores are helping us with: they provide the order of preference.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g., add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// User external to the vectorized code.
    static const int ExternalUseCost = 1;
    /// The user is internal but in a different lane.
    static const int UserInDiffLaneCost = ExternalUseCost;

    /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
    static int getShallowScore(Value *V1, Value *V2, const DataLayout &DL,
                               ScalarEvolution &SE) {
      auto *LI1 = dyn_cast<LoadInst>(V1);
      auto *LI2 = dyn_cast<LoadInst>(V2);
      if (LI1 && LI2)
        return isConsecutiveAccess(LI1, LI2, DL, SE)
                   ? VLOperands::ScoreConsecutiveLoads
                   : VLOperands::ScoreFail;

      auto *C1 = dyn_cast<Constant>(V1);
      auto *C2 = dyn_cast<Constant>(V2);
      if (C1 && C2)
        return VLOperands::ScoreConstants;

      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (I1 && I2) {
        if (I1 == I2)
          return VLOperands::ScoreSplat;
        InstructionsState S = getSameOpcode({I1, I2});
        // Note: Only consider instructions with <= 2 operands to avoid
        // complexity explosion.
        if (S.getOpcode() && S.MainOp->getNumOperands() <= 2)
          return S.isAltShuffle() ? VLOperands::ScoreAltOpcodes
                                  : VLOperands::ScoreSameOpcode;
      }

      if (isa<UndefValue>(V2))
        return VLOperands::ScoreUndef;

      return VLOperands::ScoreFail;
    }

    /// Holds the values and their lane that are taking part in the look-ahead
    /// score calculation. This is used in the external uses cost calculation.
    SmallDenseMap<Value *, int> InLookAheadValues;

    /// \Returns the additional cost due to uses of \p LHS and \p RHS that are
    /// either external to the vectorized code, or require shuffling.
    int getExternalUsesCost(const std::pair<Value *, int> &LHS,
                            const std::pair<Value *, int> &RHS) {
      int Cost = 0;
      SmallVector<std::pair<Value *, int>, 2> Values = {LHS, RHS};
      for (int Idx = 0, IdxE = Values.size(); Idx != IdxE; ++Idx) {
        Value *V = Values[Idx].first;
        // Calculate the absolute lane, using the minimum relative lane of LHS
        // and RHS as base and Idx as the offset.
        int Ln = std::min(LHS.second, RHS.second) + Idx;
        assert(Ln >= 0 && "Bad lane calculation");
        for (User *U : V->users()) {
          if (const TreeEntry *UserTE = R.getTreeEntry(U)) {
            // The user is in the VectorizableTree. Check if we need to insert.
            auto It = llvm::find(UserTE->Scalars, U);
            assert(It != UserTE->Scalars.end() && "U is in UserTE");
            int UserLn = std::distance(UserTE->Scalars.begin(), It);
            assert(UserLn >= 0 && "Bad lane");
            if (UserLn != Ln)
              Cost += UserInDiffLaneCost;
          } else {
            // Check if the user is in the look-ahead code.
            auto It2 = InLookAheadValues.find(U);
            if (It2 != InLookAheadValues.end()) {
              // The user is in the look-ahead code. Check the lane.
              if (It2->second != Ln)
                Cost += UserInDiffLaneCost;
            } else {
              // The user is neither in SLP tree nor in the look-ahead code.
              Cost += ExternalUseCost;
            }
          }
        }
      }
      return Cost;
    }

    /// Go through the operands of \p LHS and \p RHS recursively until \p
    /// MaxLevel, and return the cumulative score. For example:
    /// \verbatim
    ///  A[0]  B[0]  A[1]  B[1]  C[0] D[0]  B[1] A[1]
    ///     \ /         \ /         \ /        \ /
    ///      +           +           +          +
    ///      G1          G2          G3         G4
    /// \endverbatim
    /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
    /// each level recursively, accumulating the score. It starts from matching
    /// the additions at level 0, then moves on to the loads (level 1). The
    /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
    /// {B[0],B[1]} match with VLOperands::ScoreConsecutiveLoads, while
    /// {A[0],C[0]} has a score of VLOperands::ScoreFail.
    /// Please note that the order of the operands does not matter, as we
    /// evaluate the score of all profitable combinations of operands. In
    /// other words the score of G1 and G4 is the same as G1 and G2. This
    /// heuristic is based on ideas described in:
    ///   Look-ahead SLP: Auto-vectorization in the presence of commutative
    ///   operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
    ///   Luís F. W. Góes
    int getScoreAtLevelRec(const std::pair<Value *, int> &LHS,
                           const std::pair<Value *, int> &RHS, int CurrLevel,
                           int MaxLevel) {

      Value *V1 = LHS.first;
      Value *V2 = RHS.first;
      // Get the shallow score of V1 and V2.
      int ShallowScoreAtThisLevel =
          std::max((int)ScoreFail, getShallowScore(V1, V2, DL, SE) -
                                       getExternalUsesCost(LHS, RHS));
      int Lane1 = LHS.second;
      int Lane2 = RHS.second;

      // If reached MaxLevel,
      //  or if V1 and V2 are not instructions,
      //  or if they are SPLAT,
      //  or if they are not consecutive, early return the current cost.
      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
          ShallowScoreAtThisLevel == VLOperands::ScoreFail ||
          (isa<LoadInst>(I1) && isa<LoadInst>(I2) && ShallowScoreAtThisLevel))
        return ShallowScoreAtThisLevel;
      assert(I1 && I2 && "Should have early exited.");

      // Keep track of in-tree values for determining the external-use cost.
      InLookAheadValues[V1] = Lane1;
      InLookAheadValues[V2] = Lane2;

      // Contains the I2 operand indexes that got matched with I1 operands.
      SmallSet<unsigned, 4> Op2Used;

      // Recursion towards the operands of I1 and I2. We are trying all possible
      // operand pairs, and keeping track of the best score.
      for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
           OpIdx1 != NumOperands1; ++OpIdx1) {
        // Try to pair op1I with the best operand of I2.
        int MaxTmpScore = 0;
        unsigned MaxOpIdx2 = 0;
        bool FoundBest = false;
        // If I2 is commutative try all combinations.
        unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
        unsigned ToIdx = isCommutative(I2)
                             ? I2->getNumOperands()
                             : std::min(I2->getNumOperands(), OpIdx1 + 1);
        assert(FromIdx <= ToIdx && "Bad index");
        for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
          // Skip operands already paired with OpIdx1.
          if (Op2Used.count(OpIdx2))
            continue;
          // Recursively calculate the cost at each level.
          int TmpScore = getScoreAtLevelRec({I1->getOperand(OpIdx1), Lane1},
                                            {I2->getOperand(OpIdx2), Lane2},
                                            CurrLevel + 1, MaxLevel);
          // Look for the best score.
          if (TmpScore > VLOperands::ScoreFail && TmpScore > MaxTmpScore) {
            MaxTmpScore = TmpScore;
            MaxOpIdx2 = OpIdx2;
            FoundBest = true;
          }
        }
        if (FoundBest) {
          // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
          Op2Used.insert(MaxOpIdx2);
          ShallowScoreAtThisLevel += MaxTmpScore;
        }
      }
      return ShallowScoreAtThisLevel;
    }

    /// \Returns the look-ahead score, which tells us how much the sub-trees
    /// rooted at \p LHS and \p RHS match, the more they match the higher the
    /// score. This helps break ties in an informed way when we cannot decide on
    /// the order of the operands by just considering the immediate
    /// predecessors.
    int getLookAheadScore(const std::pair<Value *, int> &LHS,
                          const std::pair<Value *, int> &RHS) {
      InLookAheadValues.clear();
      return getScoreAtLevelRec(LHS, RHS, 1, LookAheadMaxDepth);
    }

    // Search all operands in Ops[*][Lane] for the one that matches best
    // Ops[OpIdx][LastLane] and return its operand index.
    // If no good match can be found, return None.
    Optional<unsigned>
    getBestOperand(unsigned OpIdx, int Lane, int LastLane,
                   ArrayRef<ReorderingMode> ReorderingModes) {
      unsigned NumOperands = getNumOperands();

      // The operand of the previous lane at OpIdx.
      Value *OpLastLane = getData(OpIdx, LastLane).V;

      // Our strategy mode for OpIdx.
      ReorderingMode RMode = ReorderingModes[OpIdx];

      // The linearized opcode of the operand at OpIdx, Lane.
      bool OpIdxAPO = getData(OpIdx, Lane).APO;

      // The best operand index and its score.
      // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
      // are using the score to differentiate between the two.
      struct BestOpData {
        Optional<unsigned> Idx = None;
        unsigned Score = 0;
      } BestOp;

      // Iterate through all unused operands and look for the best.
      for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
        // Get the operand at Idx and Lane.
        OperandData &OpData = getData(Idx, Lane);
        Value *Op = OpData.V;
        bool OpAPO = OpData.APO;

        // Skip already selected operands.
        if (OpData.IsUsed)
          continue;

        // Skip if we are trying to move the operand to a position with a
        // different opcode in the linearized tree form. This would break the
        // semantics.
        if (OpAPO != OpIdxAPO)
          continue;

        // Look for an operand that matches the current mode.
        switch (RMode) {
        case ReorderingMode::Load:
        case ReorderingMode::Constant:
        case ReorderingMode::Opcode: {
          bool LeftToRight = Lane > LastLane;
          Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
          Value *OpRight = (LeftToRight) ? Op : OpLastLane;
          unsigned Score =
              getLookAheadScore({OpLeft, LastLane}, {OpRight, Lane});
          if (Score > BestOp.Score) {
            BestOp.Idx = Idx;
            BestOp.Score = Score;
          }
          break;
        }
        case ReorderingMode::Splat:
          if (Op == OpLastLane)
            BestOp.Idx = Idx;
          break;
        case ReorderingMode::Failed:
          return None;
        }
      }

      if (BestOp.Idx) {
        getData(BestOp.Idx.getValue(), Lane).IsUsed = true;
        return BestOp.Idx;
      }
      // If we could not find a good match return None.
      return None;
    }

    /// Helper for reorderOperandVecs. \Returns the lane that we should start
    /// reordering from. This is the one which has the least number of operands
    /// that can freely move about.
    unsigned getBestLaneToStartReordering() const {
      unsigned BestLane = 0;
      unsigned Min = UINT_MAX;
      for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
           ++Lane) {
        unsigned NumFreeOps = getMaxNumOperandsThatCanBeReordered(Lane);
        if (NumFreeOps < Min) {
          Min = NumFreeOps;
          BestLane = Lane;
        }
      }
      return BestLane;
    }

    /// \Returns the maximum number of operands that are allowed to be reordered
    /// for \p Lane. This is used as a heuristic for selecting the first lane to
    /// start operand reordering.
    unsigned getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
      unsigned CntTrue = 0;
      unsigned NumOperands = getNumOperands();
      // Operands with the same APO can be reordered. We therefore need to count
      // how many of them we have for each APO, like this: Cnt[APO] = x.
      // Since we only have two APOs, namely true and false, we can avoid using
      // a map. Instead we can simply count the number of operands that
      // correspond to one of them (in this case the 'true' APO), and calculate
      // the other by subtracting it from the total number of operands.
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx)
        if (getData(OpIdx, Lane).APO)
          ++CntTrue;
      unsigned CntFalse = NumOperands - CntTrue;
      return std::max(CntTrue, CntFalse);
    }

    /// Go through the instructions in VL and append their operands.
    void appendOperandsOfVL(ArrayRef<Value *> VL) {
      assert(!VL.empty() && "Bad VL");
      assert((empty() || VL.size() == getNumLanes()) &&
             "Expected same number of lanes");
      assert(isa<Instruction>(VL[0]) && "Expected instruction");
      unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
      OpsVec.resize(NumOperands);
      unsigned NumLanes = VL.size();
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
        OpsVec[OpIdx].resize(NumLanes);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
          // Our tree has just 3 nodes: the root and two operands.
          // It is therefore trivial to get the APO. We only need to check the
          // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
          // RHS operand. The LHS operand of both add and sub is never attached
          // to an inverse operation in the linearized form, therefore its APO
          // is false. The RHS is true only if VL[Lane] is an inverse operation.

          // Since operand reordering is performed on groups of commutative
          // operations or alternating sequences (e.g., +, -), we can safely
          // tell the inverse operations by checking commutativity.
          bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
          bool APO = (OpIdx == 0) ? false : IsInverseOperation;
          OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
                                 APO, false};
        }
      }
    }

    /// \returns the number of operands.
    unsigned getNumOperands() const { return OpsVec.size(); }

    /// \returns the number of lanes.
    unsigned getNumLanes() const { return OpsVec[0].size(); }

    /// \returns the operand value at \p OpIdx and \p Lane.
    Value *getValue(unsigned OpIdx, unsigned Lane) const {
      return getData(OpIdx, Lane).V;
    }

    /// \returns true if the data structure is empty.
    bool empty() const { return OpsVec.empty(); }

    /// Clears the data.
    void clear() { OpsVec.clear(); }

    /// \Returns true if there are enough operands identical to \p Op to fill
    /// the whole vector.
    /// Note: This modifies the 'IsUsed' flag, so a clearUsed() must follow.
    bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
      bool OpAPO = getData(OpIdx, Lane).APO;
      for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
        if (Ln == Lane)
          continue;
        // This is set to true if we found a candidate for broadcast at Lane.
        bool FoundCandidate = false;
        for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
          OperandData &Data = getData(OpI, Ln);
          if (Data.APO != OpAPO || Data.IsUsed)
            continue;
          if (Data.V == Op) {
            FoundCandidate = true;
            Data.IsUsed = true;
            break;
          }
        }
        if (!FoundCandidate)
          return false;
      }
      return true;
    }

  public:
    /// Initialize with all the operands of the instruction vector \p RootVL.
    VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
               ScalarEvolution &SE, const BoUpSLP &R)
        : DL(DL), SE(SE), R(R) {
      // Append all the operands of RootVL.
      appendOperandsOfVL(RootVL);
    }

    /// \Returns a value vector with the operands across all lanes for the
    /// operand at \p OpIdx.
    ValueList getVL(unsigned OpIdx) const {
      ValueList OpVL(OpsVec[OpIdx].size());
      assert(OpsVec[OpIdx].size() == getNumLanes() &&
             "Expected same num of lanes across all operands");
      for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
        OpVL[Lane] = OpsVec[OpIdx][Lane].V;
      return OpVL;
    }

    // Performs operand reordering for 2 or more operands.
    // The original operands are in OrigOps[OpIdx][Lane].
    // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'.
    void reorder() {
      unsigned NumOperands = getNumOperands();
      unsigned NumLanes = getNumLanes();
      // Each operand has its own mode. We are using this mode to help us select
      // the instructions for each lane, so that they match best with the ones
      // we have selected so far.
      SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);

      // This is a greedy single-pass algorithm. We are going over each lane
      // once and deciding on the best order right away with no back-tracking.
      // However, in order to increase its effectiveness, we start with the lane
      // that has operands that can move the least. For example, given the
      // following lanes:
      //  Lane 0 : A[0] = B[0] + C[0]   // Visited 3rd
      //  Lane 1 : A[1] = C[1] - B[1]   // Visited 1st
      //  Lane 2 : A[2] = B[2] + C[2]   // Visited 2nd
      //  Lane 3 : A[3] = C[3] - B[3]   // Visited 4th
      // we will start at Lane 1, since the operands of the subtraction cannot
      // be reordered. Then we will visit the rest of the lanes in a circular
      // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3.

      // Find the first lane that we will start our search from.
      unsigned FirstLane = getBestLaneToStartReordering();

      // Initialize the modes.
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
        Value *OpLane0 = getValue(OpIdx, FirstLane);
        // Keep track if we have instructions with all the same opcode on one
        // side.
        if (isa<LoadInst>(OpLane0))
          ReorderingModes[OpIdx] = ReorderingMode::Load;
        else if (isa<Instruction>(OpLane0)) {
          // Check if OpLane0 should be broadcast.
          if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
            ReorderingModes[OpIdx] = ReorderingMode::Splat;
          else
            ReorderingModes[OpIdx] = ReorderingMode::Opcode;
        } else if (isa<Constant>(OpLane0))
          ReorderingModes[OpIdx] = ReorderingMode::Constant;
        else if (isa<Argument>(OpLane0))
          // Our best hope is a Splat. It may save some cost in some cases.
          ReorderingModes[OpIdx] = ReorderingMode::Splat;
        else
          // NOTE: This should be unreachable.
          ReorderingModes[OpIdx] = ReorderingMode::Failed;
      }

      // If the initial strategy fails for any of the operand indexes, then we
      // perform reordering again in a second pass. This helps avoid assigning
      // high priority to the failed strategy, and should improve reordering for
      // the non-failed operand indexes.
      for (int Pass = 0; Pass != 2; ++Pass) {
        // Skip the second pass if the first pass did not fail.
        bool StrategyFailed = false;
        // Mark all operand data as free to use.
        clearUsed();
        // We keep the original operand order for the FirstLane, so reorder the
        // rest of the lanes. We are visiting the nodes in a circular fashion,
        // using FirstLane as the center point and increasing the radius
        // distance.
        for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
          // Visit the lane on the right and then the lane on the left.
          for (int Direction : {+1, -1}) {
            int Lane = FirstLane + Direction * Distance;
            if (Lane < 0 || Lane >= (int)NumLanes)
              continue;
            int LastLane = Lane - Direction;
            assert(LastLane >= 0 && LastLane < (int)NumLanes &&
                   "Out of bounds");
            // Look for a good match for each operand.
            for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
              // Search for the operand that matches SortedOps[OpIdx][Lane-1].
              Optional<unsigned> BestIdx =
                  getBestOperand(OpIdx, Lane, LastLane, ReorderingModes);
              // By not selecting a value, we allow the operands that follow to
              // select a better matching value. We will get a non-null value in
              // the next run of getBestOperand().
              if (BestIdx) {
                // Swap the current operand with the one returned by
                // getBestOperand().
                swap(OpIdx, BestIdx.getValue(), Lane);
              } else {
                // We failed to find a best operand, set mode to 'Failed'.
                ReorderingModes[OpIdx] = ReorderingMode::Failed;
                // Enable the second pass.
                StrategyFailed = true;
              }
            }
          }
        }
        // Skip second pass if the strategy did not fail.
        if (!StrategyFailed)
          break;
      }
    }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
      switch (RMode) {
      case ReorderingMode::Load:
        return "Load";
      case ReorderingMode::Opcode:
        return "Opcode";
      case ReorderingMode::Constant:
        return "Constant";
      case ReorderingMode::Splat:
        return "Splat";
      case ReorderingMode::Failed:
        return "Failed";
      }
      llvm_unreachable("Unimplemented Reordering Type");
    }

    LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
                                                   raw_ostream &OS) {
      return OS << getModeStr(RMode);
    }

    /// Debug print.
    LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
      printMode(RMode, dbgs());
    }

    friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
      return printMode(RMode, OS);
    }

    LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
      const unsigned Indent = 2;
      unsigned Cnt = 0;
      for (const OperandDataVec &OpDataVec : OpsVec) {
        OS << "Operand " << Cnt++ << "\n";
        for (const OperandData &OpData : OpDataVec) {
          OS.indent(Indent) << "{";
          if (Value *V = OpData.V)
            OS << *V;
          else
            OS << "null";
          OS << ", APO:" << OpData.APO << "}\n";
        }
        OS << "\n";
      }
      return OS;
    }

    /// Debug print.
    LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
#endif
  };

private:
  /// Checks if all users of \p I are part of the vectorization tree.
  bool areAllUsersVectorized(Instruction *I) const;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
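  /// Note (added description, not in the upstream comment): \p Depth is the
  /// current recursion depth, which is bounded by the RecursionMaxDepth
  /// option, and \p EI records the user TreeEntry and its operand index that
  /// the newly built entry will feed (see EdgeInfo).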
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
                     const EdgeInfo &EI);

  /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
  /// be vectorized to use the original vector (or aggregate "bitcast" to a
  /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
  /// returns false, setting \p CurrentOrder to either an empty vector or a
  /// non-identity permutation that allows to reuse extract instructions.
  bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
                       SmallVectorImpl<unsigned> &CurrentOrder) const;

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty, const DenseSet<unsigned> &ShuffledIndices) const;

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL) const;

  /// Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL,
                                 const InstructionsState &S);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even if the tree height is tiny.
  bool isFullyVectorizableTinyTree() const;

  /// Reorder commutative or alt operands to get better probability of
  /// generating vectorized code.
  static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                             SmallVectorImpl<Value *> &Left,
                                             SmallVectorImpl<Value *> &Right,
                                             const DataLayout &DL,
                                             ScalarEvolution &SE,
                                             const BoUpSLP &R);

  struct TreeEntry {
    using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
    TreeEntry(VecTreeTy &Container) : Container(Container) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      if (VL.size() == Scalars.size())
        return std::equal(VL.begin(), VL.end(), Scalars.begin());
      return VL.size() == ReuseShuffleIndices.size() &&
             std::equal(
                 VL.begin(), VL.end(), ReuseShuffleIndices.begin(),
                 [this](Value *V, unsigned Idx) { return V == Scalars[Idx]; });
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue = nullptr;

    /// Do we need to gather this sequence?
    bool NeedToGather = false;

    /// Does this sequence require some shuffling?
    SmallVector<unsigned, 4> ReuseShuffleIndices;

    /// Does this entry require reordering?
    ArrayRef<unsigned> ReorderIndices;

    /// Points back to the VectorizableTree.
    ///
    /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has
    /// to be a pointer and needs to be able to initialize the child iterator.
    /// Thus we need a reference back to the container to translate the indices
    /// to entries.
    VecTreeTy &Container;

    /// The TreeEntry index containing the user of this entry. We can actually
    /// have multiple users so the data structure is not truly a tree.
    SmallVector<EdgeInfo, 1> UserTreeIndices;

    /// The index of this treeEntry in VectorizableTree.
    int Idx = -1;

  private:
    /// The operands of each instruction in each lane Operands[op_index][lane].
    /// Note: This helps avoid the replication of the code that performs the
    /// reordering of operands during buildTree_rec() and vectorizeTree().
    SmallVector<ValueList, 2> Operands;

  public:
    /// Set this bundle's \p OpIdx'th operand to \p OpVL.
    void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL,
                    ArrayRef<unsigned> ReuseShuffleIndices) {
      if (Operands.size() < OpIdx + 1)
        Operands.resize(OpIdx + 1);
      assert(Operands[OpIdx].size() == 0 && "Already resized?");
      Operands[OpIdx].resize(Scalars.size());
      for (unsigned Lane = 0, E = Scalars.size(); Lane != E; ++Lane)
        Operands[OpIdx][Lane] = (!ReuseShuffleIndices.empty())
                                    ? OpVL[ReuseShuffleIndices[Lane]]
                                    : OpVL[Lane];
    }

    /// If there is a user TreeEntry, then set its operand.
    void trySetUserTEOperand(const EdgeInfo &UserTreeIdx,
                             ArrayRef<Value *> OpVL,
                             ArrayRef<unsigned> ReuseShuffleIndices) {
      if (UserTreeIdx.UserTE)
        UserTreeIdx.UserTE->setOperand(UserTreeIdx.EdgeIdx, OpVL,
                                       ReuseShuffleIndices);
    }

    /// \returns the \p OpIdx operand of this TreeEntry.
    ValueList &getOperand(unsigned OpIdx) {
      assert(OpIdx < Operands.size() && "Off bounds");
      return Operands[OpIdx];
    }

    /// \return the single \p OpIdx operand.
    Value *getSingleOperand(unsigned OpIdx) const {
      assert(OpIdx < Operands.size() && "Off bounds");
      assert(!Operands[OpIdx].empty() && "No operand available");
      return Operands[OpIdx][0];
    }

#ifndef NDEBUG
    /// Debug printer.
    LLVM_DUMP_METHOD void dump() const {
      dbgs() << Idx << ".\n";
      for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) {
        dbgs() << "Operand " << OpI << ":\n";
        for (const Value *V : Operands[OpI])
          dbgs().indent(2) << *V << "\n";
      }
      dbgs() << "Scalars: \n";
      for (Value *V : Scalars)
        dbgs().indent(2) << *V << "\n";
      dbgs() << "NeedToGather: " << NeedToGather << "\n";
      dbgs() << "VectorizedValue: ";
      if (VectorizedValue)
        dbgs() << *VectorizedValue;
      else
        dbgs() << "NULL";
      dbgs() << "\n";
      dbgs() << "ReuseShuffleIndices: ";
      if (ReuseShuffleIndices.empty())
        dbgs() << "Empty";
      else
        for (unsigned Idx : ReuseShuffleIndices)
          dbgs() << Idx << ", ";
      dbgs() << "\n";
      dbgs() << "ReorderIndices: ";
      for (unsigned Idx : ReorderIndices)
        dbgs() << Idx << ", ";
      dbgs() << "\n";
      dbgs() << "UserTreeIndices: ";
      for (const auto &EInfo : UserTreeIndices)
        dbgs() << EInfo << ", ";
      dbgs() << "\n";
    }
#endif
  };

  /// Create a new VectorizableTree entry.
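  /// Note (added description, not in the upstream comment): when \p Vectorized
  /// is true, the scalars in \p VL are registered in ScalarToTreeEntry so that
  /// getTreeEntry() can find the entry; otherwise they are added to MustGather.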
1486 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized, 1487 const EdgeInfo &UserTreeIdx, 1488 ArrayRef<unsigned> ReuseShuffleIndices = None, 1489 ArrayRef<unsigned> ReorderIndices = None) { 1490 VectorizableTree.push_back(llvm::make_unique<TreeEntry>(VectorizableTree)); 1491 TreeEntry *Last = VectorizableTree.back().get(); 1492 Last->Idx = VectorizableTree.size() - 1; 1493 Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end()); 1494 Last->NeedToGather = !Vectorized; 1495 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(), 1496 ReuseShuffleIndices.end()); 1497 Last->ReorderIndices = ReorderIndices; 1498 if (Vectorized) { 1499 for (int i = 0, e = VL.size(); i != e; ++i) { 1500 assert(!getTreeEntry(VL[i]) && "Scalar already in tree!"); 1501 ScalarToTreeEntry[VL[i]] = Last->Idx; 1502 } 1503 } else { 1504 MustGather.insert(VL.begin(), VL.end()); 1505 } 1506 1507 if (UserTreeIdx.UserTE) 1508 Last->UserTreeIndices.push_back(UserTreeIdx); 1509 1510 Last->trySetUserTEOperand(UserTreeIdx, VL, ReuseShuffleIndices); 1511 return Last; 1512 } 1513 1514 /// -- Vectorization State -- 1515 /// Holds all of the tree entries. 1516 TreeEntry::VecTreeTy VectorizableTree; 1517 1518 #ifndef NDEBUG 1519 /// Debug printer. 1520 LLVM_DUMP_METHOD void dumpVectorizableTree() const { 1521 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) { 1522 VectorizableTree[Id]->dump(); 1523 dbgs() << "\n"; 1524 } 1525 } 1526 #endif 1527 1528 TreeEntry *getTreeEntry(Value *V) { 1529 auto I = ScalarToTreeEntry.find(V); 1530 if (I != ScalarToTreeEntry.end()) 1531 return VectorizableTree[I->second].get(); 1532 return nullptr; 1533 } 1534 1535 const TreeEntry *getTreeEntry(Value *V) const { 1536 auto I = ScalarToTreeEntry.find(V); 1537 if (I != ScalarToTreeEntry.end()) 1538 return VectorizableTree[I->second].get(); 1539 return nullptr; 1540 } 1541 1542 /// Maps a specific scalar to its tree entry. 1543 SmallDenseMap<Value*, int> ScalarToTreeEntry; 1544 1545 /// A list of scalars that we found that we need to keep as scalars. 1546 ValueSet MustGather; 1547 1548 /// This POD struct describes one external user in the vectorized tree. 1549 struct ExternalUser { 1550 ExternalUser(Value *S, llvm::User *U, int L) 1551 : Scalar(S), User(U), Lane(L) {} 1552 1553 // Which scalar in our function. 1554 Value *Scalar; 1555 1556 // Which user that uses the scalar. 1557 llvm::User *User; 1558 1559 // Which lane does the scalar belong to. 1560 int Lane; 1561 }; 1562 using UserList = SmallVector<ExternalUser, 16>; 1563 1564 /// Checks if two instructions may access the same memory. 1565 /// 1566 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it 1567 /// is invariant in the calling loop. 1568 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1, 1569 Instruction *Inst2) { 1570 // First check if the result is already in the cache. 1571 AliasCacheKey key = std::make_pair(Inst1, Inst2); 1572 Optional<bool> &result = AliasCache[key]; 1573 if (result.hasValue()) { 1574 return result.getValue(); 1575 } 1576 MemoryLocation Loc2 = getLocation(Inst2, AA); 1577 bool aliased = true; 1578 if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) { 1579 // Do the alias check. 1580 aliased = AA->alias(Loc1, Loc2); 1581 } 1582 // Store the result in the cache. 1583 result = aliased; 1584 return aliased; 1585 } 1586 1587 using AliasCacheKey = std::pair<Instruction *, Instruction *>; 1588 1589 /// Cache for alias results. 
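/// Keyed by the (Inst1, Inst2) pair; a cached value of None means the query
/// has not been answered yet, so isAliased() computes and stores the result.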
1590 /// TODO: consider moving this to the AliasAnalysis itself. 1591 DenseMap<AliasCacheKey, Optional<bool>> AliasCache; 1592 1593 /// Removes an instruction from its block and eventually deletes it. 1594 /// It's like Instruction::eraseFromParent() except that the actual deletion 1595 /// is delayed until BoUpSLP is destructed. 1596 /// This is required to ensure that there are no incorrect collisions in the 1597 /// AliasCache, which can happen if a new instruction is allocated at the 1598 /// same address as a previously deleted instruction. 1599 void eraseInstruction(Instruction *I) { 1600 I->removeFromParent(); 1601 I->dropAllReferences(); 1602 DeletedInstructions.emplace_back(I); 1603 } 1604 1605 /// Temporary store for deleted instructions. Instructions will be deleted 1606 /// eventually when the BoUpSLP is destructed. 1607 SmallVector<unique_value, 8> DeletedInstructions; 1608 1609 /// A list of values that need to be extracted out of the tree. 1610 /// This list holds pairs of (Internal Scalar : External User). External User 1611 /// can be nullptr, which means that this Internal Scalar will be used later, 1612 /// after vectorization. 1613 UserList ExternalUses; 1614 1615 /// Values used only by @llvm.assume calls. 1616 SmallPtrSet<const Value *, 32> EphValues; 1617 1618 /// Holds all of the instructions that we gathered. 1619 SetVector<Instruction *> GatherSeq; 1620 1621 /// A list of blocks that we are going to CSE. 1622 SetVector<BasicBlock *> CSEBlocks; 1623 1624 /// Contains all scheduling relevant data for an instruction. 1625 /// A ScheduleData either represents a single instruction or a member of an 1626 /// instruction bundle (= a group of instructions which is combined into a 1627 /// vector instruction). 1628 struct ScheduleData { 1629 // The initial value for the dependency counters. It means that the 1630 // dependencies are not calculated yet. 1631 enum { InvalidDeps = -1 }; 1632 1633 ScheduleData() = default; 1634 1635 void init(int BlockSchedulingRegionID, Value *OpVal) { 1636 FirstInBundle = this; 1637 NextInBundle = nullptr; 1638 NextLoadStore = nullptr; 1639 IsScheduled = false; 1640 SchedulingRegionID = BlockSchedulingRegionID; 1641 UnscheduledDepsInBundle = UnscheduledDeps; 1642 clearDependencies(); 1643 OpValue = OpVal; 1644 } 1645 1646 /// Returns true if the dependency information has been calculated. 1647 bool hasValidDependencies() const { return Dependencies != InvalidDeps; } 1648 1649 /// Returns true for single instructions and for bundle representatives 1650 /// (= the head of a bundle). 1651 bool isSchedulingEntity() const { return FirstInBundle == this; } 1652 1653 /// Returns true if it represents an instruction bundle and not only a 1654 /// single instruction. 1655 bool isPartOfBundle() const { 1656 return NextInBundle != nullptr || FirstInBundle != this; 1657 } 1658 1659 /// Returns true if it is ready for scheduling, i.e. it has no more 1660 /// unscheduled dependent instructions/bundles. 1661 bool isReady() const { 1662 assert(isSchedulingEntity() && 1663 "can't consider non-scheduling entity for ready list"); 1664 return UnscheduledDepsInBundle == 0 && !IsScheduled; 1665 } 1666 1667 /// Modifies the number of unscheduled dependencies, also updating it for 1668 /// the whole bundle. 1669 int incrementUnscheduledDeps(int Incr) { 1670 UnscheduledDeps += Incr; 1671 return FirstInBundle->UnscheduledDepsInBundle += Incr; 1672 } 1673 1674 /// Sets the number of unscheduled dependencies to the number of 1675 /// dependencies.
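/// For instance, with Dependencies == 3 and UnscheduledDeps == 1 this adds 2,
/// so that UnscheduledDeps matches Dependencies again.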
1676 void resetUnscheduledDeps() { 1677 incrementUnscheduledDeps(Dependencies - UnscheduledDeps); 1678 } 1679 1680 /// Clears all dependency information. 1681 void clearDependencies() { 1682 Dependencies = InvalidDeps; 1683 resetUnscheduledDeps(); 1684 MemoryDependencies.clear(); 1685 } 1686 1687 void dump(raw_ostream &os) const { 1688 if (!isSchedulingEntity()) { 1689 os << "/ " << *Inst; 1690 } else if (NextInBundle) { 1691 os << '[' << *Inst; 1692 ScheduleData *SD = NextInBundle; 1693 while (SD) { 1694 os << ';' << *SD->Inst; 1695 SD = SD->NextInBundle; 1696 } 1697 os << ']'; 1698 } else { 1699 os << *Inst; 1700 } 1701 } 1702 1703 Instruction *Inst = nullptr; 1704 1705 /// Points to the head in an instruction bundle (and always to this for 1706 /// single instructions). 1707 ScheduleData *FirstInBundle = nullptr; 1708 1709 /// Single linked list of all instructions in a bundle. Null if it is a 1710 /// single instruction. 1711 ScheduleData *NextInBundle = nullptr; 1712 1713 /// Single linked list of all memory instructions (e.g. load, store, call) 1714 /// in the block - until the end of the scheduling region. 1715 ScheduleData *NextLoadStore = nullptr; 1716 1717 /// The dependent memory instructions. 1718 /// This list is derived on demand in calculateDependencies(). 1719 SmallVector<ScheduleData *, 4> MemoryDependencies; 1720 1721 /// This ScheduleData is in the current scheduling region if this matches 1722 /// the current SchedulingRegionID of BlockScheduling. 1723 int SchedulingRegionID = 0; 1724 1725 /// Used for getting a "good" final ordering of instructions. 1726 int SchedulingPriority = 0; 1727 1728 /// The number of dependencies. Constitutes of the number of users of the 1729 /// instruction plus the number of dependent memory instructions (if any). 1730 /// This value is calculated on demand. 1731 /// If InvalidDeps, the number of dependencies is not calculated yet. 1732 int Dependencies = InvalidDeps; 1733 1734 /// The number of dependencies minus the number of dependencies of scheduled 1735 /// instructions. As soon as this is zero, the instruction/bundle gets ready 1736 /// for scheduling. 1737 /// Note that this is negative as long as Dependencies is not calculated. 1738 int UnscheduledDeps = InvalidDeps; 1739 1740 /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for 1741 /// single instructions. 1742 int UnscheduledDepsInBundle = InvalidDeps; 1743 1744 /// True if this instruction is scheduled (or considered as scheduled in the 1745 /// dry-run). 1746 bool IsScheduled = false; 1747 1748 /// Opcode of the current instruction in the schedule data. 1749 Value *OpValue = nullptr; 1750 }; 1751 1752 #ifndef NDEBUG 1753 friend inline raw_ostream &operator<<(raw_ostream &os, 1754 const BoUpSLP::ScheduleData &SD) { 1755 SD.dump(os); 1756 return os; 1757 } 1758 #endif 1759 1760 friend struct GraphTraits<BoUpSLP *>; 1761 friend struct DOTGraphTraits<BoUpSLP *>; 1762 1763 /// Contains all scheduling data for a basic block. 1764 struct BlockScheduling { 1765 BlockScheduling(BasicBlock *BB) 1766 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {} 1767 1768 void clear() { 1769 ReadyInsts.clear(); 1770 ScheduleStart = nullptr; 1771 ScheduleEnd = nullptr; 1772 FirstLoadStoreInRegion = nullptr; 1773 LastLoadStoreInRegion = nullptr; 1774 1775 // Reduce the maximum schedule region size by the size of the 1776 // previous scheduling run. 
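// For example, the default 100000-instruction budget with a previous region
// of 30000 instructions leaves at most 70000 for the next region; the limit
// is never reduced below MinScheduleRegionSize.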
1777 ScheduleRegionSizeLimit -= ScheduleRegionSize; 1778 if (ScheduleRegionSizeLimit < MinScheduleRegionSize) 1779 ScheduleRegionSizeLimit = MinScheduleRegionSize; 1780 ScheduleRegionSize = 0; 1781 1782 // Make a new scheduling region, i.e. all existing ScheduleData is not 1783 // in the new region yet. 1784 ++SchedulingRegionID; 1785 } 1786 1787 ScheduleData *getScheduleData(Value *V) { 1788 ScheduleData *SD = ScheduleDataMap[V]; 1789 if (SD && SD->SchedulingRegionID == SchedulingRegionID) 1790 return SD; 1791 return nullptr; 1792 } 1793 1794 ScheduleData *getScheduleData(Value *V, Value *Key) { 1795 if (V == Key) 1796 return getScheduleData(V); 1797 auto I = ExtraScheduleDataMap.find(V); 1798 if (I != ExtraScheduleDataMap.end()) { 1799 ScheduleData *SD = I->second[Key]; 1800 if (SD && SD->SchedulingRegionID == SchedulingRegionID) 1801 return SD; 1802 } 1803 return nullptr; 1804 } 1805 1806 bool isInSchedulingRegion(ScheduleData *SD) { 1807 return SD->SchedulingRegionID == SchedulingRegionID; 1808 } 1809 1810 /// Marks an instruction as scheduled and puts all dependent ready 1811 /// instructions into the ready-list. 1812 template <typename ReadyListType> 1813 void schedule(ScheduleData *SD, ReadyListType &ReadyList) { 1814 SD->IsScheduled = true; 1815 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); 1816 1817 ScheduleData *BundleMember = SD; 1818 while (BundleMember) { 1819 if (BundleMember->Inst != BundleMember->OpValue) { 1820 BundleMember = BundleMember->NextInBundle; 1821 continue; 1822 } 1823 // Handle the def-use chain dependencies. 1824 for (Use &U : BundleMember->Inst->operands()) { 1825 auto *I = dyn_cast<Instruction>(U.get()); 1826 if (!I) 1827 continue; 1828 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) { 1829 if (OpDef && OpDef->hasValidDependencies() && 1830 OpDef->incrementUnscheduledDeps(-1) == 0) { 1831 // There are no more unscheduled dependencies after 1832 // decrementing, so we can put the dependent instruction 1833 // into the ready list. 1834 ScheduleData *DepBundle = OpDef->FirstInBundle; 1835 assert(!DepBundle->IsScheduled && 1836 "already scheduled bundle gets ready"); 1837 ReadyList.insert(DepBundle); 1838 LLVM_DEBUG(dbgs() 1839 << "SLP: gets ready (def): " << *DepBundle << "\n"); 1840 } 1841 }); 1842 } 1843 // Handle the memory dependencies. 1844 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) { 1845 if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) { 1846 // There are no more unscheduled dependencies after decrementing, 1847 // so we can put the dependent instruction into the ready list. 1848 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle; 1849 assert(!DepBundle->IsScheduled && 1850 "already scheduled bundle gets ready"); 1851 ReadyList.insert(DepBundle); 1852 LLVM_DEBUG(dbgs() 1853 << "SLP: gets ready (mem): " << *DepBundle << "\n"); 1854 } 1855 } 1856 BundleMember = BundleMember->NextInBundle; 1857 } 1858 } 1859 1860 void doForAllOpcodes(Value *V, 1861 function_ref<void(ScheduleData *SD)> Action) { 1862 if (ScheduleData *SD = getScheduleData(V)) 1863 Action(SD); 1864 auto I = ExtraScheduleDataMap.find(V); 1865 if (I != ExtraScheduleDataMap.end()) 1866 for (auto &P : I->second) 1867 if (P.second->SchedulingRegionID == SchedulingRegionID) 1868 Action(P.second); 1869 } 1870 1871 /// Put all instructions into the ReadyList which are ready for scheduling. 
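/// Only bundle heads (scheduling entities) whose unscheduled-dependency
/// counters are already zero are inserted.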
1872 template <typename ReadyListType> 1873 void initialFillReadyList(ReadyListType &ReadyList) { 1874 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 1875 doForAllOpcodes(I, [&](ScheduleData *SD) { 1876 if (SD->isSchedulingEntity() && SD->isReady()) { 1877 ReadyList.insert(SD); 1878 LLVM_DEBUG(dbgs() 1879 << "SLP: initially in ready list: " << *I << "\n"); 1880 } 1881 }); 1882 } 1883 } 1884 1885 /// Checks if a bundle of instructions can be scheduled, i.e. has no 1886 /// cyclic dependencies. This is only a dry-run, no instructions are 1887 /// actually moved at this stage. 1888 bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 1889 const InstructionsState &S); 1890 1891 /// Un-bundles a group of instructions. 1892 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue); 1893 1894 /// Allocates schedule data chunk. 1895 ScheduleData *allocateScheduleDataChunks(); 1896 1897 /// Extends the scheduling region so that V is inside the region. 1898 /// \returns true if the region size is within the limit. 1899 bool extendSchedulingRegion(Value *V, const InstructionsState &S); 1900 1901 /// Initialize the ScheduleData structures for new instructions in the 1902 /// scheduling region. 1903 void initScheduleData(Instruction *FromI, Instruction *ToI, 1904 ScheduleData *PrevLoadStore, 1905 ScheduleData *NextLoadStore); 1906 1907 /// Updates the dependency information of a bundle and of all instructions/ 1908 /// bundles which depend on the original bundle. 1909 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList, 1910 BoUpSLP *SLP); 1911 1912 /// Sets all instruction in the scheduling region to un-scheduled. 1913 void resetSchedule(); 1914 1915 BasicBlock *BB; 1916 1917 /// Simple memory allocation for ScheduleData. 1918 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks; 1919 1920 /// The size of a ScheduleData array in ScheduleDataChunks. 1921 int ChunkSize; 1922 1923 /// The allocator position in the current chunk, which is the last entry 1924 /// of ScheduleDataChunks. 1925 int ChunkPos; 1926 1927 /// Attaches ScheduleData to Instruction. 1928 /// Note that the mapping survives during all vectorization iterations, i.e. 1929 /// ScheduleData structures are recycled. 1930 DenseMap<Value *, ScheduleData *> ScheduleDataMap; 1931 1932 /// Attaches ScheduleData to Instruction with the leading key. 1933 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>> 1934 ExtraScheduleDataMap; 1935 1936 struct ReadyList : SmallVector<ScheduleData *, 8> { 1937 void insert(ScheduleData *SD) { push_back(SD); } 1938 }; 1939 1940 /// The ready-list for scheduling (only used for the dry-run). 1941 ReadyList ReadyInsts; 1942 1943 /// The first instruction of the scheduling region. 1944 Instruction *ScheduleStart = nullptr; 1945 1946 /// The first instruction _after_ the scheduling region. 1947 Instruction *ScheduleEnd = nullptr; 1948 1949 /// The first memory accessing instruction in the scheduling region 1950 /// (can be null). 1951 ScheduleData *FirstLoadStoreInRegion = nullptr; 1952 1953 /// The last memory accessing instruction in the scheduling region 1954 /// (can be null). 1955 ScheduleData *LastLoadStoreInRegion = nullptr; 1956 1957 /// The current size of the scheduling region. 1958 int ScheduleRegionSize = 0; 1959 1960 /// The maximum size allowed for the scheduling region. 1961 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget; 1962 1963 /// The ID of the scheduling region. 
For a new vectorization iteration this 1964 /// is incremented which "removes" all ScheduleData from the region. 1965 // Make sure that the initial SchedulingRegionID is greater than the 1966 // initial SchedulingRegionID in ScheduleData (which is 0). 1967 int SchedulingRegionID = 1; 1968 }; 1969 1970 /// Attaches the BlockScheduling structures to basic blocks. 1971 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules; 1972 1973 /// Performs the "real" scheduling. Done before vectorization is actually 1974 /// performed in a basic block. 1975 void scheduleBlock(BlockScheduling *BS); 1976 1977 /// List of users to ignore during scheduling and that don't need extracting. 1978 ArrayRef<Value *> UserIgnoreList; 1979 1980 using OrdersType = SmallVector<unsigned, 4>; 1981 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of 1982 /// sorted SmallVectors of unsigned. 1983 struct OrdersTypeDenseMapInfo { 1984 static OrdersType getEmptyKey() { 1985 OrdersType V; 1986 V.push_back(~1U); 1987 return V; 1988 } 1989 1990 static OrdersType getTombstoneKey() { 1991 OrdersType V; 1992 V.push_back(~2U); 1993 return V; 1994 } 1995 1996 static unsigned getHashValue(const OrdersType &V) { 1997 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); 1998 } 1999 2000 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) { 2001 return LHS == RHS; 2002 } 2003 }; 2004 2005 /// Contains orders of operations along with the number of bundles that have 2006 /// operations in this order. It stores only those orders that require 2007 /// reordering, if reordering is not required it is counted using \a 2008 /// NumOpsWantToKeepOriginalOrder. 2009 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo> NumOpsWantToKeepOrder; 2010 /// Number of bundles that do not require reordering. 2011 unsigned NumOpsWantToKeepOriginalOrder = 0; 2012 2013 // Analysis and block reference. 2014 Function *F; 2015 ScalarEvolution *SE; 2016 TargetTransformInfo *TTI; 2017 TargetLibraryInfo *TLI; 2018 AliasAnalysis *AA; 2019 LoopInfo *LI; 2020 DominatorTree *DT; 2021 AssumptionCache *AC; 2022 DemandedBits *DB; 2023 const DataLayout *DL; 2024 OptimizationRemarkEmitter *ORE; 2025 2026 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. 2027 unsigned MinVecRegSize; // Set by cl::opt (default: 128). 2028 2029 /// Instruction builder to construct the vectorized tree. 2030 IRBuilder<> Builder; 2031 2032 /// A map of scalar integer values to the smallest bit width with which they 2033 /// can legally be represented. The values map to (width, signed) pairs, 2034 /// where "width" indicates the minimum bit width and "signed" is True if the 2035 /// value must be signed-extended, rather than zero-extended, back to its 2036 /// original width. 2037 MapVector<Value *, std::pair<uint64_t, bool>> MinBWs; 2038 }; 2039 2040 } // end namespace slpvectorizer 2041 2042 template <> struct GraphTraits<BoUpSLP *> { 2043 using TreeEntry = BoUpSLP::TreeEntry; 2044 2045 /// NodeRef has to be a pointer per the GraphWriter. 2046 using NodeRef = TreeEntry *; 2047 2048 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy; 2049 2050 /// Add the VectorizableTree to the index iterator to be able to return 2051 /// TreeEntry pointers. 
2052 struct ChildIteratorType 2053 : public iterator_adaptor_base< 2054 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> { 2055 ContainerTy &VectorizableTree; 2056 2057 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W, 2058 ContainerTy &VT) 2059 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {} 2060 2061 NodeRef operator*() { return I->UserTE; } 2062 }; 2063 2064 static NodeRef getEntryNode(BoUpSLP &R) { 2065 return R.VectorizableTree[0].get(); 2066 } 2067 2068 static ChildIteratorType child_begin(NodeRef N) { 2069 return {N->UserTreeIndices.begin(), N->Container}; 2070 } 2071 2072 static ChildIteratorType child_end(NodeRef N) { 2073 return {N->UserTreeIndices.end(), N->Container}; 2074 } 2075 2076 /// For the node iterator we just need to turn the TreeEntry iterator into a 2077 /// TreeEntry* iterator so that it dereferences to NodeRef. 2078 class nodes_iterator { 2079 using ItTy = ContainerTy::iterator; 2080 ItTy It; 2081 2082 public: 2083 nodes_iterator(const ItTy &It2) : It(It2) {} 2084 NodeRef operator*() { return It->get(); } 2085 nodes_iterator operator++() { 2086 ++It; 2087 return *this; 2088 } 2089 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; } 2090 }; 2091 2092 static nodes_iterator nodes_begin(BoUpSLP *R) { 2093 return nodes_iterator(R->VectorizableTree.begin()); 2094 } 2095 2096 static nodes_iterator nodes_end(BoUpSLP *R) { 2097 return nodes_iterator(R->VectorizableTree.end()); 2098 } 2099 2100 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); } 2101 }; 2102 2103 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits { 2104 using TreeEntry = BoUpSLP::TreeEntry; 2105 2106 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {} 2107 2108 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) { 2109 std::string Str; 2110 raw_string_ostream OS(Str); 2111 if (isSplat(Entry->Scalars)) { 2112 OS << "<splat> " << *Entry->Scalars[0]; 2113 return Str; 2114 } 2115 for (auto V : Entry->Scalars) { 2116 OS << *V; 2117 if (std::any_of( 2118 R->ExternalUses.begin(), R->ExternalUses.end(), 2119 [&](const BoUpSLP::ExternalUser &EU) { return EU.Scalar == V; })) 2120 OS << " <extract>"; 2121 OS << "\n"; 2122 } 2123 return Str; 2124 } 2125 2126 static std::string getNodeAttributes(const TreeEntry *Entry, 2127 const BoUpSLP *) { 2128 if (Entry->NeedToGather) 2129 return "color=red"; 2130 return ""; 2131 } 2132 }; 2133 2134 } // end namespace llvm 2135 2136 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 2137 ArrayRef<Value *> UserIgnoreLst) { 2138 ExtraValueToDebugLocsMap ExternallyUsedValues; 2139 buildTree(Roots, ExternallyUsedValues, UserIgnoreLst); 2140 } 2141 2142 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 2143 ExtraValueToDebugLocsMap &ExternallyUsedValues, 2144 ArrayRef<Value *> UserIgnoreLst) { 2145 deleteTree(); 2146 UserIgnoreList = UserIgnoreLst; 2147 if (!allSameType(Roots)) 2148 return; 2149 buildTree_rec(Roots, 0, EdgeInfo()); 2150 2151 // Collect the values that we need to extract from the tree. 2152 for (auto &TEPtr : VectorizableTree) { 2153 TreeEntry *Entry = TEPtr.get(); 2154 2155 // No need to handle users of gathered values. 
2156 if (Entry->NeedToGather) 2157 continue; 2158 2159 // For each lane: 2160 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 2161 Value *Scalar = Entry->Scalars[Lane]; 2162 int FoundLane = Lane; 2163 if (!Entry->ReuseShuffleIndices.empty()) { 2164 FoundLane = 2165 std::distance(Entry->ReuseShuffleIndices.begin(), 2166 llvm::find(Entry->ReuseShuffleIndices, FoundLane)); 2167 } 2168 2169 // Check if the scalar is externally used as an extra arg. 2170 auto ExtI = ExternallyUsedValues.find(Scalar); 2171 if (ExtI != ExternallyUsedValues.end()) { 2172 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " 2173 << Lane << " from " << *Scalar << ".\n"); 2174 ExternalUses.emplace_back(Scalar, nullptr, FoundLane); 2175 } 2176 for (User *U : Scalar->users()) { 2177 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); 2178 2179 Instruction *UserInst = dyn_cast<Instruction>(U); 2180 if (!UserInst) 2181 continue; 2182 2183 // Skip in-tree scalars that become vectors 2184 if (TreeEntry *UseEntry = getTreeEntry(U)) { 2185 Value *UseScalar = UseEntry->Scalars[0]; 2186 // Some in-tree scalars will remain as scalar in vectorized 2187 // instructions. If that is the case, the one in Lane 0 will 2188 // be used. 2189 if (UseScalar != U || 2190 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) { 2191 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U 2192 << ".\n"); 2193 assert(!UseEntry->NeedToGather && "Bad state"); 2194 continue; 2195 } 2196 } 2197 2198 // Ignore users in the user ignore list. 2199 if (is_contained(UserIgnoreList, UserInst)) 2200 continue; 2201 2202 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " 2203 << Lane << " from " << *Scalar << ".\n"); 2204 ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane)); 2205 } 2206 } 2207 } 2208 } 2209 2210 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, 2211 const EdgeInfo &UserTreeIdx) { 2212 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!"); 2213 2214 InstructionsState S = getSameOpcode(VL); 2215 if (Depth == RecursionMaxDepth) { 2216 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); 2217 newTreeEntry(VL, false, UserTreeIdx); 2218 return; 2219 } 2220 2221 // Don't handle vectors. 2222 if (S.OpValue->getType()->isVectorTy()) { 2223 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); 2224 newTreeEntry(VL, false, UserTreeIdx); 2225 return; 2226 } 2227 2228 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 2229 if (SI->getValueOperand()->getType()->isVectorTy()) { 2230 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); 2231 newTreeEntry(VL, false, UserTreeIdx); 2232 return; 2233 } 2234 2235 // If all of the operands are identical or constant we have a simple solution. 2236 if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode()) { 2237 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n"); 2238 newTreeEntry(VL, false, UserTreeIdx); 2239 return; 2240 } 2241 2242 // We now know that this is a vector of instructions of the same type from 2243 // the same block. 2244 2245 // Don't vectorize ephemeral values. 2246 for (unsigned i = 0, e = VL.size(); i != e; ++i) { 2247 if (EphValues.count(VL[i])) { 2248 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] 2249 << ") is ephemeral.\n"); 2250 newTreeEntry(VL, false, UserTreeIdx); 2251 return; 2252 } 2253 } 2254 2255 // Check if this is a duplicate of another entry. 
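// For instance, when two ADD bundles share the same LOAD bundle as an
// operand, the second visit finds the existing entry and records an extra
// user edge (a "diamond" in the graph) instead of building a new node.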
2256 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 2257 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); 2258 if (!E->isSame(VL)) { 2259 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); 2260 newTreeEntry(VL, false, UserTreeIdx); 2261 return; 2262 } 2263 // Record the reuse of the tree node. FIXME, currently this is only used to 2264 // properly draw the graph rather than for the actual vectorization. 2265 E->UserTreeIndices.push_back(UserTreeIdx); 2266 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue 2267 << ".\n"); 2268 E->trySetUserTEOperand(UserTreeIdx, VL, None); 2269 return; 2270 } 2271 2272 // Check that none of the instructions in the bundle are already in the tree. 2273 for (unsigned i = 0, e = VL.size(); i != e; ++i) { 2274 auto *I = dyn_cast<Instruction>(VL[i]); 2275 if (!I) 2276 continue; 2277 if (getTreeEntry(I)) { 2278 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] 2279 << ") is already in tree.\n"); 2280 newTreeEntry(VL, false, UserTreeIdx); 2281 return; 2282 } 2283 } 2284 2285 // If any of the scalars is marked as a value that needs to stay scalar, then 2286 // we need to gather the scalars. 2287 // The reduction nodes (stored in UserIgnoreList) also should stay scalar. 2288 for (unsigned i = 0, e = VL.size(); i != e; ++i) { 2289 if (MustGather.count(VL[i]) || is_contained(UserIgnoreList, VL[i])) { 2290 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); 2291 newTreeEntry(VL, false, UserTreeIdx); 2292 return; 2293 } 2294 } 2295 2296 // Check that all of the users of the scalars that we want to vectorize are 2297 // schedulable. 2298 auto *VL0 = cast<Instruction>(S.OpValue); 2299 BasicBlock *BB = VL0->getParent(); 2300 2301 if (!DT->isReachableFromEntry(BB)) { 2302 // Don't go into unreachable blocks. They may contain instructions with 2303 // dependency cycles which confuse the final scheduling. 2304 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); 2305 newTreeEntry(VL, false, UserTreeIdx); 2306 return; 2307 } 2308 2309 // Check that every instruction appears once in this bundle. 2310 SmallVector<unsigned, 4> ReuseShuffleIndicies; 2311 SmallVector<Value *, 4> UniqueValues; 2312 DenseMap<Value *, unsigned> UniquePositions; 2313 for (Value *V : VL) { 2314 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 2315 ReuseShuffleIndicies.emplace_back(Res.first->second); 2316 if (Res.second) 2317 UniqueValues.emplace_back(V); 2318 } 2319 if (UniqueValues.size() == VL.size()) { 2320 ReuseShuffleIndicies.clear(); 2321 } else { 2322 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); 2323 if (UniqueValues.size() <= 1 || !llvm::isPowerOf2_32(UniqueValues.size())) { 2324 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 2325 newTreeEntry(VL, false, UserTreeIdx); 2326 return; 2327 } 2328 VL = UniqueValues; 2329 } 2330 2331 auto &BSRef = BlocksSchedules[BB]; 2332 if (!BSRef) 2333 BSRef = llvm::make_unique<BlockScheduling>(BB); 2334 2335 BlockScheduling &BS = *BSRef.get(); 2336 2337 if (!BS.tryScheduleBundle(VL, this, S)) { 2338 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); 2339 assert((!BS.getScheduleData(VL0) || 2340 !BS.getScheduleData(VL0)->isPartOfBundle()) && 2341 "tryScheduleBundle should cancelScheduling on failure"); 2342 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2343 return; 2344 } 2345 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 2346 2347 unsigned ShuffleOrOp = S.isAltShuffle() ? 
2348 (unsigned) Instruction::ShuffleVector : S.getOpcode(); 2349 switch (ShuffleOrOp) { 2350 case Instruction::PHI: { 2351 PHINode *PH = dyn_cast<PHINode>(VL0); 2352 2353 // Check for terminator values (e.g. invoke). 2354 for (unsigned j = 0; j < VL.size(); ++j) 2355 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 2356 Instruction *Term = dyn_cast<Instruction>( 2357 cast<PHINode>(VL[j])->getIncomingValueForBlock( 2358 PH->getIncomingBlock(i))); 2359 if (Term && Term->isTerminator()) { 2360 LLVM_DEBUG(dbgs() 2361 << "SLP: Need to swizzle PHINodes (terminator use).\n"); 2362 BS.cancelScheduling(VL, VL0); 2363 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2364 return; 2365 } 2366 } 2367 2368 auto *TE = newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); 2369 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 2370 2371 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 2372 ValueList Operands; 2373 // Prepare the operand vector. 2374 for (Value *j : VL) 2375 Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock( 2376 PH->getIncomingBlock(i))); 2377 2378 buildTree_rec(Operands, Depth + 1, {TE, i}); 2379 } 2380 return; 2381 } 2382 case Instruction::ExtractValue: 2383 case Instruction::ExtractElement: { 2384 OrdersType CurrentOrder; 2385 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder); 2386 if (Reuse) { 2387 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n"); 2388 ++NumOpsWantToKeepOriginalOrder; 2389 newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx, 2390 ReuseShuffleIndicies); 2391 // This is a special case, as it does not gather, but at the same time 2392 // we are not extending buildTree_rec() towards the operands. 2393 ValueList Op0; 2394 Op0.assign(VL.size(), VL0->getOperand(0)); 2395 VectorizableTree.back()->setOperand(0, Op0, ReuseShuffleIndicies); 2396 return; 2397 } 2398 if (!CurrentOrder.empty()) { 2399 LLVM_DEBUG({ 2400 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence " 2401 "with order"; 2402 for (unsigned Idx : CurrentOrder) 2403 dbgs() << " " << Idx; 2404 dbgs() << "\n"; 2405 }); 2406 // Insert new order with initial value 0, if it does not exist, 2407 // otherwise return the iterator to the existing one. 2408 auto StoredCurrentOrderAndNum = 2409 NumOpsWantToKeepOrder.try_emplace(CurrentOrder).first; 2410 ++StoredCurrentOrderAndNum->getSecond(); 2411 newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx, ReuseShuffleIndicies, 2412 StoredCurrentOrderAndNum->getFirst()); 2413 // This is a special case, as it does not gather, but at the same time 2414 // we are not extending buildTree_rec() towards the operands. 2415 ValueList Op0; 2416 Op0.assign(VL.size(), VL0->getOperand(0)); 2417 VectorizableTree.back()->setOperand(0, Op0, ReuseShuffleIndicies); 2418 return; 2419 } 2420 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n"); 2421 newTreeEntry(VL, /*Vectorized=*/false, UserTreeIdx, ReuseShuffleIndicies); 2422 BS.cancelScheduling(VL, VL0); 2423 return; 2424 } 2425 case Instruction::Load: { 2426 // Check that a vectorized load would load the same memory as a scalar 2427 // load. For example, we don't want to vectorize loads that are smaller 2428 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 2429 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 2430 // from such a struct, we read/write packed bits disagreeing with the 2431 // unvectorized version. 
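// For example, an i2 load has a type size of 2 bits but an alloc size of 8
// bits, so the check below rejects it and the bundle is gathered instead.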
2432 Type *ScalarTy = VL0->getType(); 2433 2434 if (DL->getTypeSizeInBits(ScalarTy) != 2435 DL->getTypeAllocSizeInBits(ScalarTy)) { 2436 BS.cancelScheduling(VL, VL0); 2437 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2438 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); 2439 return; 2440 } 2441 2442 // Make sure all loads in the bundle are simple - we can't vectorize 2443 // atomic or volatile loads. 2444 SmallVector<Value *, 4> PointerOps(VL.size()); 2445 auto POIter = PointerOps.begin(); 2446 for (Value *V : VL) { 2447 auto *L = cast<LoadInst>(V); 2448 if (!L->isSimple()) { 2449 BS.cancelScheduling(VL, VL0); 2450 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2451 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); 2452 return; 2453 } 2454 *POIter = L->getPointerOperand(); 2455 ++POIter; 2456 } 2457 2458 OrdersType CurrentOrder; 2459 // Check the order of pointer operands. 2460 if (llvm::sortPtrAccesses(PointerOps, *DL, *SE, CurrentOrder)) { 2461 Value *Ptr0; 2462 Value *PtrN; 2463 if (CurrentOrder.empty()) { 2464 Ptr0 = PointerOps.front(); 2465 PtrN = PointerOps.back(); 2466 } else { 2467 Ptr0 = PointerOps[CurrentOrder.front()]; 2468 PtrN = PointerOps[CurrentOrder.back()]; 2469 } 2470 const SCEV *Scev0 = SE->getSCEV(Ptr0); 2471 const SCEV *ScevN = SE->getSCEV(PtrN); 2472 const auto *Diff = 2473 dyn_cast<SCEVConstant>(SE->getMinusSCEV(ScevN, Scev0)); 2474 uint64_t Size = DL->getTypeAllocSize(ScalarTy); 2475 // Check that the sorted loads are consecutive. 2476 if (Diff && Diff->getAPInt().getZExtValue() == (VL.size() - 1) * Size) { 2477 if (CurrentOrder.empty()) { 2478 // Original loads are consecutive and does not require reordering. 2479 ++NumOpsWantToKeepOriginalOrder; 2480 newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx, 2481 ReuseShuffleIndicies); 2482 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n"); 2483 } else { 2484 // Need to reorder. 2485 auto I = NumOpsWantToKeepOrder.try_emplace(CurrentOrder).first; 2486 ++I->getSecond(); 2487 newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx, 2488 ReuseShuffleIndicies, I->getFirst()); 2489 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n"); 2490 } 2491 return; 2492 } 2493 } 2494 2495 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); 2496 BS.cancelScheduling(VL, VL0); 2497 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2498 return; 2499 } 2500 case Instruction::ZExt: 2501 case Instruction::SExt: 2502 case Instruction::FPToUI: 2503 case Instruction::FPToSI: 2504 case Instruction::FPExt: 2505 case Instruction::PtrToInt: 2506 case Instruction::IntToPtr: 2507 case Instruction::SIToFP: 2508 case Instruction::UIToFP: 2509 case Instruction::Trunc: 2510 case Instruction::FPTrunc: 2511 case Instruction::BitCast: { 2512 Type *SrcTy = VL0->getOperand(0)->getType(); 2513 for (unsigned i = 0; i < VL.size(); ++i) { 2514 Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType(); 2515 if (Ty != SrcTy || !isValidElementType(Ty)) { 2516 BS.cancelScheduling(VL, VL0); 2517 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2518 LLVM_DEBUG(dbgs() 2519 << "SLP: Gathering casts with different src types.\n"); 2520 return; 2521 } 2522 } 2523 auto *TE = newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); 2524 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); 2525 2526 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 2527 ValueList Operands; 2528 // Prepare the operand vector. 
2529 for (Value *j : VL) 2530 Operands.push_back(cast<Instruction>(j)->getOperand(i)); 2531 2532 buildTree_rec(Operands, Depth + 1, {TE, i}); 2533 } 2534 return; 2535 } 2536 case Instruction::ICmp: 2537 case Instruction::FCmp: { 2538 // Check that all of the compares have the same predicate. 2539 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 2540 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0); 2541 Type *ComparedTy = VL0->getOperand(0)->getType(); 2542 for (unsigned i = 1, e = VL.size(); i < e; ++i) { 2543 CmpInst *Cmp = cast<CmpInst>(VL[i]); 2544 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) || 2545 Cmp->getOperand(0)->getType() != ComparedTy) { 2546 BS.cancelScheduling(VL, VL0); 2547 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2548 LLVM_DEBUG(dbgs() 2549 << "SLP: Gathering cmp with different predicate.\n"); 2550 return; 2551 } 2552 } 2553 2554 auto *TE = newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); 2555 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 2556 2557 ValueList Left, Right; 2558 if (cast<CmpInst>(VL0)->isCommutative()) { 2559 // Commutative predicate - collect + sort operands of the instructions 2560 // so that each side is more likely to have the same opcode. 2561 assert(P0 == SwapP0 && "Commutative Predicate mismatch"); 2562 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 2563 } else { 2564 // Collect operands - commute if it uses the swapped predicate. 2565 for (Value *V : VL) { 2566 auto *Cmp = cast<CmpInst>(V); 2567 Value *LHS = Cmp->getOperand(0); 2568 Value *RHS = Cmp->getOperand(1); 2569 if (Cmp->getPredicate() != P0) 2570 std::swap(LHS, RHS); 2571 Left.push_back(LHS); 2572 Right.push_back(RHS); 2573 } 2574 } 2575 2576 buildTree_rec(Left, Depth + 1, {TE, 0}); 2577 buildTree_rec(Right, Depth + 1, {TE, 1}); 2578 return; 2579 } 2580 case Instruction::Select: 2581 case Instruction::FNeg: 2582 case Instruction::Add: 2583 case Instruction::FAdd: 2584 case Instruction::Sub: 2585 case Instruction::FSub: 2586 case Instruction::Mul: 2587 case Instruction::FMul: 2588 case Instruction::UDiv: 2589 case Instruction::SDiv: 2590 case Instruction::FDiv: 2591 case Instruction::URem: 2592 case Instruction::SRem: 2593 case Instruction::FRem: 2594 case Instruction::Shl: 2595 case Instruction::LShr: 2596 case Instruction::AShr: 2597 case Instruction::And: 2598 case Instruction::Or: 2599 case Instruction::Xor: { 2600 auto *TE = newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); 2601 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n"); 2602 2603 // Sort operands of the instructions so that each side is more likely to 2604 // have the same opcode. 2605 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 2606 ValueList Left, Right; 2607 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 2608 buildTree_rec(Left, Depth + 1, {TE, 0}); 2609 buildTree_rec(Right, Depth + 1, {TE, 1}); 2610 return; 2611 } 2612 2613 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 2614 ValueList Operands; 2615 // Prepare the operand vector. 2616 for (Value *j : VL) 2617 Operands.push_back(cast<Instruction>(j)->getOperand(i)); 2618 2619 buildTree_rec(Operands, Depth + 1, {TE, i}); 2620 } 2621 return; 2622 } 2623 case Instruction::GetElementPtr: { 2624 // We don't combine GEPs with complicated (nested) indexing. 
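// E.g. 'getelementptr i32, i32* %p, i64 1' (one pointer, one index) is fine,
// while a GEP with several indices into a nested aggregate has more than two
// operands and is rejected below.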
2625 for (unsigned j = 0; j < VL.size(); ++j) { 2626 if (cast<Instruction>(VL[j])->getNumOperands() != 2) { 2627 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 2628 BS.cancelScheduling(VL, VL0); 2629 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2630 return; 2631 } 2632 } 2633 2634 // We can't combine several GEPs into one vector if they operate on 2635 // different types. 2636 Type *Ty0 = VL0->getOperand(0)->getType(); 2637 for (unsigned j = 0; j < VL.size(); ++j) { 2638 Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType(); 2639 if (Ty0 != CurTy) { 2640 LLVM_DEBUG(dbgs() 2641 << "SLP: not-vectorizable GEP (different types).\n"); 2642 BS.cancelScheduling(VL, VL0); 2643 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2644 return; 2645 } 2646 } 2647 2648 // We don't combine GEPs with non-constant indexes. 2649 for (unsigned j = 0; j < VL.size(); ++j) { 2650 auto Op = cast<Instruction>(VL[j])->getOperand(1); 2651 if (!isa<ConstantInt>(Op)) { 2652 LLVM_DEBUG(dbgs() 2653 << "SLP: not-vectorizable GEP (non-constant indexes).\n"); 2654 BS.cancelScheduling(VL, VL0); 2655 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2656 return; 2657 } 2658 } 2659 2660 auto *TE = newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); 2661 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n"); 2662 for (unsigned i = 0, e = 2; i < e; ++i) { 2663 ValueList Operands; 2664 // Prepare the operand vector. 2665 for (Value *j : VL) 2666 Operands.push_back(cast<Instruction>(j)->getOperand(i)); 2667 2668 buildTree_rec(Operands, Depth + 1, {TE, i}); 2669 } 2670 return; 2671 } 2672 case Instruction::Store: { 2673 // Check if the stores are consecutive or if we need to swizzle them. 2674 for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) 2675 if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) { 2676 BS.cancelScheduling(VL, VL0); 2677 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2678 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); 2679 return; 2680 } 2681 2682 auto *TE = newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); 2683 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n"); 2684 2685 ValueList Operands; 2686 for (Value *j : VL) 2687 Operands.push_back(cast<Instruction>(j)->getOperand(0)); 2688 2689 buildTree_rec(Operands, Depth + 1, {TE, 0}); 2690 return; 2691 } 2692 case Instruction::Call: { 2693 // Check if the calls are all to the same vectorizable intrinsic.
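// Roughly: four calls to @llvm.fabs.f32 may become a single @llvm.fabs.v4f32,
// whereas calls that do not map to a vectorizable intrinsic fail the
// isTriviallyVectorizable() check below and are gathered.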
2694 CallInst *CI = cast<CallInst>(VL0); 2695 // Check if this is an Intrinsic call or something that can be 2696 // represented by an intrinsic call 2697 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 2698 if (!isTriviallyVectorizable(ID)) { 2699 BS.cancelScheduling(VL, VL0); 2700 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2701 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n"); 2702 return; 2703 } 2704 Function *Int = CI->getCalledFunction(); 2705 unsigned NumArgs = CI->getNumArgOperands(); 2706 SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr); 2707 for (unsigned j = 0; j != NumArgs; ++j) 2708 if (hasVectorInstrinsicScalarOpd(ID, j)) 2709 ScalarArgs[j] = CI->getArgOperand(j); 2710 for (unsigned i = 1, e = VL.size(); i != e; ++i) { 2711 CallInst *CI2 = dyn_cast<CallInst>(VL[i]); 2712 if (!CI2 || CI2->getCalledFunction() != Int || 2713 getVectorIntrinsicIDForCall(CI2, TLI) != ID || 2714 !CI->hasIdenticalOperandBundleSchema(*CI2)) { 2715 BS.cancelScheduling(VL, VL0); 2716 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2717 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i] 2718 << "\n"); 2719 return; 2720 } 2721 // Some intrinsics have scalar arguments and should be same in order for 2722 // them to be vectorized. 2723 for (unsigned j = 0; j != NumArgs; ++j) { 2724 if (hasVectorInstrinsicScalarOpd(ID, j)) { 2725 Value *A1J = CI2->getArgOperand(j); 2726 if (ScalarArgs[j] != A1J) { 2727 BS.cancelScheduling(VL, VL0); 2728 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2729 LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI 2730 << " argument " << ScalarArgs[j] << "!=" << A1J 2731 << "\n"); 2732 return; 2733 } 2734 } 2735 } 2736 // Verify that the bundle operands are identical between the two calls. 2737 if (CI->hasOperandBundles() && 2738 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(), 2739 CI->op_begin() + CI->getBundleOperandsEndIndex(), 2740 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) { 2741 BS.cancelScheduling(VL, VL0); 2742 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2743 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" 2744 << *CI << "!=" << *VL[i] << '\n'); 2745 return; 2746 } 2747 } 2748 2749 auto *TE = newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); 2750 for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) { 2751 ValueList Operands; 2752 // Prepare the operand vector. 2753 for (Value *j : VL) { 2754 CallInst *CI2 = dyn_cast<CallInst>(j); 2755 Operands.push_back(CI2->getArgOperand(i)); 2756 } 2757 buildTree_rec(Operands, Depth + 1, {TE, i}); 2758 } 2759 return; 2760 } 2761 case Instruction::ShuffleVector: { 2762 // If this is not an alternate sequence of opcode like add-sub 2763 // then do not vectorize this instruction. 2764 if (!S.isAltShuffle()) { 2765 BS.cancelScheduling(VL, VL0); 2766 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2767 LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n"); 2768 return; 2769 } 2770 auto *TE = newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); 2771 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n"); 2772 2773 // Reorder operands if reordering would enable vectorization. 
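// A typical alternating sequence is {a0+b0, a1-b1, a2+b2, a3-b3}; the
// operands are reordered so that each of Left and Right is more likely to
// contain values with the same opcode and therefore to vectorize.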
2774 if (isa<BinaryOperator>(VL0)) { 2775 ValueList Left, Right; 2776 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 2777 buildTree_rec(Left, Depth + 1, {TE, 0}); 2778 buildTree_rec(Right, Depth + 1, {TE, 1}); 2779 return; 2780 } 2781 2782 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 2783 ValueList Operands; 2784 // Prepare the operand vector. 2785 for (Value *j : VL) 2786 Operands.push_back(cast<Instruction>(j)->getOperand(i)); 2787 2788 buildTree_rec(Operands, Depth + 1, {TE, i}); 2789 } 2790 return; 2791 } 2792 default: 2793 BS.cancelScheduling(VL, VL0); 2794 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2795 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); 2796 return; 2797 } 2798 } 2799 2800 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const { 2801 unsigned N; 2802 Type *EltTy; 2803 auto *ST = dyn_cast<StructType>(T); 2804 if (ST) { 2805 N = ST->getNumElements(); 2806 EltTy = *ST->element_begin(); 2807 } else { 2808 N = cast<ArrayType>(T)->getNumElements(); 2809 EltTy = cast<ArrayType>(T)->getElementType(); 2810 } 2811 if (!isValidElementType(EltTy)) 2812 return 0; 2813 uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N)); 2814 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T)) 2815 return 0; 2816 if (ST) { 2817 // Check that struct is homogeneous. 2818 for (const auto *Ty : ST->elements()) 2819 if (Ty != EltTy) 2820 return 0; 2821 } 2822 return N; 2823 } 2824 2825 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 2826 SmallVectorImpl<unsigned> &CurrentOrder) const { 2827 Instruction *E0 = cast<Instruction>(OpValue); 2828 assert(E0->getOpcode() == Instruction::ExtractElement || 2829 E0->getOpcode() == Instruction::ExtractValue); 2830 assert(E0->getOpcode() == getSameOpcode(VL).getOpcode() && "Invalid opcode"); 2831 // Check if all of the extracts come from the same vector and from the 2832 // correct offset. 2833 Value *Vec = E0->getOperand(0); 2834 2835 CurrentOrder.clear(); 2836 2837 // We have to extract from a vector/aggregate with the same number of elements. 2838 unsigned NElts; 2839 if (E0->getOpcode() == Instruction::ExtractValue) { 2840 const DataLayout &DL = E0->getModule()->getDataLayout(); 2841 NElts = canMapToVector(Vec->getType(), DL); 2842 if (!NElts) 2843 return false; 2844 // Check if load can be rewritten as load of vector. 2845 LoadInst *LI = dyn_cast<LoadInst>(Vec); 2846 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 2847 return false; 2848 } else { 2849 NElts = Vec->getType()->getVectorNumElements(); 2850 } 2851 2852 if (NElts != VL.size()) 2853 return false; 2854 2855 // Check that all of the indices extract from the correct offset. 2856 bool ShouldKeepOrder = true; 2857 unsigned E = VL.size(); 2858 // Assign to all items the initial value E + 1 so we can check if the extract 2859 // instruction index was used already. 2860 // Also, later we can check that all the indices are used and we have a 2861 // consecutive access in the extract instructions, by checking that no 2862 // element of CurrentOrder still has value E + 1. 
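// For example, extracts of elements 1, 0, 2, 3 yield CurrentOrder =
// {1, 0, 2, 3} and a false return value; extracts of 0, 1, 2, 3 keep the
// identity order and return true.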
2863 CurrentOrder.assign(E, E + 1); 2864 unsigned I = 0; 2865 for (; I < E; ++I) { 2866 auto *Inst = cast<Instruction>(VL[I]); 2867 if (Inst->getOperand(0) != Vec) 2868 break; 2869 Optional<unsigned> Idx = getExtractIndex(Inst); 2870 if (!Idx) 2871 break; 2872 const unsigned ExtIdx = *Idx; 2873 if (ExtIdx != I) { 2874 if (ExtIdx >= E || CurrentOrder[ExtIdx] != E + 1) 2875 break; 2876 ShouldKeepOrder = false; 2877 CurrentOrder[ExtIdx] = I; 2878 } else { 2879 if (CurrentOrder[I] != E + 1) 2880 break; 2881 CurrentOrder[I] = I; 2882 } 2883 } 2884 if (I < E) { 2885 CurrentOrder.clear(); 2886 return false; 2887 } 2888 2889 return ShouldKeepOrder; 2890 } 2891 2892 bool BoUpSLP::areAllUsersVectorized(Instruction *I) const { 2893 return I->hasOneUse() || 2894 std::all_of(I->user_begin(), I->user_end(), [this](User *U) { 2895 return ScalarToTreeEntry.count(U) > 0; 2896 }); 2897 } 2898 2899 int BoUpSLP::getEntryCost(TreeEntry *E) { 2900 ArrayRef<Value*> VL = E->Scalars; 2901 2902 Type *ScalarTy = VL[0]->getType(); 2903 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 2904 ScalarTy = SI->getValueOperand()->getType(); 2905 else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0])) 2906 ScalarTy = CI->getOperand(0)->getType(); 2907 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 2908 2909 // If we have computed a smaller type for the expression, update VecTy so 2910 // that the costs will be accurate. 2911 if (MinBWs.count(VL[0])) 2912 VecTy = VectorType::get( 2913 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size()); 2914 2915 unsigned ReuseShuffleNumbers = E->ReuseShuffleIndices.size(); 2916 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 2917 int ReuseShuffleCost = 0; 2918 if (NeedToShuffleReuses) { 2919 ReuseShuffleCost = 2920 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy); 2921 } 2922 if (E->NeedToGather) { 2923 if (allConstant(VL)) 2924 return 0; 2925 if (isSplat(VL)) { 2926 return ReuseShuffleCost + 2927 TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0); 2928 } 2929 if (getSameOpcode(VL).getOpcode() == Instruction::ExtractElement && 2930 allSameType(VL) && allSameBlock(VL)) { 2931 Optional<TargetTransformInfo::ShuffleKind> ShuffleKind = isShuffle(VL); 2932 if (ShuffleKind.hasValue()) { 2933 int Cost = TTI->getShuffleCost(ShuffleKind.getValue(), VecTy); 2934 for (auto *V : VL) { 2935 // If all users of instruction are going to be vectorized and this 2936 // instruction itself is not going to be vectorized, consider this 2937 // instruction as dead and remove its cost from the final cost of the 2938 // vectorized tree. 2939 if (areAllUsersVectorized(cast<Instruction>(V)) && 2940 !ScalarToTreeEntry.count(V)) { 2941 auto *IO = cast<ConstantInt>( 2942 cast<ExtractElementInst>(V)->getIndexOperand()); 2943 Cost -= TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, 2944 IO->getZExtValue()); 2945 } 2946 } 2947 return ReuseShuffleCost + Cost; 2948 } 2949 } 2950 return ReuseShuffleCost + getGatherCost(VL); 2951 } 2952 InstructionsState S = getSameOpcode(VL); 2953 assert(S.getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL"); 2954 Instruction *VL0 = cast<Instruction>(S.OpValue); 2955 unsigned ShuffleOrOp = S.isAltShuffle() ? 
2956 (unsigned) Instruction::ShuffleVector : S.getOpcode(); 2957 switch (ShuffleOrOp) { 2958 case Instruction::PHI: 2959 return 0; 2960 2961 case Instruction::ExtractValue: 2962 case Instruction::ExtractElement: 2963 if (NeedToShuffleReuses) { 2964 unsigned Idx = 0; 2965 for (unsigned I : E->ReuseShuffleIndices) { 2966 if (ShuffleOrOp == Instruction::ExtractElement) { 2967 auto *IO = cast<ConstantInt>( 2968 cast<ExtractElementInst>(VL[I])->getIndexOperand()); 2969 Idx = IO->getZExtValue(); 2970 ReuseShuffleCost -= TTI->getVectorInstrCost( 2971 Instruction::ExtractElement, VecTy, Idx); 2972 } else { 2973 ReuseShuffleCost -= TTI->getVectorInstrCost( 2974 Instruction::ExtractElement, VecTy, Idx); 2975 ++Idx; 2976 } 2977 } 2978 Idx = ReuseShuffleNumbers; 2979 for (Value *V : VL) { 2980 if (ShuffleOrOp == Instruction::ExtractElement) { 2981 auto *IO = cast<ConstantInt>( 2982 cast<ExtractElementInst>(V)->getIndexOperand()); 2983 Idx = IO->getZExtValue(); 2984 } else { 2985 --Idx; 2986 } 2987 ReuseShuffleCost += 2988 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, Idx); 2989 } 2990 } 2991 if (!E->NeedToGather) { 2992 int DeadCost = ReuseShuffleCost; 2993 if (!E->ReorderIndices.empty()) { 2994 // TODO: Merge this shuffle with the ReuseShuffleCost. 2995 DeadCost += TTI->getShuffleCost( 2996 TargetTransformInfo::SK_PermuteSingleSrc, VecTy); 2997 } 2998 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 2999 Instruction *E = cast<Instruction>(VL[i]); 3000 // If all users are going to be vectorized, instruction can be 3001 // considered as dead. 3002 // The same, if have only one user, it will be vectorized for sure. 3003 if (areAllUsersVectorized(E)) { 3004 // Take credit for instruction that will become dead. 3005 if (E->hasOneUse()) { 3006 Instruction *Ext = E->user_back(); 3007 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 3008 all_of(Ext->users(), 3009 [](User *U) { return isa<GetElementPtrInst>(U); })) { 3010 // Use getExtractWithExtendCost() to calculate the cost of 3011 // extractelement/ext pair. 3012 DeadCost -= TTI->getExtractWithExtendCost( 3013 Ext->getOpcode(), Ext->getType(), VecTy, i); 3014 // Add back the cost of s|zext which is subtracted separately. 3015 DeadCost += TTI->getCastInstrCost( 3016 Ext->getOpcode(), Ext->getType(), E->getType(), Ext); 3017 continue; 3018 } 3019 } 3020 DeadCost -= 3021 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i); 3022 } 3023 } 3024 return DeadCost; 3025 } 3026 return ReuseShuffleCost + getGatherCost(VL); 3027 3028 case Instruction::ZExt: 3029 case Instruction::SExt: 3030 case Instruction::FPToUI: 3031 case Instruction::FPToSI: 3032 case Instruction::FPExt: 3033 case Instruction::PtrToInt: 3034 case Instruction::IntToPtr: 3035 case Instruction::SIToFP: 3036 case Instruction::UIToFP: 3037 case Instruction::Trunc: 3038 case Instruction::FPTrunc: 3039 case Instruction::BitCast: { 3040 Type *SrcTy = VL0->getOperand(0)->getType(); 3041 int ScalarEltCost = 3042 TTI->getCastInstrCost(S.getOpcode(), ScalarTy, SrcTy, VL0); 3043 if (NeedToShuffleReuses) { 3044 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 3045 } 3046 3047 // Calculate the cost of this instruction. 3048 int ScalarCost = VL.size() * ScalarEltCost; 3049 3050 VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size()); 3051 int VecCost = 0; 3052 // Check if the values are candidates to demote. 
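// Sketch of the logic below: when VL0 is in MinBWs and the demoted VecTy
// already equals SrcVecTy, the cast is effectively a no-op and VecCost is
// left at zero.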
3053 if (!MinBWs.count(VL0) || VecTy != SrcVecTy) { 3054 VecCost = ReuseShuffleCost + 3055 TTI->getCastInstrCost(S.getOpcode(), VecTy, SrcVecTy, VL0); 3056 } 3057 return VecCost - ScalarCost; 3058 } 3059 case Instruction::FCmp: 3060 case Instruction::ICmp: 3061 case Instruction::Select: { 3062 // Calculate the cost of this instruction. 3063 int ScalarEltCost = TTI->getCmpSelInstrCost(S.getOpcode(), ScalarTy, 3064 Builder.getInt1Ty(), VL0); 3065 if (NeedToShuffleReuses) { 3066 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 3067 } 3068 VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size()); 3069 int ScalarCost = VecTy->getNumElements() * ScalarEltCost; 3070 int VecCost = TTI->getCmpSelInstrCost(S.getOpcode(), VecTy, MaskTy, VL0); 3071 return ReuseShuffleCost + VecCost - ScalarCost; 3072 } 3073 case Instruction::FNeg: 3074 case Instruction::Add: 3075 case Instruction::FAdd: 3076 case Instruction::Sub: 3077 case Instruction::FSub: 3078 case Instruction::Mul: 3079 case Instruction::FMul: 3080 case Instruction::UDiv: 3081 case Instruction::SDiv: 3082 case Instruction::FDiv: 3083 case Instruction::URem: 3084 case Instruction::SRem: 3085 case Instruction::FRem: 3086 case Instruction::Shl: 3087 case Instruction::LShr: 3088 case Instruction::AShr: 3089 case Instruction::And: 3090 case Instruction::Or: 3091 case Instruction::Xor: { 3092 // Certain instructions can be cheaper to vectorize if they have a 3093 // constant second vector operand. 3094 TargetTransformInfo::OperandValueKind Op1VK = 3095 TargetTransformInfo::OK_AnyValue; 3096 TargetTransformInfo::OperandValueKind Op2VK = 3097 TargetTransformInfo::OK_UniformConstantValue; 3098 TargetTransformInfo::OperandValueProperties Op1VP = 3099 TargetTransformInfo::OP_None; 3100 TargetTransformInfo::OperandValueProperties Op2VP = 3101 TargetTransformInfo::OP_PowerOf2; 3102 3103 // If all operands are exactly the same ConstantInt then set the 3104 // operand kind to OK_UniformConstantValue. 3105 // If instead not all operands are constants, then set the operand kind 3106 // to OK_AnyValue. If all operands are constants but not the same, 3107 // then set the operand kind to OK_NonUniformConstantValue. 3108 ConstantInt *CInt0 = nullptr; 3109 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 3110 const Instruction *I = cast<Instruction>(VL[i]); 3111 unsigned OpIdx = isa<BinaryOperator>(I) ? 
1 : 0; 3112 ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx)); 3113 if (!CInt) { 3114 Op2VK = TargetTransformInfo::OK_AnyValue; 3115 Op2VP = TargetTransformInfo::OP_None; 3116 break; 3117 } 3118 if (Op2VP == TargetTransformInfo::OP_PowerOf2 && 3119 !CInt->getValue().isPowerOf2()) 3120 Op2VP = TargetTransformInfo::OP_None; 3121 if (i == 0) { 3122 CInt0 = CInt; 3123 continue; 3124 } 3125 if (CInt0 != CInt) 3126 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 3127 } 3128 3129 SmallVector<const Value *, 4> Operands(VL0->operand_values()); 3130 int ScalarEltCost = TTI->getArithmeticInstrCost( 3131 S.getOpcode(), ScalarTy, Op1VK, Op2VK, Op1VP, Op2VP, Operands); 3132 if (NeedToShuffleReuses) { 3133 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 3134 } 3135 int ScalarCost = VecTy->getNumElements() * ScalarEltCost; 3136 int VecCost = TTI->getArithmeticInstrCost(S.getOpcode(), VecTy, Op1VK, 3137 Op2VK, Op1VP, Op2VP, Operands); 3138 return ReuseShuffleCost + VecCost - ScalarCost; 3139 } 3140 case Instruction::GetElementPtr: { 3141 TargetTransformInfo::OperandValueKind Op1VK = 3142 TargetTransformInfo::OK_AnyValue; 3143 TargetTransformInfo::OperandValueKind Op2VK = 3144 TargetTransformInfo::OK_UniformConstantValue; 3145 3146 int ScalarEltCost = 3147 TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK); 3148 if (NeedToShuffleReuses) { 3149 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 3150 } 3151 int ScalarCost = VecTy->getNumElements() * ScalarEltCost; 3152 int VecCost = 3153 TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK); 3154 return ReuseShuffleCost + VecCost - ScalarCost; 3155 } 3156 case Instruction::Load: { 3157 // Cost of wide load - cost of scalar loads. 3158 unsigned alignment = cast<LoadInst>(VL0)->getAlignment(); 3159 int ScalarEltCost = 3160 TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0, VL0); 3161 if (NeedToShuffleReuses) { 3162 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 3163 } 3164 int ScalarLdCost = VecTy->getNumElements() * ScalarEltCost; 3165 int VecLdCost = 3166 TTI->getMemoryOpCost(Instruction::Load, VecTy, alignment, 0, VL0); 3167 if (!E->ReorderIndices.empty()) { 3168 // TODO: Merge this shuffle with the ReuseShuffleCost. 3169 VecLdCost += TTI->getShuffleCost( 3170 TargetTransformInfo::SK_PermuteSingleSrc, VecTy); 3171 } 3172 return ReuseShuffleCost + VecLdCost - ScalarLdCost; 3173 } 3174 case Instruction::Store: { 3175 // We know that we can merge the stores. Calculate the cost. 3176 unsigned alignment = cast<StoreInst>(VL0)->getAlignment(); 3177 int ScalarEltCost = 3178 TTI->getMemoryOpCost(Instruction::Store, ScalarTy, alignment, 0, VL0); 3179 if (NeedToShuffleReuses) { 3180 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 3181 } 3182 int ScalarStCost = VecTy->getNumElements() * ScalarEltCost; 3183 int VecStCost = 3184 TTI->getMemoryOpCost(Instruction::Store, VecTy, alignment, 0, VL0); 3185 return ReuseShuffleCost + VecStCost - ScalarStCost; 3186 } 3187 case Instruction::Call: { 3188 CallInst *CI = cast<CallInst>(VL0); 3189 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3190 3191 // Calculate the cost of the scalar and vector calls. 
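      // The bundle cost below is the difference between one vector intrinsic
      // of width VecTy->getNumElements() and that many scalar calls, adjusted
      // by the reuse-shuffle correction when the bundle reuses lanes.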
3192 SmallVector<Type *, 4> ScalarTys; 3193 for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op) 3194 ScalarTys.push_back(CI->getArgOperand(op)->getType()); 3195 3196 FastMathFlags FMF; 3197 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3198 FMF = FPMO->getFastMathFlags(); 3199 3200 int ScalarEltCost = 3201 TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF); 3202 if (NeedToShuffleReuses) { 3203 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 3204 } 3205 int ScalarCallCost = VecTy->getNumElements() * ScalarEltCost; 3206 3207 SmallVector<Value *, 4> Args(CI->arg_operands()); 3208 int VecCallCost = TTI->getIntrinsicInstrCost(ID, CI->getType(), Args, FMF, 3209 VecTy->getNumElements()); 3210 3211 LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost 3212 << " (" << VecCallCost << "-" << ScalarCallCost << ")" 3213 << " for " << *CI << "\n"); 3214 3215 return ReuseShuffleCost + VecCallCost - ScalarCallCost; 3216 } 3217 case Instruction::ShuffleVector: { 3218 assert(S.isAltShuffle() && 3219 ((Instruction::isBinaryOp(S.getOpcode()) && 3220 Instruction::isBinaryOp(S.getAltOpcode())) || 3221 (Instruction::isCast(S.getOpcode()) && 3222 Instruction::isCast(S.getAltOpcode()))) && 3223 "Invalid Shuffle Vector Operand"); 3224 int ScalarCost = 0; 3225 if (NeedToShuffleReuses) { 3226 for (unsigned Idx : E->ReuseShuffleIndices) { 3227 Instruction *I = cast<Instruction>(VL[Idx]); 3228 ReuseShuffleCost -= TTI->getInstructionCost( 3229 I, TargetTransformInfo::TCK_RecipThroughput); 3230 } 3231 for (Value *V : VL) { 3232 Instruction *I = cast<Instruction>(V); 3233 ReuseShuffleCost += TTI->getInstructionCost( 3234 I, TargetTransformInfo::TCK_RecipThroughput); 3235 } 3236 } 3237 for (Value *i : VL) { 3238 Instruction *I = cast<Instruction>(i); 3239 assert(S.isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 3240 ScalarCost += TTI->getInstructionCost( 3241 I, TargetTransformInfo::TCK_RecipThroughput); 3242 } 3243 // VecCost is equal to sum of the cost of creating 2 vectors 3244 // and the cost of creating shuffle. 3245 int VecCost = 0; 3246 if (Instruction::isBinaryOp(S.getOpcode())) { 3247 VecCost = TTI->getArithmeticInstrCost(S.getOpcode(), VecTy); 3248 VecCost += TTI->getArithmeticInstrCost(S.getAltOpcode(), VecTy); 3249 } else { 3250 Type *Src0SclTy = S.MainOp->getOperand(0)->getType(); 3251 Type *Src1SclTy = S.AltOp->getOperand(0)->getType(); 3252 VectorType *Src0Ty = VectorType::get(Src0SclTy, VL.size()); 3253 VectorType *Src1Ty = VectorType::get(Src1SclTy, VL.size()); 3254 VecCost = TTI->getCastInstrCost(S.getOpcode(), VecTy, Src0Ty); 3255 VecCost += TTI->getCastInstrCost(S.getAltOpcode(), VecTy, Src1Ty); 3256 } 3257 VecCost += TTI->getShuffleCost(TargetTransformInfo::SK_Select, VecTy, 0); 3258 return ReuseShuffleCost + VecCost - ScalarCost; 3259 } 3260 default: 3261 llvm_unreachable("Unknown instruction"); 3262 } 3263 } 3264 3265 bool BoUpSLP::isFullyVectorizableTinyTree() const { 3266 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height " 3267 << VectorizableTree.size() << " is fully vectorizable .\n"); 3268 3269 // We only handle trees of heights 1 and 2. 3270 if (VectorizableTree.size() == 1 && !VectorizableTree[0]->NeedToGather) 3271 return true; 3272 3273 if (VectorizableTree.size() != 2) 3274 return false; 3275 3276 // Handle splat and all-constants stores. 
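  // A typical profitable two-entry tree: entry 0 is a bundle of consecutive
  // stores and entry 1 is the stored-value bundle that is a splat or all
  // constants, e.g. (sketch)
  //   a[0] = x; a[1] = x; a[2] = x; a[3] = x;
  // which becomes one splat plus a single wide store.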
3277 if (!VectorizableTree[0]->NeedToGather && 3278 (allConstant(VectorizableTree[1]->Scalars) || 3279 isSplat(VectorizableTree[1]->Scalars))) 3280 return true; 3281 3282 // Gathering cost would be too much for tiny trees. 3283 if (VectorizableTree[0]->NeedToGather || VectorizableTree[1]->NeedToGather) 3284 return false; 3285 3286 return true; 3287 } 3288 3289 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() const { 3290 // We can vectorize the tree if its size is greater than or equal to the 3291 // minimum size specified by the MinTreeSize command line option. 3292 if (VectorizableTree.size() >= MinTreeSize) 3293 return false; 3294 3295 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we 3296 // can vectorize it if we can prove it fully vectorizable. 3297 if (isFullyVectorizableTinyTree()) 3298 return false; 3299 3300 assert(VectorizableTree.empty() 3301 ? ExternalUses.empty() 3302 : true && "We shouldn't have any external users"); 3303 3304 // Otherwise, we can't vectorize the tree. It is both tiny and not fully 3305 // vectorizable. 3306 return true; 3307 } 3308 3309 int BoUpSLP::getSpillCost() const { 3310 // Walk from the bottom of the tree to the top, tracking which values are 3311 // live. When we see a call instruction that is not part of our tree, 3312 // query TTI to see if there is a cost to keeping values live over it 3313 // (for example, if spills and fills are required). 3314 unsigned BundleWidth = VectorizableTree.front()->Scalars.size(); 3315 int Cost = 0; 3316 3317 SmallPtrSet<Instruction*, 4> LiveValues; 3318 Instruction *PrevInst = nullptr; 3319 3320 for (const auto &TEPtr : VectorizableTree) { 3321 Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]); 3322 if (!Inst) 3323 continue; 3324 3325 if (!PrevInst) { 3326 PrevInst = Inst; 3327 continue; 3328 } 3329 3330 // Update LiveValues. 3331 LiveValues.erase(PrevInst); 3332 for (auto &J : PrevInst->operands()) { 3333 if (isa<Instruction>(&*J) && getTreeEntry(&*J)) 3334 LiveValues.insert(cast<Instruction>(&*J)); 3335 } 3336 3337 LLVM_DEBUG({ 3338 dbgs() << "SLP: #LV: " << LiveValues.size(); 3339 for (auto *X : LiveValues) 3340 dbgs() << " " << X->getName(); 3341 dbgs() << ", Looking at "; 3342 Inst->dump(); 3343 }); 3344 3345 // Now find the sequence of instructions between PrevInst and Inst. 3346 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(), 3347 PrevInstIt = 3348 PrevInst->getIterator().getReverse(); 3349 while (InstIt != PrevInstIt) { 3350 if (PrevInstIt == PrevInst->getParent()->rend()) { 3351 PrevInstIt = Inst->getParent()->rbegin(); 3352 continue; 3353 } 3354 3355 // Debug informations don't impact spill cost. 3356 if ((isa<CallInst>(&*PrevInstIt) && 3357 !isa<DbgInfoIntrinsic>(&*PrevInstIt)) && 3358 &*PrevInstIt != PrevInst) { 3359 SmallVector<Type*, 4> V; 3360 for (auto *II : LiveValues) 3361 V.push_back(VectorType::get(II->getType(), BundleWidth)); 3362 Cost += TTI->getCostOfKeepingLiveOverCall(V); 3363 } 3364 3365 ++PrevInstIt; 3366 } 3367 3368 PrevInst = Inst; 3369 } 3370 3371 return Cost; 3372 } 3373 3374 int BoUpSLP::getTreeCost() { 3375 int Cost = 0; 3376 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size " 3377 << VectorizableTree.size() << ".\n"); 3378 3379 unsigned BundleWidth = VectorizableTree[0]->Scalars.size(); 3380 3381 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) { 3382 TreeEntry &TE = *VectorizableTree[I].get(); 3383 3384 // We create duplicate tree entries for gather sequences that have multiple 3385 // uses. 
However, we should not compute the cost of duplicate sequences. 3386 // For example, if we have a build vector (i.e., insertelement sequence) 3387 // that is used by more than one vector instruction, we only need to 3388 // compute the cost of the insertelement instructions once. The redundant 3389 // instructions will be eliminated by CSE. 3390 // 3391 // We should consider not creating duplicate tree entries for gather 3392 // sequences, and instead add additional edges to the tree representing 3393 // their uses. Since such an approach results in fewer total entries, 3394 // existing heuristics based on tree size may yield different results. 3395 // 3396 if (TE.NeedToGather && 3397 std::any_of( 3398 std::next(VectorizableTree.begin(), I + 1), VectorizableTree.end(), 3399 [TE](const std::unique_ptr<TreeEntry> &EntryPtr) { 3400 return EntryPtr->NeedToGather && EntryPtr->isSame(TE.Scalars); 3401 })) 3402 continue; 3403 3404 int C = getEntryCost(&TE); 3405 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 3406 << " for bundle that starts with " << *TE.Scalars[0] 3407 << ".\n"); 3408 Cost += C; 3409 } 3410 3411 SmallPtrSet<Value *, 16> ExtractCostCalculated; 3412 int ExtractCost = 0; 3413 for (ExternalUser &EU : ExternalUses) { 3414 // We only add extract cost once for the same scalar. 3415 if (!ExtractCostCalculated.insert(EU.Scalar).second) 3416 continue; 3417 3418 // Uses by ephemeral values are free (because the ephemeral value will be 3419 // removed prior to code generation, and so the extraction will be 3420 // removed as well). 3421 if (EphValues.count(EU.User)) 3422 continue; 3423 3424 // If we plan to rewrite the tree in a smaller type, we will need to sign 3425 // extend the extracted value back to the original type. Here, we account 3426 // for the extract and the added cost of the sign extend if needed. 3427 auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth); 3428 auto *ScalarRoot = VectorizableTree[0]->Scalars[0]; 3429 if (MinBWs.count(ScalarRoot)) { 3430 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 3431 auto Extend = 3432 MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt; 3433 VecTy = VectorType::get(MinTy, BundleWidth); 3434 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), 3435 VecTy, EU.Lane); 3436 } else { 3437 ExtractCost += 3438 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane); 3439 } 3440 } 3441 3442 int SpillCost = getSpillCost(); 3443 Cost += SpillCost + ExtractCost; 3444 3445 std::string Str; 3446 { 3447 raw_string_ostream OS(Str); 3448 OS << "SLP: Spill Cost = " << SpillCost << ".\n" 3449 << "SLP: Extract Cost = " << ExtractCost << ".\n" 3450 << "SLP: Total Cost = " << Cost << ".\n"; 3451 } 3452 LLVM_DEBUG(dbgs() << Str); 3453 3454 if (ViewSLPTree) 3455 ViewGraph(this, "SLP" + F->getName(), false, Str); 3456 3457 return Cost; 3458 } 3459 3460 int BoUpSLP::getGatherCost(Type *Ty, 3461 const DenseSet<unsigned> &ShuffledIndices) const { 3462 int Cost = 0; 3463 for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i) 3464 if (!ShuffledIndices.count(i)) 3465 Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i); 3466 if (!ShuffledIndices.empty()) 3467 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty); 3468 return Cost; 3469 } 3470 3471 int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const { 3472 // Find the type of the operands in VL. 
3473 Type *ScalarTy = VL[0]->getType(); 3474 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 3475 ScalarTy = SI->getValueOperand()->getType(); 3476 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 3477 // Find the cost of inserting/extracting values from the vector. 3478 // Check if the same elements are inserted several times and count them as 3479 // shuffle candidates. 3480 DenseSet<unsigned> ShuffledElements; 3481 DenseSet<Value *> UniqueElements; 3482 // Iterate in reverse order to consider insert elements with the high cost. 3483 for (unsigned I = VL.size(); I > 0; --I) { 3484 unsigned Idx = I - 1; 3485 if (!UniqueElements.insert(VL[Idx]).second) 3486 ShuffledElements.insert(Idx); 3487 } 3488 return getGatherCost(VecTy, ShuffledElements); 3489 } 3490 3491 // Perform operand reordering on the instructions in VL and return the reordered 3492 // operands in Left and Right. 3493 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, 3494 SmallVectorImpl<Value *> &Left, 3495 SmallVectorImpl<Value *> &Right, 3496 const DataLayout &DL, 3497 ScalarEvolution &SE, 3498 const BoUpSLP &R) { 3499 if (VL.empty()) 3500 return; 3501 VLOperands Ops(VL, DL, SE, R); 3502 // Reorder the operands in place. 3503 Ops.reorder(); 3504 Left = Ops.getVL(0); 3505 Right = Ops.getVL(1); 3506 } 3507 3508 void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL, 3509 const InstructionsState &S) { 3510 // Get the basic block this bundle is in. All instructions in the bundle 3511 // should be in this block. 3512 auto *Front = cast<Instruction>(S.OpValue); 3513 auto *BB = Front->getParent(); 3514 assert(llvm::all_of(make_range(VL.begin(), VL.end()), [=](Value *V) -> bool { 3515 auto *I = cast<Instruction>(V); 3516 return !S.isOpcodeOrAlt(I) || I->getParent() == BB; 3517 })); 3518 3519 // The last instruction in the bundle in program order. 3520 Instruction *LastInst = nullptr; 3521 3522 // Find the last instruction. The common case should be that BB has been 3523 // scheduled, and the last instruction is VL.back(). So we start with 3524 // VL.back() and iterate over schedule data until we reach the end of the 3525 // bundle. The end of the bundle is marked by null ScheduleData. 3526 if (BlocksSchedules.count(BB)) { 3527 auto *Bundle = 3528 BlocksSchedules[BB]->getScheduleData(isOneOf(S, VL.back())); 3529 if (Bundle && Bundle->isPartOfBundle()) 3530 for (; Bundle; Bundle = Bundle->NextInBundle) 3531 if (Bundle->OpValue == Bundle->Inst) 3532 LastInst = Bundle->Inst; 3533 } 3534 3535 // LastInst can still be null at this point if there's either not an entry 3536 // for BB in BlocksSchedules or there's no ScheduleData available for 3537 // VL.back(). This can be the case if buildTree_rec aborts for various 3538 // reasons (e.g., the maximum recursion depth is reached, the maximum region 3539 // size is reached, etc.). ScheduleData is initialized in the scheduling 3540 // "dry-run". 3541 // 3542 // If this happens, we can still find the last instruction by brute force. We 3543 // iterate forwards from Front (inclusive) until we either see all 3544 // instructions in the bundle or reach the end of the block. If Front is the 3545 // last instruction in program order, LastInst will be set to Front, and we 3546 // will visit all the remaining instructions in the block. 3547 // 3548 // One of the reasons we exit early from buildTree_rec is to place an upper 3549 // bound on compile-time. Thus, taking an additional compile-time hit here is 3550 // not ideal. 
However, this should be exceedingly rare since it requires that 3551 // we both exit early from buildTree_rec and that the bundle be out-of-order 3552 // (causing us to iterate all the way to the end of the block). 3553 if (!LastInst) { 3554 SmallPtrSet<Value *, 16> Bundle(VL.begin(), VL.end()); 3555 for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) { 3556 if (Bundle.erase(&I) && S.isOpcodeOrAlt(&I)) 3557 LastInst = &I; 3558 if (Bundle.empty()) 3559 break; 3560 } 3561 } 3562 3563 // Set the insertion point after the last instruction in the bundle. Set the 3564 // debug location to Front. 3565 Builder.SetInsertPoint(BB, ++LastInst->getIterator()); 3566 Builder.SetCurrentDebugLocation(Front->getDebugLoc()); 3567 } 3568 3569 Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) { 3570 Value *Vec = UndefValue::get(Ty); 3571 // Generate the 'InsertElement' instruction. 3572 for (unsigned i = 0; i < Ty->getNumElements(); ++i) { 3573 Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i)); 3574 if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) { 3575 GatherSeq.insert(Insrt); 3576 CSEBlocks.insert(Insrt->getParent()); 3577 3578 // Add to our 'need-to-extract' list. 3579 if (TreeEntry *E = getTreeEntry(VL[i])) { 3580 // Find which lane we need to extract. 3581 int FoundLane = -1; 3582 for (unsigned Lane = 0, LE = E->Scalars.size(); Lane != LE; ++Lane) { 3583 // Is this the lane of the scalar that we are looking for ? 3584 if (E->Scalars[Lane] == VL[i]) { 3585 FoundLane = Lane; 3586 break; 3587 } 3588 } 3589 assert(FoundLane >= 0 && "Could not find the correct lane"); 3590 if (!E->ReuseShuffleIndices.empty()) { 3591 FoundLane = 3592 std::distance(E->ReuseShuffleIndices.begin(), 3593 llvm::find(E->ReuseShuffleIndices, FoundLane)); 3594 } 3595 ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane)); 3596 } 3597 } 3598 } 3599 3600 return Vec; 3601 } 3602 3603 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) { 3604 InstructionsState S = getSameOpcode(VL); 3605 if (S.getOpcode()) { 3606 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 3607 if (E->isSame(VL)) { 3608 Value *V = vectorizeTree(E); 3609 if (VL.size() == E->Scalars.size() && !E->ReuseShuffleIndices.empty()) { 3610 // We need to get the vectorized value but without shuffle. 3611 if (auto *SV = dyn_cast<ShuffleVectorInst>(V)) { 3612 V = SV->getOperand(0); 3613 } else { 3614 // Reshuffle to get only unique values. 3615 SmallVector<unsigned, 4> UniqueIdxs; 3616 SmallSet<unsigned, 4> UsedIdxs; 3617 for(unsigned Idx : E->ReuseShuffleIndices) 3618 if (UsedIdxs.insert(Idx).second) 3619 UniqueIdxs.emplace_back(Idx); 3620 V = Builder.CreateShuffleVector(V, UndefValue::get(V->getType()), 3621 UniqueIdxs); 3622 } 3623 } 3624 return V; 3625 } 3626 } 3627 } 3628 3629 Type *ScalarTy = S.OpValue->getType(); 3630 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 3631 ScalarTy = SI->getValueOperand()->getType(); 3632 3633 // Check that every instruction appears once in this bundle. 3634 SmallVector<unsigned, 4> ReuseShuffleIndicies; 3635 SmallVector<Value *, 4> UniqueValues; 3636 if (VL.size() > 2) { 3637 DenseMap<Value *, unsigned> UniquePositions; 3638 for (Value *V : VL) { 3639 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 3640 ReuseShuffleIndicies.emplace_back(Res.first->second); 3641 if (Res.second || isa<Constant>(V)) 3642 UniqueValues.emplace_back(V); 3643 } 3644 // Do not shuffle single element or if number of unique values is not power 3645 // of 2. 
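    // E.g. VL = {a, b, a, b} gathers only {a, b} and re-expands it with the
    // reuse mask <0, 1, 0, 1>, whereas VL = {a, b, c, a} has three unique
    // values (not a power of 2) and is gathered as-is.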
3646 if (UniqueValues.size() == VL.size() || UniqueValues.size() <= 1 || 3647 !llvm::isPowerOf2_32(UniqueValues.size())) 3648 ReuseShuffleIndicies.clear(); 3649 else 3650 VL = UniqueValues; 3651 } 3652 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 3653 3654 Value *V = Gather(VL, VecTy); 3655 if (!ReuseShuffleIndicies.empty()) { 3656 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3657 ReuseShuffleIndicies, "shuffle"); 3658 if (auto *I = dyn_cast<Instruction>(V)) { 3659 GatherSeq.insert(I); 3660 CSEBlocks.insert(I->getParent()); 3661 } 3662 } 3663 return V; 3664 } 3665 3666 static void inversePermutation(ArrayRef<unsigned> Indices, 3667 SmallVectorImpl<unsigned> &Mask) { 3668 Mask.clear(); 3669 const unsigned E = Indices.size(); 3670 Mask.resize(E); 3671 for (unsigned I = 0; I < E; ++I) 3672 Mask[Indices[I]] = I; 3673 } 3674 3675 Value *BoUpSLP::vectorizeTree(TreeEntry *E) { 3676 IRBuilder<>::InsertPointGuard Guard(Builder); 3677 3678 if (E->VectorizedValue) { 3679 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); 3680 return E->VectorizedValue; 3681 } 3682 3683 InstructionsState S = getSameOpcode(E->Scalars); 3684 Instruction *VL0 = cast<Instruction>(S.OpValue); 3685 Type *ScalarTy = VL0->getType(); 3686 if (StoreInst *SI = dyn_cast<StoreInst>(VL0)) 3687 ScalarTy = SI->getValueOperand()->getType(); 3688 VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size()); 3689 3690 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 3691 3692 if (E->NeedToGather) { 3693 setInsertPointAfterBundle(E->Scalars, S); 3694 auto *V = Gather(E->Scalars, VecTy); 3695 if (NeedToShuffleReuses) { 3696 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3697 E->ReuseShuffleIndices, "shuffle"); 3698 if (auto *I = dyn_cast<Instruction>(V)) { 3699 GatherSeq.insert(I); 3700 CSEBlocks.insert(I->getParent()); 3701 } 3702 } 3703 E->VectorizedValue = V; 3704 return V; 3705 } 3706 3707 unsigned ShuffleOrOp = S.isAltShuffle() ? 3708 (unsigned) Instruction::ShuffleVector : S.getOpcode(); 3709 switch (ShuffleOrOp) { 3710 case Instruction::PHI: { 3711 PHINode *PH = dyn_cast<PHINode>(VL0); 3712 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI()); 3713 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 3714 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 3715 Value *V = NewPhi; 3716 if (NeedToShuffleReuses) { 3717 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3718 E->ReuseShuffleIndices, "shuffle"); 3719 } 3720 E->VectorizedValue = V; 3721 3722 // PHINodes may have multiple entries from the same block. We want to 3723 // visit every block once. 
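      // This happens e.g. when a switch branches to the same successor for
      // several case values: the PHI lists that block once per edge, and each
      // repeated entry must receive the same vectorized incoming value.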
3724 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 3725 3726 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 3727 ValueList Operands; 3728 BasicBlock *IBB = PH->getIncomingBlock(i); 3729 3730 if (!VisitedBBs.insert(IBB).second) { 3731 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 3732 continue; 3733 } 3734 3735 Builder.SetInsertPoint(IBB->getTerminator()); 3736 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 3737 Value *Vec = vectorizeTree(E->getOperand(i)); 3738 NewPhi->addIncoming(Vec, IBB); 3739 } 3740 3741 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 3742 "Invalid number of incoming values"); 3743 return V; 3744 } 3745 3746 case Instruction::ExtractElement: { 3747 if (!E->NeedToGather) { 3748 Value *V = E->getSingleOperand(0); 3749 if (!E->ReorderIndices.empty()) { 3750 OrdersType Mask; 3751 inversePermutation(E->ReorderIndices, Mask); 3752 Builder.SetInsertPoint(VL0); 3753 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), Mask, 3754 "reorder_shuffle"); 3755 } 3756 if (NeedToShuffleReuses) { 3757 // TODO: Merge this shuffle with the ReorderShuffleMask. 3758 if (E->ReorderIndices.empty()) 3759 Builder.SetInsertPoint(VL0); 3760 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3761 E->ReuseShuffleIndices, "shuffle"); 3762 } 3763 E->VectorizedValue = V; 3764 return V; 3765 } 3766 setInsertPointAfterBundle(E->Scalars, S); 3767 auto *V = Gather(E->Scalars, VecTy); 3768 if (NeedToShuffleReuses) { 3769 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3770 E->ReuseShuffleIndices, "shuffle"); 3771 if (auto *I = dyn_cast<Instruction>(V)) { 3772 GatherSeq.insert(I); 3773 CSEBlocks.insert(I->getParent()); 3774 } 3775 } 3776 E->VectorizedValue = V; 3777 return V; 3778 } 3779 case Instruction::ExtractValue: { 3780 if (!E->NeedToGather) { 3781 LoadInst *LI = cast<LoadInst>(E->getSingleOperand(0)); 3782 Builder.SetInsertPoint(LI); 3783 PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace()); 3784 Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy); 3785 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlignment()); 3786 Value *NewV = propagateMetadata(V, E->Scalars); 3787 if (!E->ReorderIndices.empty()) { 3788 OrdersType Mask; 3789 inversePermutation(E->ReorderIndices, Mask); 3790 NewV = Builder.CreateShuffleVector(NewV, UndefValue::get(VecTy), Mask, 3791 "reorder_shuffle"); 3792 } 3793 if (NeedToShuffleReuses) { 3794 // TODO: Merge this shuffle with the ReorderShuffleMask. 
3795 NewV = Builder.CreateShuffleVector( 3796 NewV, UndefValue::get(VecTy), E->ReuseShuffleIndices, "shuffle"); 3797 } 3798 E->VectorizedValue = NewV; 3799 return NewV; 3800 } 3801 setInsertPointAfterBundle(E->Scalars, S); 3802 auto *V = Gather(E->Scalars, VecTy); 3803 if (NeedToShuffleReuses) { 3804 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3805 E->ReuseShuffleIndices, "shuffle"); 3806 if (auto *I = dyn_cast<Instruction>(V)) { 3807 GatherSeq.insert(I); 3808 CSEBlocks.insert(I->getParent()); 3809 } 3810 } 3811 E->VectorizedValue = V; 3812 return V; 3813 } 3814 case Instruction::ZExt: 3815 case Instruction::SExt: 3816 case Instruction::FPToUI: 3817 case Instruction::FPToSI: 3818 case Instruction::FPExt: 3819 case Instruction::PtrToInt: 3820 case Instruction::IntToPtr: 3821 case Instruction::SIToFP: 3822 case Instruction::UIToFP: 3823 case Instruction::Trunc: 3824 case Instruction::FPTrunc: 3825 case Instruction::BitCast: { 3826 setInsertPointAfterBundle(E->Scalars, S); 3827 3828 Value *InVec = vectorizeTree(E->getOperand(0)); 3829 3830 if (E->VectorizedValue) { 3831 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 3832 return E->VectorizedValue; 3833 } 3834 3835 CastInst *CI = dyn_cast<CastInst>(VL0); 3836 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy); 3837 if (NeedToShuffleReuses) { 3838 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3839 E->ReuseShuffleIndices, "shuffle"); 3840 } 3841 E->VectorizedValue = V; 3842 ++NumVectorInstructions; 3843 return V; 3844 } 3845 case Instruction::FCmp: 3846 case Instruction::ICmp: { 3847 setInsertPointAfterBundle(E->Scalars, S); 3848 3849 Value *L = vectorizeTree(E->getOperand(0)); 3850 Value *R = vectorizeTree(E->getOperand(1)); 3851 3852 if (E->VectorizedValue) { 3853 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 3854 return E->VectorizedValue; 3855 } 3856 3857 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 3858 Value *V; 3859 if (S.getOpcode() == Instruction::FCmp) 3860 V = Builder.CreateFCmp(P0, L, R); 3861 else 3862 V = Builder.CreateICmp(P0, L, R); 3863 3864 propagateIRFlags(V, E->Scalars, VL0); 3865 if (NeedToShuffleReuses) { 3866 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3867 E->ReuseShuffleIndices, "shuffle"); 3868 } 3869 E->VectorizedValue = V; 3870 ++NumVectorInstructions; 3871 return V; 3872 } 3873 case Instruction::Select: { 3874 setInsertPointAfterBundle(E->Scalars, S); 3875 3876 Value *Cond = vectorizeTree(E->getOperand(0)); 3877 Value *True = vectorizeTree(E->getOperand(1)); 3878 Value *False = vectorizeTree(E->getOperand(2)); 3879 3880 if (E->VectorizedValue) { 3881 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 3882 return E->VectorizedValue; 3883 } 3884 3885 Value *V = Builder.CreateSelect(Cond, True, False); 3886 if (NeedToShuffleReuses) { 3887 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3888 E->ReuseShuffleIndices, "shuffle"); 3889 } 3890 E->VectorizedValue = V; 3891 ++NumVectorInstructions; 3892 return V; 3893 } 3894 case Instruction::FNeg: { 3895 setInsertPointAfterBundle(E->Scalars, S); 3896 3897 Value *Op = vectorizeTree(E->getOperand(0)); 3898 3899 if (E->VectorizedValue) { 3900 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 3901 return E->VectorizedValue; 3902 } 3903 3904 Value *V = Builder.CreateUnOp( 3905 static_cast<Instruction::UnaryOps>(S.getOpcode()), Op); 3906 propagateIRFlags(V, E->Scalars, VL0); 3907 if (auto *I = dyn_cast<Instruction>(V)) 3908 V = 
propagateMetadata(I, E->Scalars); 3909 3910 if (NeedToShuffleReuses) { 3911 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3912 E->ReuseShuffleIndices, "shuffle"); 3913 } 3914 E->VectorizedValue = V; 3915 ++NumVectorInstructions; 3916 3917 return V; 3918 } 3919 case Instruction::Add: 3920 case Instruction::FAdd: 3921 case Instruction::Sub: 3922 case Instruction::FSub: 3923 case Instruction::Mul: 3924 case Instruction::FMul: 3925 case Instruction::UDiv: 3926 case Instruction::SDiv: 3927 case Instruction::FDiv: 3928 case Instruction::URem: 3929 case Instruction::SRem: 3930 case Instruction::FRem: 3931 case Instruction::Shl: 3932 case Instruction::LShr: 3933 case Instruction::AShr: 3934 case Instruction::And: 3935 case Instruction::Or: 3936 case Instruction::Xor: { 3937 setInsertPointAfterBundle(E->Scalars, S); 3938 3939 Value *LHS = vectorizeTree(E->getOperand(0)); 3940 Value *RHS = vectorizeTree(E->getOperand(1)); 3941 3942 if (E->VectorizedValue) { 3943 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 3944 return E->VectorizedValue; 3945 } 3946 3947 Value *V = Builder.CreateBinOp( 3948 static_cast<Instruction::BinaryOps>(S.getOpcode()), LHS, RHS); 3949 propagateIRFlags(V, E->Scalars, VL0); 3950 if (auto *I = dyn_cast<Instruction>(V)) 3951 V = propagateMetadata(I, E->Scalars); 3952 3953 if (NeedToShuffleReuses) { 3954 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3955 E->ReuseShuffleIndices, "shuffle"); 3956 } 3957 E->VectorizedValue = V; 3958 ++NumVectorInstructions; 3959 3960 return V; 3961 } 3962 case Instruction::Load: { 3963 // Loads are inserted at the head of the tree because we don't want to 3964 // sink them all the way down past store instructions. 3965 bool IsReorder = !E->ReorderIndices.empty(); 3966 if (IsReorder) { 3967 S = getSameOpcode(E->Scalars, E->ReorderIndices.front()); 3968 VL0 = cast<Instruction>(S.OpValue); 3969 } 3970 setInsertPointAfterBundle(E->Scalars, S); 3971 3972 LoadInst *LI = cast<LoadInst>(VL0); 3973 Type *ScalarLoadTy = LI->getType(); 3974 unsigned AS = LI->getPointerAddressSpace(); 3975 3976 Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(), 3977 VecTy->getPointerTo(AS)); 3978 3979 // The pointer operand uses an in-tree scalar so we add the new BitCast to 3980 // ExternalUses list to make sure that an extract will be generated in the 3981 // future. 3982 Value *PO = LI->getPointerOperand(); 3983 if (getTreeEntry(PO)) 3984 ExternalUses.push_back(ExternalUser(PO, cast<User>(VecPtr), 0)); 3985 3986 unsigned Alignment = LI->getAlignment(); 3987 LI = Builder.CreateLoad(VecTy, VecPtr); 3988 if (!Alignment) { 3989 Alignment = DL->getABITypeAlignment(ScalarLoadTy); 3990 } 3991 LI->setAlignment(Alignment); 3992 Value *V = propagateMetadata(LI, E->Scalars); 3993 if (IsReorder) { 3994 OrdersType Mask; 3995 inversePermutation(E->ReorderIndices, Mask); 3996 V = Builder.CreateShuffleVector(V, UndefValue::get(V->getType()), 3997 Mask, "reorder_shuffle"); 3998 } 3999 if (NeedToShuffleReuses) { 4000 // TODO: Merge this shuffle with the ReorderShuffleMask. 
4001 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4002 E->ReuseShuffleIndices, "shuffle"); 4003 } 4004 E->VectorizedValue = V; 4005 ++NumVectorInstructions; 4006 return V; 4007 } 4008 case Instruction::Store: { 4009 StoreInst *SI = cast<StoreInst>(VL0); 4010 unsigned Alignment = SI->getAlignment(); 4011 unsigned AS = SI->getPointerAddressSpace(); 4012 4013 setInsertPointAfterBundle(E->Scalars, S); 4014 4015 Value *VecValue = vectorizeTree(E->getOperand(0)); 4016 Value *ScalarPtr = SI->getPointerOperand(); 4017 Value *VecPtr = Builder.CreateBitCast(ScalarPtr, VecTy->getPointerTo(AS)); 4018 StoreInst *ST = Builder.CreateStore(VecValue, VecPtr); 4019 4020 // The pointer operand uses an in-tree scalar, so add the new BitCast to 4021 // ExternalUses to make sure that an extract will be generated in the 4022 // future. 4023 if (getTreeEntry(ScalarPtr)) 4024 ExternalUses.push_back(ExternalUser(ScalarPtr, cast<User>(VecPtr), 0)); 4025 4026 if (!Alignment) 4027 Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType()); 4028 4029 ST->setAlignment(Alignment); 4030 Value *V = propagateMetadata(ST, E->Scalars); 4031 if (NeedToShuffleReuses) { 4032 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4033 E->ReuseShuffleIndices, "shuffle"); 4034 } 4035 E->VectorizedValue = V; 4036 ++NumVectorInstructions; 4037 return V; 4038 } 4039 case Instruction::GetElementPtr: { 4040 setInsertPointAfterBundle(E->Scalars, S); 4041 4042 Value *Op0 = vectorizeTree(E->getOperand(0)); 4043 4044 std::vector<Value *> OpVecs; 4045 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e; 4046 ++j) { 4047 Value *OpVec = vectorizeTree(E->getOperand(j)); 4048 OpVecs.push_back(OpVec); 4049 } 4050 4051 Value *V = Builder.CreateGEP( 4052 cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs); 4053 if (Instruction *I = dyn_cast<Instruction>(V)) 4054 V = propagateMetadata(I, E->Scalars); 4055 4056 if (NeedToShuffleReuses) { 4057 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4058 E->ReuseShuffleIndices, "shuffle"); 4059 } 4060 E->VectorizedValue = V; 4061 ++NumVectorInstructions; 4062 4063 return V; 4064 } 4065 case Instruction::Call: { 4066 CallInst *CI = cast<CallInst>(VL0); 4067 setInsertPointAfterBundle(E->Scalars, S); 4068 Function *FI; 4069 Intrinsic::ID IID = Intrinsic::not_intrinsic; 4070 Value *ScalarArg = nullptr; 4071 if (CI && (FI = CI->getCalledFunction())) { 4072 IID = FI->getIntrinsicID(); 4073 } 4074 std::vector<Value *> OpVecs; 4075 for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) { 4076 ValueList OpVL; 4077 // Some intrinsics have scalar arguments. This argument should not be 4078 // vectorized. 
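        // For example, the exponent operand of llvm.powi.* is a scalar i32
        // and must be passed through unchanged rather than widened.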
4079 if (hasVectorInstrinsicScalarOpd(IID, j)) { 4080 CallInst *CEI = cast<CallInst>(VL0); 4081 ScalarArg = CEI->getArgOperand(j); 4082 OpVecs.push_back(CEI->getArgOperand(j)); 4083 continue; 4084 } 4085 4086 Value *OpVec = vectorizeTree(E->getOperand(j)); 4087 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); 4088 OpVecs.push_back(OpVec); 4089 } 4090 4091 Module *M = F->getParent(); 4092 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4093 Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) }; 4094 Function *CF = Intrinsic::getDeclaration(M, ID, Tys); 4095 SmallVector<OperandBundleDef, 1> OpBundles; 4096 CI->getOperandBundlesAsDefs(OpBundles); 4097 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); 4098 4099 // The scalar argument uses an in-tree scalar so we add the new vectorized 4100 // call to ExternalUses list to make sure that an extract will be 4101 // generated in the future. 4102 if (ScalarArg && getTreeEntry(ScalarArg)) 4103 ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0)); 4104 4105 propagateIRFlags(V, E->Scalars, VL0); 4106 if (NeedToShuffleReuses) { 4107 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4108 E->ReuseShuffleIndices, "shuffle"); 4109 } 4110 E->VectorizedValue = V; 4111 ++NumVectorInstructions; 4112 return V; 4113 } 4114 case Instruction::ShuffleVector: { 4115 assert(S.isAltShuffle() && 4116 ((Instruction::isBinaryOp(S.getOpcode()) && 4117 Instruction::isBinaryOp(S.getAltOpcode())) || 4118 (Instruction::isCast(S.getOpcode()) && 4119 Instruction::isCast(S.getAltOpcode()))) && 4120 "Invalid Shuffle Vector Operand"); 4121 4122 Value *LHS, *RHS; 4123 if (Instruction::isBinaryOp(S.getOpcode())) { 4124 setInsertPointAfterBundle(E->Scalars, S); 4125 LHS = vectorizeTree(E->getOperand(0)); 4126 RHS = vectorizeTree(E->getOperand(1)); 4127 } else { 4128 setInsertPointAfterBundle(E->Scalars, S); 4129 LHS = vectorizeTree(E->getOperand(0)); 4130 } 4131 4132 if (E->VectorizedValue) { 4133 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 4134 return E->VectorizedValue; 4135 } 4136 4137 Value *V0, *V1; 4138 if (Instruction::isBinaryOp(S.getOpcode())) { 4139 V0 = Builder.CreateBinOp( 4140 static_cast<Instruction::BinaryOps>(S.getOpcode()), LHS, RHS); 4141 V1 = Builder.CreateBinOp( 4142 static_cast<Instruction::BinaryOps>(S.getAltOpcode()), LHS, RHS); 4143 } else { 4144 V0 = Builder.CreateCast( 4145 static_cast<Instruction::CastOps>(S.getOpcode()), LHS, VecTy); 4146 V1 = Builder.CreateCast( 4147 static_cast<Instruction::CastOps>(S.getAltOpcode()), LHS, VecTy); 4148 } 4149 4150 // Create shuffle to take alternate operations from the vector. 4151 // Also, gather up main and alt scalar ops to propagate IR flags to 4152 // each vector operation. 
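      // E.g. for a 4-wide add/sub alternation {a0+b0, a1-b1, a2+b2, a3-b3},
      // V0 is the vector add, V1 the vector sub, and the mask built below is
      // <0, 5, 2, 7>: main-opcode lanes come from V0, alternate lanes from V1.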
4153 ValueList OpScalars, AltScalars; 4154 unsigned e = E->Scalars.size(); 4155 SmallVector<Constant *, 8> Mask(e); 4156 for (unsigned i = 0; i < e; ++i) { 4157 auto *OpInst = cast<Instruction>(E->Scalars[i]); 4158 assert(S.isOpcodeOrAlt(OpInst) && "Unexpected main/alternate opcode"); 4159 if (OpInst->getOpcode() == S.getAltOpcode()) { 4160 Mask[i] = Builder.getInt32(e + i); 4161 AltScalars.push_back(E->Scalars[i]); 4162 } else { 4163 Mask[i] = Builder.getInt32(i); 4164 OpScalars.push_back(E->Scalars[i]); 4165 } 4166 } 4167 4168 Value *ShuffleMask = ConstantVector::get(Mask); 4169 propagateIRFlags(V0, OpScalars); 4170 propagateIRFlags(V1, AltScalars); 4171 4172 Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask); 4173 if (Instruction *I = dyn_cast<Instruction>(V)) 4174 V = propagateMetadata(I, E->Scalars); 4175 if (NeedToShuffleReuses) { 4176 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4177 E->ReuseShuffleIndices, "shuffle"); 4178 } 4179 E->VectorizedValue = V; 4180 ++NumVectorInstructions; 4181 4182 return V; 4183 } 4184 default: 4185 llvm_unreachable("unknown inst"); 4186 } 4187 return nullptr; 4188 } 4189 4190 Value *BoUpSLP::vectorizeTree() { 4191 ExtraValueToDebugLocsMap ExternallyUsedValues; 4192 return vectorizeTree(ExternallyUsedValues); 4193 } 4194 4195 Value * 4196 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { 4197 // All blocks must be scheduled before any instructions are inserted. 4198 for (auto &BSIter : BlocksSchedules) { 4199 scheduleBlock(BSIter.second.get()); 4200 } 4201 4202 Builder.SetInsertPoint(&F->getEntryBlock().front()); 4203 auto *VectorRoot = vectorizeTree(VectorizableTree[0].get()); 4204 4205 // If the vectorized tree can be rewritten in a smaller type, we truncate the 4206 // vectorized root. InstCombine will then rewrite the entire expression. We 4207 // sign extend the extracted values below. 4208 auto *ScalarRoot = VectorizableTree[0]->Scalars[0]; 4209 if (MinBWs.count(ScalarRoot)) { 4210 if (auto *I = dyn_cast<Instruction>(VectorRoot)) 4211 Builder.SetInsertPoint(&*++BasicBlock::iterator(I)); 4212 auto BundleWidth = VectorizableTree[0]->Scalars.size(); 4213 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 4214 auto *VecTy = VectorType::get(MinTy, BundleWidth); 4215 auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy); 4216 VectorizableTree[0]->VectorizedValue = Trunc; 4217 } 4218 4219 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() 4220 << " values .\n"); 4221 4222 // If necessary, sign-extend or zero-extend ScalarRoot to the larger type 4223 // specified by ScalarType. 4224 auto extend = [&](Value *ScalarRoot, Value *Ex, Type *ScalarType) { 4225 if (!MinBWs.count(ScalarRoot)) 4226 return Ex; 4227 if (MinBWs[ScalarRoot].second) 4228 return Builder.CreateSExt(Ex, ScalarType); 4229 return Builder.CreateZExt(Ex, ScalarType); 4230 }; 4231 4232 // Extract all of the elements with the external uses. 4233 for (const auto &ExternalUse : ExternalUses) { 4234 Value *Scalar = ExternalUse.Scalar; 4235 llvm::User *User = ExternalUse.User; 4236 4237 // Skip users that we already RAUW. This happens when one instruction 4238 // has multiple uses of the same value. 
4239 if (User && !is_contained(Scalar->users(), User)) 4240 continue; 4241 TreeEntry *E = getTreeEntry(Scalar); 4242 assert(E && "Invalid scalar"); 4243 assert(!E->NeedToGather && "Extracting from a gather list"); 4244 4245 Value *Vec = E->VectorizedValue; 4246 assert(Vec && "Can't find vectorizable value"); 4247 4248 Value *Lane = Builder.getInt32(ExternalUse.Lane); 4249 // If User == nullptr, the Scalar is used as extra arg. Generate 4250 // ExtractElement instruction and update the record for this scalar in 4251 // ExternallyUsedValues. 4252 if (!User) { 4253 assert(ExternallyUsedValues.count(Scalar) && 4254 "Scalar with nullptr as an external user must be registered in " 4255 "ExternallyUsedValues map"); 4256 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 4257 Builder.SetInsertPoint(VecI->getParent(), 4258 std::next(VecI->getIterator())); 4259 } else { 4260 Builder.SetInsertPoint(&F->getEntryBlock().front()); 4261 } 4262 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 4263 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 4264 CSEBlocks.insert(cast<Instruction>(Scalar)->getParent()); 4265 auto &Locs = ExternallyUsedValues[Scalar]; 4266 ExternallyUsedValues.insert({Ex, Locs}); 4267 ExternallyUsedValues.erase(Scalar); 4268 // Required to update internally referenced instructions. 4269 Scalar->replaceAllUsesWith(Ex); 4270 continue; 4271 } 4272 4273 // Generate extracts for out-of-tree users. 4274 // Find the insertion point for the extractelement lane. 4275 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 4276 if (PHINode *PH = dyn_cast<PHINode>(User)) { 4277 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { 4278 if (PH->getIncomingValue(i) == Scalar) { 4279 Instruction *IncomingTerminator = 4280 PH->getIncomingBlock(i)->getTerminator(); 4281 if (isa<CatchSwitchInst>(IncomingTerminator)) { 4282 Builder.SetInsertPoint(VecI->getParent(), 4283 std::next(VecI->getIterator())); 4284 } else { 4285 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); 4286 } 4287 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 4288 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 4289 CSEBlocks.insert(PH->getIncomingBlock(i)); 4290 PH->setOperand(i, Ex); 4291 } 4292 } 4293 } else { 4294 Builder.SetInsertPoint(cast<Instruction>(User)); 4295 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 4296 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 4297 CSEBlocks.insert(cast<Instruction>(User)->getParent()); 4298 User->replaceUsesOfWith(Scalar, Ex); 4299 } 4300 } else { 4301 Builder.SetInsertPoint(&F->getEntryBlock().front()); 4302 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 4303 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 4304 CSEBlocks.insert(&F->getEntryBlock()); 4305 User->replaceUsesOfWith(Scalar, Ex); 4306 } 4307 4308 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 4309 } 4310 4311 // For each vectorized value: 4312 for (auto &TEPtr : VectorizableTree) { 4313 TreeEntry *Entry = TEPtr.get(); 4314 4315 // No need to handle users of gathered values. 
4316 if (Entry->NeedToGather) 4317 continue; 4318 4319 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 4320 4321 // For each lane: 4322 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 4323 Value *Scalar = Entry->Scalars[Lane]; 4324 4325 Type *Ty = Scalar->getType(); 4326 if (!Ty->isVoidTy()) { 4327 #ifndef NDEBUG 4328 for (User *U : Scalar->users()) { 4329 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 4330 4331 // It is legal to replace users in the ignorelist by undef. 4332 assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) && 4333 "Replacing out-of-tree value with undef"); 4334 } 4335 #endif 4336 Value *Undef = UndefValue::get(Ty); 4337 Scalar->replaceAllUsesWith(Undef); 4338 } 4339 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 4340 eraseInstruction(cast<Instruction>(Scalar)); 4341 } 4342 } 4343 4344 Builder.ClearInsertionPoint(); 4345 4346 return VectorizableTree[0]->VectorizedValue; 4347 } 4348 4349 void BoUpSLP::optimizeGatherSequence() { 4350 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size() 4351 << " gather sequences instructions.\n"); 4352 // LICM InsertElementInst sequences. 4353 for (Instruction *I : GatherSeq) { 4354 if (!isa<InsertElementInst>(I) && !isa<ShuffleVectorInst>(I)) 4355 continue; 4356 4357 // Check if this block is inside a loop. 4358 Loop *L = LI->getLoopFor(I->getParent()); 4359 if (!L) 4360 continue; 4361 4362 // Check if it has a preheader. 4363 BasicBlock *PreHeader = L->getLoopPreheader(); 4364 if (!PreHeader) 4365 continue; 4366 4367 // If the vector or the element that we insert into it are 4368 // instructions that are defined in this basic block then we can't 4369 // hoist this instruction. 4370 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); 4371 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); 4372 if (Op0 && L->contains(Op0)) 4373 continue; 4374 if (Op1 && L->contains(Op1)) 4375 continue; 4376 4377 // We can hoist this instruction. Move it to the pre-header. 4378 I->moveBefore(PreHeader->getTerminator()); 4379 } 4380 4381 // Make a list of all reachable blocks in our CSE queue. 4382 SmallVector<const DomTreeNode *, 8> CSEWorkList; 4383 CSEWorkList.reserve(CSEBlocks.size()); 4384 for (BasicBlock *BB : CSEBlocks) 4385 if (DomTreeNode *N = DT->getNode(BB)) { 4386 assert(DT->isReachableFromEntry(N)); 4387 CSEWorkList.push_back(N); 4388 } 4389 4390 // Sort blocks by domination. This ensures we visit a block after all blocks 4391 // dominating it are visited. 4392 llvm::stable_sort(CSEWorkList, 4393 [this](const DomTreeNode *A, const DomTreeNode *B) { 4394 return DT->properlyDominates(A, B); 4395 }); 4396 4397 // Perform O(N^2) search over the gather sequences and merge identical 4398 // instructions. TODO: We can further optimize this scan if we split the 4399 // instructions into different buckets based on the insert lane. 4400 SmallVector<Instruction *, 16> Visited; 4401 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 4402 assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 4403 "Worklist not sorted properly!"); 4404 BasicBlock *BB = (*I)->getBlock(); 4405 // For all instructions in blocks containing gather sequences: 4406 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) { 4407 Instruction *In = &*it++; 4408 if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In)) 4409 continue; 4410 4411 // Check if we can replace this instruction with any of the 4412 // visited instructions. 
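      // Replacement is only legal if an identical instruction was already
      // visited in a block that dominates this one; the dominance-sorted
      // worklist above ensures such a candidate is seen first.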
4413 for (Instruction *v : Visited) { 4414 if (In->isIdenticalTo(v) && 4415 DT->dominates(v->getParent(), In->getParent())) { 4416 In->replaceAllUsesWith(v); 4417 eraseInstruction(In); 4418 In = nullptr; 4419 break; 4420 } 4421 } 4422 if (In) { 4423 assert(!is_contained(Visited, In)); 4424 Visited.push_back(In); 4425 } 4426 } 4427 } 4428 CSEBlocks.clear(); 4429 GatherSeq.clear(); 4430 } 4431 4432 // Groups the instructions to a bundle (which is then a single scheduling entity) 4433 // and schedules instructions until the bundle gets ready. 4434 bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, 4435 BoUpSLP *SLP, 4436 const InstructionsState &S) { 4437 if (isa<PHINode>(S.OpValue)) 4438 return true; 4439 4440 // Initialize the instruction bundle. 4441 Instruction *OldScheduleEnd = ScheduleEnd; 4442 ScheduleData *PrevInBundle = nullptr; 4443 ScheduleData *Bundle = nullptr; 4444 bool ReSchedule = false; 4445 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n"); 4446 4447 // Make sure that the scheduling region contains all 4448 // instructions of the bundle. 4449 for (Value *V : VL) { 4450 if (!extendSchedulingRegion(V, S)) 4451 return false; 4452 } 4453 4454 for (Value *V : VL) { 4455 ScheduleData *BundleMember = getScheduleData(V); 4456 assert(BundleMember && 4457 "no ScheduleData for bundle member (maybe not in same basic block)"); 4458 if (BundleMember->IsScheduled) { 4459 // A bundle member was scheduled as single instruction before and now 4460 // needs to be scheduled as part of the bundle. We just get rid of the 4461 // existing schedule. 4462 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember 4463 << " was already scheduled\n"); 4464 ReSchedule = true; 4465 } 4466 assert(BundleMember->isSchedulingEntity() && 4467 "bundle member already part of other bundle"); 4468 if (PrevInBundle) { 4469 PrevInBundle->NextInBundle = BundleMember; 4470 } else { 4471 Bundle = BundleMember; 4472 } 4473 BundleMember->UnscheduledDepsInBundle = 0; 4474 Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps; 4475 4476 // Group the instructions to a bundle. 4477 BundleMember->FirstInBundle = Bundle; 4478 PrevInBundle = BundleMember; 4479 } 4480 if (ScheduleEnd != OldScheduleEnd) { 4481 // The scheduling region got new instructions at the lower end (or it is a 4482 // new region for the first bundle). This makes it necessary to 4483 // recalculate all dependencies. 4484 // It is seldom that this needs to be done a second time after adding the 4485 // initial bundle to the region. 4486 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 4487 doForAllOpcodes(I, [](ScheduleData *SD) { 4488 SD->clearDependencies(); 4489 }); 4490 } 4491 ReSchedule = true; 4492 } 4493 if (ReSchedule) { 4494 resetSchedule(); 4495 initialFillReadyList(ReadyInsts); 4496 } 4497 4498 LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block " 4499 << BB->getName() << "\n"); 4500 4501 calculateDependencies(Bundle, true, SLP); 4502 4503 // Now try to schedule the new bundle. As soon as the bundle is "ready" it 4504 // means that there are no cyclic dependencies and we can schedule it. 4505 // Note that's important that we don't "schedule" the bundle yet (see 4506 // cancelScheduling). 
4507 while (!Bundle->isReady() && !ReadyInsts.empty()) { 4508 4509 ScheduleData *pickedSD = ReadyInsts.back(); 4510 ReadyInsts.pop_back(); 4511 4512 if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) { 4513 schedule(pickedSD, ReadyInsts); 4514 } 4515 } 4516 if (!Bundle->isReady()) { 4517 cancelScheduling(VL, S.OpValue); 4518 return false; 4519 } 4520 return true; 4521 } 4522 4523 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL, 4524 Value *OpValue) { 4525 if (isa<PHINode>(OpValue)) 4526 return; 4527 4528 ScheduleData *Bundle = getScheduleData(OpValue); 4529 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); 4530 assert(!Bundle->IsScheduled && 4531 "Can't cancel bundle which is already scheduled"); 4532 assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() && 4533 "tried to unbundle something which is not a bundle"); 4534 4535 // Un-bundle: make single instructions out of the bundle. 4536 ScheduleData *BundleMember = Bundle; 4537 while (BundleMember) { 4538 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); 4539 BundleMember->FirstInBundle = BundleMember; 4540 ScheduleData *Next = BundleMember->NextInBundle; 4541 BundleMember->NextInBundle = nullptr; 4542 BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps; 4543 if (BundleMember->UnscheduledDepsInBundle == 0) { 4544 ReadyInsts.insert(BundleMember); 4545 } 4546 BundleMember = Next; 4547 } 4548 } 4549 4550 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() { 4551 // Allocate a new ScheduleData for the instruction. 4552 if (ChunkPos >= ChunkSize) { 4553 ScheduleDataChunks.push_back(llvm::make_unique<ScheduleData[]>(ChunkSize)); 4554 ChunkPos = 0; 4555 } 4556 return &(ScheduleDataChunks.back()[ChunkPos++]); 4557 } 4558 4559 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, 4560 const InstructionsState &S) { 4561 if (getScheduleData(V, isOneOf(S, V))) 4562 return true; 4563 Instruction *I = dyn_cast<Instruction>(V); 4564 assert(I && "bundle member must be an instruction"); 4565 assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled"); 4566 auto &&CheckSheduleForI = [this, &S](Instruction *I) -> bool { 4567 ScheduleData *ISD = getScheduleData(I); 4568 if (!ISD) 4569 return false; 4570 assert(isInSchedulingRegion(ISD) && 4571 "ScheduleData not in scheduling region"); 4572 ScheduleData *SD = allocateScheduleDataChunks(); 4573 SD->Inst = I; 4574 SD->init(SchedulingRegionID, S.OpValue); 4575 ExtraScheduleDataMap[I][S.OpValue] = SD; 4576 return true; 4577 }; 4578 if (CheckSheduleForI(I)) 4579 return true; 4580 if (!ScheduleStart) { 4581 // It's the first instruction in the new region. 4582 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 4583 ScheduleStart = I; 4584 ScheduleEnd = I->getNextNode(); 4585 if (isOneOf(S, I) != I) 4586 CheckSheduleForI(I); 4587 assert(ScheduleEnd && "tried to vectorize a terminator?"); 4588 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 4589 return true; 4590 } 4591 // Search up and down at the same time, because we don't know if the new 4592 // instruction is above or below the existing scheduling region. 
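  // Sketch of the scan: UpIter walks upwards from just above ScheduleStart
  // while DownIter walks downwards from ScheduleEnd, one step each per
  // iteration, until I is found in either direction or the region-size
  // budget is exhausted:
  //
  //        ...            <-- UpIter (towards BB->rend())
  //   ScheduleStart
  //        ...  existing scheduling region
  //   ScheduleEnd
  //        ...            --> DownIter (towards BB->end())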
4593 BasicBlock::reverse_iterator UpIter = 4594 ++ScheduleStart->getIterator().getReverse(); 4595 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 4596 BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); 4597 BasicBlock::iterator LowerEnd = BB->end(); 4598 while (true) { 4599 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 4600 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 4601 return false; 4602 } 4603 4604 if (UpIter != UpperEnd) { 4605 if (&*UpIter == I) { 4606 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 4607 ScheduleStart = I; 4608 if (isOneOf(S, I) != I) 4609 CheckSheduleForI(I); 4610 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I 4611 << "\n"); 4612 return true; 4613 } 4614 ++UpIter; 4615 } 4616 if (DownIter != LowerEnd) { 4617 if (&*DownIter == I) { 4618 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 4619 nullptr); 4620 ScheduleEnd = I->getNextNode(); 4621 if (isOneOf(S, I) != I) 4622 CheckSheduleForI(I); 4623 assert(ScheduleEnd && "tried to vectorize a terminator?"); 4624 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I 4625 << "\n"); 4626 return true; 4627 } 4628 ++DownIter; 4629 } 4630 assert((UpIter != UpperEnd || DownIter != LowerEnd) && 4631 "instruction not found in block"); 4632 } 4633 return true; 4634 } 4635 4636 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 4637 Instruction *ToI, 4638 ScheduleData *PrevLoadStore, 4639 ScheduleData *NextLoadStore) { 4640 ScheduleData *CurrentLoadStore = PrevLoadStore; 4641 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 4642 ScheduleData *SD = ScheduleDataMap[I]; 4643 if (!SD) { 4644 SD = allocateScheduleDataChunks(); 4645 ScheduleDataMap[I] = SD; 4646 SD->Inst = I; 4647 } 4648 assert(!isInSchedulingRegion(SD) && 4649 "new ScheduleData already in scheduling region"); 4650 SD->init(SchedulingRegionID, I); 4651 4652 if (I->mayReadOrWriteMemory() && 4653 (!isa<IntrinsicInst>(I) || 4654 cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect)) { 4655 // Update the linked list of memory accessing instructions. 4656 if (CurrentLoadStore) { 4657 CurrentLoadStore->NextLoadStore = SD; 4658 } else { 4659 FirstLoadStoreInRegion = SD; 4660 } 4661 CurrentLoadStore = SD; 4662 } 4663 } 4664 if (NextLoadStore) { 4665 if (CurrentLoadStore) 4666 CurrentLoadStore->NextLoadStore = NextLoadStore; 4667 } else { 4668 LastLoadStoreInRegion = CurrentLoadStore; 4669 } 4670 } 4671 4672 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, 4673 bool InsertInReadyList, 4674 BoUpSLP *SLP) { 4675 assert(SD->isSchedulingEntity()); 4676 4677 SmallVector<ScheduleData *, 10> WorkList; 4678 WorkList.push_back(SD); 4679 4680 while (!WorkList.empty()) { 4681 ScheduleData *SD = WorkList.back(); 4682 WorkList.pop_back(); 4683 4684 ScheduleData *BundleMember = SD; 4685 while (BundleMember) { 4686 assert(isInSchedulingRegion(BundleMember)); 4687 if (!BundleMember->hasValidDependencies()) { 4688 4689 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember 4690 << "\n"); 4691 BundleMember->Dependencies = 0; 4692 BundleMember->resetUnscheduledDeps(); 4693 4694 // Handle def-use chain dependencies. 
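        // Two cases below: for an "extra" ScheduleData (OpValue != Inst) the
        // only def-use dependency recorded is on the bundle containing the
        // instruction itself; otherwise every in-region user of Inst adds a
        // dependency, plus an unscheduled dependency when the user's bundle
        // has not been scheduled yet.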
4695 if (BundleMember->OpValue != BundleMember->Inst) {
4696 ScheduleData *UseSD = getScheduleData(BundleMember->Inst);
4697 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
4698 BundleMember->Dependencies++;
4699 ScheduleData *DestBundle = UseSD->FirstInBundle;
4700 if (!DestBundle->IsScheduled)
4701 BundleMember->incrementUnscheduledDeps(1);
4702 if (!DestBundle->hasValidDependencies())
4703 WorkList.push_back(DestBundle);
4704 }
4705 } else {
4706 for (User *U : BundleMember->Inst->users()) {
4707 if (isa<Instruction>(U)) {
4708 ScheduleData *UseSD = getScheduleData(U);
4709 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
4710 BundleMember->Dependencies++;
4711 ScheduleData *DestBundle = UseSD->FirstInBundle;
4712 if (!DestBundle->IsScheduled)
4713 BundleMember->incrementUnscheduledDeps(1);
4714 if (!DestBundle->hasValidDependencies())
4715 WorkList.push_back(DestBundle);
4716 }
4717 } else {
4718 // It is not clear whether this can ever happen, but we need to be safe.
4719 // This keeps the instruction/bundle from ever being scheduled and
4720 // eventually disables vectorization.
4721 BundleMember->Dependencies++;
4722 BundleMember->incrementUnscheduledDeps(1);
4723 }
4724 }
4725 }
4726
4727 // Handle the memory dependencies.
4728 ScheduleData *DepDest = BundleMember->NextLoadStore;
4729 if (DepDest) {
4730 Instruction *SrcInst = BundleMember->Inst;
4731 MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
4732 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
4733 unsigned numAliased = 0;
4734 unsigned DistToSrc = 1;
4735
4736 while (DepDest) {
4737 assert(isInSchedulingRegion(DepDest));
4738
4739 // We have two limits to reduce the complexity:
4740 // 1) AliasedCheckLimit: It's a small limit to reduce calls to
4741 // SLP->isAliased (which is the expensive part in this loop).
4742 // 2) MaxMemDepDistance: It's for very large blocks and it aborts
4743 // the whole loop (even if the loop is fast, it's quadratic).
4744 // It's important for the loop break condition (see below) to
4745 // check this limit even between two read-only instructions.
4746 if (DistToSrc >= MaxMemDepDistance ||
4747 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
4748 (numAliased >= AliasedCheckLimit ||
4749 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
4750
4751 // We increment the counter only if the locations are aliased
4752 // (instead of counting all alias checks). This gives a better
4753 // balance between reduced runtime and accurate dependencies.
4754 numAliased++;
4755
4756 DepDest->MemoryDependencies.push_back(BundleMember);
4757 BundleMember->Dependencies++;
4758 ScheduleData *DestBundle = DepDest->FirstInBundle;
4759 if (!DestBundle->IsScheduled) {
4760 BundleMember->incrementUnscheduledDeps(1);
4761 }
4762 if (!DestBundle->hasValidDependencies()) {
4763 WorkList.push_back(DestBundle);
4764 }
4765 }
4766 DepDest = DepDest->NextLoadStore;
4767
4768 // An example explaining the loop break condition: Let's assume our
4769 // starting instruction is i0 and MaxMemDepDistance = 3.
4770 //
4771 // +--------v--v--v
4772 // i0,i1,i2,i3,i4,i5,i6,i7,i8
4773 // +--------^--^--^
4774 //
4775 // MaxMemDepDistance lets us stop alias-checking at i3 and we add
4776 // dependencies from i0 to i3,i4,.. (even if they are not aliased).
4777 // Previously we already added dependencies from i3 to i6,i7,i8
4778 // (because of MaxMemDepDistance). As we added a dependency from
4779 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
4780 // and we can abort this loop at i6.
4781 if (DistToSrc >= 2 * MaxMemDepDistance) 4782 break; 4783 DistToSrc++; 4784 } 4785 } 4786 } 4787 BundleMember = BundleMember->NextInBundle; 4788 } 4789 if (InsertInReadyList && SD->isReady()) { 4790 ReadyInsts.push_back(SD); 4791 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst 4792 << "\n"); 4793 } 4794 } 4795 } 4796 4797 void BoUpSLP::BlockScheduling::resetSchedule() { 4798 assert(ScheduleStart && 4799 "tried to reset schedule on block which has not been scheduled"); 4800 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 4801 doForAllOpcodes(I, [&](ScheduleData *SD) { 4802 assert(isInSchedulingRegion(SD) && 4803 "ScheduleData not in scheduling region"); 4804 SD->IsScheduled = false; 4805 SD->resetUnscheduledDeps(); 4806 }); 4807 } 4808 ReadyInsts.clear(); 4809 } 4810 4811 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 4812 if (!BS->ScheduleStart) 4813 return; 4814 4815 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 4816 4817 BS->resetSchedule(); 4818 4819 // For the real scheduling we use a more sophisticated ready-list: it is 4820 // sorted by the original instruction location. This lets the final schedule 4821 // be as close as possible to the original instruction order. 4822 struct ScheduleDataCompare { 4823 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const { 4824 return SD2->SchedulingPriority < SD1->SchedulingPriority; 4825 } 4826 }; 4827 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 4828 4829 // Ensure that all dependency data is updated and fill the ready-list with 4830 // initial instructions. 4831 int Idx = 0; 4832 int NumToSchedule = 0; 4833 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 4834 I = I->getNextNode()) { 4835 BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) { 4836 assert(SD->isPartOfBundle() == 4837 (getTreeEntry(SD->Inst) != nullptr) && 4838 "scheduler and vectorizer bundle mismatch"); 4839 SD->FirstInBundle->SchedulingPriority = Idx++; 4840 if (SD->isSchedulingEntity()) { 4841 BS->calculateDependencies(SD, false, this); 4842 NumToSchedule++; 4843 } 4844 }); 4845 } 4846 BS->initialFillReadyList(ReadyInsts); 4847 4848 Instruction *LastScheduledInst = BS->ScheduleEnd; 4849 4850 // Do the "real" scheduling. 4851 while (!ReadyInsts.empty()) { 4852 ScheduleData *picked = *ReadyInsts.begin(); 4853 ReadyInsts.erase(ReadyInsts.begin()); 4854 4855 // Move the scheduled instruction(s) to their dedicated places, if not 4856 // there yet. 4857 ScheduleData *BundleMember = picked; 4858 while (BundleMember) { 4859 Instruction *pickedInst = BundleMember->Inst; 4860 if (LastScheduledInst->getNextNode() != pickedInst) { 4861 BS->BB->getInstList().remove(pickedInst); 4862 BS->BB->getInstList().insert(LastScheduledInst->getIterator(), 4863 pickedInst); 4864 } 4865 LastScheduledInst = pickedInst; 4866 BundleMember = BundleMember->NextInBundle; 4867 } 4868 4869 BS->schedule(picked, ReadyInsts); 4870 NumToSchedule--; 4871 } 4872 assert(NumToSchedule == 0 && "could not schedule all instructions"); 4873 4874 // Avoid duplicate scheduling of the block. 4875 BS->ScheduleStart = nullptr; 4876 } 4877 4878 unsigned BoUpSLP::getVectorElementSize(Value *V) const { 4879 // If V is a store, just return the width of the stored value without 4880 // traversing the expression tree. This is the common case. 
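// For example, for 'store i8 %v, i8* %p' this returns 8, regardless of the
// width in which %v itself was computed.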
4881 if (auto *Store = dyn_cast<StoreInst>(V)) 4882 return DL->getTypeSizeInBits(Store->getValueOperand()->getType()); 4883 4884 // If V is not a store, we can traverse the expression tree to find loads 4885 // that feed it. The type of the loaded value may indicate a more suitable 4886 // width than V's type. We want to base the vector element size on the width 4887 // of memory operations where possible. 4888 SmallVector<Instruction *, 16> Worklist; 4889 SmallPtrSet<Instruction *, 16> Visited; 4890 if (auto *I = dyn_cast<Instruction>(V)) 4891 Worklist.push_back(I); 4892 4893 // Traverse the expression tree in bottom-up order looking for loads. If we 4894 // encounter an instruction we don't yet handle, we give up. 4895 auto MaxWidth = 0u; 4896 auto FoundUnknownInst = false; 4897 while (!Worklist.empty() && !FoundUnknownInst) { 4898 auto *I = Worklist.pop_back_val(); 4899 Visited.insert(I); 4900 4901 // We should only be looking at scalar instructions here. If the current 4902 // instruction has a vector type, give up. 4903 auto *Ty = I->getType(); 4904 if (isa<VectorType>(Ty)) 4905 FoundUnknownInst = true; 4906 4907 // If the current instruction is a load, update MaxWidth to reflect the 4908 // width of the loaded value. 4909 else if (isa<LoadInst>(I)) 4910 MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty)); 4911 4912 // Otherwise, we need to visit the operands of the instruction. We only 4913 // handle the interesting cases from buildTree here. If an operand is an 4914 // instruction we haven't yet visited, we add it to the worklist. 4915 else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 4916 isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) { 4917 for (Use &U : I->operands()) 4918 if (auto *J = dyn_cast<Instruction>(U.get())) 4919 if (!Visited.count(J)) 4920 Worklist.push_back(J); 4921 } 4922 4923 // If we don't yet handle the instruction, give up. 4924 else 4925 FoundUnknownInst = true; 4926 } 4927 4928 // If we didn't encounter a memory access in the expression tree, or if we 4929 // gave up for some reason, just return the width of V. 4930 if (!MaxWidth || FoundUnknownInst) 4931 return DL->getTypeSizeInBits(V->getType()); 4932 4933 // Otherwise, return the maximum width we found. 4934 return MaxWidth; 4935 } 4936 4937 // Determine if a value V in a vectorizable expression Expr can be demoted to a 4938 // smaller type with a truncation. We collect the values that will be demoted 4939 // in ToDemote and additional roots that require investigating in Roots. 4940 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr, 4941 SmallVectorImpl<Value *> &ToDemote, 4942 SmallVectorImpl<Value *> &Roots) { 4943 // We can always demote constants. 4944 if (isa<Constant>(V)) { 4945 ToDemote.push_back(V); 4946 return true; 4947 } 4948 4949 // If the value is not an instruction in the expression with only one use, it 4950 // cannot be demoted. 4951 auto *I = dyn_cast<Instruction>(V); 4952 if (!I || !I->hasOneUse() || !Expr.count(I)) 4953 return false; 4954 4955 switch (I->getOpcode()) { 4956 4957 // We can always demote truncations and extensions. Since truncations can 4958 // seed additional demotion, we save the truncated value. 4959 case Instruction::Trunc: 4960 Roots.push_back(I->getOperand(0)); 4961 break; 4962 case Instruction::ZExt: 4963 case Instruction::SExt: 4964 break; 4965 4966 // We can demote certain binary operations if we can demote both of their 4967 // operands. 
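// For example, an 'add' whose operands are both 'zext i8 ... to i32' can be
// demoted to i8, because each zext operand can itself be demoted.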
4968 case Instruction::Add: 4969 case Instruction::Sub: 4970 case Instruction::Mul: 4971 case Instruction::And: 4972 case Instruction::Or: 4973 case Instruction::Xor: 4974 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) || 4975 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots)) 4976 return false; 4977 break; 4978 4979 // We can demote selects if we can demote their true and false values. 4980 case Instruction::Select: { 4981 SelectInst *SI = cast<SelectInst>(I); 4982 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) || 4983 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots)) 4984 return false; 4985 break; 4986 } 4987 4988 // We can demote phis if we can demote all their incoming operands. Note that 4989 // we don't need to worry about cycles since we ensure single use above. 4990 case Instruction::PHI: { 4991 PHINode *PN = cast<PHINode>(I); 4992 for (Value *IncValue : PN->incoming_values()) 4993 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots)) 4994 return false; 4995 break; 4996 } 4997 4998 // Otherwise, conservatively give up. 4999 default: 5000 return false; 5001 } 5002 5003 // Record the value that we can demote. 5004 ToDemote.push_back(V); 5005 return true; 5006 } 5007 5008 void BoUpSLP::computeMinimumValueSizes() { 5009 // If there are no external uses, the expression tree must be rooted by a 5010 // store. We can't demote in-memory values, so there is nothing to do here. 5011 if (ExternalUses.empty()) 5012 return; 5013 5014 // We only attempt to truncate integer expressions. 5015 auto &TreeRoot = VectorizableTree[0]->Scalars; 5016 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 5017 if (!TreeRootIT) 5018 return; 5019 5020 // If the expression is not rooted by a store, these roots should have 5021 // external uses. We will rely on InstCombine to rewrite the expression in 5022 // the narrower type. However, InstCombine only rewrites single-use values. 5023 // This means that if a tree entry other than a root is used externally, it 5024 // must have multiple uses and InstCombine will not rewrite it. The code 5025 // below ensures that only the roots are used externally. 5026 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end()); 5027 for (auto &EU : ExternalUses) 5028 if (!Expr.erase(EU.Scalar)) 5029 return; 5030 if (!Expr.empty()) 5031 return; 5032 5033 // Collect the scalar values of the vectorizable expression. We will use this 5034 // context to determine which values can be demoted. If we see a truncation, 5035 // we mark it as seeding another demotion. 5036 for (auto &EntryPtr : VectorizableTree) 5037 Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end()); 5038 5039 // Ensure the roots of the vectorizable tree don't form a cycle. They must 5040 // have a single external user that is not in the vectorizable tree. 5041 for (auto *Root : TreeRoot) 5042 if (!Root->hasOneUse() || Expr.count(*Root->user_begin())) 5043 return; 5044 5045 // Conservatively determine if we can actually truncate the roots of the 5046 // expression. Collect the values that can be demoted in ToDemote and 5047 // additional roots that require investigating in Roots. 5048 SmallVector<Value *, 32> ToDemote; 5049 SmallVector<Value *, 4> Roots; 5050 for (auto *Root : TreeRoot) 5051 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots)) 5052 return; 5053 5054 // The maximum bit width required to represent all the values that can be 5055 // demoted without loss of precision. 
It would be safe to truncate the roots 5056 // of the expression to this width. 5057 auto MaxBitWidth = 8u; 5058 5059 // We first check if all the bits of the roots are demanded. If they're not, 5060 // we can truncate the roots to this narrower type. 5061 for (auto *Root : TreeRoot) { 5062 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 5063 MaxBitWidth = std::max<unsigned>( 5064 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth); 5065 } 5066 5067 // True if the roots can be zero-extended back to their original type, rather 5068 // than sign-extended. We know that if the leading bits are not demanded, we 5069 // can safely zero-extend. So we initialize IsKnownPositive to True. 5070 bool IsKnownPositive = true; 5071 5072 // If all the bits of the roots are demanded, we can try a little harder to 5073 // compute a narrower type. This can happen, for example, if the roots are 5074 // getelementptr indices. InstCombine promotes these indices to the pointer 5075 // width. Thus, all their bits are technically demanded even though the 5076 // address computation might be vectorized in a smaller type. 5077 // 5078 // We start by looking at each entry that can be demoted. We compute the 5079 // maximum bit width required to store the scalar by using ValueTracking to 5080 // compute the number of high-order bits we can truncate. 5081 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) && 5082 llvm::all_of(TreeRoot, [](Value *R) { 5083 assert(R->hasOneUse() && "Root should have only one use!"); 5084 return isa<GetElementPtrInst>(R->user_back()); 5085 })) { 5086 MaxBitWidth = 8u; 5087 5088 // Determine if the sign bit of all the roots is known to be zero. If not, 5089 // IsKnownPositive is set to False. 5090 IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) { 5091 KnownBits Known = computeKnownBits(R, *DL); 5092 return Known.isNonNegative(); 5093 }); 5094 5095 // Determine the maximum number of bits required to store the scalar 5096 // values. 5097 for (auto *Scalar : ToDemote) { 5098 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT); 5099 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType()); 5100 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth); 5101 } 5102 5103 // If we can't prove that the sign bit is zero, we must add one to the 5104 // maximum bit width to account for the unknown sign bit. This preserves 5105 // the existing sign bit so we can safely sign-extend the root back to the 5106 // original type. Otherwise, if we know the sign bit is zero, we will 5107 // zero-extend the root instead. 5108 // 5109 // FIXME: This is somewhat suboptimal, as there will be cases where adding 5110 // one to the maximum bit width will yield a larger-than-necessary 5111 // type. In general, we need to add an extra bit only if we can't 5112 // prove that the upper bit of the original type is equal to the 5113 // upper bit of the proposed smaller type. If these two bits are the 5114 // same (either zero or one) we know that sign-extending from the 5115 // smaller type will result in the same value. Here, since we can't 5116 // yet prove this, we are just making the proposed smaller type 5117 // larger to ensure correctness. 5118 if (!IsKnownPositive) 5119 ++MaxBitWidth; 5120 } 5121 5122 // Round MaxBitWidth up to the next power-of-two. 
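// For example, a computed MaxBitWidth of 12 is rounded up to 16.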
5123 if (!isPowerOf2_64(MaxBitWidth))
5124 MaxBitWidth = NextPowerOf2(MaxBitWidth);
5125
5126 // If the maximum bit width we compute is less than the width of the roots'
5127 // type, we can proceed with the narrowing. Otherwise, do nothing.
5128 if (MaxBitWidth >= TreeRootIT->getBitWidth())
5129 return;
5130
5131 // If we can truncate the root, we must collect additional values that might
5132 // be demoted as a result. That is, those seeded by truncations we will
5133 // modify.
5134 while (!Roots.empty())
5135 collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
5136
5137 // Finally, map the values we can demote to the maximum bit width we computed.
5138 for (auto *Scalar : ToDemote)
5139 MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
5140 }
5141
5142 namespace {
5143
5144 /// The SLPVectorizer Pass.
5145 struct SLPVectorizer : public FunctionPass {
5146 SLPVectorizerPass Impl;
5147
5148 /// Pass identification, replacement for typeid
5149 static char ID;
5150
5151 explicit SLPVectorizer() : FunctionPass(ID) {
5152 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
5153 }
5154
5155 bool doInitialization(Module &M) override {
5156 return false;
5157 }
5158
5159 bool runOnFunction(Function &F) override {
5160 if (skipFunction(F))
5161 return false;
5162
5163 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
5164 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
5165 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
5166 auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
5167 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
5168 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
5169 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
5170 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
5171 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
5172 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
5173
5174 return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
5175 }
5176
5177 void getAnalysisUsage(AnalysisUsage &AU) const override {
5178 FunctionPass::getAnalysisUsage(AU);
5179 AU.addRequired<AssumptionCacheTracker>();
5180 AU.addRequired<ScalarEvolutionWrapperPass>();
5181 AU.addRequired<AAResultsWrapperPass>();
5182 AU.addRequired<TargetTransformInfoWrapperPass>();
5183 AU.addRequired<LoopInfoWrapperPass>();
5184 AU.addRequired<DominatorTreeWrapperPass>();
5185 AU.addRequired<DemandedBitsWrapperPass>();
5186 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
5187 AU.addPreserved<LoopInfoWrapperPass>();
5188 AU.addPreserved<DominatorTreeWrapperPass>();
5189 AU.addPreserved<AAResultsWrapperPass>();
5190 AU.addPreserved<GlobalsAAWrapperPass>();
5191 AU.setPreservesCFG();
5192 }
5193 };
5194
5195 } // end anonymous namespace
5196
5197 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
5198 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
5199 auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
5200 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
5201 auto *AA = &AM.getResult<AAManager>(F);
5202 auto *LI = &AM.getResult<LoopAnalysis>(F);
5203 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
5204 auto *AC = &AM.getResult<AssumptionAnalysis>(F);
5205 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
5206 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
5207
5208 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB,
ORE);
5209 if (!Changed)
5210 return PreservedAnalyses::all();
5211
5212 PreservedAnalyses PA;
5213 PA.preserveSet<CFGAnalyses>();
5214 PA.preserve<AAManager>();
5215 PA.preserve<GlobalsAA>();
5216 return PA;
5217 }
5218
5219 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
5220 TargetTransformInfo *TTI_,
5221 TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
5222 LoopInfo *LI_, DominatorTree *DT_,
5223 AssumptionCache *AC_, DemandedBits *DB_,
5224 OptimizationRemarkEmitter *ORE_) {
5225 SE = SE_;
5226 TTI = TTI_;
5227 TLI = TLI_;
5228 AA = AA_;
5229 LI = LI_;
5230 DT = DT_;
5231 AC = AC_;
5232 DB = DB_;
5233 DL = &F.getParent()->getDataLayout();
5234
5235 Stores.clear();
5236 GEPs.clear();
5237 bool Changed = false;
5238
5239 // If the target claims to have no vector registers, don't attempt
5240 // vectorization.
5241 if (!TTI->getNumberOfRegisters(true))
5242 return false;
5243
5244 // Don't vectorize when the attribute NoImplicitFloat is used.
5245 if (F.hasFnAttribute(Attribute::NoImplicitFloat))
5246 return false;
5247
5248 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
5249
5250 // Use the bottom-up SLP vectorizer to construct chains that start with
5251 // store instructions.
5252 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);
5253
5254 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
5255 // delete instructions.
5256
5257 // Scan the blocks in the function in post order.
5258 for (auto BB : post_order(&F.getEntryBlock())) {
5259 collectSeedInstructions(BB);
5260
5261 // Vectorize trees that end at stores.
5262 if (!Stores.empty()) {
5263 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
5264 << " underlying objects.\n");
5265 Changed |= vectorizeStoreChains(R);
5266 }
5267
5268 // Vectorize trees that end at reductions.
5269 Changed |= vectorizeChainsInBlock(BB, R);
5270
5271 // Vectorize the index computations of getelementptr instructions. This
5272 // is primarily intended to catch gather-like idioms ending at
5273 // non-consecutive loads.
5274 if (!GEPs.empty()) {
5275 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
5276 << " underlying objects.\n");
5277 Changed |= vectorizeGEPIndices(BB, R);
5278 }
5279 }
5280
5281 if (Changed) {
5282 R.optimizeGatherSequence();
5283 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
5284 LLVM_DEBUG(verifyFunction(F));
5285 }
5286 return Changed;
5287 }
5288
5289 /// Check that the values in the given slice of the VL array still exist in
5290 /// the WeakTrackingVH array.
5291 /// Vectorization of part of the VL array may cause later values in the VL array
5292 /// to become invalid. We track when this has happened in the WeakTrackingVH
5293 /// array.
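/// For example, vectorizing the first slice of a store chain may erase some
/// of the later scalar stores; their WeakTrackingVH entries then no longer
/// match the original values and the affected slice is skipped.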
5294 static bool hasValueBeenRAUWed(ArrayRef<Value *> VL,
5295 ArrayRef<WeakTrackingVH> VH, unsigned SliceBegin,
5296 unsigned SliceSize) {
5297 VL = VL.slice(SliceBegin, SliceSize);
5298 VH = VH.slice(SliceBegin, SliceSize);
5299 return !std::equal(VL.begin(), VL.end(), VH.begin());
5300 }
5301
5302 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
5303 unsigned VecRegSize) {
5304 const unsigned ChainLen = Chain.size();
5305 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
5306 << "\n");
5307 const unsigned Sz = R.getVectorElementSize(Chain[0]);
5308 const unsigned VF = VecRegSize / Sz;
5309
5310 if (!isPowerOf2_32(Sz) || VF < 2)
5311 return false;
5312
5313 // Keep track of values that were deleted by vectorizing in the loop below.
5314 const SmallVector<WeakTrackingVH, 8> TrackValues(Chain.begin(), Chain.end());
5315
5316 bool Changed = false;
5317 // Look for profitable vectorizable trees at all offsets, starting at zero.
5318 for (unsigned i = 0, e = ChainLen; i + VF <= e; ++i) {
5319
5320 // Check that a previous iteration of this loop did not delete the Value.
5321 if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
5322 continue;
5323
5324 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
5325 << "\n");
5326 ArrayRef<Value *> Operands = Chain.slice(i, VF);
5327
5328 R.buildTree(Operands);
5329 if (R.isTreeTinyAndNotFullyVectorizable())
5330 continue;
5331
5332 R.computeMinimumValueSizes();
5333
5334 int Cost = R.getTreeCost();
5335
5336 LLVM_DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF
5337 << "\n");
5338 if (Cost < -SLPCostThreshold) {
5339 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
5340
5341 using namespace ore;
5342
5343 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
5344 cast<StoreInst>(Chain[i]))
5345 << "Stores SLP vectorized with cost " << NV("Cost", Cost)
5346 << " and with tree size "
5347 << NV("TreeSize", R.getTreeSize()));
5348
5349 R.vectorizeTree();
5350
5351 // Move to the next bundle.
5352 i += VF - 1;
5353 Changed = true;
5354 }
5355 }
5356
5357 return Changed;
5358 }
5359
5360 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
5361 BoUpSLP &R) {
5362 SetVector<StoreInst *> Heads;
5363 SmallDenseSet<StoreInst *> Tails;
5364 SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
5365
5366 // We may run into multiple chains that merge into a single chain. We mark the
5367 // stores that we vectorized so that we don't visit the same store twice.
5368 BoUpSLP::ValueSet VectorizedStores;
5369 bool Changed = false;
5370
5371 auto &&FindConsecutiveAccess =
5372 [this, &Stores, &Heads, &Tails, &ConsecutiveChain] (int K, int Idx) {
5373 if (!isConsecutiveAccess(Stores[K], Stores[Idx], *DL, *SE))
5374 return false;
5375
5376 Tails.insert(Stores[Idx]);
5377 Heads.insert(Stores[K]);
5378 ConsecutiveChain[Stores[K]] = Stores[Idx];
5379 return true;
5380 };
5381
5382 // Do a quadratic search on all of the given stores in reverse order and find
5383 // all of the pairs of stores that follow each other.
5384 int E = Stores.size();
5385 for (int Idx = E - 1; Idx >= 0; --Idx) {
5386 // If a store has multiple consecutive store candidates, search according
5387 // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ...
5388 // This is because pairing with the immediately succeeding or preceding
5389 // candidate usually creates the best chance of finding an SLP vectorization opportunity.
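// For example, with eight stores and Idx == 5 the candidates are probed in
// the order 4, 6, 3, 7, 2, 1, 0, stopping at the first consecutive match.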
5390 for (int Offset = 1, F = std::max(E - Idx, Idx + 1); Offset < F; ++Offset) 5391 if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) || 5392 (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx))) 5393 break; 5394 } 5395 5396 // For stores that start but don't end a link in the chain: 5397 for (auto *SI : llvm::reverse(Heads)) { 5398 if (Tails.count(SI)) 5399 continue; 5400 5401 // We found a store instr that starts a chain. Now follow the chain and try 5402 // to vectorize it. 5403 BoUpSLP::ValueList Operands; 5404 StoreInst *I = SI; 5405 // Collect the chain into a list. 5406 while ((Tails.count(I) || Heads.count(I)) && !VectorizedStores.count(I)) { 5407 Operands.push_back(I); 5408 // Move to the next value in the chain. 5409 I = ConsecutiveChain[I]; 5410 } 5411 5412 // FIXME: Is division-by-2 the correct step? Should we assert that the 5413 // register size is a power-of-2? 5414 for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize(); 5415 Size /= 2) { 5416 if (vectorizeStoreChain(Operands, R, Size)) { 5417 // Mark the vectorized stores so that we don't vectorize them again. 5418 VectorizedStores.insert(Operands.begin(), Operands.end()); 5419 Changed = true; 5420 break; 5421 } 5422 } 5423 } 5424 5425 return Changed; 5426 } 5427 5428 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) { 5429 // Initialize the collections. We will make a single pass over the block. 5430 Stores.clear(); 5431 GEPs.clear(); 5432 5433 // Visit the store and getelementptr instructions in BB and organize them in 5434 // Stores and GEPs according to the underlying objects of their pointer 5435 // operands. 5436 for (Instruction &I : *BB) { 5437 // Ignore store instructions that are volatile or have a pointer operand 5438 // that doesn't point to a scalar type. 5439 if (auto *SI = dyn_cast<StoreInst>(&I)) { 5440 if (!SI->isSimple()) 5441 continue; 5442 if (!isValidElementType(SI->getValueOperand()->getType())) 5443 continue; 5444 Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI); 5445 } 5446 5447 // Ignore getelementptr instructions that have more than one index, a 5448 // constant index, or a pointer operand that doesn't point to a scalar 5449 // type. 5450 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 5451 auto Idx = GEP->idx_begin()->get(); 5452 if (GEP->getNumIndices() > 1 || isa<Constant>(Idx)) 5453 continue; 5454 if (!isValidElementType(Idx->getType())) 5455 continue; 5456 if (GEP->getType()->isVectorTy()) 5457 continue; 5458 GEPs[GEP->getPointerOperand()].push_back(GEP); 5459 } 5460 } 5461 } 5462 5463 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) { 5464 if (!A || !B) 5465 return false; 5466 Value *VL[] = { A, B }; 5467 return tryToVectorizeList(VL, R, /*UserCost=*/0, true); 5468 } 5469 5470 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 5471 int UserCost, bool AllowReorder) { 5472 if (VL.size() < 2) 5473 return false; 5474 5475 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " 5476 << VL.size() << ".\n"); 5477 5478 // Check that all of the parts are scalar instructions of the same type, 5479 // we permit an alternate opcode via InstructionsState. 
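// For example, a list that mixes fadd and fsub over matching operand types
// can still be treated as a single bundle through the alternate-opcode
// mechanism.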
5480 InstructionsState S = getSameOpcode(VL);
5481 if (!S.getOpcode())
5482 return false;
5483
5484 Instruction *I0 = cast<Instruction>(S.OpValue);
5485 unsigned Sz = R.getVectorElementSize(I0);
5486 unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz);
5487 unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
5488 if (MaxVF < 2) {
5489 R.getORE()->emit([&]() {
5490 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0)
5491 << "Cannot SLP vectorize list: vectorization factor "
5492 << "less than 2 is not supported";
5493 });
5494 return false;
5495 }
5496
5497 for (Value *V : VL) {
5498 Type *Ty = V->getType();
5499 if (!isValidElementType(Ty)) {
5500 // NOTE: the following will give the user an internal LLVM type name,
5501 // which may not be useful.
5502 R.getORE()->emit([&]() {
5503 std::string type_str;
5504 llvm::raw_string_ostream rso(type_str);
5505 Ty->print(rso);
5506 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0)
5507 << "Cannot SLP vectorize list: type "
5508 << rso.str() + " is unsupported by vectorizer";
5509 });
5510 return false;
5511 }
5512 }
5513
5514 bool Changed = false;
5515 bool CandidateFound = false;
5516 int MinCost = SLPCostThreshold;
5517
5518 // Keep track of values that were deleted by vectorizing in the loop below.
5519 SmallVector<WeakTrackingVH, 8> TrackValues(VL.begin(), VL.end());
5520
5521 unsigned NextInst = 0, MaxInst = VL.size();
5522 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF;
5523 VF /= 2) {
5524 // No actual vectorization should happen if the number of parts is the same
5525 // as the provided vectorization factor (i.e. the scalar type is used for
5526 // vector code during codegen).
5527 auto *VecTy = VectorType::get(VL[0]->getType(), VF);
5528 if (TTI->getNumberOfParts(VecTy) == VF)
5529 continue;
5530 for (unsigned I = NextInst; I < MaxInst; ++I) {
5531 unsigned OpsWidth = 0;
5532
5533 if (I + VF > MaxInst)
5534 OpsWidth = MaxInst - I;
5535 else
5536 OpsWidth = VF;
5537
5538 if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
5539 break;
5540
5541 // Check that a previous iteration of this loop did not delete the Value.
5542 if (hasValueBeenRAUWed(VL, TrackValues, I, OpsWidth))
5543 continue;
5544
5545 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
5546 << "\n");
5547 ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
5548
5549 R.buildTree(Ops);
5550 Optional<ArrayRef<unsigned>> Order = R.bestOrder();
5551 // TODO: check if we can allow reordering for more cases.
5552 if (AllowReorder && Order) {
5553 // TODO: reorder tree nodes without tree rebuilding.
5554 // Conceptually, there is nothing actually preventing us from trying to
5555 // reorder a larger list. In fact, we do exactly this when vectorizing
5556 // reductions. However, at this point, we only expect to get here when
5557 // there are exactly two operations.
5558 assert(Ops.size() == 2);
5559 Value *ReorderedOps[] = {Ops[1], Ops[0]};
5560 R.buildTree(ReorderedOps, None);
5561 }
5562 if (R.isTreeTinyAndNotFullyVectorizable())
5563 continue;
5564
5565 R.computeMinimumValueSizes();
5566 int Cost = R.getTreeCost() - UserCost;
5567 CandidateFound = true;
5568 MinCost = std::min(MinCost, Cost);
5569
5570 if (Cost < -SLPCostThreshold) {
5571 LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
5572 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
5573 cast<Instruction>(Ops[0]))
5574 << "SLP vectorized with cost " << ore::NV("Cost", Cost)
5575 << " and with tree size "
5576 << ore::NV("TreeSize", R.getTreeSize()));
5577
5578 R.vectorizeTree();
5579 // Move to the next bundle.
5580 I += VF - 1;
5581 NextInst = I + 1;
5582 Changed = true;
5583 }
5584 }
5585 }
5586
5587 if (!Changed && CandidateFound) {
5588 R.getORE()->emit([&]() {
5589 return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0)
5590 << "List vectorization was possible but not beneficial with cost "
5591 << ore::NV("Cost", MinCost) << " >= "
5592 << ore::NV("Threshold", -SLPCostThreshold);
5593 });
5594 } else if (!Changed) {
5595 R.getORE()->emit([&]() {
5596 return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0)
5597 << "Cannot SLP vectorize list: vectorization was impossible"
5598 << " with available vectorization factors";
5599 });
5600 }
5601 return Changed;
5602 }
5603
5604 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
5605 if (!I)
5606 return false;
5607
5608 if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I))
5609 return false;
5610
5611 Value *P = I->getParent();
5612
5613 // Vectorize in current basic block only.
5614 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
5615 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
5616 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
5617 return false;
5618
5619 // Try to vectorize V.
5620 if (tryToVectorizePair(Op0, Op1, R))
5621 return true;
5622
5623 auto *A = dyn_cast<BinaryOperator>(Op0);
5624 auto *B = dyn_cast<BinaryOperator>(Op1);
5625 // Try to skip B.
5626 if (B && B->hasOneUse()) {
5627 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
5628 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
5629 if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R))
5630 return true;
5631 if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R))
5632 return true;
5633 }
5634
5635 // Try to skip A.
5636 if (A && A->hasOneUse()) {
5637 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
5638 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
5639 if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R))
5640 return true;
5641 if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R))
5642 return true;
5643 }
5644 return false;
5645 }
5646
5647 /// Generate a shuffle mask to be used in a reduction tree.
5648 ///
5649 /// \param VecLen The length of the vector to be reduced.
5650 /// \param NumEltsToRdx The number of elements that should be reduced in the
5651 /// vector.
5652 /// \param IsPairwise Whether the reduction is a pairwise or splitting
5653 /// reduction. A pairwise reduction will generate a mask of
5654 /// <0,2,...> or <1,3,..> while a splitting reduction will generate
5655 /// <2,3, undef,undef> for a vector of 4 and NumElts = 2.
5656 /// \param IsLeft If true, generate a mask of even elements; odd otherwise.
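/// For example, with VecLen = 4 and NumEltsToRdx = 2 a splitting reduction
/// produces <2, 3, undef, undef>, while a pairwise reduction produces
/// <0, 2, undef, undef> for the left mask and <1, 3, undef, undef> for the
/// right one.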
5657 static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
5658 bool IsPairwise, bool IsLeft,
5659 IRBuilder<> &Builder) {
5660 assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");
5661
5662 SmallVector<Constant *, 32> ShuffleMask(
5663 VecLen, UndefValue::get(Builder.getInt32Ty()));
5664
5665 if (IsPairwise)
5666 // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
5667 for (unsigned i = 0; i != NumEltsToRdx; ++i)
5668 ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
5669 else
5670 // Move the upper half of the vector to the lower half.
5671 for (unsigned i = 0; i != NumEltsToRdx; ++i)
5672 ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);
5673
5674 return ConstantVector::get(ShuffleMask);
5675 }
5676
5677 namespace {
5678
5679 /// Model horizontal reductions.
5680 ///
5681 /// A horizontal reduction is a tree of reduction operations (currently add and
5682 /// fadd) that has operations that can be put into a vector as its leaves.
5683 /// For example, this tree:
5684 ///
5685 ///  mul mul mul mul
5686 ///   \  /    \  /
5687 ///    +       +
5688 ///     \     /
5689 ///        +
5690 /// This tree has "mul" as its reduced values and "+" as its reduction
5691 /// operations. A reduction might be feeding into a store or a binary operation
5692 /// feeding a phi.
5693 ///    ...
5694 ///    \  /
5695 ///     +
5696 ///     |
5697 ///  phi +=
5698 ///
5699 /// Or:
5700 ///    ...
5701 ///    \  /
5702 ///     +
5703 ///     |
5704 ///  *p =
5705 ///
5706 class HorizontalReduction {
5707 using ReductionOpsType = SmallVector<Value *, 16>;
5708 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>;
5709 ReductionOpsListType ReductionOps;
5710 SmallVector<Value *, 32> ReducedVals;
5711 // Use map vector to make stable output.
5712 MapVector<Instruction *, Value *> ExtraArgs;
5713
5714 /// Kind of the reduction data.
5715 enum ReductionKind {
5716 RK_None, ///< Not a reduction.
5717 RK_Arithmetic, ///< Binary reduction data.
5718 RK_Min, ///< Minimum reduction data.
5719 RK_UMin, ///< Unsigned minimum reduction data.
5720 RK_Max, ///< Maximum reduction data.
5721 RK_UMax, ///< Unsigned maximum reduction data.
5722 };
5723
5724 /// Contains info about an operation, such as its opcode and its left and right operands.
5725 class OperationData {
5726 /// Opcode of the instruction.
5727 unsigned Opcode = 0;
5728
5729 /// Left operand of the reduction operation.
5730 Value *LHS = nullptr;
5731
5732 /// Right operand of the reduction operation.
5733 Value *RHS = nullptr;
5734
5735 /// Kind of the reduction operation.
5736 ReductionKind Kind = RK_None;
5737
5738 /// True if a floating point min/max reduction has no NaNs.
5739 bool NoNaN = false;
5740
5741 /// Checks if the reduction operation can be vectorized.
5742 bool isVectorizable() const {
5743 return LHS && RHS &&
5744 // We currently only support add/mul/logical and min/max reductions.
5745 ((Kind == RK_Arithmetic &&
5746 (Opcode == Instruction::Add || Opcode == Instruction::FAdd ||
5747 Opcode == Instruction::Mul || Opcode == Instruction::FMul ||
5748 Opcode == Instruction::And || Opcode == Instruction::Or ||
5749 Opcode == Instruction::Xor)) ||
5750 ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
5751 (Kind == RK_Min || Kind == RK_Max)) ||
5752 (Opcode == Instruction::ICmp &&
5753 (Kind == RK_UMin || Kind == RK_UMax)));
5754 }
5755
5756 /// Creates reduction operation with the current opcode.
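/// Arithmetic kinds emit a single binary operator; min/max kinds emit a
/// compare (e.g. 'icmp slt' for RK_Min) followed by a select of the two
/// operands.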
5757 Value *createOp(IRBuilder<> &Builder, const Twine &Name) const { 5758 assert(isVectorizable() && 5759 "Expected add|fadd or min/max reduction operation."); 5760 Value *Cmp; 5761 switch (Kind) { 5762 case RK_Arithmetic: 5763 return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, LHS, RHS, 5764 Name); 5765 case RK_Min: 5766 Cmp = Opcode == Instruction::ICmp ? Builder.CreateICmpSLT(LHS, RHS) 5767 : Builder.CreateFCmpOLT(LHS, RHS); 5768 break; 5769 case RK_Max: 5770 Cmp = Opcode == Instruction::ICmp ? Builder.CreateICmpSGT(LHS, RHS) 5771 : Builder.CreateFCmpOGT(LHS, RHS); 5772 break; 5773 case RK_UMin: 5774 assert(Opcode == Instruction::ICmp && "Expected integer types."); 5775 Cmp = Builder.CreateICmpULT(LHS, RHS); 5776 break; 5777 case RK_UMax: 5778 assert(Opcode == Instruction::ICmp && "Expected integer types."); 5779 Cmp = Builder.CreateICmpUGT(LHS, RHS); 5780 break; 5781 case RK_None: 5782 llvm_unreachable("Unknown reduction operation."); 5783 } 5784 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 5785 } 5786 5787 public: 5788 explicit OperationData() = default; 5789 5790 /// Construction for reduced values. They are identified by opcode only and 5791 /// don't have associated LHS/RHS values. 5792 explicit OperationData(Value *V) { 5793 if (auto *I = dyn_cast<Instruction>(V)) 5794 Opcode = I->getOpcode(); 5795 } 5796 5797 /// Constructor for reduction operations with opcode and its left and 5798 /// right operands. 5799 OperationData(unsigned Opcode, Value *LHS, Value *RHS, ReductionKind Kind, 5800 bool NoNaN = false) 5801 : Opcode(Opcode), LHS(LHS), RHS(RHS), Kind(Kind), NoNaN(NoNaN) { 5802 assert(Kind != RK_None && "One of the reduction operations is expected."); 5803 } 5804 5805 explicit operator bool() const { return Opcode; } 5806 5807 /// Get the index of the first operand. 5808 unsigned getFirstOperandIndex() const { 5809 assert(!!*this && "The opcode is not set."); 5810 switch (Kind) { 5811 case RK_Min: 5812 case RK_UMin: 5813 case RK_Max: 5814 case RK_UMax: 5815 return 1; 5816 case RK_Arithmetic: 5817 case RK_None: 5818 break; 5819 } 5820 return 0; 5821 } 5822 5823 /// Total number of operands in the reduction operation. 5824 unsigned getNumberOfOperands() const { 5825 assert(Kind != RK_None && !!*this && LHS && RHS && 5826 "Expected reduction operation."); 5827 switch (Kind) { 5828 case RK_Arithmetic: 5829 return 2; 5830 case RK_Min: 5831 case RK_UMin: 5832 case RK_Max: 5833 case RK_UMax: 5834 return 3; 5835 case RK_None: 5836 break; 5837 } 5838 llvm_unreachable("Reduction kind is not set"); 5839 } 5840 5841 /// Checks if the operation has the same parent as \p P. 5842 bool hasSameParent(Instruction *I, Value *P, bool IsRedOp) const { 5843 assert(Kind != RK_None && !!*this && LHS && RHS && 5844 "Expected reduction operation."); 5845 if (!IsRedOp) 5846 return I->getParent() == P; 5847 switch (Kind) { 5848 case RK_Arithmetic: 5849 // Arithmetic reduction operation must be used once only. 5850 return I->getParent() == P; 5851 case RK_Min: 5852 case RK_UMin: 5853 case RK_Max: 5854 case RK_UMax: { 5855 // SelectInst must be used twice while the condition op must have single 5856 // use only. 5857 auto *Cmp = cast<Instruction>(cast<SelectInst>(I)->getCondition()); 5858 return I->getParent() == P && Cmp && Cmp->getParent() == P; 5859 } 5860 case RK_None: 5861 break; 5862 } 5863 llvm_unreachable("Reduction kind is not set"); 5864 } 5865 /// Expected number of uses for reduction operations/reduced values. 
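/// An arithmetic reduction operation must have exactly one use; in a min/max
/// reduction a node needs two uses, since the consuming step uses it in both
/// the compare and the select.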
5866 bool hasRequiredNumberOfUses(Instruction *I, bool IsReductionOp) const { 5867 assert(Kind != RK_None && !!*this && LHS && RHS && 5868 "Expected reduction operation."); 5869 switch (Kind) { 5870 case RK_Arithmetic: 5871 return I->hasOneUse(); 5872 case RK_Min: 5873 case RK_UMin: 5874 case RK_Max: 5875 case RK_UMax: 5876 return I->hasNUses(2) && 5877 (!IsReductionOp || 5878 cast<SelectInst>(I)->getCondition()->hasOneUse()); 5879 case RK_None: 5880 break; 5881 } 5882 llvm_unreachable("Reduction kind is not set"); 5883 } 5884 5885 /// Initializes the list of reduction operations. 5886 void initReductionOps(ReductionOpsListType &ReductionOps) { 5887 assert(Kind != RK_None && !!*this && LHS && RHS && 5888 "Expected reduction operation."); 5889 switch (Kind) { 5890 case RK_Arithmetic: 5891 ReductionOps.assign(1, ReductionOpsType()); 5892 break; 5893 case RK_Min: 5894 case RK_UMin: 5895 case RK_Max: 5896 case RK_UMax: 5897 ReductionOps.assign(2, ReductionOpsType()); 5898 break; 5899 case RK_None: 5900 llvm_unreachable("Reduction kind is not set"); 5901 } 5902 } 5903 /// Add all reduction operations for the reduction instruction \p I. 5904 void addReductionOps(Instruction *I, ReductionOpsListType &ReductionOps) { 5905 assert(Kind != RK_None && !!*this && LHS && RHS && 5906 "Expected reduction operation."); 5907 switch (Kind) { 5908 case RK_Arithmetic: 5909 ReductionOps[0].emplace_back(I); 5910 break; 5911 case RK_Min: 5912 case RK_UMin: 5913 case RK_Max: 5914 case RK_UMax: 5915 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition()); 5916 ReductionOps[1].emplace_back(I); 5917 break; 5918 case RK_None: 5919 llvm_unreachable("Reduction kind is not set"); 5920 } 5921 } 5922 5923 /// Checks if instruction is associative and can be vectorized. 5924 bool isAssociative(Instruction *I) const { 5925 assert(Kind != RK_None && *this && LHS && RHS && 5926 "Expected reduction operation."); 5927 switch (Kind) { 5928 case RK_Arithmetic: 5929 return I->isAssociative(); 5930 case RK_Min: 5931 case RK_Max: 5932 return Opcode == Instruction::ICmp || 5933 cast<Instruction>(I->getOperand(0))->isFast(); 5934 case RK_UMin: 5935 case RK_UMax: 5936 assert(Opcode == Instruction::ICmp && 5937 "Only integer compare operation is expected."); 5938 return true; 5939 case RK_None: 5940 break; 5941 } 5942 llvm_unreachable("Reduction kind is not set"); 5943 } 5944 5945 /// Checks if the reduction operation can be vectorized. 5946 bool isVectorizable(Instruction *I) const { 5947 return isVectorizable() && isAssociative(I); 5948 } 5949 5950 /// Checks if two operation data are both a reduction op or both a reduced 5951 /// value. 5952 bool operator==(const OperationData &OD) { 5953 assert(((Kind != OD.Kind) || ((!LHS == !OD.LHS) && (!RHS == !OD.RHS))) && 5954 "One of the comparing operations is incorrect."); 5955 return this == &OD || (Kind == OD.Kind && Opcode == OD.Opcode); 5956 } 5957 bool operator!=(const OperationData &OD) { return !(*this == OD); } 5958 void clear() { 5959 Opcode = 0; 5960 LHS = nullptr; 5961 RHS = nullptr; 5962 Kind = RK_None; 5963 NoNaN = false; 5964 } 5965 5966 /// Get the opcode of the reduction operation. 5967 unsigned getOpcode() const { 5968 assert(isVectorizable() && "Expected vectorizable operation."); 5969 return Opcode; 5970 } 5971 5972 /// Get kind of reduction data. 
5973 ReductionKind getKind() const { return Kind; } 5974 Value *getLHS() const { return LHS; } 5975 Value *getRHS() const { return RHS; } 5976 Type *getConditionType() const { 5977 switch (Kind) { 5978 case RK_Arithmetic: 5979 return nullptr; 5980 case RK_Min: 5981 case RK_Max: 5982 case RK_UMin: 5983 case RK_UMax: 5984 return CmpInst::makeCmpResultType(LHS->getType()); 5985 case RK_None: 5986 break; 5987 } 5988 llvm_unreachable("Reduction kind is not set"); 5989 } 5990 5991 /// Creates reduction operation with the current opcode with the IR flags 5992 /// from \p ReductionOps. 5993 Value *createOp(IRBuilder<> &Builder, const Twine &Name, 5994 const ReductionOpsListType &ReductionOps) const { 5995 assert(isVectorizable() && 5996 "Expected add|fadd or min/max reduction operation."); 5997 auto *Op = createOp(Builder, Name); 5998 switch (Kind) { 5999 case RK_Arithmetic: 6000 propagateIRFlags(Op, ReductionOps[0]); 6001 return Op; 6002 case RK_Min: 6003 case RK_Max: 6004 case RK_UMin: 6005 case RK_UMax: 6006 if (auto *SI = dyn_cast<SelectInst>(Op)) 6007 propagateIRFlags(SI->getCondition(), ReductionOps[0]); 6008 propagateIRFlags(Op, ReductionOps[1]); 6009 return Op; 6010 case RK_None: 6011 break; 6012 } 6013 llvm_unreachable("Unknown reduction operation."); 6014 } 6015 /// Creates reduction operation with the current opcode with the IR flags 6016 /// from \p I. 6017 Value *createOp(IRBuilder<> &Builder, const Twine &Name, 6018 Instruction *I) const { 6019 assert(isVectorizable() && 6020 "Expected add|fadd or min/max reduction operation."); 6021 auto *Op = createOp(Builder, Name); 6022 switch (Kind) { 6023 case RK_Arithmetic: 6024 propagateIRFlags(Op, I); 6025 return Op; 6026 case RK_Min: 6027 case RK_Max: 6028 case RK_UMin: 6029 case RK_UMax: 6030 if (auto *SI = dyn_cast<SelectInst>(Op)) { 6031 propagateIRFlags(SI->getCondition(), 6032 cast<SelectInst>(I)->getCondition()); 6033 } 6034 propagateIRFlags(Op, I); 6035 return Op; 6036 case RK_None: 6037 break; 6038 } 6039 llvm_unreachable("Unknown reduction operation."); 6040 } 6041 6042 TargetTransformInfo::ReductionFlags getFlags() const { 6043 TargetTransformInfo::ReductionFlags Flags; 6044 Flags.NoNaN = NoNaN; 6045 switch (Kind) { 6046 case RK_Arithmetic: 6047 break; 6048 case RK_Min: 6049 Flags.IsSigned = Opcode == Instruction::ICmp; 6050 Flags.IsMaxOp = false; 6051 break; 6052 case RK_Max: 6053 Flags.IsSigned = Opcode == Instruction::ICmp; 6054 Flags.IsMaxOp = true; 6055 break; 6056 case RK_UMin: 6057 Flags.IsSigned = false; 6058 Flags.IsMaxOp = false; 6059 break; 6060 case RK_UMax: 6061 Flags.IsSigned = false; 6062 Flags.IsMaxOp = true; 6063 break; 6064 case RK_None: 6065 llvm_unreachable("Reduction kind is not set"); 6066 } 6067 return Flags; 6068 } 6069 }; 6070 6071 WeakTrackingVH ReductionRoot; 6072 6073 /// The operation data of the reduction operation. 6074 OperationData ReductionData; 6075 6076 /// The operation data of the values we perform a reduction on. 6077 OperationData ReducedValueData; 6078 6079 /// Should we model this reduction as a pairwise reduction tree or a tree that 6080 /// splits the vector in halves and adds those halves. 6081 bool IsPairwiseReduction = false; 6082 6083 /// Checks if the ParentStackElem.first should be marked as a reduction 6084 /// operation with an extra argument or as extra argument itself. 
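/// For example, while matching 'r = (a + b) + x' where x is not part of the
/// reduction pattern, x is recorded as the extra argument of its parent '+';
/// if that parent already has a recorded extra argument, the parent itself is
/// treated as an extra value and its remaining operands are not analyzed.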
6085 void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem, 6086 Value *ExtraArg) { 6087 if (ExtraArgs.count(ParentStackElem.first)) { 6088 ExtraArgs[ParentStackElem.first] = nullptr; 6089 // We ran into something like: 6090 // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg. 6091 // The whole ParentStackElem.first should be considered as an extra value 6092 // in this case. 6093 // Do not perform analysis of remaining operands of ParentStackElem.first 6094 // instruction, this whole instruction is an extra argument. 6095 ParentStackElem.second = ParentStackElem.first->getNumOperands(); 6096 } else { 6097 // We ran into something like: 6098 // ParentStackElem.first += ... + ExtraArg + ... 6099 ExtraArgs[ParentStackElem.first] = ExtraArg; 6100 } 6101 } 6102 6103 static OperationData getOperationData(Value *V) { 6104 if (!V) 6105 return OperationData(); 6106 6107 Value *LHS; 6108 Value *RHS; 6109 if (m_BinOp(m_Value(LHS), m_Value(RHS)).match(V)) { 6110 return OperationData(cast<BinaryOperator>(V)->getOpcode(), LHS, RHS, 6111 RK_Arithmetic); 6112 } 6113 if (auto *Select = dyn_cast<SelectInst>(V)) { 6114 // Look for a min/max pattern. 6115 if (m_UMin(m_Value(LHS), m_Value(RHS)).match(Select)) { 6116 return OperationData(Instruction::ICmp, LHS, RHS, RK_UMin); 6117 } else if (m_SMin(m_Value(LHS), m_Value(RHS)).match(Select)) { 6118 return OperationData(Instruction::ICmp, LHS, RHS, RK_Min); 6119 } else if (m_OrdFMin(m_Value(LHS), m_Value(RHS)).match(Select) || 6120 m_UnordFMin(m_Value(LHS), m_Value(RHS)).match(Select)) { 6121 return OperationData( 6122 Instruction::FCmp, LHS, RHS, RK_Min, 6123 cast<Instruction>(Select->getCondition())->hasNoNaNs()); 6124 } else if (m_UMax(m_Value(LHS), m_Value(RHS)).match(Select)) { 6125 return OperationData(Instruction::ICmp, LHS, RHS, RK_UMax); 6126 } else if (m_SMax(m_Value(LHS), m_Value(RHS)).match(Select)) { 6127 return OperationData(Instruction::ICmp, LHS, RHS, RK_Max); 6128 } else if (m_OrdFMax(m_Value(LHS), m_Value(RHS)).match(Select) || 6129 m_UnordFMax(m_Value(LHS), m_Value(RHS)).match(Select)) { 6130 return OperationData( 6131 Instruction::FCmp, LHS, RHS, RK_Max, 6132 cast<Instruction>(Select->getCondition())->hasNoNaNs()); 6133 } else { 6134 // Try harder: look for min/max pattern based on instructions producing 6135 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2). 6136 // During the intermediate stages of SLP, it's very common to have 6137 // pattern like this (since optimizeGatherSequence is run only once 6138 // at the end): 6139 // %1 = extractelement <2 x i32> %a, i32 0 6140 // %2 = extractelement <2 x i32> %a, i32 1 6141 // %cond = icmp sgt i32 %1, %2 6142 // %3 = extractelement <2 x i32> %a, i32 0 6143 // %4 = extractelement <2 x i32> %a, i32 1 6144 // %select = select i1 %cond, i32 %3, i32 %4 6145 CmpInst::Predicate Pred; 6146 Instruction *L1; 6147 Instruction *L2; 6148 6149 LHS = Select->getTrueValue(); 6150 RHS = Select->getFalseValue(); 6151 Value *Cond = Select->getCondition(); 6152 6153 // TODO: Support inverse predicates. 
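// The condition may use the select's operands directly, or it may use clones
// of them; a clone is only accepted if it is an extractelement instruction
// identical to the corresponding select operand.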
6154 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) {
6155 if (!isa<ExtractElementInst>(RHS) ||
6156 !L2->isIdenticalTo(cast<Instruction>(RHS)))
6157 return OperationData(V);
6158 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) {
6159 if (!isa<ExtractElementInst>(LHS) ||
6160 !L1->isIdenticalTo(cast<Instruction>(LHS)))
6161 return OperationData(V);
6162 } else {
6163 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS))
6164 return OperationData(V);
6165 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) ||
6166 !L1->isIdenticalTo(cast<Instruction>(LHS)) ||
6167 !L2->isIdenticalTo(cast<Instruction>(RHS)))
6168 return OperationData(V);
6169 }
6170 switch (Pred) {
6171 default:
6172 return OperationData(V);
6173
6174 case CmpInst::ICMP_ULT:
6175 case CmpInst::ICMP_ULE:
6176 return OperationData(Instruction::ICmp, LHS, RHS, RK_UMin);
6177
6178 case CmpInst::ICMP_SLT:
6179 case CmpInst::ICMP_SLE:
6180 return OperationData(Instruction::ICmp, LHS, RHS, RK_Min);
6181
6182 case CmpInst::FCMP_OLT:
6183 case CmpInst::FCMP_OLE:
6184 case CmpInst::FCMP_ULT:
6185 case CmpInst::FCMP_ULE:
6186 return OperationData(Instruction::FCmp, LHS, RHS, RK_Min,
6187 cast<Instruction>(Cond)->hasNoNaNs());
6188
6189 case CmpInst::ICMP_UGT:
6190 case CmpInst::ICMP_UGE:
6191 return OperationData(Instruction::ICmp, LHS, RHS, RK_UMax);
6192
6193 case CmpInst::ICMP_SGT:
6194 case CmpInst::ICMP_SGE:
6195 return OperationData(Instruction::ICmp, LHS, RHS, RK_Max);
6196
6197 case CmpInst::FCMP_OGT:
6198 case CmpInst::FCMP_OGE:
6199 case CmpInst::FCMP_UGT:
6200 case CmpInst::FCMP_UGE:
6201 return OperationData(Instruction::FCmp, LHS, RHS, RK_Max,
6202 cast<Instruction>(Cond)->hasNoNaNs());
6203 }
6204 }
6205 }
6206 return OperationData(V);
6207 }
6208
6209 public:
6210 HorizontalReduction() = default;
6211
6212 /// Try to find a reduction tree.
6213 bool matchAssociativeReduction(PHINode *Phi, Instruction *B) {
6214 assert((!Phi || is_contained(Phi->operands(), B)) &&
6215 "The phi needs to use the binary operator");
6216
6217 ReductionData = getOperationData(B);
6218
6219 // We could have an initial reduction that is not an add.
6220 // r *= v1 + v2 + v3 + v4
6221 // In such a case start looking for a tree rooted in the first '+'.
6222 if (Phi) {
6223 if (ReductionData.getLHS() == Phi) {
6224 Phi = nullptr;
6225 B = dyn_cast<Instruction>(ReductionData.getRHS());
6226 ReductionData = getOperationData(B);
6227 } else if (ReductionData.getRHS() == Phi) {
6228 Phi = nullptr;
6229 B = dyn_cast<Instruction>(ReductionData.getLHS());
6230 ReductionData = getOperationData(B);
6231 }
6232 }
6233
6234 if (!ReductionData.isVectorizable(B))
6235 return false;
6236
6237 Type *Ty = B->getType();
6238 if (!isValidElementType(Ty))
6239 return false;
6240 if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy())
6241 return false;
6242
6243 ReducedValueData.clear();
6244 ReductionRoot = B;
6245
6246 // Post-order traverse the reduction tree starting at B. We only handle true
6247 // trees containing only binary operators.
6248 SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
6249 Stack.push_back(std::make_pair(B, ReductionData.getFirstOperandIndex()));
6250 ReductionData.initReductionOps(ReductionOps);
6251 while (!Stack.empty()) {
6252 Instruction *TreeN = Stack.back().first;
6253 unsigned EdgeToVist = Stack.back().second++;
6254 OperationData OpData = getOperationData(TreeN);
6255 bool IsReducedValue = OpData != ReductionData;
6256
6257 // Post-order visit.
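// A node is retired once all of its operand edges have been visited, or
// immediately if it is a reduced value; reduction operations are collected
// in ReductionOps and reduced values in ReducedVals.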
6258 if (IsReducedValue || EdgeToVisit == OpData.getNumberOfOperands()) { 6259 if (IsReducedValue) 6260 ReducedVals.push_back(TreeN); 6261 else { 6262 auto I = ExtraArgs.find(TreeN); 6263 if (I != ExtraArgs.end() && !I->second) { 6264 // Check if TreeN is an extra argument of its parent operation. 6265 if (Stack.size() <= 1) { 6266 // TreeN can't be an extra argument as it is a root reduction 6267 // operation. 6268 return false; 6269 } 6270 // Yes, TreeN is an extra argument; do not add it to the list of 6271 // reduction operations. 6272 // Stack[Stack.size() - 2] always points to the parent operation. 6273 markExtraArg(Stack[Stack.size() - 2], TreeN); 6274 ExtraArgs.erase(TreeN); 6275 } else 6276 ReductionData.addReductionOps(TreeN, ReductionOps); 6277 } 6278 // Retract. 6279 Stack.pop_back(); 6280 continue; 6281 } 6282 6283 // Visit left or right. 6284 Value *NextV = TreeN->getOperand(EdgeToVisit); 6285 if (NextV != Phi) { 6286 auto *I = dyn_cast<Instruction>(NextV); 6287 OpData = getOperationData(I); 6288 // Continue analysis if the next operand is a reduction operation or 6289 // (possibly) a reduced value. If the reduced value opcode is not set yet, 6290 // the first operation encountered that differs from the reduction 6291 // operation defines the reduced value class. 6292 if (I && (!ReducedValueData || OpData == ReducedValueData || 6293 OpData == ReductionData)) { 6294 const bool IsReductionOperation = OpData == ReductionData; 6295 // Only handle trees in the current basic block. 6296 if (!ReductionData.hasSameParent(I, B->getParent(), 6297 IsReductionOperation)) { 6298 // I is an extra argument for TreeN (its parent operation). 6299 markExtraArg(Stack.back(), I); 6300 continue; 6301 } 6302 6303 // Each tree node needs to have a minimal number of users except for the 6304 // ultimate reduction. 6305 if (!ReductionData.hasRequiredNumberOfUses(I, 6306 OpData == ReductionData) && 6307 I != B) { 6308 // I is an extra argument for TreeN (its parent operation). 6309 markExtraArg(Stack.back(), I); 6310 continue; 6311 } 6312 6313 if (IsReductionOperation) { 6314 // We need to be able to reassociate the reduction operations. 6315 if (!OpData.isAssociative(I)) { 6316 // I is an extra argument for TreeN (its parent operation). 6317 markExtraArg(Stack.back(), I); 6318 continue; 6319 } 6320 } else if (ReducedValueData && 6321 ReducedValueData != OpData) { 6322 // Make sure that the opcodes of the operations that we are going to 6323 // reduce match. 6324 // I is an extra argument for TreeN (its parent operation). 6325 markExtraArg(Stack.back(), I); 6326 continue; 6327 } else if (!ReducedValueData) 6328 ReducedValueData = OpData; 6329 6330 Stack.push_back(std::make_pair(I, OpData.getFirstOperandIndex())); 6331 continue; 6332 } 6333 } 6334 // NextV is an extra argument for TreeN (its parent operation). 6335 markExtraArg(Stack.back(), NextV); 6336 } 6337 return true; 6338 } 6339 6340 /// Attempt to vectorize the tree found by 6341 /// matchAssociativeReduction. 6342 bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) { 6343 if (ReducedVals.empty()) 6344 return false; 6345 6346 // If there is a sufficient number of reduction values, reduce 6347 // to a nearby power-of-2. We can safely generate oversized 6348 // vectors and rely on the backend to split them into legal sizes.
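    // For example (illustrative), with 7 reduced values the loop below emits
    // one 4-wide vector reduction for the first four values (assuming the cost
    // model allows it); the remaining three values are folded into the result
    // with scalar reduction operations in the epilogue after the loop.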
6349 unsigned NumReducedVals = ReducedVals.size(); 6350 if (NumReducedVals < 4) 6351 return false; 6352 6353 unsigned ReduxWidth = PowerOf2Floor(NumReducedVals); 6354 6355 Value *VectorizedTree = nullptr; 6356 6357 // FIXME: Fast-math-flags should be set based on the instructions in the 6358 // reduction (not all of 'fast' are required). 6359 IRBuilder<> Builder(cast<Instruction>(ReductionRoot)); 6360 FastMathFlags Unsafe; 6361 Unsafe.setFast(); 6362 Builder.setFastMathFlags(Unsafe); 6363 unsigned i = 0; 6364 6365 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues; 6366 // The same extra argument may be used several times, so log each attempt 6367 // to use it. 6368 for (auto &Pair : ExtraArgs) { 6369 assert(Pair.first && "DebugLoc must be set."); 6370 ExternallyUsedValues[Pair.second].push_back(Pair.first); 6371 } 6372 // The reduction root is used as the insertion point for new instructions, 6373 // so set it as externally used to prevent it from being deleted. 6374 ExternallyUsedValues[ReductionRoot]; 6375 SmallVector<Value *, 16> IgnoreList; 6376 for (auto &V : ReductionOps) 6377 IgnoreList.append(V.begin(), V.end()); 6378 while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) { 6379 auto VL = makeArrayRef(&ReducedVals[i], ReduxWidth); 6380 V.buildTree(VL, ExternallyUsedValues, IgnoreList); 6381 Optional<ArrayRef<unsigned>> Order = V.bestOrder(); 6382 // TODO: Handle orders of size less than the number of elements in the vector. 6383 if (Order && Order->size() == VL.size()) { 6384 // TODO: reorder tree nodes without tree rebuilding. 6385 SmallVector<Value *, 4> ReorderedOps(VL.size()); 6386 llvm::transform(*Order, ReorderedOps.begin(), 6387 [VL](const unsigned Idx) { return VL[Idx]; }); 6388 V.buildTree(ReorderedOps, ExternallyUsedValues, IgnoreList); 6389 } 6390 if (V.isTreeTinyAndNotFullyVectorizable()) 6391 break; 6392 6393 V.computeMinimumValueSizes(); 6394 6395 // Estimate cost. 6396 int TreeCost = V.getTreeCost(); 6397 int ReductionCost = getReductionCost(TTI, ReducedVals[i], ReduxWidth); 6398 int Cost = TreeCost + ReductionCost; 6399 if (Cost >= -SLPCostThreshold) { 6400 V.getORE()->emit([&]() { 6401 return OptimizationRemarkMissed( 6402 SV_NAME, "HorSLPNotBeneficial", cast<Instruction>(VL[0])) 6403 << "Vectorizing horizontal reduction is possible " 6404 << "but not beneficial with cost " 6405 << ore::NV("Cost", Cost) << " and threshold " 6406 << ore::NV("Threshold", -SLPCostThreshold); 6407 }); 6408 break; 6409 } 6410 6411 LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost: " 6412 << Cost << ". (HorRdx)\n"); 6413 V.getORE()->emit([&]() { 6414 return OptimizationRemark( 6415 SV_NAME, "VectorizedHorizontalReduction", cast<Instruction>(VL[0])) 6416 << "Vectorized horizontal reduction with cost " 6417 << ore::NV("Cost", Cost) << " and with tree size " 6418 << ore::NV("TreeSize", V.getTreeSize()); 6419 }); 6420 6421 // Vectorize a tree. 6422 DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc(); 6423 Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues); 6424 6425 // Emit a reduction.
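    // A rough sketch of what emitReduction() produces for a 4-wide integer add
    // (illustrative only): either a log2(ReduxWidth) sequence of shufflevector
    // + add steps whose result lands in lane 0, or a single target reduction
    // built via createSimpleTargetReduction(), depending on which form
    // getReductionCost() found cheaper.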
6426 Builder.SetInsertPoint(cast<Instruction>(ReductionRoot)); 6427 Value *ReducedSubTree = 6428 emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI); 6429 if (VectorizedTree) { 6430 Builder.SetCurrentDebugLocation(Loc); 6431 OperationData VectReductionData(ReductionData.getOpcode(), 6432 VectorizedTree, ReducedSubTree, 6433 ReductionData.getKind()); 6434 VectorizedTree = 6435 VectReductionData.createOp(Builder, "op.rdx", ReductionOps); 6436 } else 6437 VectorizedTree = ReducedSubTree; 6438 i += ReduxWidth; 6439 ReduxWidth = PowerOf2Floor(NumReducedVals - i); 6440 } 6441 6442 if (VectorizedTree) { 6443 // Finish the reduction. 6444 for (; i < NumReducedVals; ++i) { 6445 auto *I = cast<Instruction>(ReducedVals[i]); 6446 Builder.SetCurrentDebugLocation(I->getDebugLoc()); 6447 OperationData VectReductionData(ReductionData.getOpcode(), 6448 VectorizedTree, I, 6449 ReductionData.getKind()); 6450 VectorizedTree = VectReductionData.createOp(Builder, "", ReductionOps); 6451 } 6452 for (auto &Pair : ExternallyUsedValues) { 6453 // Add each externally used value to the final reduction. 6454 for (auto *I : Pair.second) { 6455 Builder.SetCurrentDebugLocation(I->getDebugLoc()); 6456 OperationData VectReductionData(ReductionData.getOpcode(), 6457 VectorizedTree, Pair.first, 6458 ReductionData.getKind()); 6459 VectorizedTree = VectReductionData.createOp(Builder, "op.extra", I); 6460 } 6461 } 6462 // Update users. 6463 ReductionRoot->replaceAllUsesWith(VectorizedTree); 6464 } 6465 return VectorizedTree != nullptr; 6466 } 6467 6468 unsigned numReductionValues() const { 6469 return ReducedVals.size(); 6470 } 6471 6472 private: 6473 /// Calculate the cost of a reduction. 6474 int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal, 6475 unsigned ReduxWidth) { 6476 Type *ScalarTy = FirstReducedVal->getType(); 6477 Type *VecTy = VectorType::get(ScalarTy, ReduxWidth); 6478 6479 int PairwiseRdxCost; 6480 int SplittingRdxCost; 6481 switch (ReductionData.getKind()) { 6482 case RK_Arithmetic: 6483 PairwiseRdxCost = 6484 TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy, 6485 /*IsPairwiseForm=*/true); 6486 SplittingRdxCost = 6487 TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy, 6488 /*IsPairwiseForm=*/false); 6489 break; 6490 case RK_Min: 6491 case RK_Max: 6492 case RK_UMin: 6493 case RK_UMax: { 6494 Type *VecCondTy = CmpInst::makeCmpResultType(VecTy); 6495 bool IsUnsigned = ReductionData.getKind() == RK_UMin || 6496 ReductionData.getKind() == RK_UMax; 6497 PairwiseRdxCost = 6498 TTI->getMinMaxReductionCost(VecTy, VecCondTy, 6499 /*IsPairwiseForm=*/true, IsUnsigned); 6500 SplittingRdxCost = 6501 TTI->getMinMaxReductionCost(VecTy, VecCondTy, 6502 /*IsPairwiseForm=*/false, IsUnsigned); 6503 break; 6504 } 6505 case RK_None: 6506 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 6507 } 6508 6509 IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost; 6510 int VecReduxCost = IsPairwiseReduction ? 
PairwiseRdxCost : SplittingRdxCost; 6511 6512 int ScalarReduxCost; 6513 switch (ReductionData.getKind()) { 6514 case RK_Arithmetic: 6515 ScalarReduxCost = 6516 TTI->getArithmeticInstrCost(ReductionData.getOpcode(), ScalarTy); 6517 break; 6518 case RK_Min: 6519 case RK_Max: 6520 case RK_UMin: 6521 case RK_UMax: 6522 ScalarReduxCost = 6523 TTI->getCmpSelInstrCost(ReductionData.getOpcode(), ScalarTy) + 6524 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 6525 CmpInst::makeCmpResultType(ScalarTy)); 6526 break; 6527 case RK_None: 6528 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 6529 } 6530 ScalarReduxCost *= (ReduxWidth - 1); 6531 6532 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost 6533 << " for reduction that starts with " << *FirstReducedVal 6534 << " (It is a " 6535 << (IsPairwiseReduction ? "pairwise" : "splitting") 6536 << " reduction)\n"); 6537 6538 return VecReduxCost - ScalarReduxCost; 6539 } 6540 6541 /// Emit a horizontal reduction of the vectorized value. 6542 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 6543 unsigned ReduxWidth, const TargetTransformInfo *TTI) { 6544 assert(VectorizedValue && "Need to have a vectorized tree node"); 6545 assert(isPowerOf2_32(ReduxWidth) && 6546 "We only handle power-of-two reductions for now"); 6547 6548 if (!IsPairwiseReduction) { 6549 // FIXME: The builder should use an FMF guard. It should not be hard-coded 6550 // to 'fast'. 6551 assert(Builder.getFastMathFlags().isFast() && "Expected 'fast' FMF"); 6552 return createSimpleTargetReduction( 6553 Builder, TTI, ReductionData.getOpcode(), VectorizedValue, 6554 ReductionData.getFlags(), ReductionOps.back()); 6555 } 6556 6557 Value *TmpVec = VectorizedValue; 6558 for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) { 6559 Value *LeftMask = 6560 createRdxShuffleMask(ReduxWidth, i, true, true, Builder); 6561 Value *RightMask = 6562 createRdxShuffleMask(ReduxWidth, i, true, false, Builder); 6563 6564 Value *LeftShuf = Builder.CreateShuffleVector( 6565 TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l"); 6566 Value *RightShuf = Builder.CreateShuffleVector( 6567 TmpVec, UndefValue::get(TmpVec->getType()), (RightMask), 6568 "rdx.shuf.r"); 6569 OperationData VectReductionData(ReductionData.getOpcode(), LeftShuf, 6570 RightShuf, ReductionData.getKind()); 6571 TmpVec = VectReductionData.createOp(Builder, "op.rdx", ReductionOps); 6572 } 6573 6574 // The result is in the first element of the vector. 6575 return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 6576 } 6577 }; 6578 6579 } // end anonymous namespace 6580 6581 /// Recognize construction of vectors like 6582 /// %ra = insertelement <4 x float> undef, float %s0, i32 0 6583 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 6584 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 6585 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 6586 /// starting from the last insertelement instruction. 
6587 /// 6588 /// Returns true if it matches 6589 static bool findBuildVector(InsertElementInst *LastInsertElem, 6590 TargetTransformInfo *TTI, 6591 SmallVectorImpl<Value *> &BuildVectorOpds, 6592 int &UserCost) { 6593 UserCost = 0; 6594 Value *V = nullptr; 6595 do { 6596 if (auto *CI = dyn_cast<ConstantInt>(LastInsertElem->getOperand(2))) { 6597 UserCost += TTI->getVectorInstrCost(Instruction::InsertElement, 6598 LastInsertElem->getType(), 6599 CI->getZExtValue()); 6600 } 6601 BuildVectorOpds.push_back(LastInsertElem->getOperand(1)); 6602 V = LastInsertElem->getOperand(0); 6603 if (isa<UndefValue>(V)) 6604 break; 6605 LastInsertElem = dyn_cast<InsertElementInst>(V); 6606 if (!LastInsertElem || !LastInsertElem->hasOneUse()) 6607 return false; 6608 } while (true); 6609 std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end()); 6610 return true; 6611 } 6612 6613 /// Like findBuildVector, but looks for construction of aggregate. 6614 /// 6615 /// \return true if it matches. 6616 static bool findBuildAggregate(InsertValueInst *IV, 6617 SmallVectorImpl<Value *> &BuildVectorOpds) { 6618 Value *V; 6619 do { 6620 BuildVectorOpds.push_back(IV->getInsertedValueOperand()); 6621 V = IV->getAggregateOperand(); 6622 if (isa<UndefValue>(V)) 6623 break; 6624 IV = dyn_cast<InsertValueInst>(V); 6625 if (!IV || !IV->hasOneUse()) 6626 return false; 6627 } while (true); 6628 std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end()); 6629 return true; 6630 } 6631 6632 static bool PhiTypeSorterFunc(Value *V, Value *V2) { 6633 return V->getType() < V2->getType(); 6634 } 6635 6636 /// Try and get a reduction value from a phi node. 6637 /// 6638 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 6639 /// if they come from either \p ParentBB or a containing loop latch. 6640 /// 6641 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 6642 /// if not possible. 6643 static Value *getReductionValue(const DominatorTree *DT, PHINode *P, 6644 BasicBlock *ParentBB, LoopInfo *LI) { 6645 // There are situations where the reduction value is not dominated by the 6646 // reduction phi. Vectorizing such cases has been reported to cause 6647 // miscompiles. See PR25787. 6648 auto DominatedReduxValue = [&](Value *R) { 6649 return isa<Instruction>(R) && 6650 DT->dominates(P->getParent(), cast<Instruction>(R)->getParent()); 6651 }; 6652 6653 Value *Rdx = nullptr; 6654 6655 // Return the incoming value if it comes from the same BB as the phi node. 6656 if (P->getIncomingBlock(0) == ParentBB) { 6657 Rdx = P->getIncomingValue(0); 6658 } else if (P->getIncomingBlock(1) == ParentBB) { 6659 Rdx = P->getIncomingValue(1); 6660 } 6661 6662 if (Rdx && DominatedReduxValue(Rdx)) 6663 return Rdx; 6664 6665 // Otherwise, check whether we have a loop latch to look at. 6666 Loop *BBL = LI->getLoopFor(ParentBB); 6667 if (!BBL) 6668 return nullptr; 6669 BasicBlock *BBLatch = BBL->getLoopLatch(); 6670 if (!BBLatch) 6671 return nullptr; 6672 6673 // There is a loop latch, return the incoming value if it comes from 6674 // that. This reduction pattern occasionally turns up. 6675 if (P->getIncomingBlock(0) == BBLatch) { 6676 Rdx = P->getIncomingValue(0); 6677 } else if (P->getIncomingBlock(1) == BBLatch) { 6678 Rdx = P->getIncomingValue(1); 6679 } 6680 6681 if (Rdx && DominatedReduxValue(Rdx)) 6682 return Rdx; 6683 6684 return nullptr; 6685 } 6686 6687 /// Attempt to reduce a horizontal reduction. 
6688 /// If it is legal to match a horizontal reduction feeding the phi node \a P 6689 /// with reduction operators \a Root (or one of its operands) in a basic block 6690 /// \a BB, then check if it can be done. If a horizontal reduction is not found 6691 /// and the root instruction is a binary operation, vectorization of its 6692 /// operands is attempted. 6693 /// \returns true if a horizontal reduction was matched and reduced or the 6694 /// operands of one of the binary instructions were vectorized. 6695 /// \returns false if a horizontal reduction was not matched (or not possible) 6696 /// or no vectorization of any binary operation feeding the \a Root instruction 6697 /// was performed. 6698 static bool tryToVectorizeHorReductionOrInstOperands( 6699 PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R, 6700 TargetTransformInfo *TTI, 6701 const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) { 6702 if (!ShouldVectorizeHor) 6703 return false; 6704 6705 if (!Root) 6706 return false; 6707 6708 if (Root->getParent() != BB || isa<PHINode>(Root)) 6709 return false; 6710 // Start analysis from the Root instruction. If a horizontal reduction is 6711 // found, try to vectorize it. If it is not a horizontal reduction or 6712 // vectorization is not possible or not effective, and the currently analyzed 6713 // instruction is a binary operation, try to vectorize the operands, using 6714 // pre-order DFS traversal order. If the operands were not vectorized, repeat 6715 // the same procedure considering each operand as a possible root of the 6716 // horizontal reduction. 6717 // Interrupt the process if the Root instruction itself was vectorized or all 6718 // sub-trees not higher than RecursionMaxDepth were analyzed/vectorized. 6719 SmallVector<std::pair<WeakTrackingVH, unsigned>, 8> Stack(1, {Root, 0}); 6720 SmallPtrSet<Value *, 8> VisitedInstrs; 6721 bool Res = false; 6722 while (!Stack.empty()) { 6723 Value *V; 6724 unsigned Level; 6725 std::tie(V, Level) = Stack.pop_back_val(); 6726 if (!V) 6727 continue; 6728 auto *Inst = dyn_cast<Instruction>(V); 6729 if (!Inst) 6730 continue; 6731 auto *BI = dyn_cast<BinaryOperator>(Inst); 6732 auto *SI = dyn_cast<SelectInst>(Inst); 6733 if (BI || SI) { 6734 HorizontalReduction HorRdx; 6735 if (HorRdx.matchAssociativeReduction(P, Inst)) { 6736 if (HorRdx.tryToReduce(R, TTI)) { 6737 Res = true; 6738 // Set P to nullptr to avoid re-analysis of the phi node in 6739 // matchAssociativeReduction unless this is the root node. 6740 P = nullptr; 6741 continue; 6742 } 6743 } 6744 if (P && BI) { 6745 Inst = dyn_cast<Instruction>(BI->getOperand(0)); 6746 if (Inst == P) 6747 Inst = dyn_cast<Instruction>(BI->getOperand(1)); 6748 if (!Inst) { 6749 // Set P to nullptr to avoid re-analysis of the phi node in 6750 // matchAssociativeReduction unless this is the root node. 6751 P = nullptr; 6752 continue; 6753 } 6754 } 6755 } 6756 // Set P to nullptr to avoid re-analysis of the phi node in 6757 // matchAssociativeReduction unless this is the root node. 6758 P = nullptr; 6759 if (Vectorize(Inst, R)) { 6760 Res = true; 6761 continue; 6762 } 6763 6764 // Try to vectorize operands. 6765 // Continue analysis for the instruction from the same basic block only to 6766 // save compile time.
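    // A rough sketch (names are illustrative): for a root such as
    //   %r = add i32 %x, %y
    // that neither matches a reduction nor vectorizes directly, %x and %y are
    // pushed at Level + 1 (provided they are non-PHI instructions in BB) and
    // later popped and treated as candidate reduction roots themselves, until
    // RecursionMaxDepth is reached.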
6767 if (++Level < RecursionMaxDepth) 6768 for (auto *Op : Inst->operand_values()) 6769 if (VisitedInstrs.insert(Op).second) 6770 if (auto *I = dyn_cast<Instruction>(Op)) 6771 if (!isa<PHINode>(I) && I->getParent() == BB) 6772 Stack.emplace_back(Op, Level); 6773 } 6774 return Res; 6775 } 6776 6777 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V, 6778 BasicBlock *BB, BoUpSLP &R, 6779 TargetTransformInfo *TTI) { 6780 if (!V) 6781 return false; 6782 auto *I = dyn_cast<Instruction>(V); 6783 if (!I) 6784 return false; 6785 6786 if (!isa<BinaryOperator>(I)) 6787 P = nullptr; 6788 // Try to match and vectorize a horizontal reduction. 6789 auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool { 6790 return tryToVectorize(I, R); 6791 }; 6792 return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI, 6793 ExtraVectorization); 6794 } 6795 6796 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI, 6797 BasicBlock *BB, BoUpSLP &R) { 6798 const DataLayout &DL = BB->getModule()->getDataLayout(); 6799 if (!R.canMapToVector(IVI->getType(), DL)) 6800 return false; 6801 6802 SmallVector<Value *, 16> BuildVectorOpds; 6803 if (!findBuildAggregate(IVI, BuildVectorOpds)) 6804 return false; 6805 6806 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n"); 6807 // The aggregate value is unlikely to be kept in a vector register; we need 6808 // to extract the scalars into scalar registers. 6809 return tryToVectorizeList(BuildVectorOpds, R); 6810 } 6811 6812 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI, 6813 BasicBlock *BB, BoUpSLP &R) { 6814 int UserCost; 6815 SmallVector<Value *, 16> BuildVectorOpds; 6816 if (!findBuildVector(IEI, TTI, BuildVectorOpds, UserCost) || 6817 (llvm::all_of(BuildVectorOpds, 6818 [](Value *V) { return isa<ExtractElementInst>(V); }) && 6819 isShuffle(BuildVectorOpds))) 6820 return false; 6821 6822 // Vectorize starting with the build vector operands ignoring the BuildVector 6823 // instructions for the purpose of scheduling and user extraction.
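  // As a rough sketch of the intent (not a contract of tryToVectorizeList):
  // UserCost, computed by findBuildVector() as the aggregate cost of the
  // insertelement users, is passed along so the profitability check can credit
  // the removal of those instructions once the build vector is vectorized.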
6824 return tryToVectorizeList(BuildVectorOpds, R, UserCost); 6825 } 6826 6827 bool SLPVectorizerPass::vectorizeCmpInst(CmpInst *CI, BasicBlock *BB, 6828 BoUpSLP &R) { 6829 if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) 6830 return true; 6831 6832 bool OpsChanged = false; 6833 for (int Idx = 0; Idx < 2; ++Idx) { 6834 OpsChanged |= 6835 vectorizeRootInstruction(nullptr, CI->getOperand(Idx), BB, R, TTI); 6836 } 6837 return OpsChanged; 6838 } 6839 6840 bool SLPVectorizerPass::vectorizeSimpleInstructions( 6841 SmallVectorImpl<WeakVH> &Instructions, BasicBlock *BB, BoUpSLP &R) { 6842 bool OpsChanged = false; 6843 for (auto &VH : reverse(Instructions)) { 6844 auto *I = dyn_cast_or_null<Instruction>(VH); 6845 if (!I) 6846 continue; 6847 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) 6848 OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R); 6849 else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) 6850 OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R); 6851 else if (auto *CI = dyn_cast<CmpInst>(I)) 6852 OpsChanged |= vectorizeCmpInst(CI, BB, R); 6853 } 6854 Instructions.clear(); 6855 return OpsChanged; 6856 } 6857 6858 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { 6859 bool Changed = false; 6860 SmallVector<Value *, 4> Incoming; 6861 SmallPtrSet<Value *, 16> VisitedInstrs; 6862 6863 bool HaveVectorizedPhiNodes = true; 6864 while (HaveVectorizedPhiNodes) { 6865 HaveVectorizedPhiNodes = false; 6866 6867 // Collect the incoming values from the PHIs. 6868 Incoming.clear(); 6869 for (Instruction &I : *BB) { 6870 PHINode *P = dyn_cast<PHINode>(&I); 6871 if (!P) 6872 break; 6873 6874 if (!VisitedInstrs.count(P)) 6875 Incoming.push_back(P); 6876 } 6877 6878 // Sort by type. 6879 llvm::stable_sort(Incoming, PhiTypeSorterFunc); 6880 6881 // Try to vectorize elements based on their type. 6882 for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(), 6883 E = Incoming.end(); 6884 IncIt != E;) { 6885 6886 // Look for the next elements with the same type. 6887 SmallVector<Value *, 4>::iterator SameTypeIt = IncIt; 6888 while (SameTypeIt != E && 6889 (*SameTypeIt)->getType() == (*IncIt)->getType()) { 6890 VisitedInstrs.insert(*SameTypeIt); 6891 ++SameTypeIt; 6892 } 6893 6894 // Try to vectorize them. 6895 unsigned NumElts = (SameTypeIt - IncIt); 6896 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs (" 6897 << NumElts << ")\n"); 6898 // The order in which the phi nodes appear in the program does not matter. 6899 // So allow tryToVectorizeList to reorder them if it is beneficial. This 6900 // is done when there are exactly two elements since tryToVectorizeList 6901 // asserts that there are only two values when AllowReorder is true. 6902 bool AllowReorder = NumElts == 2; 6903 if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R, 6904 /*UserCost=*/0, AllowReorder)) { 6905 // Success: start over because instructions might have been changed. 6906 HaveVectorizedPhiNodes = true; 6907 Changed = true; 6908 break; 6909 } 6910 6911 // Start over at the next instruction of a different type (or the end). 6912 IncIt = SameTypeIt; 6913 } 6914 } 6915 6916 VisitedInstrs.clear(); 6917 6918 SmallVector<WeakVH, 8> PostProcessInstructions; 6919 SmallDenseSet<Instruction *, 4> KeyNodes; 6920 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 6921 // We may go through BB multiple times, so skip the instructions we have already checked.
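    // A rough sketch of the revisit handling below: if a revisited instruction
    // is a previously recorded key node (a store, terminator or call handled
    // further down) and has no uses, the insertelement/insertvalue/cmp
    // instructions accumulated in PostProcessInstructions get another
    // vectorization attempt, and a successful attempt restarts the scan from
    // the beginning of the block.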
6922 if (!VisitedInstrs.insert(&*it).second) { 6923 if (it->use_empty() && KeyNodes.count(&*it) > 0 && 6924 vectorizeSimpleInstructions(PostProcessInstructions, BB, R)) { 6925 // We would like to start over since some instructions are deleted 6926 // and the iterator may become invalid. 6927 Changed = true; 6928 it = BB->begin(); 6929 e = BB->end(); 6930 } 6931 continue; 6932 } 6933 6934 if (isa<DbgInfoIntrinsic>(it)) 6935 continue; 6936 6937 // Try to vectorize reductions that use PHINodes. 6938 if (PHINode *P = dyn_cast<PHINode>(it)) { 6939 // Check that the PHI is a reduction PHI. 6940 if (P->getNumIncomingValues() != 2) 6941 return Changed; 6942 6943 // Try to match and vectorize a horizontal reduction. 6944 if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R, 6945 TTI)) { 6946 Changed = true; 6947 it = BB->begin(); 6948 e = BB->end(); 6949 continue; 6950 } 6951 continue; 6952 } 6953 6954 // Ran into an instruction without users, such as a terminator, a store, or a 6955 // function call with an ignored return value. Unused instructions are detected 6956 // by their void type, except for CallInst and InvokeInst, whose results may be unused even when non-void. 6957 if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) || 6958 isa<InvokeInst>(it))) { 6959 KeyNodes.insert(&*it); 6960 bool OpsChanged = false; 6961 if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) { 6962 for (auto *V : it->operand_values()) { 6963 // Try to match and vectorize a horizontal reduction. 6964 OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI); 6965 } 6966 } 6967 // Start vectorization of the post-process list of instructions from the 6968 // top-tree instructions to try to vectorize as many instructions as 6969 // possible. 6970 OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R); 6971 if (OpsChanged) { 6972 // We would like to start over since some instructions are deleted 6973 // and the iterator may become invalid. 6974 Changed = true; 6975 it = BB->begin(); 6976 e = BB->end(); 6977 continue; 6978 } 6979 } 6980 6981 if (isa<InsertElementInst>(it) || isa<CmpInst>(it) || 6982 isa<InsertValueInst>(it)) 6983 PostProcessInstructions.push_back(&*it); 6984 } 6985 6986 return Changed; 6987 } 6988 6989 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) { 6990 auto Changed = false; 6991 for (auto &Entry : GEPs) { 6992 // If the getelementptr list has fewer than two elements, there's nothing 6993 // to do. 6994 if (Entry.second.size() < 2) 6995 continue; 6996 6997 LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length " 6998 << Entry.second.size() << ".\n"); 6999 7000 // We process the getelementptr list in chunks of 16 (like we do for 7001 // stores) to minimize compile-time. 7002 for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += 16) { 7003 auto Len = std::min<unsigned>(BE - BI, 16); 7004 auto GEPList = makeArrayRef(&Entry.second[BI], Len); 7005 7006 // Initialize a set of candidate getelementptrs. Note that we use a 7007 // SetVector here to preserve program order. If the index computations 7008 // are vectorizable and begin with loads, we want to minimize the chance 7009 // of having to reorder them later. 7010 SetVector<Value *> Candidates(GEPList.begin(), GEPList.end()); 7011 7012 // Some of the candidates may have already been vectorized after we 7013 // initially collected them. If so, the WeakTrackingVHs will have 7014 // nullified the values, so remove them 7015 // from the set of candidates.
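      // Illustrative example (names are hypothetical) for the
      // constant-difference filtering performed a few lines below: given
      //   %g0 = getelementptr inbounds i32, i32* %base, i64 %i
      //   %g1 = getelementptr inbounds i32, i32* %base, i64 %j
      // where SCEV proves the two addresses differ by a constant, both
      // getelementptrs are dropped from the candidate set, since one address
      // is trivially computable from the other.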
7016 Candidates.remove(nullptr); 7017 7018 // Remove from the set of candidates all pairs of getelementptrs with 7019 // constant differences. Such getelementptrs are likely not good 7020 // candidates for vectorization in a bottom-up phase since one can be 7021 // computed from the other. We also ensure all candidate getelementptr 7022 // indices are unique. 7023 for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) { 7024 auto *GEPI = cast<GetElementPtrInst>(GEPList[I]); 7025 if (!Candidates.count(GEPI)) 7026 continue; 7027 auto *SCEVI = SE->getSCEV(GEPList[I]); 7028 for (int J = I + 1; J < E && Candidates.size() > 1; ++J) { 7029 auto *GEPJ = cast<GetElementPtrInst>(GEPList[J]); 7030 auto *SCEVJ = SE->getSCEV(GEPList[J]); 7031 if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) { 7032 Candidates.remove(GEPList[I]); 7033 Candidates.remove(GEPList[J]); 7034 } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) { 7035 Candidates.remove(GEPList[J]); 7036 } 7037 } 7038 } 7039 7040 // We break out of the above computation as soon as we know there are 7041 // fewer than two candidates remaining. 7042 if (Candidates.size() < 2) 7043 continue; 7044 7045 // Add the single, non-constant index of each candidate to the bundle. We 7046 // ensured the indices met these constraints when we originally collected 7047 // the getelementptrs. 7048 SmallVector<Value *, 16> Bundle(Candidates.size()); 7049 auto BundleIndex = 0u; 7050 for (auto *V : Candidates) { 7051 auto *GEP = cast<GetElementPtrInst>(V); 7052 auto *GEPIdx = GEP->idx_begin()->get(); 7053 assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx)); 7054 Bundle[BundleIndex++] = GEPIdx; 7055 } 7056 7057 // Try and vectorize the indices. We are currently only interested in 7058 // gather-like cases of the form: 7059 // 7060 // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ... 7061 // 7062 // where the loads of "a", the loads of "b", and the subtractions can be 7063 // performed in parallel. It's likely that detecting this pattern in a 7064 // bottom-up phase will be simpler and less costly than building a 7065 // full-blown top-down phase beginning at the consecutive loads. 7066 Changed |= tryToVectorizeList(Bundle, R); 7067 } 7068 } 7069 return Changed; 7070 } 7071 7072 bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) { 7073 bool Changed = false; 7074 // Attempt to sort and vectorize each of the store-groups. 7075 for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e; 7076 ++it) { 7077 if (it->second.size() < 2) 7078 continue; 7079 7080 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " 7081 << it->second.size() << ".\n"); 7082 7083 // Process the stores in chunks of 16. 7084 // TODO: The limit of 16 inhibits greater vectorization factors. 7085 // For example, AVX2 supports v32i8. Increasing this limit, however, 7086 // may cause a significant compile-time increase. 
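  // Illustrative: a group of 40 collected stores is handed to
  // vectorizeStores() by the loop below in chunks of 16, 16 and 8.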
7087 for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) { 7088 unsigned Len = std::min<unsigned>(CE - CI, 16); 7089 Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len), R); 7090 } 7091 } 7092 return Changed; 7093 } 7094 7095 char SLPVectorizer::ID = 0; 7096 7097 static const char lv_name[] = "SLP Vectorizer"; 7098 7099 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false) 7100 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7101 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7102 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7103 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7104 INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 7105 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7106 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7107 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false) 7108 7109 Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); } 7110