//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
    ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                       cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
    MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
                           cl::desc("Attempt to vectorize for this register size in bits"));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
    ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
                             cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// \brief Predicate for the element types that the SLP vectorizer supports.
///
/// The most important things to filter here are types which are invalid in
/// LLVM vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_fp128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}
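
// For illustration (a hypothetical caller, assuming an LLVMContext Ctx):
//   isValidElementType(Type::getFloatTy(Ctx));    // true
//   isValidElementType(Type::getX86_FP80Ty(Ctx)); // false - always scalarized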
/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return false;

    if (BB != I->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (Value *i : VL)
    if (!isa<Constant>(i))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}
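
// Note that a splat requires all lanes to reference the same Value; e.g. the
// (hypothetical) bundle {%a, %a, %a, %a} is a splat, while the repeating
// pattern {%a, %b, %a, %b} is not.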
/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> undef, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> undef, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> undef, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul
static Optional<TargetTransformInfo::ShuffleKind>
isShuffle(ArrayRef<Value *> VL) {
  auto *EI0 = cast<ExtractElementInst>(VL[0]);
  unsigned Size = EI0->getVectorOperandType()->getVectorNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, FirstAlternate, SecondAlternate, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    auto *EI = cast<ExtractElementInst>(VL[I]);
    auto *Vec = EI->getVectorOperand();
    // All vector operands must have the same number of vector elements.
    if (Vec->getType()->getVectorNumElements() != Size)
      return None;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size))
      continue;
    unsigned IntIdx = Idx->getValue().getZExtValue();
    // We can extractelement from undef vector.
    if (isa<UndefValue>(Vec))
      continue;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
    if (Vec1 && Vec2 && Vec != Vec1 && Vec != Vec2)
      return None;
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    // Check the shuffle mode for the current operation.
    if (!Vec1)
      Vec1 = Vec;
    else if (Vec != Vec1)
      Vec2 = Vec;
    // Example: shufflevector A, B, <0,5,2,7>
    // I is even and IntIdx for A == I - FirstAlternate shuffle.
    // I is odd and IntIdx for B == I - FirstAlternate shuffle.
    // Example: shufflevector A, B, <4,1,6,3>
    // I is odd and IntIdx for A == I - SecondAlternate shuffle.
    // I is even and IntIdx for B == I - SecondAlternate shuffle.
    const bool IIsOdd = I & 1;
    const bool IIsEven = !IIsOdd;
    const bool CurrVecIsA = Vec == Vec1;
    const bool CurrVecIsB = !CurrVecIsA;
    ShuffleMode CurrentShuffleMode =
        ((IIsEven && CurrVecIsA) || (IIsOdd && CurrVecIsB)) ? FirstAlternate
                                                            : SecondAlternate;
    // Common mode is not set or the same as the shuffle mode of the current
    // operation - alternate.
    if (CommonShuffleMode == Unknown)
      CommonShuffleMode = CurrentShuffleMode;
    // Common shuffle mode is not the same as the shuffle mode of the current
    // operation - permutation.
    if (CommonShuffleMode != CurrentShuffleMode)
      CommonShuffleMode = Permute;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if ((CommonShuffleMode == FirstAlternate ||
       CommonShuffleMode == SecondAlternate) &&
      Vec2)
    return TargetTransformInfo::SK_Alternate;
  // If Vec2 was never used, we have a permutation of a single vector;
  // otherwise we have a permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}

///\returns Opcode that can be clubbed with \p Op to create an alternate
/// sequence which can later be merged as a ShuffleVector instruction.
static unsigned getAltOpcode(unsigned Op) {
  switch (Op) {
  case Instruction::FAdd:
    return Instruction::FSub;
  case Instruction::FSub:
    return Instruction::FAdd;
  case Instruction::Add:
    return Instruction::Sub;
  case Instruction::Sub:
    return Instruction::Add;
  default:
    return 0;
  }
}

static bool isOdd(unsigned Value) {
  return Value & 1;
}

static bool sameOpcodeOrAlt(unsigned Opcode, unsigned AltOpcode,
                            unsigned CheckedOpcode) {
  return Opcode == CheckedOpcode || AltOpcode == CheckedOpcode;
}
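
// For example, a hypothetical scalar bundle
//   %a0 = fadd float %x0, %y0
//   %a1 = fsub float %x1, %y1
//   %a2 = fadd float %x2, %y2
//   %a3 = fsub float %x3, %y3
// alternates FAdd with its alternate opcode FSub: sameOpcodeOrAlt() accepts
// every element, and the bundle can later be emitted as one vector fadd and
// one vector fsub combined by a shufflevector.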
/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(Value *OpValue, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (!I)
    return OpValue;
  auto *OpInst = cast<Instruction>(OpValue);
  unsigned OpInstOpcode = OpInst->getOpcode();
  unsigned IOpcode = I->getOpcode();
  if (sameOpcodeOrAlt(OpInstOpcode, getAltOpcode(OpInstOpcode), IOpcode))
    return Op;
  return OpValue;
}

namespace {

/// Contains data for the instructions going to be vectorized.
struct RawInstructionsData {
  /// Main Opcode of the instructions going to be vectorized.
  unsigned Opcode = 0;

  /// True if the list of instructions contains some with alternate opcodes.
  bool HasAltOpcodes = false;
};

} // end anonymous namespace

/// Checks the list of the vectorized instructions \p VL and returns info about
/// this list.
static RawInstructionsData getMainOpcode(ArrayRef<Value *> VL) {
  auto *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return {};
  RawInstructionsData Res;
  unsigned Opcode = I0->getOpcode();
  // Walk through the list of the vectorized instructions
  // in order to check its structure described by RawInstructionsData.
  for (unsigned Cnt = 0, E = VL.size(); Cnt != E; ++Cnt) {
    auto *I = dyn_cast<Instruction>(VL[Cnt]);
    if (!I)
      return {};
    if (Opcode != I->getOpcode())
      Res.HasAltOpcodes = true;
  }
  Res.Opcode = Opcode;
  return Res;
}

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main opcode for the list of instructions.
  unsigned Opcode = 0;

  /// Some of the instructions in the list have alternate opcodes.
  bool IsAltShuffle = false;

  InstructionsState() = default;
  InstructionsState(Value *OpValue, unsigned Opcode, bool IsAltShuffle)
      : OpValue(OpValue), Opcode(Opcode), IsAltShuffle(IsAltShuffle) {}
};

} // end anonymous namespace

/// \returns analysis of the instructions in \p VL described in
/// InstructionsState: the opcode with which we suppose the whole list
/// could be vectorized, even if its structure is diverse.
static InstructionsState getSameOpcode(ArrayRef<Value *> VL) {
  auto Res = getMainOpcode(VL);
  unsigned Opcode = Res.Opcode;
  if (!Res.HasAltOpcodes)
    return InstructionsState(VL[0], Opcode, false);
  auto *OpInst = cast<Instruction>(VL[0]);
  unsigned AltOpcode = getAltOpcode(Opcode);
  // Examine each element in the list of instructions VL to determine
  // whether some operations there could be considered as an alternative
  // (for example, as subtraction relates to the addition operation).
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    auto *I = cast<Instruction>(VL[Cnt]);
    unsigned InstOpcode = I->getOpcode();
    if ((Res.HasAltOpcodes &&
         InstOpcode != (isOdd(Cnt) ? AltOpcode : Opcode)) ||
        (!Res.HasAltOpcodes && InstOpcode != Opcode)) {
      return InstructionsState(OpInst, 0, false);
    }
  }
  return InstructionsState(OpInst, Opcode, Res.HasAltOpcodes);
}
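
// For instance, for a hypothetical bundle {add, sub, add, sub}, getSameOpcode()
// returns {OpValue = VL[0], Opcode = Instruction::Add, IsAltShuffle = true};
// {add, sub, sub, add} fails the even/odd alternation check above and yields
// Opcode == 0, i.e. a non-vectorizable list.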
/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static bool matchExtractIndex(Instruction *E, unsigned Idx, unsigned Opcode) {
  assert(Opcode == Instruction::ExtractElement ||
         Opcode == Instruction::ExtractValue);
  if (Opcode == Instruction::ExtractElement) {
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    return CI && CI->getZExtValue() == Idx;
  } else {
    ExtractValueInst *EI = cast<ExtractValueInst>(E);
    return EI->getNumIndices() == 1 && *EI->idx_begin() == Idx;
  }
}

/// \returns True if an in-tree use also needs an extract. This refers to a
/// possible scalar operand in a vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    if (hasVectorInstrinsicScalarOpd(ID, 1)) {
      return (CI->getArgOperand(1) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

namespace llvm {

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC),
        DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize = TTI->getRegisterBitWidth(true);

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// \brief Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractelement instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  int getSpillCost();

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst, taking
  /// into account (and updating, if required) the list of externally used
  /// values stored in \p ExternallyUsedValues.
  void buildTree(ArrayRef<Value *> Roots,
                 ExtraValueToDebugLocsMap &ExternallyUsedValues,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumLoadsWantToKeepOrder = 0;
    NumLoadsWantToChangeOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// \returns true if it is beneficial to reverse the vector order.
  bool shouldReorder() const {
    return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
  }

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();
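
  // A sketch of how the drivers later in this file (e.g. tryToVectorizeList,
  // vectorizeStoreChain) use this public API:
  //   R.buildTree(VL);
  //   if (R.isTreeTinyAndNotFullyVectorizable()) return false; // give up
  //   R.computeMinimumValueSizes();
  //   if (R.getTreeCost() < -SLPCostThreshold)
  //     R.vectorizeTree();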
  /// \returns maximum vector register size as set by TTI or overridden by
  /// cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  /// \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  /// \brief Check if ArrayType or StructType is isomorphic to some VectorType.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable();

  OptimizationRemarkEmitter *getORE() { return ORE; }

private:
  struct TreeEntry;

  /// Checks if all users of \p I are part of the vectorization tree.
  bool areAllUsersVectorized(Instruction *I) const;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, int UserTreeIdx);

  /// \returns True if the ExtractElement/ExtractValue instructions in VL can
  /// be vectorized to use the original vector (or aggregate "bitcast" to a
  /// vector).
  bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue) const;

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This may happen in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL, Value *OpValue) const;

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL, Value *OpValue);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even when the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  /// Reorder commutative operands in an alt shuffle if they result in
  /// vectorized code.
  void reorderAltShuffleOperands(unsigned Opcode, ArrayRef<Value *> VL,
                                 SmallVectorImpl<Value *> &Left,
                                 SmallVectorImpl<Value *> &Right);

  /// Reorder commutative operands to get a better probability of
  /// generating vectorized code.
  void reorderInputsAccordingToOpcode(unsigned Opcode, ArrayRef<Value *> VL,
                                      SmallVectorImpl<Value *> &Left,
                                      SmallVectorImpl<Value *> &Right);
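
  // For example, for a hypothetical commutative bundle
  //   %c0 = add i32 %a0, %b0
  //   %c1 = add i32 %b1, %a1
  // the operands can be reordered into Left = {%a0, %a1} and
  // Right = {%b0, %b1} so that each side is more likely to form a
  // same-opcode or consecutive-load bundle of its own.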
  struct TreeEntry {
    TreeEntry(std::vector<TreeEntry> &Container) : Container(Container) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue = nullptr;

    /// Do we need to gather this sequence?
    bool NeedToGather = false;

    /// Points back to the VectorizableTree.
    ///
    /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has
    /// to be a pointer and needs to be able to initialize the child iterator.
    /// Thus we need a reference back to the container to translate the indices
    /// to entries.
    std::vector<TreeEntry> &Container;

    /// The TreeEntry index containing the user of this entry. We can actually
    /// have multiple users so the data structure is not truly a tree.
    SmallVector<int, 1> UserTreeIndices;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized,
                          int &UserTreeIdx) {
    VectorizableTree.emplace_back(VectorizableTree);
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!getTreeEntry(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      MustGather.insert(VL.begin(), VL.end());
    }

    if (UserTreeIdx >= 0)
      Last->UserTreeIndices.push_back(UserTreeIdx);
    UserTreeIdx = idx;
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  TreeEntry *getTreeEntry(Value *V) {
    auto I = ScalarToTreeEntry.find(V);
    if (I != ScalarToTreeEntry.end())
      return &VectorizableTree[I->second];
    return nullptr;
  }

  const TreeEntry *getTreeEntry(Value *V) const {
    auto I = ScalarToTreeEntry.find(V);
    if (I != ScalarToTreeEntry.end())
      return &VectorizableTree[I->second];
    return nullptr;
  }

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value *, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L)
        : Scalar(S), User(U), Lane(L) {}

    // Which scalar in our function.
    Value *Scalar;

    // Which user uses the scalar.
    llvm::User *User;

    // Which lane does the scalar belong to.
    int Lane;
  };
  using UserList = SmallVector<ExternalUser, 16>;

  /// Checks if two instructions may access the same memory.
  ///
  /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
  /// is invariant in the calling loop.
  bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
                 Instruction *Inst2) {
    // First check if the result is already in the cache.
    AliasCacheKey key = std::make_pair(Inst1, Inst2);
    Optional<bool> &result = AliasCache[key];
    if (result.hasValue()) {
      return result.getValue();
    }
    MemoryLocation Loc2 = getLocation(Inst2, AA);
    bool aliased = true;
    if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
      // Do the alias check.
      aliased = AA->alias(Loc1, Loc2);
    }
    // Store the result in the cache.
    result = aliased;
    return aliased;
  }
  using AliasCacheKey = std::pair<Instruction *, Instruction *>;

  /// Cache for alias results.
  /// TODO: consider moving this to the AliasAnalysis itself.
  DenseMap<AliasCacheKey, Optional<bool>> AliasCache;

  /// Removes an instruction from its block and eventually deletes it.
  /// It's like Instruction::eraseFromParent() except that the actual deletion
  /// is delayed until BoUpSLP is destructed.
  /// This is required to ensure that there are no incorrect collisions in the
  /// AliasCache, which can happen if a new instruction is allocated at the
  /// same address as a previously deleted instruction.
  void eraseInstruction(Instruction *I) {
    I->removeFromParent();
    I->dropAllReferences();
    DeletedInstructions.emplace_back(I);
  }

  /// Temporary store for deleted instructions. Instructions will be deleted
  /// eventually when the BoUpSLP is destructed.
  SmallVector<unique_value, 8> DeletedInstructions;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User). External User
  /// can be nullptr, it means that this Internal Scalar will be used later,
  /// after vectorization.
  UserList ExternalUses;

  /// Values used only by @llvm.assume calls.
  SmallPtrSet<const Value *, 32> EphValues;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;

  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;

  /// Contains all scheduling relevant data for an instruction.
  /// A ScheduleData either represents a single instruction or a member of an
  /// instruction bundle (= a group of instructions which is combined into a
  /// vector instruction).
  struct ScheduleData {
    // The initial value for the dependency counters. It means that the
    // dependencies are not calculated yet.
    enum { InvalidDeps = -1 };

    ScheduleData() = default;

    void init(int BlockSchedulingRegionID, Value *OpVal) {
      FirstInBundle = this;
      NextInBundle = nullptr;
      NextLoadStore = nullptr;
      IsScheduled = false;
      SchedulingRegionID = BlockSchedulingRegionID;
      UnscheduledDepsInBundle = UnscheduledDeps;
      clearDependencies();
      OpValue = OpVal;
    }

    /// Returns true if the dependency information has been calculated.
    bool hasValidDependencies() const { return Dependencies != InvalidDeps; }

    /// Returns true for single instructions and for bundle representatives
    /// (= the head of a bundle).
    bool isSchedulingEntity() const { return FirstInBundle == this; }

    /// Returns true if it represents an instruction bundle and not only a
    /// single instruction.
    bool isPartOfBundle() const {
      return NextInBundle != nullptr || FirstInBundle != this;
    }

    /// Returns true if it is ready for scheduling, i.e. it has no more
    /// unscheduled depending instructions/bundles.
    bool isReady() const {
      assert(isSchedulingEntity() &&
             "can't consider non-scheduling entity for ready list");
      return UnscheduledDepsInBundle == 0 && !IsScheduled;
    }

    /// Modifies the number of unscheduled dependencies, also updating it for
    /// the whole bundle.
    int incrementUnscheduledDeps(int Incr) {
      UnscheduledDeps += Incr;
      return FirstInBundle->UnscheduledDepsInBundle += Incr;
    }
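
    // For example, when one dependency of a bundle member gets scheduled,
    // schedule() below calls incrementUnscheduledDeps(-1) on the member; once
    // the bundle-wide UnscheduledDepsInBundle counter reaches zero, the whole
    // bundle becomes ready (see isReady()).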
    /// Sets the number of unscheduled dependencies to the number of
    /// dependencies.
    void resetUnscheduledDeps() {
      incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
    }

    /// Clears all dependency information.
    void clearDependencies() {
      Dependencies = InvalidDeps;
      resetUnscheduledDeps();
      MemoryDependencies.clear();
    }

    void dump(raw_ostream &os) const {
      if (!isSchedulingEntity()) {
        os << "/ " << *Inst;
      } else if (NextInBundle) {
        os << '[' << *Inst;
        ScheduleData *SD = NextInBundle;
        while (SD) {
          os << ';' << *SD->Inst;
          SD = SD->NextInBundle;
        }
        os << ']';
      } else {
        os << *Inst;
      }
    }

    Instruction *Inst = nullptr;

    /// Points to the head in an instruction bundle (and always to this for
    /// single instructions).
    ScheduleData *FirstInBundle = nullptr;

    /// Single linked list of all instructions in a bundle. Null if it is a
    /// single instruction.
    ScheduleData *NextInBundle = nullptr;

    /// Single linked list of all memory instructions (e.g. load, store, call)
    /// in the block - until the end of the scheduling region.
    ScheduleData *NextLoadStore = nullptr;

    /// The dependent memory instructions.
    /// This list is derived on demand in calculateDependencies().
    SmallVector<ScheduleData *, 4> MemoryDependencies;

    /// This ScheduleData is in the current scheduling region if this matches
    /// the current SchedulingRegionID of BlockScheduling.
    int SchedulingRegionID = 0;

    /// Used for getting a "good" final ordering of instructions.
    int SchedulingPriority = 0;

    /// The number of dependencies. This is the number of users of the
    /// instruction plus the number of dependent memory instructions (if any).
    /// This value is calculated on demand.
    /// If InvalidDeps, the number of dependencies is not calculated yet.
    int Dependencies = InvalidDeps;

    /// The number of dependencies minus the number of dependencies of scheduled
    /// instructions. As soon as this is zero, the instruction/bundle gets ready
    /// for scheduling.
    /// Note that this is negative as long as Dependencies is not calculated.
    int UnscheduledDeps = InvalidDeps;

    /// The sum of UnscheduledDeps in a bundle. Equals UnscheduledDeps for
    /// single instructions.
    int UnscheduledDepsInBundle = InvalidDeps;

    /// True if this instruction is scheduled (or considered as scheduled in the
    /// dry-run).
    bool IsScheduled = false;

    /// The representative value (key) for the current instruction in the
    /// schedule data.
    Value *OpValue = nullptr;
  };
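
  // Illustration: a bundle of three scalars is chained via NextInBundle
  //   SD0 -> SD1 -> SD2
  // with FirstInBundle of all three pointing at SD0, which is therefore the
  // only scheduling entity (isSchedulingEntity()) for the whole bundle.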
#ifndef NDEBUG
  friend inline raw_ostream &operator<<(raw_ostream &os,
                                        const BoUpSLP::ScheduleData &SD) {
    SD.dump(os);
    return os;
  }
#endif

  friend struct GraphTraits<BoUpSLP *>;
  friend struct DOTGraphTraits<BoUpSLP *>;

  /// Contains all scheduling data for a basic block.
  struct BlockScheduling {
    BlockScheduling(BasicBlock *BB)
        : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}

    void clear() {
      ReadyInsts.clear();
      ScheduleStart = nullptr;
      ScheduleEnd = nullptr;
      FirstLoadStoreInRegion = nullptr;
      LastLoadStoreInRegion = nullptr;

      // Reduce the maximum schedule region size by the size of the
      // previous scheduling run.
      ScheduleRegionSizeLimit -= ScheduleRegionSize;
      if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
        ScheduleRegionSizeLimit = MinScheduleRegionSize;
      ScheduleRegionSize = 0;

      // Make a new scheduling region, i.e. all existing ScheduleData is not
      // in the new region yet.
      ++SchedulingRegionID;
    }

    ScheduleData *getScheduleData(Value *V) {
      ScheduleData *SD = ScheduleDataMap[V];
      if (SD && SD->SchedulingRegionID == SchedulingRegionID)
        return SD;
      return nullptr;
    }

    ScheduleData *getScheduleData(Value *V, Value *Key) {
      if (V == Key)
        return getScheduleData(V);
      auto I = ExtraScheduleDataMap.find(V);
      if (I != ExtraScheduleDataMap.end()) {
        ScheduleData *SD = I->second[Key];
        if (SD && SD->SchedulingRegionID == SchedulingRegionID)
          return SD;
      }
      return nullptr;
    }

    bool isInSchedulingRegion(ScheduleData *SD) {
      return SD->SchedulingRegionID == SchedulingRegionID;
    }

    /// Marks an instruction as scheduled and puts all dependent ready
    /// instructions into the ready-list.
    template <typename ReadyListType>
    void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
      SD->IsScheduled = true;
      DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");

      ScheduleData *BundleMember = SD;
      while (BundleMember) {
        if (BundleMember->Inst != BundleMember->OpValue) {
          BundleMember = BundleMember->NextInBundle;
          continue;
        }
        // Handle the def-use chain dependencies.
        for (Use &U : BundleMember->Inst->operands()) {
          auto *I = dyn_cast<Instruction>(U.get());
          if (!I)
            continue;
          doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
            if (OpDef && OpDef->hasValidDependencies() &&
                OpDef->incrementUnscheduledDeps(-1) == 0) {
              // There are no more unscheduled dependencies after
              // decrementing, so we can put the dependent instruction
              // into the ready list.
              ScheduleData *DepBundle = OpDef->FirstInBundle;
              assert(!DepBundle->IsScheduled &&
                     "already scheduled bundle gets ready");
              ReadyList.insert(DepBundle);
              DEBUG(dbgs()
                    << "SLP: gets ready (def): " << *DepBundle << "\n");
            }
          });
        }
        // Handle the memory dependencies.
        for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
          if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (mem): " << *DepBundle
                         << "\n");
          }
        }
        BundleMember = BundleMember->NextInBundle;
      }
    }

    void doForAllOpcodes(Value *V,
                         function_ref<void(ScheduleData *SD)> Action) {
      if (ScheduleData *SD = getScheduleData(V))
        Action(SD);
      auto I = ExtraScheduleDataMap.find(V);
      if (I != ExtraScheduleDataMap.end())
        for (auto &P : I->second)
          if (P.second->SchedulingRegionID == SchedulingRegionID)
            Action(P.second);
    }
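
    // doForAllOpcodes() complements the getScheduleData() overloads above: it
    // applies Action to the plain ScheduleData of V and to every extra entry
    // attached to V in ExtraScheduleDataMap that belongs to the current
    // scheduling region.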
    /// Put all instructions into the ReadyList which are ready for scheduling.
    template <typename ReadyListType>
    void initialFillReadyList(ReadyListType &ReadyList) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
        doForAllOpcodes(I, [&](ScheduleData *SD) {
          if (SD->isSchedulingEntity() && SD->isReady()) {
            ReadyList.insert(SD);
            DEBUG(dbgs() << "SLP: initially in ready list: " << *I << "\n");
          }
        });
      }
    }

    /// Checks if a bundle of instructions can be scheduled, i.e. has no
    /// cyclic dependencies. This is only a dry-run, no instructions are
    /// actually moved at this stage.
    bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, Value *OpValue);

    /// Un-bundles a group of instructions.
    void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);

    /// Allocates schedule data chunk.
    ScheduleData *allocateScheduleDataChunks();

    /// Extends the scheduling region so that V is inside the region.
    /// \returns true if the region size is within the limit.
    bool extendSchedulingRegion(Value *V, Value *OpValue);

    /// Initialize the ScheduleData structures for new instructions in the
    /// scheduling region.
    void initScheduleData(Instruction *FromI, Instruction *ToI,
                          ScheduleData *PrevLoadStore,
                          ScheduleData *NextLoadStore);

    /// Updates the dependency information of a bundle and of all instructions/
    /// bundles which depend on the original bundle.
    void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
                               BoUpSLP *SLP);

    /// Sets all instructions in the scheduling region to un-scheduled.
    void resetSchedule();

    BasicBlock *BB;

    /// Simple memory allocation for ScheduleData.
    std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;

    /// The size of a ScheduleData array in ScheduleDataChunks.
    int ChunkSize;

    /// The allocator position in the current chunk, which is the last entry
    /// of ScheduleDataChunks.
    int ChunkPos;

    /// Attaches ScheduleData to Instruction.
    /// Note that the mapping survives during all vectorization iterations, i.e.
    /// ScheduleData structures are recycled.
    DenseMap<Value *, ScheduleData *> ScheduleDataMap;

    /// Attaches ScheduleData to Instruction with the leading key.
    DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>>
        ExtraScheduleDataMap;

    struct ReadyList : SmallVector<ScheduleData *, 8> {
      void insert(ScheduleData *SD) { push_back(SD); }
    };

    /// The ready-list for scheduling (only used for the dry-run).
    ReadyList ReadyInsts;

    /// The first instruction of the scheduling region.
    Instruction *ScheduleStart = nullptr;

    /// The first instruction _after_ the scheduling region.
    Instruction *ScheduleEnd = nullptr;

    /// The first memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *FirstLoadStoreInRegion = nullptr;

    /// The last memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *LastLoadStoreInRegion = nullptr;

    /// The current size of the scheduling region.
    int ScheduleRegionSize = 0;

    /// The maximum size allowed for the scheduling region.
    int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget;
    /// The ID of the scheduling region. For a new vectorization iteration this
    /// is incremented which "removes" all ScheduleData from the region.
    // Make sure that the initial SchedulingRegionID is greater than the
    // initial SchedulingRegionID in ScheduleData (which is 0).
    int SchedulingRegionID = 1;
  };

  /// Attaches the BlockScheduling structures to basic blocks.
  MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;

  /// Performs the "real" scheduling. Done before vectorization is actually
  /// performed in a basic block.
  void scheduleBlock(BlockScheduling *BS);

  /// List of users to ignore during scheduling and that don't need extracting.
  ArrayRef<Value *> UserIgnoreList;

  // Number of load bundles that contain consecutive loads.
  int NumLoadsWantToKeepOrder = 0;

  // Number of load bundles that contain consecutive loads in reversed order.
  int NumLoadsWantToChangeOrder = 0;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  AssumptionCache *AC;
  DemandedBits *DB;
  const DataLayout *DL;
  OptimizationRemarkEmitter *ORE;

  unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
  unsigned MinVecRegSize; // Set by cl::opt (default: 128).

  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;

  /// A map of scalar integer values to the smallest bit width with which they
  /// can legally be represented. The values map to (width, signed) pairs,
  /// where "width" indicates the minimum bit width and "signed" is True if the
  /// value must be sign-extended, rather than zero-extended, back to its
  /// original width.
  MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
};

} // end namespace slpvectorizer

template <> struct GraphTraits<BoUpSLP *> {
  using TreeEntry = BoUpSLP::TreeEntry;

  /// NodeRef has to be a pointer per the GraphWriter.
  using NodeRef = TreeEntry *;

  /// \brief Add the VectorizableTree to the index iterator to be able to
  /// return TreeEntry pointers.
  struct ChildIteratorType
      : public iterator_adaptor_base<ChildIteratorType,
                                     SmallVector<int, 1>::iterator> {
    std::vector<TreeEntry> &VectorizableTree;

    ChildIteratorType(SmallVector<int, 1>::iterator W,
                      std::vector<TreeEntry> &VT)
        : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}

    NodeRef operator*() { return &VectorizableTree[*I]; }
  };

  static NodeRef getEntryNode(BoUpSLP &R) { return &R.VectorizableTree[0]; }

  static ChildIteratorType child_begin(NodeRef N) {
    return {N->UserTreeIndices.begin(), N->Container};
  }

  static ChildIteratorType child_end(NodeRef N) {
    return {N->UserTreeIndices.end(), N->Container};
  }
  /// For the node iterator we just need to turn the TreeEntry iterator into a
  /// TreeEntry* iterator so that it dereferences to NodeRef.
  using nodes_iterator = pointer_iterator<std::vector<TreeEntry>::iterator>;

  static nodes_iterator nodes_begin(BoUpSLP *R) {
    return nodes_iterator(R->VectorizableTree.begin());
  }

  static nodes_iterator nodes_end(BoUpSLP *R) {
    return nodes_iterator(R->VectorizableTree.end());
  }

  static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
};

template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
  using TreeEntry = BoUpSLP::TreeEntry;

  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
    std::string Str;
    raw_string_ostream OS(Str);
    if (isSplat(Entry->Scalars)) {
      OS << "<splat> " << *Entry->Scalars[0];
      return Str;
    }
    for (auto V : Entry->Scalars) {
      OS << *V;
      if (std::any_of(
              R->ExternalUses.begin(), R->ExternalUses.end(),
              [&](const BoUpSLP::ExternalUser &EU) { return EU.Scalar == V; }))
        OS << " <extract>";
      OS << "\n";
    }
    return Str;
  }

  static std::string getNodeAttributes(const TreeEntry *Entry,
                                       const BoUpSLP *) {
    if (Entry->NeedToGather)
      return "color=red";
    return "";
  }
};

} // end namespace llvm
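
// With -view-slp-tree (ViewSLPTree above), the GraphTraits and DOTGraphTraits
// specializations let GraphWriter render the current VectorizableTree;
// gather-only entries are highlighted in red via getNodeAttributes().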
void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ArrayRef<Value *> UserIgnoreLst) {
  ExtraValueToDebugLocsMap ExternallyUsedValues;
  buildTree(Roots, ExternallyUsedValues, UserIgnoreLst);
}

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ExtraValueToDebugLocsMap &ExternallyUsedValues,
                        ArrayRef<Value *> UserIgnoreLst) {
  deleteTree();
  UserIgnoreList = UserIgnoreLst;
  if (!allSameType(Roots))
    return;
  buildTree_rec(Roots, 0, -1);

  // Collect the values that we need to extract from the tree.
  for (TreeEntry &EIdx : VectorizableTree) {
    TreeEntry *Entry = &EIdx;

    // No need to handle users of gathered values.
    if (Entry->NeedToGather)
      continue;

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // Check if the scalar is externally used as an extra arg.
      auto ExtI = ExternallyUsedValues.find(Scalar);
      if (ExtI != ExternallyUsedValues.end()) {
        DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.emplace_back(Scalar, nullptr, Lane);
      }
      for (User *U : Scalar->users()) {
        DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        Instruction *UserInst = dyn_cast<Instruction>(U);
        if (!UserInst)
          continue;

        // Skip in-tree scalars that become vectors.
        if (TreeEntry *UseEntry = getTreeEntry(U)) {
          Value *UseScalar = UseEntry->Scalars[0];
          // Some in-tree scalars will remain as scalar in vectorized
          // instructions. If that is the case, the one in Lane 0 will
          // be used.
          if (UseScalar != U ||
              !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
            DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
                         << ".\n");
            assert(!UseEntry->NeedToGather && "Bad state");
            continue;
          }
        }

        // Ignore users in the user ignore list.
        if (is_contained(UserIgnoreList, UserInst))
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
      }
    }
  }
}

void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
                            int UserTreeIdx) {
  assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");

  InstructionsState S = getSameOpcode(VL);
  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  // Don't handle vectors.
  if (S.OpValue->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }

  // If all of the operands are identical or constant we have a simple solution.
  if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.Opcode) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Don't vectorize ephemeral values.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (EphValues.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is ephemeral.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }

  // Check if this is a duplicate of another entry.
  if (TreeEntry *E = getTreeEntry(S.OpValue)) {
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }
    }
    // Record the reuse of the tree node. FIXME, currently this is only used to
    // properly draw the graph rather than for the actual vectorization.
    E->UserTreeIndices.push_back(UserTreeIdx);
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    auto *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      continue;
    if (getTreeEntry(I)) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is already in tree.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }

  // If any of the scalars is marked as a value that needs to stay scalar, then
  // we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }
  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  auto *VL0 = cast<Instruction>(S.OpValue);
  BasicBlock *BB = VL0->getParent();

  if (!DT->isReachableFromEntry(BB)) {
    // Don't go into unreachable blocks. They may contain instructions with
    // dependency cycles which confuse the final scheduling.
    DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  // Check that every instruction appears once in this bundle.
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    for (unsigned j = i + 1; j < e; ++j)
      if (VL[i] == VL[j]) {
        DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }

  auto &BSRef = BlocksSchedules[BB];
  if (!BSRef)
    BSRef = llvm::make_unique<BlockScheduling>(BB);

  BlockScheduling &BS = *BSRef.get();

  if (!BS.tryScheduleBundle(VL, this, S.OpValue)) {
    DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
    assert((!BS.getScheduleData(VL0) ||
            !BS.getScheduleData(VL0)->isPartOfBundle()) &&
           "tryScheduleBundle should cancelScheduling on failure");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }
  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  unsigned ShuffleOrOp = S.IsAltShuffle ?
                (unsigned) Instruction::ShuffleVector : S.Opcode;
  switch (ShuffleOrOp) {
    case Instruction::PHI: {
      PHINode *PH = dyn_cast<PHINode>(VL0);

      // Check for terminator values (e.g. invoke).
      for (unsigned j = 0; j < VL.size(); ++j)
        for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
          TerminatorInst *Term = dyn_cast<TerminatorInst>(
              cast<PHINode>(VL[j])->getIncomingValueForBlock(
                  PH->getIncomingBlock(i)));
          if (Term) {
            DEBUG(dbgs()
                  << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
            BS.cancelScheduling(VL, VL0);
            newTreeEntry(VL, false, UserTreeIdx);
            return;
          }
        }

      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL)
          Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock(
              PH->getIncomingBlock(i)));

        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      }
      return;
    }
    case Instruction::ExtractValue:
    case Instruction::ExtractElement: {
      bool Reuse = canReuseExtract(VL, VL0);
      if (Reuse) {
        DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
      } else {
        BS.cancelScheduling(VL, VL0);
      }
      newTreeEntry(VL, Reuse, UserTreeIdx);
      return;
    }
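
    // For a hypothetical bundle
    //   %e0 = extractelement <4 x i32> %v, i32 0
    //   %e1 = extractelement <4 x i32> %v, i32 1
    //   %e2 = extractelement <4 x i32> %v, i32 2
    //   %e3 = extractelement <4 x i32> %v, i32 3
    // canReuseExtract() is true and the original vector %v can be reused
    // directly instead of re-gathering the scalars into a new vector.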
1559 Type *ScalarTy = VL0->getType();
1560
1561 if (DL->getTypeSizeInBits(ScalarTy) !=
1562 DL->getTypeAllocSizeInBits(ScalarTy)) {
1563 BS.cancelScheduling(VL, VL0);
1564 newTreeEntry(VL, false, UserTreeIdx);
1565 DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
1566 return;
1567 }
1568
1569 // Make sure all loads in the bundle are simple - we can't vectorize
1570 // atomic or volatile loads.
1571 for (unsigned i = 0, e = VL.size(); i < e; ++i) {
1572 LoadInst *L = cast<LoadInst>(VL[i]);
1573 if (!L->isSimple()) {
1574 BS.cancelScheduling(VL, VL0);
1575 newTreeEntry(VL, false, UserTreeIdx);
1576 DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
1577 return;
1578 }
1579 }
1580
1581 // Check if the loads are consecutive, reversed, or neither.
1582 // TODO: What we really want is to sort the loads, but for now, check
1583 // the two likely directions.
1584 bool Consecutive = true;
1585 bool ReverseConsecutive = true;
1586 for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
1587 if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
1588 Consecutive = false;
1589 break;
1590 } else {
1591 ReverseConsecutive = false;
1592 }
1593 }
1594
1595 if (Consecutive) {
1596 ++NumLoadsWantToKeepOrder;
1597 newTreeEntry(VL, true, UserTreeIdx);
1598 DEBUG(dbgs() << "SLP: added a vector of loads.\n");
1599 return;
1600 }
1601
1602 // If none of the load pairs were consecutive when checked in order,
1603 // check the reverse order.
1604 if (ReverseConsecutive)
1605 for (unsigned i = VL.size() - 1; i > 0; --i)
1606 if (!isConsecutiveAccess(VL[i], VL[i - 1], *DL, *SE)) {
1607 ReverseConsecutive = false;
1608 break;
1609 }
1610
1611 BS.cancelScheduling(VL, VL0);
1612 newTreeEntry(VL, false, UserTreeIdx);
1613
1614 if (ReverseConsecutive) {
1615 ++NumLoadsWantToChangeOrder;
1616 DEBUG(dbgs() << "SLP: Gathering reversed loads.\n");
1617 } else {
1618 DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
1619 }
1620 return;
1621 }
1622 case Instruction::ZExt:
1623 case Instruction::SExt:
1624 case Instruction::FPToUI:
1625 case Instruction::FPToSI:
1626 case Instruction::FPExt:
1627 case Instruction::PtrToInt:
1628 case Instruction::IntToPtr:
1629 case Instruction::SIToFP:
1630 case Instruction::UIToFP:
1631 case Instruction::Trunc:
1632 case Instruction::FPTrunc:
1633 case Instruction::BitCast: {
1634 Type *SrcTy = VL0->getOperand(0)->getType();
1635 for (unsigned i = 0; i < VL.size(); ++i) {
1636 Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
1637 if (Ty != SrcTy || !isValidElementType(Ty)) {
1638 BS.cancelScheduling(VL, VL0);
1639 newTreeEntry(VL, false, UserTreeIdx);
1640 DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
1641 return;
1642 }
1643 }
1644 newTreeEntry(VL, true, UserTreeIdx);
1645 DEBUG(dbgs() << "SLP: added a vector of casts.\n");
1646
1647 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1648 ValueList Operands;
1649 // Prepare the operand vector.
1650 for (Value *j : VL)
1651 Operands.push_back(cast<Instruction>(j)->getOperand(i));
1652
1653 buildTree_rec(Operands, Depth + 1, UserTreeIdx);
1654 }
1655 return;
1656 }
1657 case Instruction::ICmp:
1658 case Instruction::FCmp: {
1659 // Check that all of the compares have the same predicate.
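// For example (illustrative IR, not from a test case), a bundle mixing
//   %c0 = icmp slt i32 %a, %b
//   %c1 = icmp ult i32 %c, %d
// cannot become a single vector icmp: a vector compare carries exactly one
// predicate for all lanes, so mismatched predicates force a gather below.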
1660 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 1661 Type *ComparedTy = VL0->getOperand(0)->getType(); 1662 for (unsigned i = 1, e = VL.size(); i < e; ++i) { 1663 CmpInst *Cmp = cast<CmpInst>(VL[i]); 1664 if (Cmp->getPredicate() != P0 || 1665 Cmp->getOperand(0)->getType() != ComparedTy) { 1666 BS.cancelScheduling(VL, VL0); 1667 newTreeEntry(VL, false, UserTreeIdx); 1668 DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n"); 1669 return; 1670 } 1671 } 1672 1673 newTreeEntry(VL, true, UserTreeIdx); 1674 DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 1675 1676 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 1677 ValueList Operands; 1678 // Prepare the operand vector. 1679 for (Value *j : VL) 1680 Operands.push_back(cast<Instruction>(j)->getOperand(i)); 1681 1682 buildTree_rec(Operands, Depth + 1, UserTreeIdx); 1683 } 1684 return; 1685 } 1686 case Instruction::Select: 1687 case Instruction::Add: 1688 case Instruction::FAdd: 1689 case Instruction::Sub: 1690 case Instruction::FSub: 1691 case Instruction::Mul: 1692 case Instruction::FMul: 1693 case Instruction::UDiv: 1694 case Instruction::SDiv: 1695 case Instruction::FDiv: 1696 case Instruction::URem: 1697 case Instruction::SRem: 1698 case Instruction::FRem: 1699 case Instruction::Shl: 1700 case Instruction::LShr: 1701 case Instruction::AShr: 1702 case Instruction::And: 1703 case Instruction::Or: 1704 case Instruction::Xor: 1705 newTreeEntry(VL, true, UserTreeIdx); 1706 DEBUG(dbgs() << "SLP: added a vector of bin op.\n"); 1707 1708 // Sort operands of the instructions so that each side is more likely to 1709 // have the same opcode. 1710 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 1711 ValueList Left, Right; 1712 reorderInputsAccordingToOpcode(S.Opcode, VL, Left, Right); 1713 buildTree_rec(Left, Depth + 1, UserTreeIdx); 1714 buildTree_rec(Right, Depth + 1, UserTreeIdx); 1715 return; 1716 } 1717 1718 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 1719 ValueList Operands; 1720 // Prepare the operand vector. 1721 for (Value *j : VL) 1722 Operands.push_back(cast<Instruction>(j)->getOperand(i)); 1723 1724 buildTree_rec(Operands, Depth + 1, UserTreeIdx); 1725 } 1726 return; 1727 1728 case Instruction::GetElementPtr: { 1729 // We don't combine GEPs with complicated (nested) indexing. 1730 for (unsigned j = 0; j < VL.size(); ++j) { 1731 if (cast<Instruction>(VL[j])->getNumOperands() != 2) { 1732 DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 1733 BS.cancelScheduling(VL, VL0); 1734 newTreeEntry(VL, false, UserTreeIdx); 1735 return; 1736 } 1737 } 1738 1739 // We can't combine several GEPs into one vector if they operate on 1740 // different types. 1741 Type *Ty0 = VL0->getOperand(0)->getType(); 1742 for (unsigned j = 0; j < VL.size(); ++j) { 1743 Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType(); 1744 if (Ty0 != CurTy) { 1745 DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n"); 1746 BS.cancelScheduling(VL, VL0); 1747 newTreeEntry(VL, false, UserTreeIdx); 1748 return; 1749 } 1750 } 1751 1752 // We don't combine GEPs with non-constant indexes. 
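// Illustrative sketch of what this check rejects (hypothetical IR):
//   %g0 = getelementptr i32, i32* %p, i64 4   ; ConstantInt index - OK
//   %g1 = getelementptr i32, i32* %p, i64 %i  ; variable index - gathered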
1753 for (unsigned j = 0; j < VL.size(); ++j) {
1754 auto Op = cast<Instruction>(VL[j])->getOperand(1);
1755 if (!isa<ConstantInt>(Op)) {
1756 DEBUG(
1757 dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
1758 BS.cancelScheduling(VL, VL0);
1759 newTreeEntry(VL, false, UserTreeIdx);
1760 return;
1761 }
1762 }
1763
1764 newTreeEntry(VL, true, UserTreeIdx);
1765 DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
1766 for (unsigned i = 0, e = 2; i < e; ++i) {
1767 ValueList Operands;
1768 // Prepare the operand vector.
1769 for (Value *j : VL)
1770 Operands.push_back(cast<Instruction>(j)->getOperand(i));
1771
1772 buildTree_rec(Operands, Depth + 1, UserTreeIdx);
1773 }
1774 return;
1775 }
1776 case Instruction::Store: {
1777 // Check if the stores are consecutive or if we need to swizzle them.
1778 for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
1779 if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
1780 BS.cancelScheduling(VL, VL0);
1781 newTreeEntry(VL, false, UserTreeIdx);
1782 DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
1783 return;
1784 }
1785
1786 newTreeEntry(VL, true, UserTreeIdx);
1787 DEBUG(dbgs() << "SLP: added a vector of stores.\n");
1788
1789 ValueList Operands;
1790 for (Value *j : VL)
1791 Operands.push_back(cast<Instruction>(j)->getOperand(0));
1792
1793 buildTree_rec(Operands, Depth + 1, UserTreeIdx);
1794 return;
1795 }
1796 case Instruction::Call: {
1797 // Check if the calls are all to the same vectorizable intrinsic.
1798 CallInst *CI = cast<CallInst>(VL0);
1799 // Check if this is an Intrinsic call or something that can be
1800 // represented by an intrinsic call.
1801 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
1802 if (!isTriviallyVectorizable(ID)) {
1803 BS.cancelScheduling(VL, VL0);
1804 newTreeEntry(VL, false, UserTreeIdx);
1805 DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
1806 return;
1807 }
1808 Function *Int = CI->getCalledFunction();
1809 Value *A1I = nullptr;
1810 if (hasVectorInstrinsicScalarOpd(ID, 1))
1811 A1I = CI->getArgOperand(1);
1812 for (unsigned i = 1, e = VL.size(); i != e; ++i) {
1813 CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
1814 if (!CI2 || CI2->getCalledFunction() != Int ||
1815 getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
1816 !CI->hasIdenticalOperandBundleSchema(*CI2)) {
1817 BS.cancelScheduling(VL, VL0);
1818 newTreeEntry(VL, false, UserTreeIdx);
1819 DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
1820 << "\n");
1821 return;
1822 }
1823 // ctlz, cttz and powi are special intrinsics whose second argument
1824 // must be the same for them to be vectorized.
1825 if (hasVectorInstrinsicScalarOpd(ID, 1)) {
1826 Value *A1J = CI2->getArgOperand(1);
1827 if (A1I != A1J) {
1828 BS.cancelScheduling(VL, VL0);
1829 newTreeEntry(VL, false, UserTreeIdx);
1830 DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
1831 << " argument " << A1I << "!=" << A1J
1832 << "\n");
1833 return;
1834 }
1835 }
1836 // Verify that the bundle operands are identical between the two calls.
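// Sketch: two otherwise-identical calls where only one carries, say, a
// "deopt" operand bundle, or where the bundle operands differ, must not be
// fused into one vector call, since bundle operands cannot be merged
// lane-wise.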
1837 if (CI->hasOperandBundles() &&
1838 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
1839 CI->op_begin() + CI->getBundleOperandsEndIndex(),
1840 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
1841 BS.cancelScheduling(VL, VL0);
1842 newTreeEntry(VL, false, UserTreeIdx);
1843 DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI << "!="
1844 << *VL[i] << '\n');
1845 return;
1846 }
1847 }
1848
1849 newTreeEntry(VL, true, UserTreeIdx);
1850 for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
1851 ValueList Operands;
1852 // Prepare the operand vector.
1853 for (Value *j : VL) {
1854 CallInst *CI2 = cast<CallInst>(j);
1855 Operands.push_back(CI2->getArgOperand(i));
1856 }
1857 buildTree_rec(Operands, Depth + 1, UserTreeIdx);
1858 }
1859 return;
1860 }
1861 case Instruction::ShuffleVector:
1862 // If this is not an alternating sequence of opcodes (like add-sub),
1863 // do not vectorize this instruction.
1864 if (!S.IsAltShuffle) {
1865 BS.cancelScheduling(VL, VL0);
1866 newTreeEntry(VL, false, UserTreeIdx);
1867 DEBUG(dbgs() << "SLP: ShuffleVector is not vectorized.\n");
1868 return;
1869 }
1870 newTreeEntry(VL, true, UserTreeIdx);
1871 DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
1872
1873 // Reorder operands if reordering would enable vectorization.
1874 if (isa<BinaryOperator>(VL0)) {
1875 ValueList Left, Right;
1876 reorderAltShuffleOperands(S.Opcode, VL, Left, Right);
1877 buildTree_rec(Left, Depth + 1, UserTreeIdx);
1878 buildTree_rec(Right, Depth + 1, UserTreeIdx);
1879 return;
1880 }
1881
1882 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1883 ValueList Operands;
1884 // Prepare the operand vector.
1885 for (Value *j : VL)
1886 Operands.push_back(cast<Instruction>(j)->getOperand(i));
1887
1888 buildTree_rec(Operands, Depth + 1, UserTreeIdx);
1889 }
1890 return;
1891
1892 default:
1893 BS.cancelScheduling(VL, VL0);
1894 newTreeEntry(VL, false, UserTreeIdx);
1895 DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
1896 return;
1897 }
1898 }
1899
1900 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
1901 unsigned N;
1902 Type *EltTy;
1903 auto *ST = dyn_cast<StructType>(T);
1904 if (ST) {
1905 N = ST->getNumElements();
1906 EltTy = *ST->element_begin();
1907 } else {
1908 N = cast<ArrayType>(T)->getNumElements();
1909 EltTy = cast<ArrayType>(T)->getElementType();
1910 }
1911 if (!isValidElementType(EltTy))
1912 return 0;
1913 uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N));
1914 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T))
1915 return 0;
1916 if (ST) {
1917 // Check that struct is homogeneous.
1918 for (const auto *Ty : ST->elements())
1919 if (Ty != EltTy)
1920 return 0;
1921 }
1922 return N;
1923 }
1924
1925 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue) const {
1926 Instruction *E0 = cast<Instruction>(OpValue);
1927 assert(E0->getOpcode() == Instruction::ExtractElement ||
1928 E0->getOpcode() == Instruction::ExtractValue);
1929 assert(E0->getOpcode() == getSameOpcode(VL).Opcode && "Invalid opcode");
1930 // Check if all of the extracts come from the same vector and from the
1931 // correct offset.
1932 Value *Vec = E0->getOperand(0);
1933
1934 // We have to extract from a vector/aggregate with the same number of elements.
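// Illustrative example (assuming a homogeneous aggregate): extractvalue
// uses at indices 0..3 of %agg = load [4 x i32], [4 x i32]* %p can reuse a
// single <4 x i32> load of %p, provided canMapToVector() below accepts the
// type and the scalar load has no other uses.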
1935 unsigned NElts; 1936 if (E0->getOpcode() == Instruction::ExtractValue) { 1937 const DataLayout &DL = E0->getModule()->getDataLayout(); 1938 NElts = canMapToVector(Vec->getType(), DL); 1939 if (!NElts) 1940 return false; 1941 // Check if load can be rewritten as load of vector. 1942 LoadInst *LI = dyn_cast<LoadInst>(Vec); 1943 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 1944 return false; 1945 } else { 1946 NElts = Vec->getType()->getVectorNumElements(); 1947 } 1948 1949 if (NElts != VL.size()) 1950 return false; 1951 1952 // Check that all of the indices extract from the correct offset. 1953 for (unsigned I = 0, E = VL.size(); I < E; ++I) { 1954 Instruction *Inst = cast<Instruction>(VL[I]); 1955 if (!matchExtractIndex(Inst, I, Inst->getOpcode())) 1956 return false; 1957 if (Inst->getOperand(0) != Vec) 1958 return false; 1959 } 1960 1961 return true; 1962 } 1963 1964 bool BoUpSLP::areAllUsersVectorized(Instruction *I) const { 1965 return I->hasOneUse() || 1966 std::all_of(I->user_begin(), I->user_end(), [this](User *U) { 1967 return ScalarToTreeEntry.count(U) > 0; 1968 }); 1969 } 1970 1971 int BoUpSLP::getEntryCost(TreeEntry *E) { 1972 ArrayRef<Value*> VL = E->Scalars; 1973 1974 Type *ScalarTy = VL[0]->getType(); 1975 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 1976 ScalarTy = SI->getValueOperand()->getType(); 1977 else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0])) 1978 ScalarTy = CI->getOperand(0)->getType(); 1979 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 1980 1981 // If we have computed a smaller type for the expression, update VecTy so 1982 // that the costs will be accurate. 1983 if (MinBWs.count(VL[0])) 1984 VecTy = VectorType::get( 1985 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size()); 1986 1987 if (E->NeedToGather) { 1988 if (allConstant(VL)) 1989 return 0; 1990 if (isSplat(VL)) { 1991 return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0); 1992 } 1993 if (getSameOpcode(VL).Opcode == Instruction::ExtractElement) { 1994 Optional<TargetTransformInfo::ShuffleKind> ShuffleKind = isShuffle(VL); 1995 if (ShuffleKind.hasValue()) { 1996 int Cost = TTI->getShuffleCost(ShuffleKind.getValue(), VecTy); 1997 for (auto *V : VL) { 1998 // If all users of instruction are going to be vectorized and this 1999 // instruction itself is not going to be vectorized, consider this 2000 // instruction as dead and remove its cost from the final cost of the 2001 // vectorized tree. 2002 if (areAllUsersVectorized(cast<Instruction>(V)) && 2003 !ScalarToTreeEntry.count(V)) { 2004 auto *IO = cast<ConstantInt>( 2005 cast<ExtractElementInst>(V)->getIndexOperand()); 2006 Cost -= TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, 2007 IO->getZExtValue()); 2008 } 2009 } 2010 return Cost; 2011 } 2012 } 2013 return getGatherCost(E->Scalars); 2014 } 2015 InstructionsState S = getSameOpcode(VL); 2016 assert(S.Opcode && allSameType(VL) && allSameBlock(VL) && "Invalid VL"); 2017 Instruction *VL0 = cast<Instruction>(S.OpValue); 2018 unsigned ShuffleOrOp = S.IsAltShuffle ? 2019 (unsigned) Instruction::ShuffleVector : S.Opcode; 2020 switch (ShuffleOrOp) { 2021 case Instruction::PHI: 2022 return 0; 2023 2024 case Instruction::ExtractValue: 2025 case Instruction::ExtractElement: 2026 if (canReuseExtract(VL, S.OpValue)) { 2027 int DeadCost = 0; 2028 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 2029 Instruction *E = cast<Instruction>(VL[i]); 2030 // If all users are going to be vectorized, instruction can be 2031 // considered as dead. 
2032 // Likewise, if it has only one user, it will be vectorized for sure.
2033 if (areAllUsersVectorized(E))
2034 // Take credit for instruction that will become dead.
2035 DeadCost +=
2036 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
2037 }
2038 return -DeadCost;
2039 }
2040 return getGatherCost(VecTy);
2041
2042 case Instruction::ZExt:
2043 case Instruction::SExt:
2044 case Instruction::FPToUI:
2045 case Instruction::FPToSI:
2046 case Instruction::FPExt:
2047 case Instruction::PtrToInt:
2048 case Instruction::IntToPtr:
2049 case Instruction::SIToFP:
2050 case Instruction::UIToFP:
2051 case Instruction::Trunc:
2052 case Instruction::FPTrunc:
2053 case Instruction::BitCast: {
2054 Type *SrcTy = VL0->getOperand(0)->getType();
2055
2056 // Calculate the cost of this instruction.
2057 int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
2058 VL0->getType(), SrcTy, VL0);
2059
2060 VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
2061 int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy, VL0);
2062 return VecCost - ScalarCost;
2063 }
2064 case Instruction::FCmp:
2065 case Instruction::ICmp:
2066 case Instruction::Select: {
2067 // Calculate the cost of this instruction.
2068 VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
2069 int ScalarCost = VecTy->getNumElements() *
2070 TTI->getCmpSelInstrCost(S.Opcode, ScalarTy, Builder.getInt1Ty(), VL0);
2071 int VecCost = TTI->getCmpSelInstrCost(S.Opcode, VecTy, MaskTy, VL0);
2072 return VecCost - ScalarCost;
2073 }
2074 case Instruction::Add:
2075 case Instruction::FAdd:
2076 case Instruction::Sub:
2077 case Instruction::FSub:
2078 case Instruction::Mul:
2079 case Instruction::FMul:
2080 case Instruction::UDiv:
2081 case Instruction::SDiv:
2082 case Instruction::FDiv:
2083 case Instruction::URem:
2084 case Instruction::SRem:
2085 case Instruction::FRem:
2086 case Instruction::Shl:
2087 case Instruction::LShr:
2088 case Instruction::AShr:
2089 case Instruction::And:
2090 case Instruction::Or:
2091 case Instruction::Xor: {
2092 // Certain instructions can be cheaper to vectorize if they have a
2093 // constant second vector operand.
2094 TargetTransformInfo::OperandValueKind Op1VK =
2095 TargetTransformInfo::OK_AnyValue;
2096 TargetTransformInfo::OperandValueKind Op2VK =
2097 TargetTransformInfo::OK_UniformConstantValue;
2098 TargetTransformInfo::OperandValueProperties Op1VP =
2099 TargetTransformInfo::OP_None;
2100 TargetTransformInfo::OperandValueProperties Op2VP =
2101 TargetTransformInfo::OP_None;
2102
2103 // If all operands are exactly the same ConstantInt then set the
2104 // operand kind to OK_UniformConstantValue.
2105 // If instead not all operands are constants, then set the operand kind
2106 // to OK_AnyValue. If all operands are constants but not the same,
2107 // then set the operand kind to OK_NonUniformConstantValue.
2108 ConstantInt *CInt = nullptr;
2109 for (unsigned i = 0; i < VL.size(); ++i) {
2110 const Instruction *I = cast<Instruction>(VL[i]);
2111 if (!isa<ConstantInt>(I->getOperand(1))) {
2112 Op2VK = TargetTransformInfo::OK_AnyValue;
2113 break;
2114 }
2115 if (i == 0) {
2116 CInt = cast<ConstantInt>(I->getOperand(1));
2117 continue;
2118 }
2119 if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
2120 CInt != cast<ConstantInt>(I->getOperand(1)))
2121 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
2122 }
2123 // FIXME: Currently the cost model modification for division by a power
2124 // of 2 is handled only for X86 and AArch64.
Add support for other targets.
2125 if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt &&
2126 CInt->getValue().isPowerOf2())
2127 Op2VP = TargetTransformInfo::OP_PowerOf2;
2128
2129 SmallVector<const Value *, 4> Operands(VL0->operand_values());
2130 int ScalarCost =
2131 VecTy->getNumElements() *
2132 TTI->getArithmeticInstrCost(S.Opcode, ScalarTy, Op1VK, Op2VK, Op1VP,
2133 Op2VP, Operands);
2134 int VecCost = TTI->getArithmeticInstrCost(S.Opcode, VecTy, Op1VK, Op2VK,
2135 Op1VP, Op2VP, Operands);
2136 return VecCost - ScalarCost;
2137 }
2138 case Instruction::GetElementPtr: {
2139 TargetTransformInfo::OperandValueKind Op1VK =
2140 TargetTransformInfo::OK_AnyValue;
2141 TargetTransformInfo::OperandValueKind Op2VK =
2142 TargetTransformInfo::OK_UniformConstantValue;
2143
2144 int ScalarCost =
2145 VecTy->getNumElements() *
2146 TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
2147 int VecCost =
2148 TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);
2149
2150 return VecCost - ScalarCost;
2151 }
2152 case Instruction::Load: {
2153 // Cost of wide load - cost of scalar loads.
2154 unsigned alignment = cast<LoadInst>(VL0)->getAlignment();
2155 int ScalarLdCost = VecTy->getNumElements() *
2156 TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0, VL0);
2157 int VecLdCost = TTI->getMemoryOpCost(Instruction::Load,
2158 VecTy, alignment, 0, VL0);
2159 return VecLdCost - ScalarLdCost;
2160 }
2161 case Instruction::Store: {
2162 // We know that we can merge the stores. Calculate the cost.
2163 unsigned alignment = cast<StoreInst>(VL0)->getAlignment();
2164 int ScalarStCost = VecTy->getNumElements() *
2165 TTI->getMemoryOpCost(Instruction::Store, ScalarTy, alignment, 0, VL0);
2166 int VecStCost = TTI->getMemoryOpCost(Instruction::Store,
2167 VecTy, alignment, 0, VL0);
2168 return VecStCost - ScalarStCost;
2169 }
2170 case Instruction::Call: {
2171 CallInst *CI = cast<CallInst>(VL0);
2172 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
2173
2174 // Calculate the cost of the scalar and vector calls.
2175 SmallVector<Type*, 4> ScalarTys;
2176 for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op)
2177 ScalarTys.push_back(CI->getArgOperand(op)->getType());
2178
2179 FastMathFlags FMF;
2180 if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
2181 FMF = FPMO->getFastMathFlags();
2182
2183 int ScalarCallCost = VecTy->getNumElements() *
2184 TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF);
2185
2186 SmallVector<Value *, 4> Args(CI->arg_operands());
2187 int VecCallCost = TTI->getIntrinsicInstrCost(ID, CI->getType(), Args, FMF,
2188 VecTy->getNumElements());
2189
2190 DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
2191 << " (" << VecCallCost << "-" << ScalarCallCost << ")"
2192 << " for " << *CI << "\n");
2193
2194 return VecCallCost - ScalarCallCost;
2195 }
2196 case Instruction::ShuffleVector: {
2197 TargetTransformInfo::OperandValueKind Op1VK =
2198 TargetTransformInfo::OK_AnyValue;
2199 TargetTransformInfo::OperandValueKind Op2VK =
2200 TargetTransformInfo::OK_AnyValue;
2201 int ScalarCost = 0;
2202 int VecCost = 0;
2203 for (Value *i : VL) {
2204 Instruction *I = cast<Instruction>(i);
2205 // Every element was checked to be an Instruction when the tree was
2206 // built, and cast<> asserts on mismatch, so no null check is needed.
2207 ScalarCost +=
2208 TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
2209 }
2210 // VecCost is equal to sum of the cost of creating 2 vectors
2211 // and the cost of creating shuffle.
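// Sketch: for a 4-wide add/sub alternation this works out to
//   cost(<4 x Ty> add) + cost(<4 x Ty> sub) + cost(SK_Alternate shuffle),
// compared against the four scalar operations summed into ScalarCost above.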
2212 Instruction *I0 = cast<Instruction>(VL[0]);
2213 VecCost =
2214 TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK);
2215 Instruction *I1 = cast<Instruction>(VL[1]);
2216 VecCost +=
2217 TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK);
2218 VecCost +=
2219 TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0);
2220 return VecCost - ScalarCost;
2221 }
2222 default:
2223 llvm_unreachable("Unknown instruction");
2224 }
2225 }
2226
2227 bool BoUpSLP::isFullyVectorizableTinyTree() {
2228 DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
2229 VectorizableTree.size() << " is fully vectorizable.\n");
2230
2231 // We only handle trees of heights 1 and 2.
2232 if (VectorizableTree.size() == 1 && !VectorizableTree[0].NeedToGather)
2233 return true;
2234
2235 if (VectorizableTree.size() != 2)
2236 return false;
2237
2238 // Handle splat and all-constants stores.
2239 if (!VectorizableTree[0].NeedToGather &&
2240 (allConstant(VectorizableTree[1].Scalars) ||
2241 isSplat(VectorizableTree[1].Scalars)))
2242 return true;
2243
2244 // Gathering cost would be too much for tiny trees.
2245 if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
2246 return false;
2247
2248 return true;
2249 }
2250
2251 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() {
2252 // We can vectorize the tree if its size is greater than or equal to the
2253 // minimum size specified by the MinTreeSize command line option.
2254 if (VectorizableTree.size() >= MinTreeSize)
2255 return false;
2256
2257 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
2258 // can vectorize it if we can prove it fully vectorizable.
2259 if (isFullyVectorizableTinyTree())
2260 return false;
2261
2262 // An empty tree must not have accumulated any external users.
2263 assert((!VectorizableTree.empty() || ExternalUses.empty()) &&
2264 "We shouldn't have any external users");
2265
2266 // Otherwise, we can't vectorize the tree. It is both tiny and not fully
2267 // vectorizable.
2268 return true;
2269 }
2270
2271 int BoUpSLP::getSpillCost() {
2272 // Walk from the bottom of the tree to the top, tracking which values are
2273 // live. When we see a call instruction that is not part of our tree,
2274 // query TTI to see if there is a cost to keeping values live over it
2275 // (for example, if spills and fills are required).
2276 unsigned BundleWidth = VectorizableTree.front().Scalars.size();
2277 int Cost = 0;
2278
2279 SmallPtrSet<Instruction*, 4> LiveValues;
2280 Instruction *PrevInst = nullptr;
2281
2282 for (const auto &N : VectorizableTree) {
2283 Instruction *Inst = dyn_cast<Instruction>(N.Scalars[0]);
2284 if (!Inst)
2285 continue;
2286
2287 if (!PrevInst) {
2288 PrevInst = Inst;
2289 continue;
2290 }
2291
2292 // Update LiveValues.
2293 LiveValues.erase(PrevInst);
2294 for (auto &J : PrevInst->operands()) {
2295 if (isa<Instruction>(&*J) && getTreeEntry(&*J))
2296 LiveValues.insert(cast<Instruction>(&*J));
2297 }
2298
2299 DEBUG(
2300 dbgs() << "SLP: #LV: " << LiveValues.size();
2301 for (auto *X : LiveValues)
2302 dbgs() << " " << X->getName();
2303 dbgs() << ", Looking at ";
2304 Inst->dump();
2305 );
2306
2307 // Now find the sequence of instructions between PrevInst and Inst.
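// Illustrative case (names hypothetical): given
//   %v0 = fadd ... ; call void @opaque() ; %v1 = fadd ...
// each value still live across @opaque is charged below via
// getCostOfKeepingLiveOverCall on its would-be vector type.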
2308 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(), 2309 PrevInstIt = 2310 PrevInst->getIterator().getReverse(); 2311 while (InstIt != PrevInstIt) { 2312 if (PrevInstIt == PrevInst->getParent()->rend()) { 2313 PrevInstIt = Inst->getParent()->rbegin(); 2314 continue; 2315 } 2316 2317 if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) { 2318 SmallVector<Type*, 4> V; 2319 for (auto *II : LiveValues) 2320 V.push_back(VectorType::get(II->getType(), BundleWidth)); 2321 Cost += TTI->getCostOfKeepingLiveOverCall(V); 2322 } 2323 2324 ++PrevInstIt; 2325 } 2326 2327 PrevInst = Inst; 2328 } 2329 2330 return Cost; 2331 } 2332 2333 int BoUpSLP::getTreeCost() { 2334 int Cost = 0; 2335 DEBUG(dbgs() << "SLP: Calculating cost for tree of size " << 2336 VectorizableTree.size() << ".\n"); 2337 2338 unsigned BundleWidth = VectorizableTree[0].Scalars.size(); 2339 2340 for (TreeEntry &TE : VectorizableTree) { 2341 int C = getEntryCost(&TE); 2342 DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with " 2343 << *TE.Scalars[0] << ".\n"); 2344 Cost += C; 2345 } 2346 2347 SmallSet<Value *, 16> ExtractCostCalculated; 2348 int ExtractCost = 0; 2349 for (ExternalUser &EU : ExternalUses) { 2350 // We only add extract cost once for the same scalar. 2351 if (!ExtractCostCalculated.insert(EU.Scalar).second) 2352 continue; 2353 2354 // Uses by ephemeral values are free (because the ephemeral value will be 2355 // removed prior to code generation, and so the extraction will be 2356 // removed as well). 2357 if (EphValues.count(EU.User)) 2358 continue; 2359 2360 // If we plan to rewrite the tree in a smaller type, we will need to sign 2361 // extend the extracted value back to the original type. Here, we account 2362 // for the extract and the added cost of the sign extend if needed. 2363 auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth); 2364 auto *ScalarRoot = VectorizableTree[0].Scalars[0]; 2365 if (MinBWs.count(ScalarRoot)) { 2366 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 2367 auto Extend = 2368 MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt; 2369 VecTy = VectorType::get(MinTy, BundleWidth); 2370 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), 2371 VecTy, EU.Lane); 2372 } else { 2373 ExtractCost += 2374 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane); 2375 } 2376 } 2377 2378 int SpillCost = getSpillCost(); 2379 Cost += SpillCost + ExtractCost; 2380 2381 std::string Str; 2382 { 2383 raw_string_ostream OS(Str); 2384 OS << "SLP: Spill Cost = " << SpillCost << ".\n" 2385 << "SLP: Extract Cost = " << ExtractCost << ".\n" 2386 << "SLP: Total Cost = " << Cost << ".\n"; 2387 } 2388 DEBUG(dbgs() << Str); 2389 2390 if (ViewSLPTree) 2391 ViewGraph(this, "SLP" + F->getName(), false, Str); 2392 2393 return Cost; 2394 } 2395 2396 int BoUpSLP::getGatherCost(Type *Ty) { 2397 int Cost = 0; 2398 for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i) 2399 Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i); 2400 return Cost; 2401 } 2402 2403 int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) { 2404 // Find the type of the operands in VL. 2405 Type *ScalarTy = VL[0]->getType(); 2406 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 2407 ScalarTy = SI->getValueOperand()->getType(); 2408 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 2409 // Find the cost of inserting/extracting values from the vector. 
2410 return getGatherCost(VecTy);
2411 }
2412
2413 // Reorder commutative operations in alternate shuffle if the resulting vectors
2414 // are consecutive loads. This would allow us to vectorize the tree.
2415 // If we have something like:
2416 // load a[0] - load b[0]
2417 // load b[1] + load a[1]
2418 // load a[2] - load b[2]
2419 // load a[3] + load b[3]
2420 // reordering the second pair (load b[1], load a[1]) would allow us to
2421 // vectorize this code.
2422 void BoUpSLP::reorderAltShuffleOperands(unsigned Opcode, ArrayRef<Value *> VL,
2423 SmallVectorImpl<Value *> &Left,
2424 SmallVectorImpl<Value *> &Right) {
2425 // Push left and right operands of binary operation into Left and Right.
2426 unsigned AltOpcode = getAltOpcode(Opcode);
2427 (void)AltOpcode;
2428 for (Value *V : VL) {
2429 auto *I = cast<Instruction>(V);
2430 assert(sameOpcodeOrAlt(Opcode, AltOpcode, I->getOpcode()) &&
2431 "Incorrect instruction in vector");
2432 Left.push_back(I->getOperand(0));
2433 Right.push_back(I->getOperand(1));
2434 }
2435
2436 // Reorder if we have a commutative operation and consecutive accesses
2437 // are on either side of the alternate instructions.
2438 for (unsigned j = 0; j < VL.size() - 1; ++j) {
2439 if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
2440 if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
2441 Instruction *VL1 = cast<Instruction>(VL[j]);
2442 Instruction *VL2 = cast<Instruction>(VL[j + 1]);
2443 if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
2444 std::swap(Left[j], Right[j]);
2445 continue;
2446 } else if (VL2->isCommutative() &&
2447 isConsecutiveAccess(L, L1, *DL, *SE)) {
2448 std::swap(Left[j + 1], Right[j + 1]);
2449 continue;
2450 }
2451 // else unchanged
2452 }
2453 }
2454 if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
2455 if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
2456 Instruction *VL1 = cast<Instruction>(VL[j]);
2457 Instruction *VL2 = cast<Instruction>(VL[j + 1]);
2458 if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) {
2459 std::swap(Left[j], Right[j]);
2460 continue;
2461 } else if (VL2->isCommutative() &&
2462 isConsecutiveAccess(L, L1, *DL, *SE)) {
2463 std::swap(Left[j + 1], Right[j + 1]);
2464 continue;
2465 }
2466 // else unchanged
2467 }
2468 }
2469 }
2470 }
2471
2472 // Return true if I should be commuted before adding its left and right
2473 // operands to the arrays Left and Right.
2474 //
2475 // The vectorizer is trying either to have all elements on one side be
2476 // instructions with the same opcode, to enable further vectorization, or
2477 // to have a splat, to lower the vectorization cost.
2478 static bool shouldReorderOperands(
2479 int i, unsigned Opcode, Instruction &I, ArrayRef<Value *> Left,
2480 ArrayRef<Value *> Right, bool AllSameOpcodeLeft, bool AllSameOpcodeRight,
2481 bool SplatLeft, bool SplatRight, Value *&VLeft, Value *&VRight) {
2482 VLeft = I.getOperand(0);
2483 VRight = I.getOperand(1);
2484 // If we have "SplatRight", try to see if commuting is needed to preserve it.
2485 if (SplatRight) {
2486 if (VRight == Right[i - 1])
2487 // Preserve SplatRight
2488 return false;
2489 if (VLeft == Right[i - 1]) {
2490 // Commuting would preserve SplatRight, but we don't want to break
2491 // SplatLeft either, i.e. preserve the original order if possible.
2492 // (FIXME: why do we care?)
2493 if (SplatLeft && VLeft == Left[i - 1])
2494 return false;
2495 return true;
2496 }
2497 }
2498 // Symmetrically handle SplatLeft.
2499 if (SplatLeft) {
2500 if (VLeft == Left[i - 1])
2501 // Preserve SplatLeft
2502 return false;
2503 if (VRight == Left[i - 1])
2504 return true;
2505 }
2506
2507 Instruction *ILeft = dyn_cast<Instruction>(VLeft);
2508 Instruction *IRight = dyn_cast<Instruction>(VRight);
2509
2510 // If we have "AllSameOpcodeRight", try to see if the left operand preserves
2511 // it and not the right; in this case we want to commute.
2512 if (AllSameOpcodeRight) {
2513 unsigned RightPrevOpcode = cast<Instruction>(Right[i - 1])->getOpcode();
2514 if (IRight && RightPrevOpcode == IRight->getOpcode())
2515 // Do not commute, a match on the right preserves AllSameOpcodeRight
2516 return false;
2517 if (ILeft && RightPrevOpcode == ILeft->getOpcode()) {
2518 // We have a match and may want to commute, but first check if there is
2519 // not also a match on the existing operands on the Left to preserve
2520 // AllSameOpcodeLeft, i.e. preserve the original order if possible.
2521 // (FIXME: why do we care?)
2522 if (AllSameOpcodeLeft && ILeft &&
2523 cast<Instruction>(Left[i - 1])->getOpcode() == ILeft->getOpcode())
2524 return false;
2525 return true;
2526 }
2527 }
2528 // Symmetrically handle AllSameOpcodeLeft.
2529 if (AllSameOpcodeLeft) {
2530 unsigned LeftPrevOpcode = cast<Instruction>(Left[i - 1])->getOpcode();
2531 if (ILeft && LeftPrevOpcode == ILeft->getOpcode())
2532 return false;
2533 if (IRight && LeftPrevOpcode == IRight->getOpcode())
2534 return true;
2535 }
2536 return false;
2537 }
2538
2539 void BoUpSLP::reorderInputsAccordingToOpcode(unsigned Opcode,
2540 ArrayRef<Value *> VL,
2541 SmallVectorImpl<Value *> &Left,
2542 SmallVectorImpl<Value *> &Right) {
2543 if (VL.empty())
2544 return;
2545 // Peel the first iteration out of the loop since there's nothing
2546 // interesting to do anyway and it simplifies the checks in the loop.
2547 auto *I = cast<Instruction>(VL[0]);
2548 Value *VLeft = I->getOperand(0);
2549 Value *VRight = I->getOperand(1);
2550 if (!isa<Instruction>(VRight) && isa<Instruction>(VLeft))
2551 // Favor having an instruction on the right. FIXME: why?
2552 std::swap(VLeft, VRight);
2553 Left.push_back(VLeft);
2554 Right.push_back(VRight);
2555
2556 // Keep track if we have instructions with all the same opcode on one side.
2557 bool AllSameOpcodeLeft = isa<Instruction>(Left[0]);
2558 bool AllSameOpcodeRight = isa<Instruction>(Right[0]);
2559 // Keep track if we have one side with all the same value (broadcast).
2560 bool SplatLeft = true;
2561 bool SplatRight = true;
2562
2563 for (unsigned i = 1, e = VL.size(); i != e; ++i) {
2564 Instruction *I = cast<Instruction>(VL[i]);
2565 assert(((I->getOpcode() == Opcode && I->isCommutative()) ||
2566 (I->getOpcode() != Opcode && Instruction::isCommutative(Opcode))) &&
2567 "Can only process commutative instruction");
2568 // Commute to favor either a splat or maximizing having the same opcodes on
2569 // one side.
2570 Value *VLeft;
2571 Value *VRight;
2572 if (shouldReorderOperands(i, Opcode, *I, Left, Right, AllSameOpcodeLeft,
2573 AllSameOpcodeRight, SplatLeft, SplatRight, VLeft,
2574 VRight)) {
2575 Left.push_back(VRight);
2576 Right.push_back(VLeft);
2577 } else {
2578 Left.push_back(VLeft);
2579 Right.push_back(VRight);
2580 }
2581 // Update Splat* and AllSameOpcode* after the insertion.
2582 SplatRight = SplatRight && (Right[i - 1] == Right[i]);
2583 SplatLeft = SplatLeft && (Left[i - 1] == Left[i]);
2584 AllSameOpcodeLeft = AllSameOpcodeLeft && isa<Instruction>(Left[i]) &&
2585 (cast<Instruction>(Left[i - 1])->getOpcode() ==
2586 cast<Instruction>(Left[i])->getOpcode());
2587 AllSameOpcodeRight = AllSameOpcodeRight && isa<Instruction>(Right[i]) &&
2588 (cast<Instruction>(Right[i - 1])->getOpcode() ==
2589 cast<Instruction>(Right[i])->getOpcode());
2590 }
2591
2592 // If one operand ends up being a broadcast, return this operand order.
2593 if (SplatRight || SplatLeft)
2594 return;
2595
2596 // Finally check if we can get a longer vectorizable chain by reordering
2597 // without breaking the good operand order detected above.
2598 // E.g., if we have something like:
2599 // load a[0] load b[0]
2600 // load b[1] load a[1]
2601 // load a[2] load b[2]
2602 // load a[3] load b[3]
2603 // reordering the second pair (load b[1], load a[1]) would allow us to
2604 // vectorize this code and we still retain the AllSameOpcode property.
2605 // FIXME: This load reordering might break AllSameOpcode in some rare cases
2606 // such as:
2607 // add a[0],c[0] load b[0]
2608 // add a[1],c[2] load b[1]
2609 // b[2] load b[2]
2610 // add a[3],c[3] load b[3]
2611 for (unsigned j = 0; j < VL.size() - 1; ++j) {
2612 if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
2613 if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
2614 if (isConsecutiveAccess(L, L1, *DL, *SE)) {
2615 std::swap(Left[j + 1], Right[j + 1]);
2616 continue;
2617 }
2618 }
2619 }
2620 if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
2621 if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
2622 if (isConsecutiveAccess(L, L1, *DL, *SE)) {
2623 std::swap(Left[j + 1], Right[j + 1]);
2624 continue;
2625 }
2626 }
2627 }
2628 // else unchanged
2629 }
2630 }
2631
2632 void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL, Value *OpValue) {
2633 // Get the basic block this bundle is in. All instructions in the bundle
2634 // should be in this block.
2635 auto *Front = cast<Instruction>(OpValue);
2636 auto *BB = Front->getParent();
2637 const unsigned Opcode = cast<Instruction>(OpValue)->getOpcode();
2638 const unsigned AltOpcode = getAltOpcode(Opcode);
2639 assert(llvm::all_of(VL, [=](Value *V) -> bool {
2640 return !sameOpcodeOrAlt(Opcode, AltOpcode,
2641 cast<Instruction>(V)->getOpcode()) ||
2642 cast<Instruction>(V)->getParent() == BB;
2643 }));
2644
2645 // The last instruction in the bundle in program order.
2646 Instruction *LastInst = nullptr;
2647
2648 // Find the last instruction. The common case should be that BB has been
2649 // scheduled, and the last instruction is VL.back(). So we start with
2650 // VL.back() and iterate over schedule data until we reach the end of the
2651 // bundle. The end of the bundle is marked by null ScheduleData.
2652 if (BlocksSchedules.count(BB)) {
2653 auto *Bundle =
2654 BlocksSchedules[BB]->getScheduleData(isOneOf(OpValue, VL.back()));
2655 if (Bundle && Bundle->isPartOfBundle())
2656 for (; Bundle; Bundle = Bundle->NextInBundle)
2657 if (Bundle->OpValue == Bundle->Inst)
2658 LastInst = Bundle->Inst;
2659 }
2660
2661 // LastInst can still be null at this point if there's either not an entry
2662 // for BB in BlocksSchedules or there's no ScheduleData available for
2663 // VL.back(). This can be the case if buildTree_rec aborts for various
2664 // reasons (e.g., the maximum recursion depth is reached, the maximum region
2665 // size is reached, etc.).
ScheduleData is initialized in the scheduling
2666 // "dry-run".
2667 //
2668 // If this happens, we can still find the last instruction by brute force. We
2669 // iterate forwards from Front (inclusive) until we either see all
2670 // instructions in the bundle or reach the end of the block. If Front is the
2671 // last instruction in program order, LastInst will be set to Front, and we
2672 // will visit all the remaining instructions in the block.
2673 //
2674 // One of the reasons we exit early from buildTree_rec is to place an upper
2675 // bound on compile-time. Thus, taking an additional compile-time hit here is
2676 // not ideal. However, this should be exceedingly rare since it requires that
2677 // we both exit early from buildTree_rec and that the bundle be out-of-order
2678 // (causing us to iterate all the way to the end of the block).
2679 if (!LastInst) {
2680 SmallPtrSet<Value *, 16> Bundle(VL.begin(), VL.end());
2681 for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
2682 if (Bundle.erase(&I) && sameOpcodeOrAlt(Opcode, AltOpcode, I.getOpcode()))
2683 LastInst = &I;
2684 if (Bundle.empty())
2685 break;
2686 }
2687 }
2688
2689 // Set the insertion point after the last instruction in the bundle. Set the
2690 // debug location to Front.
2691 Builder.SetInsertPoint(BB, ++LastInst->getIterator());
2692 Builder.SetCurrentDebugLocation(Front->getDebugLoc());
2693 }
2694
2695 Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
2696 Value *Vec = UndefValue::get(Ty);
2697 // Generate the 'InsertElement' instruction.
2698 for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
2699 Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
2700 if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
2701 GatherSeq.insert(Insrt);
2702 CSEBlocks.insert(Insrt->getParent());
2703
2704 // Add to our 'need-to-extract' list.
2705 if (TreeEntry *E = getTreeEntry(VL[i])) {
2706 // Find which lane we need to extract.
2707 int FoundLane = -1;
2708 for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
2709 // Is this the lane of the scalar that we are looking for?
2710 if (E->Scalars[Lane] == VL[i]) {
2711 FoundLane = Lane;
2712 break;
2713 }
2714 }
2715 assert(FoundLane >= 0 && "Could not find the correct lane");
2716 ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
2717 }
2718 }
2719 }
2720
2721 return Vec;
2722 }
2723
2724 Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL, Value *OpValue) const {
2725 if (const TreeEntry *En = getTreeEntry(OpValue)) {
2726 if (En->isSame(VL) && En->VectorizedValue)
2727 return En->VectorizedValue;
2728 }
2729 return nullptr;
2730 }
2731
2732 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
2733 InstructionsState S = getSameOpcode(VL);
2734 if (S.Opcode) {
2735 if (TreeEntry *E = getTreeEntry(S.OpValue)) {
2736 if (E->isSame(VL))
2737 return vectorizeTree(E);
2738 }
2739 }
2740
2741 Type *ScalarTy = S.OpValue->getType();
2742 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
2743 ScalarTy = SI->getValueOperand()->getType();
2744 VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
2745
2746 return Gather(VL, VecTy);
2747 }
2748
2749 Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
2750 IRBuilder<>::InsertPointGuard Guard(Builder);
2751
2752 if (E->VectorizedValue) {
2753 DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
2754 return E->VectorizedValue;
2755 }
2756
2757 InstructionsState S = getSameOpcode(E->Scalars);
2758 Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
2759 Type *ScalarTy = VL0->getType();
2760 if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
2761 ScalarTy = SI->getValueOperand()->getType();
2762 VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());
2763
2764 if (E->NeedToGather) {
2765 setInsertPointAfterBundle(E->Scalars, VL0);
2766 auto *V = Gather(E->Scalars, VecTy);
2767 E->VectorizedValue = V;
2768 return V;
2769 }
2770
2771 unsigned ShuffleOrOp = S.IsAltShuffle ?
2772 (unsigned) Instruction::ShuffleVector : S.Opcode;
2773 switch (ShuffleOrOp) {
2774 case Instruction::PHI: {
2775 PHINode *PH = cast<PHINode>(VL0);
2776 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
2777 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
2778 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
2779 E->VectorizedValue = NewPhi;
2780
2781 // PHINodes may have multiple entries from the same block. We want to
2782 // visit every block once.
2783 SmallSet<BasicBlock*, 4> VisitedBBs;
2784
2785 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
2786 ValueList Operands;
2787 BasicBlock *IBB = PH->getIncomingBlock(i);
2788
2789 if (!VisitedBBs.insert(IBB).second) {
2790 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
2791 continue;
2792 }
2793
2794 // Prepare the operand vector.
2795 for (Value *V : E->Scalars)
2796 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(IBB));
2797
2798 Builder.SetInsertPoint(IBB->getTerminator());
2799 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
2800 Value *Vec = vectorizeTree(Operands);
2801 NewPhi->addIncoming(Vec, IBB);
2802 }
2803
2804 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
2805 "Invalid number of incoming values");
2806 return NewPhi;
2807 }
2808
2809 case Instruction::ExtractElement: {
2810 if (canReuseExtract(E->Scalars, VL0)) {
2811 Value *V = VL0->getOperand(0);
2812 E->VectorizedValue = V;
2813 return V;
2814 }
2815 setInsertPointAfterBundle(E->Scalars, VL0);
2816 auto *V = Gather(E->Scalars, VecTy);
2817 E->VectorizedValue = V;
2818 return V;
2819 }
2820 case Instruction::ExtractValue: {
2821 if (canReuseExtract(E->Scalars, VL0)) {
2822 LoadInst *LI = cast<LoadInst>(VL0->getOperand(0));
2823 Builder.SetInsertPoint(LI);
2824 PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
2825 Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
2826 LoadInst *V = Builder.CreateAlignedLoad(Ptr, LI->getAlignment());
2827 E->VectorizedValue = V;
2828 return propagateMetadata(V, E->Scalars);
2829 }
2830 setInsertPointAfterBundle(E->Scalars, VL0);
2831 auto *V = Gather(E->Scalars, VecTy);
2832 E->VectorizedValue = V;
2833 return V;
2834 }
2835 case Instruction::ZExt:
2836 case Instruction::SExt:
2837 case Instruction::FPToUI:
2838 case Instruction::FPToSI:
2839 case Instruction::FPExt:
2840 case Instruction::PtrToInt:
2841 case Instruction::IntToPtr:
2842 case Instruction::SIToFP:
2843 case Instruction::UIToFP:
2844 case Instruction::Trunc:
2845 case Instruction::FPTrunc:
2846 case Instruction::BitCast: {
2847 ValueList INVL;
2848 for (Value *V : E->Scalars)
2849 INVL.push_back(cast<Instruction>(V)->getOperand(0));
2850
2851 setInsertPointAfterBundle(E->Scalars, VL0);
2852
2853 Value *InVec = vectorizeTree(INVL);
2854
2855 if (Value *V = alreadyVectorized(E->Scalars, VL0))
2856 return V;
2857
2858 CastInst *CI = cast<CastInst>(VL0);
2859 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
2860 E->VectorizedValue = V;
2861 ++NumVectorInstructions;
2862 return V;
2863 }
2864 case Instruction::FCmp:
2865 case Instruction::ICmp: {
2866 ValueList LHSV, RHSV;
2867 for (Value *V : E->Scalars) {
2868 LHSV.push_back(cast<Instruction>(V)->getOperand(0));
2869 RHSV.push_back(cast<Instruction>(V)->getOperand(1));
2870 }
2871
2872 setInsertPointAfterBundle(E->Scalars, VL0);
2873
2874 Value *L = vectorizeTree(LHSV);
2875 Value *R = vectorizeTree(RHSV);
2876
2877 if (Value *V = alreadyVectorized(E->Scalars, VL0))
2878 return V;
2879
2880 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
2881 Value *V;
2882 if (S.Opcode == Instruction::FCmp)
2883 V = Builder.CreateFCmp(P0, L, R);
2884 else
2885 V = Builder.CreateICmp(P0, L, R);
2886
2887 E->VectorizedValue = V;
2888 propagateIRFlags(E->VectorizedValue, E->Scalars, VL0);
2889 ++NumVectorInstructions;
2890 return V;
2891 }
2892 case Instruction::Select: {
2893 ValueList TrueVec, FalseVec, CondVec;
2894 for (Value *V : E->Scalars) {
2895 CondVec.push_back(cast<Instruction>(V)->getOperand(0));
2896 TrueVec.push_back(cast<Instruction>(V)->getOperand(1));
2897 FalseVec.push_back(cast<Instruction>(V)->getOperand(2));
2898 }
2899
2900 setInsertPointAfterBundle(E->Scalars, VL0);
2901
2902 Value *Cond = vectorizeTree(CondVec);
2903 Value *True = vectorizeTree(TrueVec);
2904 Value *False =
vectorizeTree(FalseVec); 2905 2906 if (Value *V = alreadyVectorized(E->Scalars, VL0)) 2907 return V; 2908 2909 Value *V = Builder.CreateSelect(Cond, True, False); 2910 E->VectorizedValue = V; 2911 ++NumVectorInstructions; 2912 return V; 2913 } 2914 case Instruction::Add: 2915 case Instruction::FAdd: 2916 case Instruction::Sub: 2917 case Instruction::FSub: 2918 case Instruction::Mul: 2919 case Instruction::FMul: 2920 case Instruction::UDiv: 2921 case Instruction::SDiv: 2922 case Instruction::FDiv: 2923 case Instruction::URem: 2924 case Instruction::SRem: 2925 case Instruction::FRem: 2926 case Instruction::Shl: 2927 case Instruction::LShr: 2928 case Instruction::AShr: 2929 case Instruction::And: 2930 case Instruction::Or: 2931 case Instruction::Xor: { 2932 ValueList LHSVL, RHSVL; 2933 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) 2934 reorderInputsAccordingToOpcode(S.Opcode, E->Scalars, LHSVL, 2935 RHSVL); 2936 else 2937 for (Value *V : E->Scalars) { 2938 auto *I = cast<Instruction>(V); 2939 LHSVL.push_back(I->getOperand(0)); 2940 RHSVL.push_back(I->getOperand(1)); 2941 } 2942 2943 setInsertPointAfterBundle(E->Scalars, VL0); 2944 2945 Value *LHS = vectorizeTree(LHSVL); 2946 Value *RHS = vectorizeTree(RHSVL); 2947 2948 if (Value *V = alreadyVectorized(E->Scalars, VL0)) 2949 return V; 2950 2951 Value *V = Builder.CreateBinOp( 2952 static_cast<Instruction::BinaryOps>(S.Opcode), LHS, RHS); 2953 E->VectorizedValue = V; 2954 propagateIRFlags(E->VectorizedValue, E->Scalars, VL0); 2955 ++NumVectorInstructions; 2956 2957 if (Instruction *I = dyn_cast<Instruction>(V)) 2958 return propagateMetadata(I, E->Scalars); 2959 2960 return V; 2961 } 2962 case Instruction::Load: { 2963 // Loads are inserted at the head of the tree because we don't want to 2964 // sink them all the way down past store instructions. 2965 setInsertPointAfterBundle(E->Scalars, VL0); 2966 2967 LoadInst *LI = cast<LoadInst>(VL0); 2968 Type *ScalarLoadTy = LI->getType(); 2969 unsigned AS = LI->getPointerAddressSpace(); 2970 2971 Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(), 2972 VecTy->getPointerTo(AS)); 2973 2974 // The pointer operand uses an in-tree scalar so we add the new BitCast to 2975 // ExternalUses list to make sure that an extract will be generated in the 2976 // future. 
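// (Sketch of why this matters: if the pointer operand is itself one of the
// vectorized scalars, that scalar will be erased once the tree is
// vectorized; recording the (scalar, BitCast) pair makes the extraction
// logic emit an extractelement that keeps the BitCast's operand valid.)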
2977 Value *PO = LI->getPointerOperand(); 2978 if (getTreeEntry(PO)) 2979 ExternalUses.push_back(ExternalUser(PO, cast<User>(VecPtr), 0)); 2980 2981 unsigned Alignment = LI->getAlignment(); 2982 LI = Builder.CreateLoad(VecPtr); 2983 if (!Alignment) { 2984 Alignment = DL->getABITypeAlignment(ScalarLoadTy); 2985 } 2986 LI->setAlignment(Alignment); 2987 E->VectorizedValue = LI; 2988 ++NumVectorInstructions; 2989 return propagateMetadata(LI, E->Scalars); 2990 } 2991 case Instruction::Store: { 2992 StoreInst *SI = cast<StoreInst>(VL0); 2993 unsigned Alignment = SI->getAlignment(); 2994 unsigned AS = SI->getPointerAddressSpace(); 2995 2996 ValueList ScalarStoreValues; 2997 for (Value *V : E->Scalars) 2998 ScalarStoreValues.push_back(cast<StoreInst>(V)->getValueOperand()); 2999 3000 setInsertPointAfterBundle(E->Scalars, VL0); 3001 3002 Value *VecValue = vectorizeTree(ScalarStoreValues); 3003 Value *ScalarPtr = SI->getPointerOperand(); 3004 Value *VecPtr = Builder.CreateBitCast(ScalarPtr, VecTy->getPointerTo(AS)); 3005 StoreInst *S = Builder.CreateStore(VecValue, VecPtr); 3006 3007 // The pointer operand uses an in-tree scalar, so add the new BitCast to 3008 // ExternalUses to make sure that an extract will be generated in the 3009 // future. 3010 if (getTreeEntry(ScalarPtr)) 3011 ExternalUses.push_back(ExternalUser(ScalarPtr, cast<User>(VecPtr), 0)); 3012 3013 if (!Alignment) 3014 Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType()); 3015 3016 S->setAlignment(Alignment); 3017 E->VectorizedValue = S; 3018 ++NumVectorInstructions; 3019 return propagateMetadata(S, E->Scalars); 3020 } 3021 case Instruction::GetElementPtr: { 3022 setInsertPointAfterBundle(E->Scalars, VL0); 3023 3024 ValueList Op0VL; 3025 for (Value *V : E->Scalars) 3026 Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0)); 3027 3028 Value *Op0 = vectorizeTree(Op0VL); 3029 3030 std::vector<Value *> OpVecs; 3031 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e; 3032 ++j) { 3033 ValueList OpVL; 3034 for (Value *V : E->Scalars) 3035 OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j)); 3036 3037 Value *OpVec = vectorizeTree(OpVL); 3038 OpVecs.push_back(OpVec); 3039 } 3040 3041 Value *V = Builder.CreateGEP( 3042 cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs); 3043 E->VectorizedValue = V; 3044 ++NumVectorInstructions; 3045 3046 if (Instruction *I = dyn_cast<Instruction>(V)) 3047 return propagateMetadata(I, E->Scalars); 3048 3049 return V; 3050 } 3051 case Instruction::Call: { 3052 CallInst *CI = cast<CallInst>(VL0); 3053 setInsertPointAfterBundle(E->Scalars, VL0); 3054 Function *FI; 3055 Intrinsic::ID IID = Intrinsic::not_intrinsic; 3056 Value *ScalarArg = nullptr; 3057 if (CI && (FI = CI->getCalledFunction())) { 3058 IID = FI->getIntrinsicID(); 3059 } 3060 std::vector<Value *> OpVecs; 3061 for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) { 3062 ValueList OpVL; 3063 // ctlz,cttz and powi are special intrinsics whose second argument is 3064 // a scalar. This argument should not be vectorized. 
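// E.g. (illustrative): @llvm.powi.f32(float %x, i32 %n) keeps %n scalar;
// only the float lanes are widened. That is why the scalar operand is
// passed through unvectorized below, and why buildTree_rec required it to
// be identical across the bundle.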
3065 if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) { 3066 CallInst *CEI = cast<CallInst>(VL0); 3067 ScalarArg = CEI->getArgOperand(j); 3068 OpVecs.push_back(CEI->getArgOperand(j)); 3069 continue; 3070 } 3071 for (Value *V : E->Scalars) { 3072 CallInst *CEI = cast<CallInst>(V); 3073 OpVL.push_back(CEI->getArgOperand(j)); 3074 } 3075 3076 Value *OpVec = vectorizeTree(OpVL); 3077 DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); 3078 OpVecs.push_back(OpVec); 3079 } 3080 3081 Module *M = F->getParent(); 3082 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3083 Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) }; 3084 Function *CF = Intrinsic::getDeclaration(M, ID, Tys); 3085 SmallVector<OperandBundleDef, 1> OpBundles; 3086 CI->getOperandBundlesAsDefs(OpBundles); 3087 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); 3088 3089 // The scalar argument uses an in-tree scalar so we add the new vectorized 3090 // call to ExternalUses list to make sure that an extract will be 3091 // generated in the future. 3092 if (ScalarArg && getTreeEntry(ScalarArg)) 3093 ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0)); 3094 3095 E->VectorizedValue = V; 3096 propagateIRFlags(E->VectorizedValue, E->Scalars, VL0); 3097 ++NumVectorInstructions; 3098 return V; 3099 } 3100 case Instruction::ShuffleVector: { 3101 ValueList LHSVL, RHSVL; 3102 assert(Instruction::isBinaryOp(S.Opcode) && 3103 "Invalid Shuffle Vector Operand"); 3104 reorderAltShuffleOperands(S.Opcode, E->Scalars, LHSVL, RHSVL); 3105 setInsertPointAfterBundle(E->Scalars, VL0); 3106 3107 Value *LHS = vectorizeTree(LHSVL); 3108 Value *RHS = vectorizeTree(RHSVL); 3109 3110 if (Value *V = alreadyVectorized(E->Scalars, VL0)) 3111 return V; 3112 3113 // Create a vector of LHS op1 RHS 3114 Value *V0 = Builder.CreateBinOp( 3115 static_cast<Instruction::BinaryOps>(S.Opcode), LHS, RHS); 3116 3117 unsigned AltOpcode = getAltOpcode(S.Opcode); 3118 // Create a vector of LHS op2 RHS 3119 Value *V1 = Builder.CreateBinOp( 3120 static_cast<Instruction::BinaryOps>(AltOpcode), LHS, RHS); 3121 3122 // Create shuffle to take alternate operations from the vector. 3123 // Also, gather up odd and even scalar ops to propagate IR flags to 3124 // each vector operation. 3125 ValueList OddScalars, EvenScalars; 3126 unsigned e = E->Scalars.size(); 3127 SmallVector<Constant *, 8> Mask(e); 3128 for (unsigned i = 0; i < e; ++i) { 3129 if (isOdd(i)) { 3130 Mask[i] = Builder.getInt32(e + i); 3131 OddScalars.push_back(E->Scalars[i]); 3132 } else { 3133 Mask[i] = Builder.getInt32(i); 3134 EvenScalars.push_back(E->Scalars[i]); 3135 } 3136 } 3137 3138 Value *ShuffleMask = ConstantVector::get(Mask); 3139 propagateIRFlags(V0, EvenScalars); 3140 propagateIRFlags(V1, OddScalars); 3141 3142 Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask); 3143 E->VectorizedValue = V; 3144 ++NumVectorInstructions; 3145 if (Instruction *I = dyn_cast<Instruction>(V)) 3146 return propagateMetadata(I, E->Scalars); 3147 3148 return V; 3149 } 3150 default: 3151 llvm_unreachable("unknown inst"); 3152 } 3153 return nullptr; 3154 } 3155 3156 Value *BoUpSLP::vectorizeTree() { 3157 ExtraValueToDebugLocsMap ExternallyUsedValues; 3158 return vectorizeTree(ExternallyUsedValues); 3159 } 3160 3161 Value * 3162 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { 3163 // All blocks must be scheduled before any instructions are inserted. 
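// (A note on ordering, as a sketch: scheduleBlock reorders instructions so
// that each bundle becomes contiguous; emitting vector code before that
// would invalidate the insertion points chosen by
// setInsertPointAfterBundle.)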
  for (auto &BSIter : BlocksSchedules) {
    scheduleBlock(BSIter.second.get());
  }

  Builder.SetInsertPoint(&F->getEntryBlock().front());
  auto *VectorRoot = vectorizeTree(&VectorizableTree[0]);

  // If the vectorized tree can be rewritten in a smaller type, we truncate the
  // vectorized root. InstCombine will then rewrite the entire expression. We
  // sign extend the extracted values below.
  auto *ScalarRoot = VectorizableTree[0].Scalars[0];
  if (MinBWs.count(ScalarRoot)) {
    if (auto *I = dyn_cast<Instruction>(VectorRoot))
      Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
    auto BundleWidth = VectorizableTree[0].Scalars.size();
    auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
    auto *VecTy = VectorType::get(MinTy, BundleWidth);
    auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
    VectorizableTree[0].VectorizedValue = Trunc;
  }

  DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values.\n");

  // If necessary, sign-extend or zero-extend ScalarRoot to the larger type
  // specified by ScalarType.
  auto extend = [&](Value *ScalarRoot, Value *Ex, Type *ScalarType) {
    if (!MinBWs.count(ScalarRoot))
      return Ex;
    if (MinBWs[ScalarRoot].second)
      return Builder.CreateSExt(Ex, ScalarType);
    return Builder.CreateZExt(Ex, ScalarType);
  };

  // Extract all of the elements with the external uses.
  for (const auto &ExternalUse : ExternalUses) {
    Value *Scalar = ExternalUse.Scalar;
    llvm::User *User = ExternalUse.User;

    // Skip users that we already RAUW. This happens when one instruction
    // has multiple uses of the same value.
    if (User && !is_contained(Scalar->users(), User))
      continue;
    TreeEntry *E = getTreeEntry(Scalar);
    assert(E && "Invalid scalar");
    assert(!E->NeedToGather && "Extracting from a gather list");

    Value *Vec = E->VectorizedValue;
    assert(Vec && "Can't find vectorizable value");

    Value *Lane = Builder.getInt32(ExternalUse.Lane);
    // If User == nullptr, the Scalar is used as an extra arg. Generate an
    // ExtractElement instruction and update the record for this scalar in
    // ExternallyUsedValues.
    if (!User) {
      assert(ExternallyUsedValues.count(Scalar) &&
             "Scalar with nullptr as an external user must be registered in "
             "ExternallyUsedValues map");
      if (auto *VecI = dyn_cast<Instruction>(Vec)) {
        Builder.SetInsertPoint(VecI->getParent(),
                               std::next(VecI->getIterator()));
      } else {
        Builder.SetInsertPoint(&F->getEntryBlock().front());
      }
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      Ex = extend(ScalarRoot, Ex, Scalar->getType());
      CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
      auto &Locs = ExternallyUsedValues[Scalar];
      ExternallyUsedValues.insert({Ex, Locs});
      ExternallyUsedValues.erase(Scalar);
      continue;
    }

    // Generate extracts for out-of-tree users.
    // Find the insertion point for the extractelement lane.
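    // Roughly: for a scalar that lives in lane 2 of a <4 x i32> result %v,
    // this emits
    //   %ex = extractelement <4 x i32> %v, i32 2
    // next to the out-of-tree user (or the vector def) and rewires that user
    // to %ex.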
    if (auto *VecI = dyn_cast<Instruction>(Vec)) {
      if (PHINode *PH = dyn_cast<PHINode>(User)) {
        for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
          if (PH->getIncomingValue(i) == Scalar) {
            TerminatorInst *IncomingTerminator =
                PH->getIncomingBlock(i)->getTerminator();
            if (isa<CatchSwitchInst>(IncomingTerminator)) {
              Builder.SetInsertPoint(VecI->getParent(),
                                     std::next(VecI->getIterator()));
            } else {
              Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
            }
            Value *Ex = Builder.CreateExtractElement(Vec, Lane);
            Ex = extend(ScalarRoot, Ex, Scalar->getType());
            CSEBlocks.insert(PH->getIncomingBlock(i));
            PH->setOperand(i, Ex);
          }
        }
      } else {
        Builder.SetInsertPoint(cast<Instruction>(User));
        Value *Ex = Builder.CreateExtractElement(Vec, Lane);
        Ex = extend(ScalarRoot, Ex, Scalar->getType());
        CSEBlocks.insert(cast<Instruction>(User)->getParent());
        User->replaceUsesOfWith(Scalar, Ex);
      }
    } else {
      Builder.SetInsertPoint(&F->getEntryBlock().front());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      Ex = extend(ScalarRoot, Ex, Scalar->getType());
      CSEBlocks.insert(&F->getEntryBlock());
      User->replaceUsesOfWith(Scalar, Ex);
    }

    DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
  }

  // For each vectorized value:
  for (TreeEntry &EIdx : VectorizableTree) {
    TreeEntry *Entry = &EIdx;

    // No need to handle users of gathered values.
    if (Entry->NeedToGather)
      continue;

    assert(Entry->VectorizedValue && "Can't find vectorizable value");

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      Type *Ty = Scalar->getType();
      if (!Ty->isVoidTy()) {
#ifndef NDEBUG
        for (User *U : Scalar->users()) {
          DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");

          // It is legal to replace users in the ignorelist by undef.
          assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) &&
                 "Replacing out-of-tree value with undef");
        }
#endif
        Value *Undef = UndefValue::get(Ty);
        Scalar->replaceAllUsesWith(Undef);
      }
      DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
      eraseInstruction(cast<Instruction>(Scalar));
    }
  }

  Builder.ClearInsertionPoint();

  return VectorizableTree[0].VectorizedValue;
}

void BoUpSLP::optimizeGatherSequence() {
  DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
               << " gather sequence instructions.\n");
  // LICM InsertElementInst sequences.
  for (Instruction *it : GatherSeq) {
    InsertElementInst *Insert = dyn_cast<InsertElementInst>(it);

    if (!Insert)
      continue;

    // Check if this block is inside a loop.
    Loop *L = LI->getLoopFor(Insert->getParent());
    if (!L)
      continue;

    // Check if it has a preheader.
    BasicBlock *PreHeader = L->getLoopPreheader();
    if (!PreHeader)
      continue;

    // If the vector or the element that we insert into it are
    // instructions that are defined in this basic block then we can't
    // hoist this instruction.
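    // Conversely, when both operands are loop-invariant, the insertelement
    // can execute once in the preheader instead of on every iteration.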
    Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
    Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
    if (CurrVec && L->contains(CurrVec))
      continue;
    if (NewElem && L->contains(NewElem))
      continue;

    // We can hoist this instruction. Move it to the pre-header.
    Insert->moveBefore(PreHeader->getTerminator());
  }

  // Make a list of all reachable blocks in our CSE queue.
  SmallVector<const DomTreeNode *, 8> CSEWorkList;
  CSEWorkList.reserve(CSEBlocks.size());
  for (BasicBlock *BB : CSEBlocks)
    if (DomTreeNode *N = DT->getNode(BB)) {
      assert(DT->isReachableFromEntry(N));
      CSEWorkList.push_back(N);
    }

  // Sort blocks by domination. This ensures we visit a block after all blocks
  // dominating it are visited.
  std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(),
                   [this](const DomTreeNode *A, const DomTreeNode *B) {
                     return DT->properlyDominates(A, B);
                   });

  // Perform an O(N^2) search over the gather sequences and merge identical
  // instructions. TODO: We can further optimize this scan if we split the
  // instructions into different buckets based on the insert lane.
  SmallVector<Instruction *, 16> Visited;
  for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
    assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
           "Worklist not sorted properly!");
    BasicBlock *BB = (*I)->getBlock();
    // For all instructions in blocks containing gather sequences:
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
      Instruction *In = &*it++;
      if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
        continue;

      // Check if we can replace this instruction with any of the
      // visited instructions.
      for (Instruction *v : Visited) {
        if (In->isIdenticalTo(v) &&
            DT->dominates(v->getParent(), In->getParent())) {
          In->replaceAllUsesWith(v);
          eraseInstruction(In);
          In = nullptr;
          break;
        }
      }
      if (In) {
        assert(!is_contained(Visited, In));
        Visited.push_back(In);
      }
    }
  }
  CSEBlocks.clear();
  GatherSeq.clear();
}

// Groups the instructions to a bundle (which is then a single scheduling
// entity) and schedules instructions until the bundle gets ready.
bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL,
                                                 BoUpSLP *SLP, Value *OpValue) {
  if (isa<PHINode>(OpValue))
    return true;

  // Initialize the instruction bundle.
  Instruction *OldScheduleEnd = ScheduleEnd;
  ScheduleData *PrevInBundle = nullptr;
  ScheduleData *Bundle = nullptr;
  bool ReSchedule = false;
  DEBUG(dbgs() << "SLP: bundle: " << *OpValue << "\n");

  // Make sure that the scheduling region contains all
  // instructions of the bundle.
  for (Value *V : VL) {
    if (!extendSchedulingRegion(V, OpValue))
      return false;
  }

  for (Value *V : VL) {
    ScheduleData *BundleMember = getScheduleData(V);
    assert(BundleMember &&
           "no ScheduleData for bundle member (maybe not in same basic block)");
    if (BundleMember->IsScheduled) {
      // A bundle member was scheduled as a single instruction before and now
      // needs to be scheduled as part of the bundle. We just get rid of the
      // existing schedule.
      DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
                   << " was already scheduled\n");
      ReSchedule = true;
    }
    assert(BundleMember->isSchedulingEntity() &&
           "bundle member already part of other bundle");
    if (PrevInBundle) {
      PrevInBundle->NextInBundle = BundleMember;
    } else {
      Bundle = BundleMember;
    }
    BundleMember->UnscheduledDepsInBundle = 0;
    Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;

    // Group the instructions to a bundle.
    BundleMember->FirstInBundle = Bundle;
    PrevInBundle = BundleMember;
  }
  if (ScheduleEnd != OldScheduleEnd) {
    // The scheduling region got new instructions at the lower end (or it is a
    // new region for the first bundle). This makes it necessary to
    // recalculate all dependencies.
    // It is seldom that this needs to be done a second time after adding the
    // initial bundle to the region.
    for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
      doForAllOpcodes(I, [](ScheduleData *SD) {
        SD->clearDependencies();
      });
    }
    ReSchedule = true;
  }
  if (ReSchedule) {
    resetSchedule();
    initialFillReadyList(ReadyInsts);
  }

  DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
               << BB->getName() << "\n");

  calculateDependencies(Bundle, true, SLP);

  // Now try to schedule the new bundle. As soon as the bundle is "ready" it
  // means that there are no cyclic dependencies and we can schedule it.
  // Note that it's important that we don't "schedule" the bundle yet (see
  // cancelScheduling).
  while (!Bundle->isReady() && !ReadyInsts.empty()) {
    ScheduleData *pickedSD = ReadyInsts.back();
    ReadyInsts.pop_back();

    if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) {
      schedule(pickedSD, ReadyInsts);
    }
  }
  if (!Bundle->isReady()) {
    cancelScheduling(VL, OpValue);
    return false;
  }
  return true;
}

void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
                                                Value *OpValue) {
  if (isa<PHINode>(OpValue))
    return;

  ScheduleData *Bundle = getScheduleData(OpValue);
  DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
  assert(!Bundle->IsScheduled &&
         "Can't cancel bundle which is already scheduled");
  assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
         "tried to unbundle something which is not a bundle");

  // Un-bundle: make single instructions out of the bundle.
  ScheduleData *BundleMember = Bundle;
  while (BundleMember) {
    assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
    BundleMember->FirstInBundle = BundleMember;
    ScheduleData *Next = BundleMember->NextInBundle;
    BundleMember->NextInBundle = nullptr;
    BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
    if (BundleMember->UnscheduledDepsInBundle == 0) {
      ReadyInsts.insert(BundleMember);
    }
    BundleMember = Next;
  }
}

BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
  // Allocate a new ScheduleData for the instruction.
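  // ScheduleData objects are pool-allocated in fixed-size chunks, so pointers
  // handed out earlier stay valid while the scheduling region grows.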
  if (ChunkPos >= ChunkSize) {
    ScheduleDataChunks.push_back(llvm::make_unique<ScheduleData[]>(ChunkSize));
    ChunkPos = 0;
  }
  return &(ScheduleDataChunks.back()[ChunkPos++]);
}

bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V,
                                                      Value *OpValue) {
  if (getScheduleData(V, isOneOf(OpValue, V)))
    return true;
  Instruction *I = dyn_cast<Instruction>(V);
  assert(I && "bundle member must be an instruction");
  assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
  auto &&CheckSheduleForI = [this, OpValue](Instruction *I) -> bool {
    ScheduleData *ISD = getScheduleData(I);
    if (!ISD)
      return false;
    assert(isInSchedulingRegion(ISD) &&
           "ScheduleData not in scheduling region");
    ScheduleData *SD = allocateScheduleDataChunks();
    SD->Inst = I;
    SD->init(SchedulingRegionID, OpValue);
    ExtraScheduleDataMap[I][OpValue] = SD;
    return true;
  };
  if (CheckSheduleForI(I))
    return true;
  if (!ScheduleStart) {
    // It's the first instruction in the new region.
    initScheduleData(I, I->getNextNode(), nullptr, nullptr);
    ScheduleStart = I;
    ScheduleEnd = I->getNextNode();
    if (isOneOf(OpValue, I) != I)
      CheckSheduleForI(I);
    assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
    DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
    return true;
  }
  // Search up and down at the same time, because we don't know if the new
  // instruction is above or below the existing scheduling region.
  BasicBlock::reverse_iterator UpIter =
      ++ScheduleStart->getIterator().getReverse();
  BasicBlock::reverse_iterator UpperEnd = BB->rend();
  BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
  BasicBlock::iterator LowerEnd = BB->end();
  while (true) {
    if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
      DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
      return false;
    }

    if (UpIter != UpperEnd) {
      if (&*UpIter == I) {
        initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
        ScheduleStart = I;
        if (isOneOf(OpValue, I) != I)
          CheckSheduleForI(I);
        DEBUG(dbgs() << "SLP: extend schedule region start to " << *I << "\n");
        return true;
      }
      ++UpIter;
    }
    if (DownIter != LowerEnd) {
      if (&*DownIter == I) {
        initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
                         nullptr);
        ScheduleEnd = I->getNextNode();
        if (isOneOf(OpValue, I) != I)
          CheckSheduleForI(I);
        assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
        DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n");
        return true;
      }
      ++DownIter;
    }
    assert((UpIter != UpperEnd || DownIter != LowerEnd) &&
           "instruction not found in block");
  }
  return true;
}

void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
                                                Instruction *ToI,
                                                ScheduleData *PrevLoadStore,
                                                ScheduleData *NextLoadStore) {
  ScheduleData *CurrentLoadStore = PrevLoadStore;
  for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
    ScheduleData *SD = ScheduleDataMap[I];
    if (!SD) {
      SD = allocateScheduleDataChunks();
      ScheduleDataMap[I] = SD;
      SD->Inst = I;
    }
    assert(!isInSchedulingRegion(SD) &&
           "new ScheduleData already in scheduling region");
    SD->init(SchedulingRegionID, I);
    if (I->mayReadOrWriteMemory() &&
        (!isa<IntrinsicInst>(I) ||
         cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect)) {
      // Update the linked list of memory accessing instructions.
      if (CurrentLoadStore) {
        CurrentLoadStore->NextLoadStore = SD;
      } else {
        FirstLoadStoreInRegion = SD;
      }
      CurrentLoadStore = SD;
    }
  }
  if (NextLoadStore) {
    if (CurrentLoadStore)
      CurrentLoadStore->NextLoadStore = NextLoadStore;
  } else {
    LastLoadStoreInRegion = CurrentLoadStore;
  }
}

void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
                                                     bool InsertInReadyList,
                                                     BoUpSLP *SLP) {
  assert(SD->isSchedulingEntity());

  SmallVector<ScheduleData *, 10> WorkList;
  WorkList.push_back(SD);

  while (!WorkList.empty()) {
    ScheduleData *SD = WorkList.back();
    WorkList.pop_back();

    ScheduleData *BundleMember = SD;
    while (BundleMember) {
      assert(isInSchedulingRegion(BundleMember));
      if (!BundleMember->hasValidDependencies()) {
        DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n");
        BundleMember->Dependencies = 0;
        BundleMember->resetUnscheduledDeps();

        // Handle def-use chain dependencies.
        if (BundleMember->OpValue != BundleMember->Inst) {
          ScheduleData *UseSD = getScheduleData(BundleMember->Inst);
          if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
            BundleMember->Dependencies++;
            ScheduleData *DestBundle = UseSD->FirstInBundle;
            if (!DestBundle->IsScheduled)
              BundleMember->incrementUnscheduledDeps(1);
            if (!DestBundle->hasValidDependencies())
              WorkList.push_back(DestBundle);
          }
        } else {
          for (User *U : BundleMember->Inst->users()) {
            if (isa<Instruction>(U)) {
              ScheduleData *UseSD = getScheduleData(U);
              if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
                BundleMember->Dependencies++;
                ScheduleData *DestBundle = UseSD->FirstInBundle;
                if (!DestBundle->IsScheduled)
                  BundleMember->incrementUnscheduledDeps(1);
                if (!DestBundle->hasValidDependencies())
                  WorkList.push_back(DestBundle);
              }
            } else {
              // We are not sure whether this can ever happen, but we need to
              // be safe. This pessimistic dependency is never resolved, so the
              // instruction/bundle can never become ready, which eventually
              // disables vectorization.
              BundleMember->Dependencies++;
              BundleMember->incrementUnscheduledDeps(1);
            }
          }
        }

        // Handle the memory dependencies.
        ScheduleData *DepDest = BundleMember->NextLoadStore;
        if (DepDest) {
          Instruction *SrcInst = BundleMember->Inst;
          MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
          bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
          unsigned numAliased = 0;
          unsigned DistToSrc = 1;

          while (DepDest) {
            assert(isInSchedulingRegion(DepDest));

            // We have two limits to reduce the complexity:
            // 1) AliasedCheckLimit: It's a small limit to reduce calls to
            //    SLP->isAliased (which is the expensive part in this loop).
            // 2) MaxMemDepDistance: It's for very large blocks and it aborts
            //    the whole loop (even if the loop is fast, it's quadratic).
            //    It's important for the loop break condition (see below) to
            //    check this limit even between two read-only instructions.
            if (DistToSrc >= MaxMemDepDistance ||
                ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
                 (numAliased >= AliasedCheckLimit ||
                  SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {

              // We increment the counter only if the locations are aliased
              // (instead of counting all alias checks). This gives a better
              // balance between reduced runtime and accurate dependencies.
              numAliased++;

              DepDest->MemoryDependencies.push_back(BundleMember);
              BundleMember->Dependencies++;
              ScheduleData *DestBundle = DepDest->FirstInBundle;
              if (!DestBundle->IsScheduled) {
                BundleMember->incrementUnscheduledDeps(1);
              }
              if (!DestBundle->hasValidDependencies()) {
                WorkList.push_back(DestBundle);
              }
            }
            DepDest = DepDest->NextLoadStore;

            // Example, explaining the loop break condition: Let's assume our
            // starting instruction is i0 and MaxMemDepDistance = 3.
            //
            //                      +--------v--v--v
            //             i0,i1,i2,i3,i4,i5,i6,i7,i8
            //             +--------^--^--^
            //
            // MaxMemDepDistance lets us stop alias-checking at i3 and we add
            // dependencies from i0 to i3,i4,.. (even if they are not aliased).
            // Previously we already added dependencies from i3 to i6,i7,i8
            // (because of MaxMemDepDistance). As we added a dependency from
            // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
            // and we can abort this loop at i6.
            if (DistToSrc >= 2 * MaxMemDepDistance)
              break;
            DistToSrc++;
          }
        }
      }
      BundleMember = BundleMember->NextInBundle;
    }
    if (InsertInReadyList && SD->isReady()) {
      ReadyInsts.push_back(SD);
      DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n");
    }
  }
}

void BoUpSLP::BlockScheduling::resetSchedule() {
  assert(ScheduleStart &&
         "tried to reset schedule on block which has not been scheduled");
  for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
    doForAllOpcodes(I, [&](ScheduleData *SD) {
      assert(isInSchedulingRegion(SD) &&
             "ScheduleData not in scheduling region");
      SD->IsScheduled = false;
      SD->resetUnscheduledDeps();
    });
  }
  ReadyInsts.clear();
}

void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
  if (!BS->ScheduleStart)
    return;

  DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");

  BS->resetSchedule();

  // For the real scheduling we use a more sophisticated ready-list: it is
  // sorted by the original instruction location. This lets the final schedule
  // be as close as possible to the original instruction order.
  struct ScheduleDataCompare {
    bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
      return SD2->SchedulingPriority < SD1->SchedulingPriority;
    }
  };
  std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;

  // Ensure that all dependency data is updated and fill the ready-list with
  // initial instructions.
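  // SchedulingPriority is the index of each bundle head in the original
  // instruction order; the comparator above uses it so that ready bundles
  // are picked in an order close to the source order.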
  int Idx = 0;
  int NumToSchedule = 0;
  for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
       I = I->getNextNode()) {
    BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) {
      assert(SD->isPartOfBundle() ==
                 (getTreeEntry(SD->Inst) != nullptr) &&
             "scheduler and vectorizer bundle mismatch");
      SD->FirstInBundle->SchedulingPriority = Idx++;
      if (SD->isSchedulingEntity()) {
        BS->calculateDependencies(SD, false, this);
        NumToSchedule++;
      }
    });
  }
  BS->initialFillReadyList(ReadyInsts);

  Instruction *LastScheduledInst = BS->ScheduleEnd;

  // Do the "real" scheduling.
  while (!ReadyInsts.empty()) {
    ScheduleData *picked = *ReadyInsts.begin();
    ReadyInsts.erase(ReadyInsts.begin());

    // Move the scheduled instruction(s) to their dedicated places, if not
    // there yet.
    ScheduleData *BundleMember = picked;
    while (BundleMember) {
      Instruction *pickedInst = BundleMember->Inst;
      if (LastScheduledInst->getNextNode() != pickedInst) {
        BS->BB->getInstList().remove(pickedInst);
        BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
                                     pickedInst);
      }
      LastScheduledInst = pickedInst;
      BundleMember = BundleMember->NextInBundle;
    }

    BS->schedule(picked, ReadyInsts);
    NumToSchedule--;
  }
  assert(NumToSchedule == 0 && "could not schedule all instructions");

  // Avoid duplicate scheduling of the block.
  BS->ScheduleStart = nullptr;
}

unsigned BoUpSLP::getVectorElementSize(Value *V) {
  // If V is a store, just return the width of the stored value without
  // traversing the expression tree. This is the common case.
  if (auto *Store = dyn_cast<StoreInst>(V))
    return DL->getTypeSizeInBits(Store->getValueOperand()->getType());

  // If V is not a store, we can traverse the expression tree to find loads
  // that feed it. The type of the loaded value may indicate a more suitable
  // width than V's type. We want to base the vector element size on the width
  // of memory operations where possible.
  SmallVector<Instruction *, 16> Worklist;
  SmallPtrSet<Instruction *, 16> Visited;
  if (auto *I = dyn_cast<Instruction>(V))
    Worklist.push_back(I);

  // Traverse the expression tree in bottom-up order looking for loads. If we
  // encounter an instruction we don't yet handle, we give up.
  auto MaxWidth = 0u;
  auto FoundUnknownInst = false;
  while (!Worklist.empty() && !FoundUnknownInst) {
    auto *I = Worklist.pop_back_val();
    Visited.insert(I);

    // We should only be looking at scalar instructions here. If the current
    // instruction has a vector type, give up.
    auto *Ty = I->getType();
    if (isa<VectorType>(Ty))
      FoundUnknownInst = true;

    // If the current instruction is a load, update MaxWidth to reflect the
    // width of the loaded value.
    else if (isa<LoadInst>(I))
      MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty));

    // Otherwise, we need to visit the operands of the instruction. We only
    // handle the interesting cases from buildTree here. If an operand is an
    // instruction we haven't yet visited, we add it to the worklist.
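    // For example, starting from an i32 add whose operands are casts of i8
    // loads, the walk reaches the loads and reports 8 bits rather than 32.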
    else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
             isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) {
      for (Use &U : I->operands())
        if (auto *J = dyn_cast<Instruction>(U.get()))
          if (!Visited.count(J))
            Worklist.push_back(J);
    }

    // If we don't yet handle the instruction, give up.
    else
      FoundUnknownInst = true;
  }

  // If we didn't encounter a memory access in the expression tree, or if we
  // gave up for some reason, just return the width of V.
  if (!MaxWidth || FoundUnknownInst)
    return DL->getTypeSizeInBits(V->getType());

  // Otherwise, return the maximum width we found.
  return MaxWidth;
}

// Determine if a value V in a vectorizable expression Expr can be demoted to a
// smaller type with a truncation. We collect the values that will be demoted
// in ToDemote and additional roots that require investigating in Roots.
static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
                                  SmallVectorImpl<Value *> &ToDemote,
                                  SmallVectorImpl<Value *> &Roots) {
  // We can always demote constants.
  if (isa<Constant>(V)) {
    ToDemote.push_back(V);
    return true;
  }

  // If the value is not an instruction in the expression with only one use, it
  // cannot be demoted.
  auto *I = dyn_cast<Instruction>(V);
  if (!I || !I->hasOneUse() || !Expr.count(I))
    return false;

  switch (I->getOpcode()) {

  // We can always demote truncations and extensions. Since truncations can
  // seed additional demotion, we save the truncated value.
  case Instruction::Trunc:
    Roots.push_back(I->getOperand(0));
    break;
  case Instruction::ZExt:
  case Instruction::SExt:
    break;

  // We can demote certain binary operations if we can demote both of their
  // operands.
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
        !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
      return false;
    break;

  // We can demote selects if we can demote their true and false values.
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
        !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
      return false;
    break;
  }

  // We can demote phis if we can demote all their incoming operands. Note that
  // we don't need to worry about cycles since we ensure single use above.
  case Instruction::PHI: {
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
        return false;
    break;
  }

  // Otherwise, conservatively give up.
  default:
    return false;
  }

  // Record the value that we can demote.
  ToDemote.push_back(V);
  return true;
}

void BoUpSLP::computeMinimumValueSizes() {
  // If there are no external uses, the expression tree must be rooted by a
  // store. We can't demote in-memory values, so there is nothing to do here.
  if (ExternalUses.empty())
    return;

  // We only attempt to truncate integer expressions.
  auto &TreeRoot = VectorizableTree[0].Scalars;
  auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
  if (!TreeRootIT)
    return;

  // If the expression is not rooted by a store, these roots should have
  // external uses. We will rely on InstCombine to rewrite the expression in
  // the narrower type. However, InstCombine only rewrites single-use values.
  // This means that if a tree entry other than a root is used externally, it
  // must have multiple uses and InstCombine will not rewrite it. The code
  // below ensures that only the roots are used externally.
  SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
  for (auto &EU : ExternalUses)
    if (!Expr.erase(EU.Scalar))
      return;
  if (!Expr.empty())
    return;

  // Collect the scalar values of the vectorizable expression. We will use this
  // context to determine which values can be demoted. If we see a truncation,
  // we mark it as seeding another demotion.
  for (auto &Entry : VectorizableTree)
    Expr.insert(Entry.Scalars.begin(), Entry.Scalars.end());

  // Ensure the roots of the vectorizable tree don't form a cycle. They must
  // have a single external user that is not in the vectorizable tree.
  for (auto *Root : TreeRoot)
    if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
      return;

  // Conservatively determine if we can actually truncate the roots of the
  // expression. Collect the values that can be demoted in ToDemote and
  // additional roots that require investigating in Roots.
  SmallVector<Value *, 32> ToDemote;
  SmallVector<Value *, 4> Roots;
  for (auto *Root : TreeRoot)
    if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
      return;

  // The maximum bit width required to represent all the values that can be
  // demoted without loss of precision. It would be safe to truncate the roots
  // of the expression to this width.
  auto MaxBitWidth = 8u;

  // We first check if all the bits of the roots are demanded. If they're not,
  // we can truncate the roots to this narrower type.
  for (auto *Root : TreeRoot) {
    auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
    MaxBitWidth = std::max<unsigned>(
        Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
  }

  // True if the roots can be zero-extended back to their original type, rather
  // than sign-extended. We know that if the leading bits are not demanded, we
  // can safely zero-extend. So we initialize IsKnownPositive to True.
  bool IsKnownPositive = true;

  // If all the bits of the roots are demanded, we can try a little harder to
  // compute a narrower type. This can happen, for example, if the roots are
  // getelementptr indices. InstCombine promotes these indices to the pointer
  // width. Thus, all their bits are technically demanded even though the
  // address computation might be vectorized in a smaller type.
  //
  // We start by looking at each entry that can be demoted. We compute the
  // maximum bit width required to store the scalar by using ValueTracking to
  // compute the number of high-order bits we can truncate.
  if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType())) {
    MaxBitWidth = 8u;

    // Determine if the sign bit of all the roots is known to be zero. If not,
    // IsKnownPositive is set to False.
    IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
      KnownBits Known = computeKnownBits(R, *DL);
      return Known.isNonNegative();
    });

    // Determine the maximum number of bits required to store the scalar
    // values.
    for (auto *Scalar : ToDemote) {
      auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
      auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
      MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
    }

    // If we can't prove that the sign bit is zero, we must add one to the
    // maximum bit width to account for the unknown sign bit. This preserves
    // the existing sign bit so we can safely sign-extend the root back to the
    // original type. Otherwise, if we know the sign bit is zero, we will
    // zero-extend the root instead.
    //
    // FIXME: This is somewhat suboptimal, as there will be cases where adding
    //        one to the maximum bit width will yield a larger-than-necessary
    //        type. In general, we need to add an extra bit only if we can't
    //        prove that the upper bit of the original type is equal to the
    //        upper bit of the proposed smaller type. If these two bits are the
    //        same (either zero or one) we know that sign-extending from the
    //        smaller type will result in the same value. Here, since we can't
    //        yet prove this, we are just making the proposed smaller type
    //        larger to ensure correctness.
    if (!IsKnownPositive)
      ++MaxBitWidth;
  }

  // Round MaxBitWidth up to the next power-of-two.
  if (!isPowerOf2_64(MaxBitWidth))
    MaxBitWidth = NextPowerOf2(MaxBitWidth);

  // If the maximum bit width we computed is less than the width of the roots'
  // type, we can proceed with the narrowing. Otherwise, do nothing.
  if (MaxBitWidth >= TreeRootIT->getBitWidth())
    return;

  // If we can truncate the root, we must collect additional values that might
  // be demoted as a result. That is, those seeded by truncations we will
  // modify.
  while (!Roots.empty())
    collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);

  // Finally, map the values we can demote to the maximum bit width we
  // computed.
  for (auto *Scalar : ToDemote)
    MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
}

namespace {

/// The SLPVectorizer Pass.
struct SLPVectorizer : public FunctionPass {
  SLPVectorizerPass Impl;

  /// Pass identification, replacement for typeid
  static char ID;

  explicit SLPVectorizer() : FunctionPass(ID) {
    initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override {
    return false;
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
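    // TargetLibraryInfo is only used when available; a null TLI is tolerated
    // by the vectorizer below.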
    auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

    return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    FunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace

PreservedAnalyses SLPVectorizerPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
  auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
  auto *AA = &AM.getResult<AAManager>(F);
  auto *LI = &AM.getResult<LoopAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
  auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<AAManager>();
  PA.preserve<GlobalsAA>();
  return PA;
}

bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
                                TargetTransformInfo *TTI_,
                                TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
                                LoopInfo *LI_, DominatorTree *DT_,
                                AssumptionCache *AC_, DemandedBits *DB_,
                                OptimizationRemarkEmitter *ORE_) {
  SE = SE_;
  TTI = TTI_;
  TLI = TLI_;
  AA = AA_;
  LI = LI_;
  DT = DT_;
  AC = AC_;
  DB = DB_;
  DL = &F.getParent()->getDataLayout();

  Stores.clear();
  GEPs.clear();
  bool Changed = false;

  // If the target claims to have no vector registers, don't attempt
  // vectorization.
  if (!TTI->getNumberOfRegisters(true))
    return false;

  // Don't vectorize when the attribute NoImplicitFloat is used.
  if (F.hasFnAttribute(Attribute::NoImplicitFloat))
    return false;

  DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");

  // Use the bottom-up SLP vectorizer to construct chains that start with
  // store instructions.
  BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);

  // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
  // delete instructions.

  // Scan the blocks in the function in post order.
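  // (post_order visits each block after its DFS successors, ignoring back
  // edges, so seed stores near the bottom of the function are tried first.)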
  for (auto BB : post_order(&F.getEntryBlock())) {
    collectSeedInstructions(BB);

    // Vectorize trees that end at stores.
    if (!Stores.empty()) {
      DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
                   << " underlying objects.\n");
      Changed |= vectorizeStoreChains(R);
    }

    // Vectorize trees that end at reductions.
    Changed |= vectorizeChainsInBlock(BB, R);

    // Vectorize the index computations of getelementptr instructions. This
    // is primarily intended to catch gather-like idioms ending at
    // non-consecutive loads.
    if (!GEPs.empty()) {
      DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
                   << " underlying objects.\n");
      Changed |= vectorizeGEPIndices(BB, R);
    }
  }

  if (Changed) {
    R.optimizeGatherSequence();
    DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
    DEBUG(verifyFunction(F));
  }
  return Changed;
}

/// \brief Check that the Values in the given slice of the VL array are still
/// present in the WeakTrackingVH array.
/// Vectorization of part of the VL array may cause later values in the VL
/// array to become invalid. We track when this has happened in the
/// WeakTrackingVH array.
static bool hasValueBeenRAUWed(ArrayRef<Value *> VL,
                               ArrayRef<WeakTrackingVH> VH, unsigned SliceBegin,
                               unsigned SliceSize) {
  VL = VL.slice(SliceBegin, SliceSize);
  VH = VH.slice(SliceBegin, SliceSize);
  return !std::equal(VL.begin(), VL.end(), VH.begin());
}

bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
                                            unsigned VecRegSize) {
  unsigned ChainLen = Chain.size();
  DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
               << "\n");
  unsigned Sz = R.getVectorElementSize(Chain[0]);
  unsigned VF = VecRegSize / Sz;

  if (!isPowerOf2_32(Sz) || VF < 2)
    return false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakTrackingVH, 8> TrackValues(Chain.begin(), Chain.end());

  bool Changed = false;
  // Look for profitable vectorizable trees at all offsets, starting at zero.
  for (unsigned i = 0, e = ChainLen; i < e; ++i) {
    if (i + VF > e)
      break;

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
      continue;

    DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
                 << "\n");
    ArrayRef<Value *> Operands = Chain.slice(i, VF);

    R.buildTree(Operands);
    if (R.isTreeTinyAndNotFullyVectorizable())
      continue;

    R.computeMinimumValueSizes();

    int Cost = R.getTreeCost();

    DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
    if (Cost < -SLPCostThreshold) {
      DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");

      using namespace ore;

      R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
                                          cast<StoreInst>(Chain[i]))
                       << "Stores SLP vectorized with cost " << NV("Cost", Cost)
                       << " and with tree size "
                       << NV("TreeSize", R.getTreeSize()));

      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}

bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
                                        BoUpSLP &R) {
  SetVector<StoreInst *> Heads;
  SmallDenseSet<StoreInst *> Tails;
  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;

  // We may run into multiple chains that merge into a single chain. We mark
  // the stores that we vectorized so that we don't visit the same store twice.
  BoUpSLP::ValueSet VectorizedStores;
  bool Changed = false;

  // Do a quadratic search on all of the given stores in reverse order and find
  // all of the pairs of stores that follow each other.
  SmallVector<unsigned, 16> IndexQueue;
  unsigned E = Stores.size();
  IndexQueue.resize(E - 1);
  for (unsigned I = E; I > 0; --I) {
    unsigned Idx = I - 1;
    // If a store has multiple consecutive store candidates, search the Stores
    // array according to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ...
    // This is because pairing with an immediately succeeding or preceding
    // candidate usually creates the best chance to find an SLP vectorization
    // opportunity.
    unsigned Offset = 1;
    unsigned Cnt = 0;
    for (unsigned J = 0; J < E - 1; ++J, ++Offset) {
      if (Idx >= Offset) {
        IndexQueue[Cnt] = Idx - Offset;
        ++Cnt;
      }
      if (Idx + Offset < E) {
        IndexQueue[Cnt] = Idx + Offset;
        ++Cnt;
      }
    }

    for (auto K : IndexQueue) {
      if (isConsecutiveAccess(Stores[K], Stores[Idx], *DL, *SE)) {
        Tails.insert(Stores[Idx]);
        Heads.insert(Stores[K]);
        ConsecutiveChain[Stores[K]] = Stores[Idx];
        break;
      }
    }
  }

  // For stores that start but don't end a link in the chain:
  for (auto *SI : llvm::reverse(Heads)) {
    if (Tails.count(SI))
      continue;

    // We found a store instruction that starts a chain. Now follow the chain
    // and try to vectorize it.
    BoUpSLP::ValueList Operands;
    StoreInst *I = SI;
    // Collect the chain into a list.
    while ((Tails.count(I) || Heads.count(I)) && !VectorizedStores.count(I)) {
      Operands.push_back(I);
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    // FIXME: Is division-by-2 the correct step? Should we assert that the
    // register size is a power-of-2?
    for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize();
         Size /= 2) {
      if (vectorizeStoreChain(Operands, R, Size)) {
        // Mark the vectorized stores so that we don't vectorize them again.
        VectorizedStores.insert(Operands.begin(), Operands.end());
        Changed = true;
        break;
      }
    }
  }

  return Changed;
}

void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
  // Initialize the collections. We will make a single pass over the block.
  Stores.clear();
  GEPs.clear();

  // Visit the store and getelementptr instructions in BB and organize them in
  // Stores and GEPs according to the underlying objects of their pointer
  // operands.
  for (Instruction &I : *BB) {
    // Ignore store instructions that are volatile or have a pointer operand
    // that doesn't point to a scalar type.
    if (auto *SI = dyn_cast<StoreInst>(&I)) {
      if (!SI->isSimple())
        continue;
      if (!isValidElementType(SI->getValueOperand()->getType()))
        continue;
      Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
    }

    // Ignore getelementptr instructions that have more than one index, a
    // constant index, or a pointer operand that doesn't point to a scalar
    // type.
    else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
      auto Idx = GEP->idx_begin()->get();
      if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
        continue;
      if (!isValidElementType(Idx->getType()))
        continue;
      if (GEP->getType()->isVectorTy())
        continue;
      GEPs[GetUnderlyingObject(GEP->getPointerOperand(), *DL)].push_back(GEP);
    }
  }
}

bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
  if (!A || !B)
    return false;
  Value *VL[] = { A, B };
  return tryToVectorizeList(VL, R, true);
}

bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
                                           bool AllowReorder) {
  if (VL.size() < 2)
    return false;

  DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " << VL.size()
               << ".\n");

  // Check that all of the parts are scalar instructions of the same type.
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;

  unsigned Opcode0 = I0->getOpcode();

  unsigned Sz = R.getVectorElementSize(I0);
  unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz);
  unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
  if (MaxVF < 2) {
    R.getORE()->emit([&]() {
      return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0)
             << "Cannot SLP vectorize list: vectorization factor "
             << "less than 2 is not supported";
    });
    return false;
  }

  for (Value *V : VL) {
    Type *Ty = V->getType();
    if (!isValidElementType(Ty)) {
      // NOTE: the following will give the user the internal LLVM type name,
      // which may not be useful.
      R.getORE()->emit([&]() {
        std::string type_str;
        llvm::raw_string_ostream rso(type_str);
        Ty->print(rso);
        return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0)
               << "Cannot SLP vectorize list: type "
               << rso.str() + " is unsupported by vectorizer";
      });
      return false;
    }
    Instruction *Inst = dyn_cast<Instruction>(V);

    if (!Inst)
      return false;
    if (Inst->getOpcode() != Opcode0) {
      R.getORE()->emit([&]() {
        return OptimizationRemarkMissed(SV_NAME, "InequableTypes", I0)
               << "Cannot SLP vectorize list: not all of the "
               << "parts of scalar instructions are of the same type: "
               << ore::NV("Instruction1Opcode", I0) << " and "
               << ore::NV("Instruction2Opcode", Inst);
      });
      return false;
    }
  }

  bool Changed = false;
  bool CandidateFound = false;
  int MinCost = SLPCostThreshold;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakTrackingVH, 8> TrackValues(VL.begin(), VL.end());

  unsigned NextInst = 0, MaxInst = VL.size();
  for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF;
       VF /= 2) {
    // No actual vectorization should happen if the number of parts is the
    // same as the provided vectorization factor (i.e. the scalar type is used
    // for vector code during codegen).
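    // For instance, if legalization would split the candidate vector type
    // into one part per lane, the "vector" code is effectively scalar and
    // nothing is gained.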
    auto *VecTy = VectorType::get(VL[0]->getType(), VF);
    if (TTI->getNumberOfParts(VecTy) == VF)
      continue;
    for (unsigned I = NextInst; I < MaxInst; ++I) {
      unsigned OpsWidth = 0;

      if (I + VF > MaxInst)
        OpsWidth = MaxInst - I;
      else
        OpsWidth = VF;

      if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
        break;

      // Check that a previous iteration of this loop did not delete the
      // Value.
      if (hasValueBeenRAUWed(VL, TrackValues, I, OpsWidth))
        continue;

      DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
                   << "\n");
      ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);

      R.buildTree(Ops);
      // TODO: check if we can allow reordering for more cases.
      if (AllowReorder && R.shouldReorder()) {
        // Conceptually, there is nothing actually preventing us from trying to
        // reorder a larger list. In fact, we do exactly this when vectorizing
        // reductions. However, at this point, we only expect to get here when
        // there are exactly two operations.
        assert(Ops.size() == 2);
        Value *ReorderedOps[] = {Ops[1], Ops[0]};
        R.buildTree(ReorderedOps, None);
      }
      if (R.isTreeTinyAndNotFullyVectorizable())
        continue;

      R.computeMinimumValueSizes();
      int Cost = R.getTreeCost();
      CandidateFound = true;
      MinCost = std::min(MinCost, Cost);

      if (Cost < -SLPCostThreshold) {
        DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
        R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
                                            cast<Instruction>(Ops[0]))
                         << "SLP vectorized with cost " << ore::NV("Cost", Cost)
                         << " and with tree size "
                         << ore::NV("TreeSize", R.getTreeSize()));

        R.vectorizeTree();
        // Move to the next bundle.
        I += VF - 1;
        NextInst = I + 1;
        Changed = true;
      }
    }
  }

  if (!Changed && CandidateFound) {
    R.getORE()->emit([&]() {
      return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0)
             << "List vectorization was possible but not beneficial with cost "
             << ore::NV("Cost", MinCost) << " >= "
             << ore::NV("Treshold", -SLPCostThreshold);
    });
  } else if (!Changed) {
    R.getORE()->emit([&]() {
      return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0)
             << "Cannot SLP vectorize list: vectorization was impossible"
             << " with available vectorization factors";
    });
  }
  return Changed;
}

bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
  if (!I)
    return false;

  if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I))
    return false;

  Value *P = I->getParent();

  // Vectorize in the current basic block only.
  auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
  auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
  if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
    return false;

  // Try to vectorize V.
  if (tryToVectorizePair(Op0, Op1, R))
    return true;

  auto *A = dyn_cast<BinaryOperator>(Op0);
  auto *B = dyn_cast<BinaryOperator>(Op1);
  // Try to skip B.
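  // That is, if B is itself a single-use binary operator, try pairing A with
  // one of B's operands instead of with B.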
  if (B && B->hasOneUse()) {
    auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
    auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
    if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R))
      return true;
    if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R))
      return true;
  }

  // Try to skip A.
  if (A && A->hasOneUse()) {
    auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
    auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
    if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R))
      return true;
    if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R))
      return true;
  }
  return false;
}

/// \brief Generate a shuffle mask to be used in a reduction tree.
///
/// \param VecLen The length of the vector to be reduced.
/// \param NumEltsToRdx The number of elements that should be reduced in the
///        vector.
/// \param IsPairwise Whether the reduction is a pairwise or splitting
///        reduction. A pairwise reduction will generate a mask of
///        <0,2,...> or <1,3,..> while a splitting reduction will generate
///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
/// \param IsLeft True will generate a mask of even elements, odd otherwise.
static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
                                   bool IsPairwise, bool IsLeft,
                                   IRBuilder<> &Builder) {
  assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");

  SmallVector<Constant *, 32> ShuffleMask(
      VecLen, UndefValue::get(Builder.getInt32Ty()));

  if (IsPairwise)
    // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
  else
    // Move the upper half of the vector to the lower half.
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);

  return ConstantVector::get(ShuffleMask);
}

namespace {

/// Model horizontal reductions.
///
/// A horizontal reduction is a tree of reduction operations (currently add and
/// fadd) that has operations that can be put into a vector as its leaves.
/// For example, this tree:
///
///   mul mul mul mul
///    \  /    \  /
///     +       +
///      \     /
///         +
///
/// This tree has "mul" as its reduced values and "+" as its reduction
/// operations. A reduction might be feeding into a store or a binary operation
/// feeding a phi.
///
///    ...
///    \  /
///     +
///     |
///  phi +=
///
///  Or:
///
///    ...
///    \  /
///     +
///     |
///   *p =
///
class HorizontalReduction {
  using ReductionOpsType = SmallVector<Value *, 16>;
  using ReductionOpsListType = SmallVector<ReductionOpsType, 2>;
  ReductionOpsListType ReductionOps;
  SmallVector<Value *, 32> ReducedVals;
  // Use map vector to make stable output.
  MapVector<Instruction *, Value *> ExtraArgs;

  /// Kind of the reduction data.
  enum ReductionKind {
    RK_None,       /// Not a reduction.
    RK_Arithmetic, /// Binary reduction data.
    RK_Min,        /// Minimum reduction data.
    RK_UMin,       /// Unsigned minimum reduction data.
    RK_Max,        /// Maximum reduction data.
    RK_UMax,       /// Unsigned maximum reduction data.
  };

  /// Contains info about an operation, like its opcode and its left and right
  /// operands.
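  /// For min/max kinds the operation is the icmp/fcmp plus the select that
  /// consumes it; RK_Arithmetic covers plain binary operators such as add.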
  class OperationData {
    /// Opcode of the instruction.
    unsigned Opcode = 0;

    /// Left operand of the reduction operation.
    Value *LHS = nullptr;

    /// Right operand of the reduction operation.
    Value *RHS = nullptr;

    /// Kind of the reduction operation.
    ReductionKind Kind = RK_None;

    /// True if a floating-point min/max reduction has no NaNs.
    bool NoNaN = false;

    /// Checks if the reduction operation can be vectorized.
    bool isVectorizable() const {
      return LHS && RHS &&
             // We currently only support add/fadd and min/max reductions.
             ((Kind == RK_Arithmetic &&
               (Opcode == Instruction::Add || Opcode == Instruction::FAdd)) ||
              ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
               (Kind == RK_Min || Kind == RK_Max)) ||
              (Opcode == Instruction::ICmp &&
               (Kind == RK_UMin || Kind == RK_UMax)));
    }

    /// Creates a reduction operation with the current opcode.
    Value *createOp(IRBuilder<> &Builder, const Twine &Name) const {
      assert(isVectorizable() &&
             "Expected add|fadd or min/max reduction operation.");
      Value *Cmp;
      switch (Kind) {
      case RK_Arithmetic:
        return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, LHS, RHS,
                                   Name);
      case RK_Min:
        Cmp = Opcode == Instruction::ICmp ? Builder.CreateICmpSLT(LHS, RHS)
                                          : Builder.CreateFCmpOLT(LHS, RHS);
        break;
      case RK_Max:
        Cmp = Opcode == Instruction::ICmp ? Builder.CreateICmpSGT(LHS, RHS)
                                          : Builder.CreateFCmpOGT(LHS, RHS);
        break;
      case RK_UMin:
        assert(Opcode == Instruction::ICmp && "Expected integer types.");
        Cmp = Builder.CreateICmpULT(LHS, RHS);
        break;
      case RK_UMax:
        assert(Opcode == Instruction::ICmp && "Expected integer types.");
        Cmp = Builder.CreateICmpUGT(LHS, RHS);
        break;
      case RK_None:
        llvm_unreachable("Unknown reduction operation.");
      }
      return Builder.CreateSelect(Cmp, LHS, RHS, Name);
    }

  public:
    explicit OperationData() = default;

    /// Constructor for reduced values. They are identified by opcode only and
    /// don't have associated LHS/RHS values.
    explicit OperationData(Value *V) {
      if (auto *I = dyn_cast<Instruction>(V))
        Opcode = I->getOpcode();
    }

    /// Constructor for reduction operations with opcode and its left and
    /// right operands.
    OperationData(unsigned Opcode, Value *LHS, Value *RHS, ReductionKind Kind,
                  bool NoNaN = false)
        : Opcode(Opcode), LHS(LHS), RHS(RHS), Kind(Kind), NoNaN(NoNaN) {
      assert(Kind != RK_None && "One of the reduction operations is expected.");
    }

    explicit operator bool() const { return Opcode; }

    /// Get the index of the first operand.
    unsigned getFirstOperandIndex() const {
      assert(!!*this && "The opcode is not set.");
      switch (Kind) {
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        return 1;
      case RK_Arithmetic:
      case RK_None:
        break;
      }
      return 0;
    }

    /// Total number of operands in the reduction operation.
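    /// For RK_Arithmetic the reduction operation is the binary instruction
    /// itself, with two operands; min/max reductions are modeled through
    /// their select, which has three operands (condition, true and false
    /// values), so the first operand to traverse is at index 1 (see
    /// getFirstOperandIndex above).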
    unsigned getNumberOfOperands() const {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        return 2;
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        return 3;
      case RK_None:
        break;
      }
      llvm_unreachable("Reduction kind is not set");
    }

    /// Checks if the operation has the same parent as \p P.
    bool hasSameParent(Instruction *I, Value *P, bool IsRedOp) const {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      if (!IsRedOp)
        return I->getParent() == P;
      switch (Kind) {
      case RK_Arithmetic:
        // Arithmetic reduction operation must be used once only.
        return I->getParent() == P;
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax: {
        // The SelectInst must be used twice while the condition op must have a
        // single use only.
        auto *Cmp = cast<Instruction>(cast<SelectInst>(I)->getCondition());
        return I->getParent() == P && Cmp && Cmp->getParent() == P;
      }
      case RK_None:
        break;
      }
      llvm_unreachable("Reduction kind is not set");
    }

    /// Expected number of uses for reduction operations/reduced values.
    bool hasRequiredNumberOfUses(Instruction *I, bool IsReductionOp) const {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        return I->hasOneUse();
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        return I->hasNUses(2) &&
               (!IsReductionOp ||
                cast<SelectInst>(I)->getCondition()->hasOneUse());
      case RK_None:
        break;
      }
      llvm_unreachable("Reduction kind is not set");
    }

    /// Initializes the list of reduction operations.
    void initReductionOps(ReductionOpsListType &ReductionOps) {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        ReductionOps.assign(1, ReductionOpsType());
        break;
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        ReductionOps.assign(2, ReductionOpsType());
        break;
      case RK_None:
        llvm_unreachable("Reduction kind is not set");
      }
    }

    /// Add all reduction operations for the reduction instruction \p I.
    void addReductionOps(Instruction *I, ReductionOpsListType &ReductionOps) {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        ReductionOps[0].emplace_back(I);
        break;
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition());
        ReductionOps[1].emplace_back(I);
        break;
      case RK_None:
        llvm_unreachable("Reduction kind is not set");
      }
    }

    /// Checks if the instruction is associative and can be vectorized.
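    /// Integer min/max compares can always be reassociated; floating-point
    /// min/max only when the feeding compare has fast-math flags (the
    /// isFast() check below), and plain arithmetic defers to
    /// Instruction::isAssociative().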
4903 bool isAssociative(Instruction *I) const { 4904 assert(Kind != RK_None && *this && LHS && RHS && 4905 "Expected reduction operation."); 4906 switch (Kind) { 4907 case RK_Arithmetic: 4908 return I->isAssociative(); 4909 case RK_Min: 4910 case RK_Max: 4911 return Opcode == Instruction::ICmp || 4912 cast<Instruction>(I->getOperand(0))->isFast(); 4913 case RK_UMin: 4914 case RK_UMax: 4915 assert(Opcode == Instruction::ICmp && 4916 "Only integer compare operation is expected."); 4917 return true; 4918 case RK_None: 4919 break; 4920 } 4921 llvm_unreachable("Reduction kind is not set"); 4922 } 4923 4924 /// Checks if the reduction operation can be vectorized. 4925 bool isVectorizable(Instruction *I) const { 4926 return isVectorizable() && isAssociative(I); 4927 } 4928 4929 /// Checks if two operation data are both a reduction op or both a reduced 4930 /// value. 4931 bool operator==(const OperationData &OD) { 4932 assert(((Kind != OD.Kind) || ((!LHS == !OD.LHS) && (!RHS == !OD.RHS))) && 4933 "One of the comparing operations is incorrect."); 4934 return this == &OD || (Kind == OD.Kind && Opcode == OD.Opcode); 4935 } 4936 bool operator!=(const OperationData &OD) { return !(*this == OD); } 4937 void clear() { 4938 Opcode = 0; 4939 LHS = nullptr; 4940 RHS = nullptr; 4941 Kind = RK_None; 4942 NoNaN = false; 4943 } 4944 4945 /// Get the opcode of the reduction operation. 4946 unsigned getOpcode() const { 4947 assert(isVectorizable() && "Expected vectorizable operation."); 4948 return Opcode; 4949 } 4950 4951 /// Get kind of reduction data. 4952 ReductionKind getKind() const { return Kind; } 4953 Value *getLHS() const { return LHS; } 4954 Value *getRHS() const { return RHS; } 4955 Type *getConditionType() const { 4956 switch (Kind) { 4957 case RK_Arithmetic: 4958 return nullptr; 4959 case RK_Min: 4960 case RK_Max: 4961 case RK_UMin: 4962 case RK_UMax: 4963 return CmpInst::makeCmpResultType(LHS->getType()); 4964 case RK_None: 4965 break; 4966 } 4967 llvm_unreachable("Reduction kind is not set"); 4968 } 4969 4970 /// Creates reduction operation with the current opcode with the IR flags 4971 /// from \p ReductionOps. 4972 Value *createOp(IRBuilder<> &Builder, const Twine &Name, 4973 const ReductionOpsListType &ReductionOps) const { 4974 assert(isVectorizable() && 4975 "Expected add|fadd or min/max reduction operation."); 4976 auto *Op = createOp(Builder, Name); 4977 switch (Kind) { 4978 case RK_Arithmetic: 4979 propagateIRFlags(Op, ReductionOps[0]); 4980 return Op; 4981 case RK_Min: 4982 case RK_Max: 4983 case RK_UMin: 4984 case RK_UMax: 4985 if (auto *SI = dyn_cast<SelectInst>(Op)) 4986 propagateIRFlags(SI->getCondition(), ReductionOps[0]); 4987 propagateIRFlags(Op, ReductionOps[1]); 4988 return Op; 4989 case RK_None: 4990 break; 4991 } 4992 llvm_unreachable("Unknown reduction operation."); 4993 } 4994 /// Creates reduction operation with the current opcode with the IR flags 4995 /// from \p I. 
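    /// This overload is used when folding a single remaining scalar (e.g. an
    /// extra argument) into the reduced value, so the IR flags of that one
    /// scalar instruction are reused rather than the whole ReductionOps list.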
4996 Value *createOp(IRBuilder<> &Builder, const Twine &Name, 4997 Instruction *I) const { 4998 assert(isVectorizable() && 4999 "Expected add|fadd or min/max reduction operation."); 5000 auto *Op = createOp(Builder, Name); 5001 switch (Kind) { 5002 case RK_Arithmetic: 5003 propagateIRFlags(Op, I); 5004 return Op; 5005 case RK_Min: 5006 case RK_Max: 5007 case RK_UMin: 5008 case RK_UMax: 5009 if (auto *SI = dyn_cast<SelectInst>(Op)) { 5010 propagateIRFlags(SI->getCondition(), 5011 cast<SelectInst>(I)->getCondition()); 5012 } 5013 propagateIRFlags(Op, I); 5014 return Op; 5015 case RK_None: 5016 break; 5017 } 5018 llvm_unreachable("Unknown reduction operation."); 5019 } 5020 5021 TargetTransformInfo::ReductionFlags getFlags() const { 5022 TargetTransformInfo::ReductionFlags Flags; 5023 Flags.NoNaN = NoNaN; 5024 switch (Kind) { 5025 case RK_Arithmetic: 5026 break; 5027 case RK_Min: 5028 Flags.IsSigned = Opcode == Instruction::ICmp; 5029 Flags.IsMaxOp = false; 5030 break; 5031 case RK_Max: 5032 Flags.IsSigned = Opcode == Instruction::ICmp; 5033 Flags.IsMaxOp = true; 5034 break; 5035 case RK_UMin: 5036 Flags.IsSigned = false; 5037 Flags.IsMaxOp = false; 5038 break; 5039 case RK_UMax: 5040 Flags.IsSigned = false; 5041 Flags.IsMaxOp = true; 5042 break; 5043 case RK_None: 5044 llvm_unreachable("Reduction kind is not set"); 5045 } 5046 return Flags; 5047 } 5048 }; 5049 5050 Instruction *ReductionRoot = nullptr; 5051 5052 /// The operation data of the reduction operation. 5053 OperationData ReductionData; 5054 5055 /// The operation data of the values we perform a reduction on. 5056 OperationData ReducedValueData; 5057 5058 /// Should we model this reduction as a pairwise reduction tree or a tree that 5059 /// splits the vector in halves and adds those halves. 5060 bool IsPairwiseReduction = false; 5061 5062 /// Checks if the ParentStackElem.first should be marked as a reduction 5063 /// operation with an extra argument or as extra argument itself. 5064 void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem, 5065 Value *ExtraArg) { 5066 if (ExtraArgs.count(ParentStackElem.first)) { 5067 ExtraArgs[ParentStackElem.first] = nullptr; 5068 // We ran into something like: 5069 // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg. 5070 // The whole ParentStackElem.first should be considered as an extra value 5071 // in this case. 5072 // Do not perform analysis of remaining operands of ParentStackElem.first 5073 // instruction, this whole instruction is an extra argument. 5074 ParentStackElem.second = ParentStackElem.first->getNumOperands(); 5075 } else { 5076 // We ran into something like: 5077 // ParentStackElem.first += ... + ExtraArg + ... 5078 ExtraArgs[ParentStackElem.first] = ExtraArg; 5079 } 5080 } 5081 5082 static OperationData getOperationData(Value *V) { 5083 if (!V) 5084 return OperationData(); 5085 5086 Value *LHS; 5087 Value *RHS; 5088 if (m_BinOp(m_Value(LHS), m_Value(RHS)).match(V)) { 5089 return OperationData(cast<BinaryOperator>(V)->getOpcode(), LHS, RHS, 5090 RK_Arithmetic); 5091 } 5092 if (auto *Select = dyn_cast<SelectInst>(V)) { 5093 // Look for a min/max pattern. 
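      // Illustrative canonical form matched below:
      //   %cmp = icmp ult i32 %x, %y   ; or the fcmp variants for FP min/max
      //   %min = select i1 %cmp, i32 %x, i32 %y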
      if (m_UMin(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(Instruction::ICmp, LHS, RHS, RK_UMin);
      } else if (m_SMin(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(Instruction::ICmp, LHS, RHS, RK_Min);
      } else if (m_OrdFMin(m_Value(LHS), m_Value(RHS)).match(Select) ||
                 m_UnordFMin(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(
            Instruction::FCmp, LHS, RHS, RK_Min,
            cast<Instruction>(Select->getCondition())->hasNoNaNs());
      } else if (m_UMax(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(Instruction::ICmp, LHS, RHS, RK_UMax);
      } else if (m_SMax(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(Instruction::ICmp, LHS, RHS, RK_Max);
      } else if (m_OrdFMax(m_Value(LHS), m_Value(RHS)).match(Select) ||
                 m_UnordFMax(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(
            Instruction::FCmp, LHS, RHS, RK_Max,
            cast<Instruction>(Select->getCondition())->hasNoNaNs());
      }
    }
    return OperationData(V);
  }

public:
  HorizontalReduction() = default;

  /// \brief Try to find a reduction tree.
  bool matchAssociativeReduction(PHINode *Phi, Instruction *B) {
    assert((!Phi || is_contained(Phi->operands(), B)) &&
           "The phi needs to use the binary operator");

    ReductionData = getOperationData(B);

    // We could have an initial reduction that is not an add.
    //   r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted in the first '+'.
    if (Phi) {
      if (ReductionData.getLHS() == Phi) {
        Phi = nullptr;
        B = dyn_cast<Instruction>(ReductionData.getRHS());
        ReductionData = getOperationData(B);
      } else if (ReductionData.getRHS() == Phi) {
        Phi = nullptr;
        B = dyn_cast<Instruction>(ReductionData.getLHS());
        ReductionData = getOperationData(B);
      }
    }

    if (!ReductionData.isVectorizable(B))
      return false;

    Type *Ty = B->getType();
    if (!isValidElementType(Ty))
      return false;

    ReducedValueData.clear();
    ReductionRoot = B;

    // Post-order traverse the reduction tree starting at B. We only handle
    // true trees containing only binary operators.
    SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
    Stack.push_back(std::make_pair(B, ReductionData.getFirstOperandIndex()));
    ReductionData.initReductionOps(ReductionOps);
    while (!Stack.empty()) {
      Instruction *TreeN = Stack.back().first;
      unsigned EdgeToVisit = Stack.back().second++;
      OperationData OpData = getOperationData(TreeN);
      bool IsReducedValue = OpData != ReductionData;

      // Postorder visit.
      if (IsReducedValue || EdgeToVisit == OpData.getNumberOfOperands()) {
        if (IsReducedValue)
          ReducedVals.push_back(TreeN);
        else {
          auto I = ExtraArgs.find(TreeN);
          if (I != ExtraArgs.end() && !I->second) {
            // Check if TreeN is an extra argument of its parent operation.
            if (Stack.size() <= 1) {
              // TreeN can't be an extra argument as it is a root reduction
              // operation.
              return false;
            }
            // Yes, TreeN is an extra argument, do not add it to a list of
            // reduction operations.
            // Stack[Stack.size() - 2] always points to the parent operation.
            markExtraArg(Stack[Stack.size() - 2], TreeN);
            ExtraArgs.erase(TreeN);
          } else
            ReductionData.addReductionOps(TreeN, ReductionOps);
        }
        // Retract.
        Stack.pop_back();
        continue;
      }

      // Visit left or right.
      Value *NextV = TreeN->getOperand(EdgeToVisit);
      if (NextV != Phi) {
        auto *I = dyn_cast<Instruction>(NextV);
        OpData = getOperationData(I);
        // Continue analysis if the next operand is a reduction operation or
        // (possibly) a reduced value. If the reduced value opcode is not set,
        // the first operation met that differs from the reduction operation is
        // taken as the reduced value class.
        if (I && (!ReducedValueData || OpData == ReducedValueData ||
                  OpData == ReductionData)) {
          const bool IsReductionOperation = OpData == ReductionData;
          // Only handle trees in the current basic block.
          if (!ReductionData.hasSameParent(I, B->getParent(),
                                           IsReductionOperation)) {
            // I is an extra argument for TreeN (its parent operation).
            markExtraArg(Stack.back(), I);
            continue;
          }

          // Each tree node needs to have a minimal number of users except for
          // the ultimate reduction.
          if (!ReductionData.hasRequiredNumberOfUses(I,
                                                     OpData == ReductionData) &&
              I != B) {
            // I is an extra argument for TreeN (its parent operation).
            markExtraArg(Stack.back(), I);
            continue;
          }

          if (IsReductionOperation) {
            // We need to be able to reassociate the reduction operations.
            if (!OpData.isAssociative(I)) {
              // I is an extra argument for TreeN (its parent operation).
              markExtraArg(Stack.back(), I);
              continue;
            }
          } else if (ReducedValueData && ReducedValueData != OpData) {
            // Make sure that the opcodes of the operations that we are going
            // to reduce match.
            // I is an extra argument for TreeN (its parent operation).
            markExtraArg(Stack.back(), I);
            continue;
          } else if (!ReducedValueData)
            ReducedValueData = OpData;

          Stack.push_back(std::make_pair(I, OpData.getFirstOperandIndex()));
          continue;
        }
      }
      // NextV is an extra argument for TreeN (its parent operation).
      markExtraArg(Stack.back(), NextV);
    }
    return true;
  }

  /// \brief Attempt to vectorize the tree found by
  /// matchAssociativeReduction.
  bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
    if (ReducedVals.empty())
      return false;

    // If there is a sufficient number of reduction values, reduce
    // to a nearby power-of-2. We can safely generate oversized
    // vectors and rely on the backend to split them to legal sizes.
    unsigned NumReducedVals = ReducedVals.size();
    if (NumReducedVals < 4)
      return false;

    unsigned ReduxWidth = PowerOf2Floor(NumReducedVals);

    Value *VectorizedTree = nullptr;
    IRBuilder<> Builder(ReductionRoot);
    FastMathFlags Unsafe;
    Unsafe.setFast();
    Builder.setFastMathFlags(Unsafe);
    unsigned i = 0;

    BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
    // The same extra argument may be used several times, so log each attempt
    // to use it.
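    // For example (illustrative), in r = a[0] + a[1] + x + a[2] + a[3] the
    // scalar x is such an extra argument: it is not part of the vectorized
    // bundle, but it must still be folded into the final reduced value below.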
    for (auto &Pair : ExtraArgs)
      ExternallyUsedValues[Pair.second].push_back(Pair.first);
    SmallVector<Value *, 16> IgnoreList;
    for (auto &RdxOps : ReductionOps)
      IgnoreList.append(RdxOps.begin(), RdxOps.end());
    while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) {
      auto VL = makeArrayRef(&ReducedVals[i], ReduxWidth);
      V.buildTree(VL, ExternallyUsedValues, IgnoreList);
      if (V.shouldReorder()) {
        SmallVector<Value *, 8> Reversed(VL.rbegin(), VL.rend());
        V.buildTree(Reversed, ExternallyUsedValues, IgnoreList);
      }
      if (V.isTreeTinyAndNotFullyVectorizable())
        break;

      V.computeMinimumValueSizes();

      // Estimate cost.
      int Cost =
          V.getTreeCost() + getReductionCost(TTI, ReducedVals[i], ReduxWidth);
      if (Cost >= -SLPCostThreshold) {
        V.getORE()->emit([&]() {
          return OptimizationRemarkMissed(SV_NAME, "HorSLPNotBeneficial",
                                          cast<Instruction>(VL[0]))
                 << "Vectorizing horizontal reduction is possible "
                 << "but not beneficial with cost " << ore::NV("Cost", Cost)
                 << " and threshold "
                 << ore::NV("Threshold", -SLPCostThreshold);
        });
        break;
      }

      DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
                   << ". (HorRdx)\n");
      V.getORE()->emit([&]() {
        return OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction",
                                  cast<Instruction>(VL[0]))
               << "Vectorized horizontal reduction with cost "
               << ore::NV("Cost", Cost) << " and with tree size "
               << ore::NV("TreeSize", V.getTreeSize());
      });

      // Vectorize a tree.
      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
      Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);

      // Emit a reduction.
      Value *ReducedSubTree =
          emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);
      if (VectorizedTree) {
        Builder.SetCurrentDebugLocation(Loc);
        OperationData VectReductionData(ReductionData.getOpcode(),
                                        VectorizedTree, ReducedSubTree,
                                        ReductionData.getKind());
        VectorizedTree =
            VectReductionData.createOp(Builder, "op.rdx", ReductionOps);
      } else
        VectorizedTree = ReducedSubTree;
      i += ReduxWidth;
      ReduxWidth = PowerOf2Floor(NumReducedVals - i);
    }

    if (VectorizedTree) {
      // Finish the reduction.
      for (; i < NumReducedVals; ++i) {
        auto *I = cast<Instruction>(ReducedVals[i]);
        Builder.SetCurrentDebugLocation(I->getDebugLoc());
        OperationData VectReductionData(ReductionData.getOpcode(),
                                        VectorizedTree, I,
                                        ReductionData.getKind());
        VectorizedTree = VectReductionData.createOp(Builder, "", ReductionOps);
      }
      for (auto &Pair : ExternallyUsedValues) {
        assert(!Pair.second.empty() &&
               "At least one DebugLoc must be inserted");
        // Add each externally used value to the final reduction.
        for (auto *I : Pair.second) {
          Builder.SetCurrentDebugLocation(I->getDebugLoc());
          OperationData VectReductionData(ReductionData.getOpcode(),
                                          VectorizedTree, Pair.first,
                                          ReductionData.getKind());
          VectorizedTree = VectReductionData.createOp(Builder, "op.extra", I);
        }
      }
      // Update users.
      ReductionRoot->replaceAllUsesWith(VectorizedTree);
    }
    return VectorizedTree != nullptr;
  }

  unsigned numReductionValues() const { return ReducedVals.size(); }

private:
  /// \brief Calculate the cost of a reduction.
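  /// The result is the cost of the chosen vector reduction form (pairwise or
  /// splitting, whichever the target reports as cheaper) minus the cost of
  /// the (ReduxWidth - 1) scalar operations it replaces, so a negative value
  /// means the vector form is profitable.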
5367 int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal, 5368 unsigned ReduxWidth) { 5369 Type *ScalarTy = FirstReducedVal->getType(); 5370 Type *VecTy = VectorType::get(ScalarTy, ReduxWidth); 5371 5372 int PairwiseRdxCost; 5373 int SplittingRdxCost; 5374 switch (ReductionData.getKind()) { 5375 case RK_Arithmetic: 5376 PairwiseRdxCost = 5377 TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy, 5378 /*IsPairwiseForm=*/true); 5379 SplittingRdxCost = 5380 TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy, 5381 /*IsPairwiseForm=*/false); 5382 break; 5383 case RK_Min: 5384 case RK_Max: 5385 case RK_UMin: 5386 case RK_UMax: { 5387 Type *VecCondTy = CmpInst::makeCmpResultType(VecTy); 5388 bool IsUnsigned = ReductionData.getKind() == RK_UMin || 5389 ReductionData.getKind() == RK_UMax; 5390 PairwiseRdxCost = 5391 TTI->getMinMaxReductionCost(VecTy, VecCondTy, 5392 /*IsPairwiseForm=*/true, IsUnsigned); 5393 SplittingRdxCost = 5394 TTI->getMinMaxReductionCost(VecTy, VecCondTy, 5395 /*IsPairwiseForm=*/false, IsUnsigned); 5396 break; 5397 } 5398 case RK_None: 5399 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 5400 } 5401 5402 IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost; 5403 int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost; 5404 5405 int ScalarReduxCost; 5406 switch (ReductionData.getKind()) { 5407 case RK_Arithmetic: 5408 ScalarReduxCost = 5409 TTI->getArithmeticInstrCost(ReductionData.getOpcode(), ScalarTy); 5410 break; 5411 case RK_Min: 5412 case RK_Max: 5413 case RK_UMin: 5414 case RK_UMax: 5415 ScalarReduxCost = 5416 TTI->getCmpSelInstrCost(ReductionData.getOpcode(), ScalarTy) + 5417 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 5418 CmpInst::makeCmpResultType(ScalarTy)); 5419 break; 5420 case RK_None: 5421 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 5422 } 5423 ScalarReduxCost *= (ReduxWidth - 1); 5424 5425 DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost 5426 << " for reduction that starts with " << *FirstReducedVal 5427 << " (It is a " 5428 << (IsPairwiseReduction ? "pairwise" : "splitting") 5429 << " reduction)\n"); 5430 5431 return VecReduxCost - ScalarReduxCost; 5432 } 5433 5434 /// \brief Emit a horizontal reduction of the vectorized value. 
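  /// For a pairwise reduction of a <4 x float> value %v this emits,
  /// illustratively:
  ///   %rdx.shuf.l = shufflevector <4 x float> %v, <4 x float> undef,
  ///                 <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
  ///   %rdx.shuf.r = shufflevector <4 x float> %v, <4 x float> undef,
  ///                 <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
  ///   %op.rdx     = fadd fast <4 x float> %rdx.shuf.l, %rdx.shuf.r
  /// followed by one more shuffle/op round on %op.rdx and a final
  /// extractelement of lane 0. The splitting (non-pairwise) form is delegated
  /// to createSimpleTargetReduction instead.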
5435 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 5436 unsigned ReduxWidth, const TargetTransformInfo *TTI) { 5437 assert(VectorizedValue && "Need to have a vectorized tree node"); 5438 assert(isPowerOf2_32(ReduxWidth) && 5439 "We only handle power-of-two reductions for now"); 5440 5441 if (!IsPairwiseReduction) 5442 return createSimpleTargetReduction( 5443 Builder, TTI, ReductionData.getOpcode(), VectorizedValue, 5444 ReductionData.getFlags(), ReductionOps.back()); 5445 5446 Value *TmpVec = VectorizedValue; 5447 for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) { 5448 Value *LeftMask = 5449 createRdxShuffleMask(ReduxWidth, i, true, true, Builder); 5450 Value *RightMask = 5451 createRdxShuffleMask(ReduxWidth, i, true, false, Builder); 5452 5453 Value *LeftShuf = Builder.CreateShuffleVector( 5454 TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l"); 5455 Value *RightShuf = Builder.CreateShuffleVector( 5456 TmpVec, UndefValue::get(TmpVec->getType()), (RightMask), 5457 "rdx.shuf.r"); 5458 OperationData VectReductionData(ReductionData.getOpcode(), LeftShuf, 5459 RightShuf, ReductionData.getKind()); 5460 TmpVec = VectReductionData.createOp(Builder, "op.rdx", ReductionOps); 5461 } 5462 5463 // The result is in the first element of the vector. 5464 return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 5465 } 5466 }; 5467 5468 } // end anonymous namespace 5469 5470 /// \brief Recognize construction of vectors like 5471 /// %ra = insertelement <4 x float> undef, float %s0, i32 0 5472 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 5473 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 5474 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 5475 /// starting from the last insertelement instruction. 5476 /// 5477 /// Returns true if it matches 5478 static bool findBuildVector(InsertElementInst *LastInsertElem, 5479 SmallVectorImpl<Value *> &BuildVectorOpds) { 5480 Value *V = nullptr; 5481 do { 5482 BuildVectorOpds.push_back(LastInsertElem->getOperand(1)); 5483 V = LastInsertElem->getOperand(0); 5484 if (isa<UndefValue>(V)) 5485 break; 5486 LastInsertElem = dyn_cast<InsertElementInst>(V); 5487 if (!LastInsertElem || !LastInsertElem->hasOneUse()) 5488 return false; 5489 } while (true); 5490 std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end()); 5491 return true; 5492 } 5493 5494 /// \brief Like findBuildVector, but looks for construction of aggregate. 5495 /// 5496 /// \return true if it matches. 5497 static bool findBuildAggregate(InsertValueInst *IV, 5498 SmallVectorImpl<Value *> &BuildVectorOpds) { 5499 Value *V; 5500 do { 5501 BuildVectorOpds.push_back(IV->getInsertedValueOperand()); 5502 V = IV->getAggregateOperand(); 5503 if (isa<UndefValue>(V)) 5504 break; 5505 IV = dyn_cast<InsertValueInst>(V); 5506 if (!IV || !IV->hasOneUse()) 5507 return false; 5508 } while (true); 5509 std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end()); 5510 return true; 5511 } 5512 5513 static bool PhiTypeSorterFunc(Value *V, Value *V2) { 5514 return V->getType() < V2->getType(); 5515 } 5516 5517 /// \brief Try and get a reduction value from a phi node. 5518 /// 5519 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 5520 /// if they come from either \p ParentBB or a containing loop latch. 5521 /// 5522 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 5523 /// if not possible. 
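/// For example (illustrative):
///   loop:
///     %sum = phi i32 [ 0, %entry ], [ %sum.next, %loop ]
///     ...
///     %sum.next = add i32 %sum, %x
/// Here %sum.next is the candidate reduction value returned for the phi
/// %sum.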
static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
                                BasicBlock *ParentBB, LoopInfo *LI) {
  // There are situations where the reduction value is not dominated by the
  // reduction phi. Vectorizing such cases has been reported to cause
  // miscompiles. See PR25787.
  auto DominatedReduxValue = [&](Value *R) {
    auto *I = dyn_cast<Instruction>(R);
    return I && DT->dominates(P->getParent(), I->getParent());
  };

  Value *Rdx = nullptr;

  // Return the incoming value if it comes from the same BB as the phi node.
  if (P->getIncomingBlock(0) == ParentBB) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == ParentBB) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  // Otherwise, check whether we have a loop latch to look at.
  Loop *BBL = LI->getLoopFor(ParentBB);
  if (!BBL)
    return nullptr;
  BasicBlock *BBLatch = BBL->getLoopLatch();
  if (!BBLatch)
    return nullptr;

  // There is a loop latch, return the incoming value if it comes from that.
  // This reduction pattern occasionally turns up.
  if (P->getIncomingBlock(0) == BBLatch) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == BBLatch) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  return nullptr;
}

/// Attempt to reduce a horizontal reduction.
/// If it is legal to match a horizontal reduction feeding the phi node \a P
/// with reduction operators \a Root (or one of its operands) in a basic block
/// \a BB, then check if it can be done. If a horizontal reduction is not found
/// and the root instruction is a binary operation, vectorization of its
/// operands is attempted.
/// \returns true if a horizontal reduction was matched and reduced, or if the
/// operands of one of the binary instructions were vectorized.
/// \returns false if a horizontal reduction was not matched (or not possible)
/// or no vectorization of any binary operation feeding the \a Root instruction
/// was performed.
static bool tryToVectorizeHorReductionOrInstOperands(
    PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
    TargetTransformInfo *TTI,
    const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
  if (!ShouldVectorizeHor)
    return false;

  if (!Root)
    return false;

  if (Root->getParent() != BB || isa<PHINode>(Root))
    return false;
  // Start the analysis from the Root instruction. If a horizontal reduction is
  // found, try to vectorize it. If it is not a horizontal reduction or
  // vectorization is not possible or not effective, and the currently analyzed
  // instruction is a binary operation, try to vectorize the operands, using
  // pre-order DFS traversal order. If the operands were not vectorized, repeat
  // the same procedure considering each operand as a possible root of the
  // horizontal reduction.
  // Interrupt the process if the Root instruction itself was vectorized or all
  // sub-trees not higher than RecursionMaxDepth were analyzed/vectorized.
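  // Each stack entry pairs a candidate root value with its traversal depth;
  // operands are pushed only while the depth stays below RecursionMaxDepth.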
5601 SmallVector<std::pair<WeakTrackingVH, unsigned>, 8> Stack(1, {Root, 0}); 5602 SmallSet<Value *, 8> VisitedInstrs; 5603 bool Res = false; 5604 while (!Stack.empty()) { 5605 Value *V; 5606 unsigned Level; 5607 std::tie(V, Level) = Stack.pop_back_val(); 5608 if (!V) 5609 continue; 5610 auto *Inst = dyn_cast<Instruction>(V); 5611 if (!Inst) 5612 continue; 5613 auto *BI = dyn_cast<BinaryOperator>(Inst); 5614 auto *SI = dyn_cast<SelectInst>(Inst); 5615 if (BI || SI) { 5616 HorizontalReduction HorRdx; 5617 if (HorRdx.matchAssociativeReduction(P, Inst)) { 5618 if (HorRdx.tryToReduce(R, TTI)) { 5619 Res = true; 5620 // Set P to nullptr to avoid re-analysis of phi node in 5621 // matchAssociativeReduction function unless this is the root node. 5622 P = nullptr; 5623 continue; 5624 } 5625 } 5626 if (P && BI) { 5627 Inst = dyn_cast<Instruction>(BI->getOperand(0)); 5628 if (Inst == P) 5629 Inst = dyn_cast<Instruction>(BI->getOperand(1)); 5630 if (!Inst) { 5631 // Set P to nullptr to avoid re-analysis of phi node in 5632 // matchAssociativeReduction function unless this is the root node. 5633 P = nullptr; 5634 continue; 5635 } 5636 } 5637 } 5638 // Set P to nullptr to avoid re-analysis of phi node in 5639 // matchAssociativeReduction function unless this is the root node. 5640 P = nullptr; 5641 if (Vectorize(Inst, R)) { 5642 Res = true; 5643 continue; 5644 } 5645 5646 // Try to vectorize operands. 5647 // Continue analysis for the instruction from the same basic block only to 5648 // save compile time. 5649 if (++Level < RecursionMaxDepth) 5650 for (auto *Op : Inst->operand_values()) 5651 if (VisitedInstrs.insert(Op).second) 5652 if (auto *I = dyn_cast<Instruction>(Op)) 5653 if (!isa<PHINode>(I) && I->getParent() == BB) 5654 Stack.emplace_back(Op, Level); 5655 } 5656 return Res; 5657 } 5658 5659 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V, 5660 BasicBlock *BB, BoUpSLP &R, 5661 TargetTransformInfo *TTI) { 5662 if (!V) 5663 return false; 5664 auto *I = dyn_cast<Instruction>(V); 5665 if (!I) 5666 return false; 5667 5668 if (!isa<BinaryOperator>(I)) 5669 P = nullptr; 5670 // Try to match and vectorize a horizontal reduction. 5671 auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool { 5672 return tryToVectorize(I, R); 5673 }; 5674 return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI, 5675 ExtraVectorization); 5676 } 5677 5678 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI, 5679 BasicBlock *BB, BoUpSLP &R) { 5680 const DataLayout &DL = BB->getModule()->getDataLayout(); 5681 if (!R.canMapToVector(IVI->getType(), DL)) 5682 return false; 5683 5684 SmallVector<Value *, 16> BuildVectorOpds; 5685 if (!findBuildAggregate(IVI, BuildVectorOpds)) 5686 return false; 5687 5688 DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n"); 5689 // Aggregate value is unlikely to be processed in vector register, we need to 5690 // extract scalars into scalar registers, so NeedExtraction is set true. 5691 return tryToVectorizeList(BuildVectorOpds, R); 5692 } 5693 5694 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI, 5695 BasicBlock *BB, BoUpSLP &R) { 5696 SmallVector<Value *, 16> BuildVectorOpds; 5697 if (!findBuildVector(IEI, BuildVectorOpds)) 5698 return false; 5699 5700 // Vectorize starting with the build vector operands ignoring the BuildVector 5701 // instructions for the purpose of scheduling and user extraction. 
  return tryToVectorizeList(BuildVectorOpds, R);
}

bool SLPVectorizerPass::vectorizeCmpInst(CmpInst *CI, BasicBlock *BB,
                                         BoUpSLP &R) {
  if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R))
    return true;

  bool OpsChanged = false;
  for (int Idx = 0; Idx < 2; ++Idx) {
    OpsChanged |=
        vectorizeRootInstruction(nullptr, CI->getOperand(Idx), BB, R, TTI);
  }
  return OpsChanged;
}

bool SLPVectorizerPass::vectorizeSimpleInstructions(
    SmallVectorImpl<WeakVH> &Instructions, BasicBlock *BB, BoUpSLP &R) {
  bool OpsChanged = false;
  for (auto &VH : reverse(Instructions)) {
    auto *I = dyn_cast_or_null<Instruction>(VH);
    if (!I)
      continue;
    if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I))
      OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
    else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I))
      OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
    else if (auto *CI = dyn_cast<CmpInst>(I))
      OpsChanged |= vectorizeCmpInst(CI, BB, R);
  }
  Instructions.clear();
  return OpsChanged;
}

bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallSet<Value *, 16> VisitedInstrs;

  bool HaveVectorizedPhiNodes = true;
  while (HaveVectorizedPhiNodes) {
    HaveVectorizedPhiNodes = false;

    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (Instruction &I : *BB) {
      PHINode *P = dyn_cast<PHINode>(&I);
      if (!P)
        break;

      if (!VisitedInstrs.count(P))
        Incoming.push_back(P);
    }

    // Sort by type.
    std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);

    // Try to vectorize elements based on their type.
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
                                           E = Incoming.end();
         IncIt != E;) {

      // Look for the next elements with the same type.
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs ("
                   << NumElts << ")\n");
      // The order in which the phi nodes appear in the program does not
      // matter. So allow tryToVectorizeList to reorder them if it is
      // beneficial. This is done when there are exactly two elements since
      // tryToVectorizeList asserts that there are only two values when
      // AllowReorder is true.
      bool AllowReorder = NumElts == 2;
      if (NumElts > 1 &&
          tryToVectorizeList(makeArrayRef(IncIt, NumElts), R, AllowReorder)) {
        // Success: start over because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  SmallVector<WeakVH, 8> PostProcessInstructions;
  SmallDenseSet<Instruction *, 4> KeyNodes;
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // We may go through BB multiple times, so skip the ones we have already
    // checked.
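    // If we revisit a key node (an instruction without users, e.g. a store or
    // terminator, recorded in KeyNodes below), use that as the signal to flush
    // the post-process instructions collected since the last restart.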
    if (!VisitedInstrs.insert(&*it).second) {
      if (it->use_empty() && KeyNodes.count(&*it) > 0 &&
          vectorizeSimpleInstructions(PostProcessInstructions, BB, R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;

      // Try to match and vectorize a horizontal reduction.
      if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
                                   TTI)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
      continue;
    }

    // Ran into an instruction without users, such as a terminator, a function
    // call with an ignored return value, or a store. Ignore unused
    // instructions (based on the instruction type, except for CallInst and
    // InvokeInst).
    if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
                            isa<InvokeInst>(it))) {
      KeyNodes.insert(&*it);
      bool OpsChanged = false;
      if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
        for (auto *V : it->operand_values()) {
          // Try to match and vectorize a horizontal reduction.
          OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
        }
      }
      // Start vectorization of the post-process list of instructions from the
      // top-tree instructions to try to vectorize as many instructions as
      // possible.
      OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R);
      if (OpsChanged) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
    }

    if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
        isa<InsertValueInst>(it))
      PostProcessInstructions.push_back(&*it);
  }

  return Changed;
}

bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
  auto Changed = false;
  for (auto &Entry : GEPs) {
    // If the getelementptr list has fewer than two elements, there's nothing
    // to do.
    if (Entry.second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
                 << Entry.second.size() << ".\n");

    // We process the getelementptr list in chunks of 16 (like we do for
    // stores) to minimize compile-time.
    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += 16) {
      auto Len = std::min<unsigned>(BE - BI, 16);
      auto GEPList = makeArrayRef(&Entry.second[BI], Len);

      // Initialize a set of candidate getelementptrs. Note that we use a
      // SetVector here to preserve program order. If the index computations
      // are vectorizable and begin with loads, we want to minimize the chance
      // of having to reorder them later.
      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

      // Some of the candidates may have already been vectorized after we
      // initially collected them. If so, the WeakTrackingVHs will have
      // nullified the values, so remove them from the set of candidates.
5894 Candidates.remove(nullptr); 5895 5896 // Remove from the set of candidates all pairs of getelementptrs with 5897 // constant differences. Such getelementptrs are likely not good 5898 // candidates for vectorization in a bottom-up phase since one can be 5899 // computed from the other. We also ensure all candidate getelementptr 5900 // indices are unique. 5901 for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) { 5902 auto *GEPI = cast<GetElementPtrInst>(GEPList[I]); 5903 if (!Candidates.count(GEPI)) 5904 continue; 5905 auto *SCEVI = SE->getSCEV(GEPList[I]); 5906 for (int J = I + 1; J < E && Candidates.size() > 1; ++J) { 5907 auto *GEPJ = cast<GetElementPtrInst>(GEPList[J]); 5908 auto *SCEVJ = SE->getSCEV(GEPList[J]); 5909 if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) { 5910 Candidates.remove(GEPList[I]); 5911 Candidates.remove(GEPList[J]); 5912 } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) { 5913 Candidates.remove(GEPList[J]); 5914 } 5915 } 5916 } 5917 5918 // We break out of the above computation as soon as we know there are 5919 // fewer than two candidates remaining. 5920 if (Candidates.size() < 2) 5921 continue; 5922 5923 // Add the single, non-constant index of each candidate to the bundle. We 5924 // ensured the indices met these constraints when we originally collected 5925 // the getelementptrs. 5926 SmallVector<Value *, 16> Bundle(Candidates.size()); 5927 auto BundleIndex = 0u; 5928 for (auto *V : Candidates) { 5929 auto *GEP = cast<GetElementPtrInst>(V); 5930 auto *GEPIdx = GEP->idx_begin()->get(); 5931 assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx)); 5932 Bundle[BundleIndex++] = GEPIdx; 5933 } 5934 5935 // Try and vectorize the indices. We are currently only interested in 5936 // gather-like cases of the form: 5937 // 5938 // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ... 5939 // 5940 // where the loads of "a", the loads of "b", and the subtractions can be 5941 // performed in parallel. It's likely that detecting this pattern in a 5942 // bottom-up phase will be simpler and less costly than building a 5943 // full-blown top-down phase beginning at the consecutive loads. 5944 Changed |= tryToVectorizeList(Bundle, R); 5945 } 5946 } 5947 return Changed; 5948 } 5949 5950 bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) { 5951 bool Changed = false; 5952 // Attempt to sort and vectorize each of the store-groups. 5953 for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e; 5954 ++it) { 5955 if (it->second.size() < 2) 5956 continue; 5957 5958 DEBUG(dbgs() << "SLP: Analyzing a store chain of length " 5959 << it->second.size() << ".\n"); 5960 5961 // Process the stores in chunks of 16. 5962 // TODO: The limit of 16 inhibits greater vectorization factors. 5963 // For example, AVX2 supports v32i8. Increasing this limit, however, 5964 // may cause a significant compile-time increase. 
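    // For example (illustrative), a chain of 37 collected stores is analyzed
    // as three slices of 16, 16 and 5 stores.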
5965 for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI+=16) { 5966 unsigned Len = std::min<unsigned>(CE - CI, 16); 5967 Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len), R); 5968 } 5969 } 5970 return Changed; 5971 } 5972 5973 char SLPVectorizer::ID = 0; 5974 5975 static const char lv_name[] = "SLP Vectorizer"; 5976 5977 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false) 5978 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 5979 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 5980 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 5981 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 5982 INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 5983 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 5984 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 5985 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false) 5986 5987 Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); } 5988