1 //===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This pass implements the Bottom Up SLP vectorizer. It detects consecutive 11 // stores that can be put together into vector-stores. Next, it attempts to 12 // construct vectorizable tree using the use-def chains. If a profitable tree 13 // was found, the SLP vectorizer performs vectorization on the tree. 14 // 15 // The pass is inspired by the work described in the paper: 16 // "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks. 17 // 18 //===----------------------------------------------------------------------===// 19 20 #include "llvm/Transforms/Vectorize/SLPVectorizer.h" 21 #include "llvm/ADT/ArrayRef.h" 22 #include "llvm/ADT/DenseMap.h" 23 #include "llvm/ADT/DenseSet.h" 24 #include "llvm/ADT/MapVector.h" 25 #include "llvm/ADT/None.h" 26 #include "llvm/ADT/Optional.h" 27 #include "llvm/ADT/PostOrderIterator.h" 28 #include "llvm/ADT/STLExtras.h" 29 #include "llvm/ADT/SetVector.h" 30 #include "llvm/ADT/SmallPtrSet.h" 31 #include "llvm/ADT/SmallSet.h" 32 #include "llvm/ADT/SmallVector.h" 33 #include "llvm/ADT/Statistic.h" 34 #include "llvm/ADT/iterator.h" 35 #include "llvm/ADT/iterator_range.h" 36 #include "llvm/Analysis/AliasAnalysis.h" 37 #include "llvm/Analysis/CodeMetrics.h" 38 #include "llvm/Analysis/DemandedBits.h" 39 #include "llvm/Analysis/GlobalsModRef.h" 40 #include "llvm/Analysis/LoopAccessAnalysis.h" 41 #include "llvm/Analysis/LoopInfo.h" 42 #include "llvm/Analysis/MemoryLocation.h" 43 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 44 #include "llvm/Analysis/ScalarEvolution.h" 45 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 46 #include "llvm/Analysis/TargetLibraryInfo.h" 47 #include "llvm/Analysis/TargetTransformInfo.h" 48 #include "llvm/Analysis/ValueTracking.h" 49 #include "llvm/Analysis/VectorUtils.h" 50 #include "llvm/IR/Attributes.h" 51 #include "llvm/IR/BasicBlock.h" 52 #include "llvm/IR/Constant.h" 53 #include "llvm/IR/Constants.h" 54 #include "llvm/IR/DataLayout.h" 55 #include "llvm/IR/DebugLoc.h" 56 #include "llvm/IR/DerivedTypes.h" 57 #include "llvm/IR/Dominators.h" 58 #include "llvm/IR/Function.h" 59 #include "llvm/IR/IRBuilder.h" 60 #include "llvm/IR/InstrTypes.h" 61 #include "llvm/IR/Instruction.h" 62 #include "llvm/IR/Instructions.h" 63 #include "llvm/IR/IntrinsicInst.h" 64 #include "llvm/IR/Intrinsics.h" 65 #include "llvm/IR/Module.h" 66 #include "llvm/IR/NoFolder.h" 67 #include "llvm/IR/Operator.h" 68 #include "llvm/IR/PassManager.h" 69 #include "llvm/IR/PatternMatch.h" 70 #include "llvm/IR/Type.h" 71 #include "llvm/IR/Use.h" 72 #include "llvm/IR/User.h" 73 #include "llvm/IR/Value.h" 74 #include "llvm/IR/ValueHandle.h" 75 #include "llvm/IR/Verifier.h" 76 #include "llvm/Pass.h" 77 #include "llvm/Support/Casting.h" 78 #include "llvm/Support/CommandLine.h" 79 #include "llvm/Support/Compiler.h" 80 #include "llvm/Support/DOTGraphTraits.h" 81 #include "llvm/Support/Debug.h" 82 #include "llvm/Support/ErrorHandling.h" 83 #include "llvm/Support/GraphWriter.h" 84 #include "llvm/Support/KnownBits.h" 85 #include "llvm/Support/MathExtras.h" 86 #include "llvm/Support/raw_ostream.h" 87 #include "llvm/Transforms/Utils/LoopUtils.h" 88 #include "llvm/Transforms/Vectorize.h" 89 #include <algorithm> 90 #include <cassert> 91 #include <cstdint> 92 #include <iterator> 93 #include <memory> 94 #include <set> 95 #include <string> 96 #include <tuple> 97 #include <utility> 98 #include <vector> 99 100 using namespace llvm; 101 using namespace llvm::PatternMatch; 102 using namespace slpvectorizer; 103 104 #define SV_NAME "slp-vectorizer" 105 #define DEBUG_TYPE "SLP" 106 107 STATISTIC(NumVectorInstructions, "Number of vector instructions generated"); 108 109 static cl::opt<int> 110 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden, 111 cl::desc("Only vectorize if you gain more than this " 112 "number ")); 113 114 static cl::opt<bool> 115 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden, 116 cl::desc("Attempt to vectorize horizontal reductions")); 117 118 static cl::opt<bool> ShouldStartVectorizeHorAtStore( 119 "slp-vectorize-hor-store", cl::init(false), cl::Hidden, 120 cl::desc( 121 "Attempt to vectorize horizontal reductions feeding into a store")); 122 123 static cl::opt<int> 124 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden, 125 cl::desc("Attempt to vectorize for this register size in bits")); 126 127 /// Limits the size of scheduling regions in a block. 128 /// It avoid long compile times for _very_ large blocks where vector 129 /// instructions are spread over a wide range. 130 /// This limit is way higher than needed by real-world functions. 131 static cl::opt<int> 132 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden, 133 cl::desc("Limit the size of the SLP scheduling region per block")); 134 135 static cl::opt<int> MinVectorRegSizeOption( 136 "slp-min-reg-size", cl::init(128), cl::Hidden, 137 cl::desc("Attempt to vectorize for this register size in bits")); 138 139 static cl::opt<unsigned> RecursionMaxDepth( 140 "slp-recursion-max-depth", cl::init(12), cl::Hidden, 141 cl::desc("Limit the recursion depth when building a vectorizable tree")); 142 143 static cl::opt<unsigned> MinTreeSize( 144 "slp-min-tree-size", cl::init(3), cl::Hidden, 145 cl::desc("Only vectorize small trees if they are fully vectorizable")); 146 147 static cl::opt<bool> 148 ViewSLPTree("view-slp-tree", cl::Hidden, 149 cl::desc("Display the SLP trees with Graphviz")); 150 151 // Limit the number of alias checks. The limit is chosen so that 152 // it has no negative effect on the llvm benchmarks. 153 static const unsigned AliasedCheckLimit = 10; 154 155 // Another limit for the alias checks: The maximum distance between load/store 156 // instructions where alias checks are done. 157 // This limit is useful for very large basic blocks. 158 static const unsigned MaxMemDepDistance = 160; 159 160 /// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling 161 /// regions to be handled. 162 static const int MinScheduleRegionSize = 16; 163 164 /// \brief Predicate for the element types that the SLP vectorizer supports. 165 /// 166 /// The most important thing to filter here are types which are invalid in LLVM 167 /// vectors. We also filter target specific types which have absolutely no 168 /// meaningful vectorization path such as x86_fp80 and ppc_f128. This just 169 /// avoids spending time checking the cost model and realizing that they will 170 /// be inevitably scalarized. 171 static bool isValidElementType(Type *Ty) { 172 return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() && 173 !Ty->isPPC_FP128Ty(); 174 } 175 176 /// \returns true if all of the instructions in \p VL are in the same block or 177 /// false otherwise. 178 static bool allSameBlock(ArrayRef<Value *> VL) { 179 Instruction *I0 = dyn_cast<Instruction>(VL[0]); 180 if (!I0) 181 return false; 182 BasicBlock *BB = I0->getParent(); 183 for (int i = 1, e = VL.size(); i < e; i++) { 184 Instruction *I = dyn_cast<Instruction>(VL[i]); 185 if (!I) 186 return false; 187 188 if (BB != I->getParent()) 189 return false; 190 } 191 return true; 192 } 193 194 /// \returns True if all of the values in \p VL are constants. 195 static bool allConstant(ArrayRef<Value *> VL) { 196 for (Value *i : VL) 197 if (!isa<Constant>(i)) 198 return false; 199 return true; 200 } 201 202 /// \returns True if all of the values in \p VL are identical. 203 static bool isSplat(ArrayRef<Value *> VL) { 204 for (unsigned i = 1, e = VL.size(); i < e; ++i) 205 if (VL[i] != VL[0]) 206 return false; 207 return true; 208 } 209 210 /// Checks if the vector of instructions can be represented as a shuffle, like: 211 /// %x0 = extractelement <4 x i8> %x, i32 0 212 /// %x3 = extractelement <4 x i8> %x, i32 3 213 /// %y1 = extractelement <4 x i8> %y, i32 1 214 /// %y2 = extractelement <4 x i8> %y, i32 2 215 /// %x0x0 = mul i8 %x0, %x0 216 /// %x3x3 = mul i8 %x3, %x3 217 /// %y1y1 = mul i8 %y1, %y1 218 /// %y2y2 = mul i8 %y2, %y2 219 /// %ins1 = insertelement <4 x i8> undef, i8 %x0x0, i32 0 220 /// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1 221 /// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2 222 /// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3 223 /// ret <4 x i8> %ins4 224 /// can be transformed into: 225 /// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5, 226 /// i32 6> 227 /// %2 = mul <4 x i8> %1, %1 228 /// ret <4 x i8> %2 229 /// We convert this initially to something like: 230 /// %x0 = extractelement <4 x i8> %x, i32 0 231 /// %x3 = extractelement <4 x i8> %x, i32 3 232 /// %y1 = extractelement <4 x i8> %y, i32 1 233 /// %y2 = extractelement <4 x i8> %y, i32 2 234 /// %1 = insertelement <4 x i8> undef, i8 %x0, i32 0 235 /// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1 236 /// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2 237 /// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3 238 /// %5 = mul <4 x i8> %4, %4 239 /// %6 = extractelement <4 x i8> %5, i32 0 240 /// %ins1 = insertelement <4 x i8> undef, i8 %6, i32 0 241 /// %7 = extractelement <4 x i8> %5, i32 1 242 /// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1 243 /// %8 = extractelement <4 x i8> %5, i32 2 244 /// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2 245 /// %9 = extractelement <4 x i8> %5, i32 3 246 /// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3 247 /// ret <4 x i8> %ins4 248 /// InstCombiner transforms this into a shuffle and vector mul 249 static Optional<TargetTransformInfo::ShuffleKind> 250 isShuffle(ArrayRef<Value *> VL) { 251 auto *EI0 = cast<ExtractElementInst>(VL[0]); 252 unsigned Size = EI0->getVectorOperandType()->getVectorNumElements(); 253 Value *Vec1 = nullptr; 254 Value *Vec2 = nullptr; 255 enum ShuffleMode {Unknown, FirstAlternate, SecondAlternate, Permute}; 256 ShuffleMode CommonShuffleMode = Unknown; 257 for (unsigned I = 0, E = VL.size(); I < E; ++I) { 258 auto *EI = cast<ExtractElementInst>(VL[I]); 259 auto *Vec = EI->getVectorOperand(); 260 // All vector operands must have the same number of vector elements. 261 if (Vec->getType()->getVectorNumElements() != Size) 262 return None; 263 auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand()); 264 if (!Idx) 265 return None; 266 // Undefined behavior if Idx is negative or >= Size. 267 if (Idx->getValue().uge(Size)) 268 continue; 269 unsigned IntIdx = Idx->getValue().getZExtValue(); 270 // We can extractelement from undef vector. 271 if (isa<UndefValue>(Vec)) 272 continue; 273 // For correct shuffling we have to have at most 2 different vector operands 274 // in all extractelement instructions. 275 if (Vec1 && Vec2 && Vec != Vec1 && Vec != Vec2) 276 return None; 277 if (CommonShuffleMode == Permute) 278 continue; 279 // If the extract index is not the same as the operation number, it is a 280 // permutation. 281 if (IntIdx != I) { 282 CommonShuffleMode = Permute; 283 continue; 284 } 285 // Check the shuffle mode for the current operation. 286 if (!Vec1) 287 Vec1 = Vec; 288 else if (Vec != Vec1) 289 Vec2 = Vec; 290 // Example: shufflevector A, B, <0,5,2,7> 291 // I is odd and IntIdx for A == I - FirstAlternate shuffle. 292 // I is even and IntIdx for B == I - FirstAlternate shuffle. 293 // Example: shufflevector A, B, <4,1,6,3> 294 // I is even and IntIdx for A == I - SecondAlternate shuffle. 295 // I is odd and IntIdx for B == I - SecondAlternate shuffle. 296 const bool IIsEven = I & 1; 297 const bool CurrVecIsA = Vec == Vec1; 298 const bool IIsOdd = !IIsEven; 299 const bool CurrVecIsB = !CurrVecIsA; 300 ShuffleMode CurrentShuffleMode = 301 ((IIsOdd && CurrVecIsA) || (IIsEven && CurrVecIsB)) ? FirstAlternate 302 : SecondAlternate; 303 // Common mode is not set or the same as the shuffle mode of the current 304 // operation - alternate. 305 if (CommonShuffleMode == Unknown) 306 CommonShuffleMode = CurrentShuffleMode; 307 // Common shuffle mode is not the same as the shuffle mode of the current 308 // operation - permutation. 309 if (CommonShuffleMode != CurrentShuffleMode) 310 CommonShuffleMode = Permute; 311 } 312 // If we're not crossing lanes in different vectors, consider it as blending. 313 if ((CommonShuffleMode == FirstAlternate || 314 CommonShuffleMode == SecondAlternate) && 315 Vec2) 316 return TargetTransformInfo::SK_Alternate; 317 // If Vec2 was never used, we have a permutation of a single vector, otherwise 318 // we have permutation of 2 vectors. 319 return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc 320 : TargetTransformInfo::SK_PermuteSingleSrc; 321 } 322 323 ///\returns Opcode that can be clubbed with \p Op to create an alternate 324 /// sequence which can later be merged as a ShuffleVector instruction. 325 static unsigned getAltOpcode(unsigned Op) { 326 switch (Op) { 327 case Instruction::FAdd: 328 return Instruction::FSub; 329 case Instruction::FSub: 330 return Instruction::FAdd; 331 case Instruction::Add: 332 return Instruction::Sub; 333 case Instruction::Sub: 334 return Instruction::Add; 335 default: 336 return 0; 337 } 338 } 339 340 static bool isOdd(unsigned Value) { 341 return Value & 1; 342 } 343 344 static bool sameOpcodeOrAlt(unsigned Opcode, unsigned AltOpcode, 345 unsigned CheckedOpcode) { 346 return Opcode == CheckedOpcode || AltOpcode == CheckedOpcode; 347 } 348 349 /// Chooses the correct key for scheduling data. If \p Op has the same (or 350 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p 351 /// OpValue. 352 static Value *isOneOf(Value *OpValue, Value *Op) { 353 auto *I = dyn_cast<Instruction>(Op); 354 if (!I) 355 return OpValue; 356 auto *OpInst = cast<Instruction>(OpValue); 357 unsigned OpInstOpcode = OpInst->getOpcode(); 358 unsigned IOpcode = I->getOpcode(); 359 if (sameOpcodeOrAlt(OpInstOpcode, getAltOpcode(OpInstOpcode), IOpcode)) 360 return Op; 361 return OpValue; 362 } 363 364 namespace { 365 366 /// Contains data for the instructions going to be vectorized. 367 struct RawInstructionsData { 368 /// Main Opcode of the instructions going to be vectorized. 369 unsigned Opcode = 0; 370 371 /// The list of instructions have some instructions with alternate opcodes. 372 bool HasAltOpcodes = false; 373 }; 374 375 } // end anonymous namespace 376 377 /// Checks the list of the vectorized instructions \p VL and returns info about 378 /// this list. 379 static RawInstructionsData getMainOpcode(ArrayRef<Value *> VL) { 380 auto *I0 = dyn_cast<Instruction>(VL[0]); 381 if (!I0) 382 return {}; 383 RawInstructionsData Res; 384 unsigned Opcode = I0->getOpcode(); 385 // Walk through the list of the vectorized instructions 386 // in order to check its structure described by RawInstructionsData. 387 for (unsigned Cnt = 0, E = VL.size(); Cnt != E; ++Cnt) { 388 auto *I = dyn_cast<Instruction>(VL[Cnt]); 389 if (!I) 390 return {}; 391 if (Opcode != I->getOpcode()) 392 Res.HasAltOpcodes = true; 393 } 394 Res.Opcode = Opcode; 395 return Res; 396 } 397 398 namespace { 399 400 /// Main data required for vectorization of instructions. 401 struct InstructionsState { 402 /// The very first instruction in the list with the main opcode. 403 Value *OpValue = nullptr; 404 405 /// The main opcode for the list of instructions. 406 unsigned Opcode = 0; 407 408 /// Some of the instructions in the list have alternate opcodes. 409 bool IsAltShuffle = false; 410 411 InstructionsState() = default; 412 InstructionsState(Value *OpValue, unsigned Opcode, bool IsAltShuffle) 413 : OpValue(OpValue), Opcode(Opcode), IsAltShuffle(IsAltShuffle) {} 414 }; 415 416 } // end anonymous namespace 417 418 /// \returns analysis of the Instructions in \p VL described in 419 /// InstructionsState, the Opcode that we suppose the whole list 420 /// could be vectorized even if its structure is diverse. 421 static InstructionsState getSameOpcode(ArrayRef<Value *> VL) { 422 auto Res = getMainOpcode(VL); 423 unsigned Opcode = Res.Opcode; 424 if (!Res.HasAltOpcodes) 425 return InstructionsState(VL[0], Opcode, false); 426 auto *OpInst = cast<Instruction>(VL[0]); 427 unsigned AltOpcode = getAltOpcode(Opcode); 428 // Examine each element in the list instructions VL to determine 429 // if some operations there could be considered as an alternative 430 // (for example as subtraction relates to addition operation). 431 for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) { 432 auto *I = cast<Instruction>(VL[Cnt]); 433 unsigned InstOpcode = I->getOpcode(); 434 if ((Res.HasAltOpcodes && 435 InstOpcode != (isOdd(Cnt) ? AltOpcode : Opcode)) || 436 (!Res.HasAltOpcodes && InstOpcode != Opcode)) { 437 return InstructionsState(OpInst, 0, false); 438 } 439 } 440 return InstructionsState(OpInst, Opcode, Res.HasAltOpcodes); 441 } 442 443 /// \returns true if all of the values in \p VL have the same type or false 444 /// otherwise. 445 static bool allSameType(ArrayRef<Value *> VL) { 446 Type *Ty = VL[0]->getType(); 447 for (int i = 1, e = VL.size(); i < e; i++) 448 if (VL[i]->getType() != Ty) 449 return false; 450 451 return true; 452 } 453 454 /// \returns True if Extract{Value,Element} instruction extracts element Idx. 455 static bool matchExtractIndex(Instruction *E, unsigned Idx, unsigned Opcode) { 456 assert(Opcode == Instruction::ExtractElement || 457 Opcode == Instruction::ExtractValue); 458 if (Opcode == Instruction::ExtractElement) { 459 ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1)); 460 return CI && CI->getZExtValue() == Idx; 461 } else { 462 ExtractValueInst *EI = cast<ExtractValueInst>(E); 463 return EI->getNumIndices() == 1 && *EI->idx_begin() == Idx; 464 } 465 } 466 467 /// \returns True if in-tree use also needs extract. This refers to 468 /// possible scalar operand in vectorized instruction. 469 static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst, 470 TargetLibraryInfo *TLI) { 471 unsigned Opcode = UserInst->getOpcode(); 472 switch (Opcode) { 473 case Instruction::Load: { 474 LoadInst *LI = cast<LoadInst>(UserInst); 475 return (LI->getPointerOperand() == Scalar); 476 } 477 case Instruction::Store: { 478 StoreInst *SI = cast<StoreInst>(UserInst); 479 return (SI->getPointerOperand() == Scalar); 480 } 481 case Instruction::Call: { 482 CallInst *CI = cast<CallInst>(UserInst); 483 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 484 if (hasVectorInstrinsicScalarOpd(ID, 1)) { 485 return (CI->getArgOperand(1) == Scalar); 486 } 487 LLVM_FALLTHROUGH; 488 } 489 default: 490 return false; 491 } 492 } 493 494 /// \returns the AA location that is being access by the instruction. 495 static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) { 496 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 497 return MemoryLocation::get(SI); 498 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 499 return MemoryLocation::get(LI); 500 return MemoryLocation(); 501 } 502 503 /// \returns True if the instruction is not a volatile or atomic load/store. 504 static bool isSimple(Instruction *I) { 505 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 506 return LI->isSimple(); 507 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 508 return SI->isSimple(); 509 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) 510 return !MI->isVolatile(); 511 return true; 512 } 513 514 namespace llvm { 515 516 namespace slpvectorizer { 517 518 /// Bottom Up SLP Vectorizer. 519 class BoUpSLP { 520 public: 521 using ValueList = SmallVector<Value *, 8>; 522 using InstrList = SmallVector<Instruction *, 16>; 523 using ValueSet = SmallPtrSet<Value *, 16>; 524 using StoreList = SmallVector<StoreInst *, 8>; 525 using ExtraValueToDebugLocsMap = 526 MapVector<Value *, SmallVector<Instruction *, 2>>; 527 528 BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti, 529 TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li, 530 DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB, 531 const DataLayout *DL, OptimizationRemarkEmitter *ORE) 532 : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC), 533 DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) { 534 CodeMetrics::collectEphemeralValues(F, AC, EphValues); 535 // Use the vector register size specified by the target unless overridden 536 // by a command-line option. 537 // TODO: It would be better to limit the vectorization factor based on 538 // data type rather than just register size. For example, x86 AVX has 539 // 256-bit registers, but it does not support integer operations 540 // at that width (that requires AVX2). 541 if (MaxVectorRegSizeOption.getNumOccurrences()) 542 MaxVecRegSize = MaxVectorRegSizeOption; 543 else 544 MaxVecRegSize = TTI->getRegisterBitWidth(true); 545 546 if (MinVectorRegSizeOption.getNumOccurrences()) 547 MinVecRegSize = MinVectorRegSizeOption; 548 else 549 MinVecRegSize = TTI->getMinVectorRegisterBitWidth(); 550 } 551 552 /// \brief Vectorize the tree that starts with the elements in \p VL. 553 /// Returns the vectorized root. 554 Value *vectorizeTree(); 555 556 /// Vectorize the tree but with the list of externally used values \p 557 /// ExternallyUsedValues. Values in this MapVector can be replaced but the 558 /// generated extractvalue instructions. 559 Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues); 560 561 /// \returns the cost incurred by unwanted spills and fills, caused by 562 /// holding live values over call sites. 563 int getSpillCost(); 564 565 /// \returns the vectorization cost of the subtree that starts at \p VL. 566 /// A negative number means that this is profitable. 567 int getTreeCost(); 568 569 /// Construct a vectorizable tree that starts at \p Roots, ignoring users for 570 /// the purpose of scheduling and extraction in the \p UserIgnoreLst. 571 void buildTree(ArrayRef<Value *> Roots, 572 ArrayRef<Value *> UserIgnoreLst = None); 573 574 /// Construct a vectorizable tree that starts at \p Roots, ignoring users for 575 /// the purpose of scheduling and extraction in the \p UserIgnoreLst taking 576 /// into account (anf updating it, if required) list of externally used 577 /// values stored in \p ExternallyUsedValues. 578 void buildTree(ArrayRef<Value *> Roots, 579 ExtraValueToDebugLocsMap &ExternallyUsedValues, 580 ArrayRef<Value *> UserIgnoreLst = None); 581 582 /// Clear the internal data structures that are created by 'buildTree'. 583 void deleteTree() { 584 VectorizableTree.clear(); 585 ScalarToTreeEntry.clear(); 586 MustGather.clear(); 587 ExternalUses.clear(); 588 NumLoadsWantToKeepOrder = 0; 589 NumLoadsWantToChangeOrder = 0; 590 for (auto &Iter : BlocksSchedules) { 591 BlockScheduling *BS = Iter.second.get(); 592 BS->clear(); 593 } 594 MinBWs.clear(); 595 } 596 597 unsigned getTreeSize() const { return VectorizableTree.size(); } 598 599 /// \brief Perform LICM and CSE on the newly generated gather sequences. 600 void optimizeGatherSequence(); 601 602 /// \returns true if it is beneficial to reverse the vector order. 603 bool shouldReorder() const { 604 return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder; 605 } 606 607 /// \return The vector element size in bits to use when vectorizing the 608 /// expression tree ending at \p V. If V is a store, the size is the width of 609 /// the stored value. Otherwise, the size is the width of the largest loaded 610 /// value reaching V. This method is used by the vectorizer to calculate 611 /// vectorization factors. 612 unsigned getVectorElementSize(Value *V); 613 614 /// Compute the minimum type sizes required to represent the entries in a 615 /// vectorizable tree. 616 void computeMinimumValueSizes(); 617 618 // \returns maximum vector register size as set by TTI or overridden by cl::opt. 619 unsigned getMaxVecRegSize() const { 620 return MaxVecRegSize; 621 } 622 623 // \returns minimum vector register size as set by cl::opt. 624 unsigned getMinVecRegSize() const { 625 return MinVecRegSize; 626 } 627 628 /// \brief Check if ArrayType or StructType is isomorphic to some VectorType. 629 /// 630 /// \returns number of elements in vector if isomorphism exists, 0 otherwise. 631 unsigned canMapToVector(Type *T, const DataLayout &DL) const; 632 633 /// \returns True if the VectorizableTree is both tiny and not fully 634 /// vectorizable. We do not vectorize such trees. 635 bool isTreeTinyAndNotFullyVectorizable(); 636 637 OptimizationRemarkEmitter *getORE() { return ORE; } 638 639 private: 640 struct TreeEntry; 641 642 /// Checks if all users of \p I are the part of the vectorization tree. 643 bool areAllUsersVectorized(Instruction *I) const; 644 645 /// \returns the cost of the vectorizable entry. 646 int getEntryCost(TreeEntry *E); 647 648 /// This is the recursive part of buildTree. 649 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, int); 650 651 /// \returns True if the ExtractElement/ExtractValue instructions in VL can 652 /// be vectorized to use the original vector (or aggregate "bitcast" to a vector). 653 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue) const; 654 655 /// Vectorize a single entry in the tree. 656 Value *vectorizeTree(TreeEntry *E); 657 658 /// Vectorize a single entry in the tree, starting in \p VL. 659 Value *vectorizeTree(ArrayRef<Value *> VL); 660 661 /// \returns the pointer to the vectorized value if \p VL is already 662 /// vectorized, or NULL. They may happen in cycles. 663 Value *alreadyVectorized(ArrayRef<Value *> VL, Value *OpValue) const; 664 665 /// \returns the scalarization cost for this type. Scalarization in this 666 /// context means the creation of vectors from a group of scalars. 667 int getGatherCost(Type *Ty); 668 669 /// \returns the scalarization cost for this list of values. Assuming that 670 /// this subtree gets vectorized, we may need to extract the values from the 671 /// roots. This method calculates the cost of extracting the values. 672 int getGatherCost(ArrayRef<Value *> VL); 673 674 /// \brief Set the Builder insert point to one after the last instruction in 675 /// the bundle 676 void setInsertPointAfterBundle(ArrayRef<Value *> VL, Value *OpValue); 677 678 /// \returns a vector from a collection of scalars in \p VL. 679 Value *Gather(ArrayRef<Value *> VL, VectorType *Ty); 680 681 /// \returns whether the VectorizableTree is fully vectorizable and will 682 /// be beneficial even the tree height is tiny. 683 bool isFullyVectorizableTinyTree(); 684 685 /// \reorder commutative operands in alt shuffle if they result in 686 /// vectorized code. 687 void reorderAltShuffleOperands(unsigned Opcode, ArrayRef<Value *> VL, 688 SmallVectorImpl<Value *> &Left, 689 SmallVectorImpl<Value *> &Right); 690 691 /// \reorder commutative operands to get better probability of 692 /// generating vectorized code. 693 void reorderInputsAccordingToOpcode(unsigned Opcode, ArrayRef<Value *> VL, 694 SmallVectorImpl<Value *> &Left, 695 SmallVectorImpl<Value *> &Right); 696 struct TreeEntry { 697 TreeEntry(std::vector<TreeEntry> &Container) : Container(Container) {} 698 699 /// \returns true if the scalars in VL are equal to this entry. 700 bool isSame(ArrayRef<Value *> VL) const { 701 assert(VL.size() == Scalars.size() && "Invalid size"); 702 return std::equal(VL.begin(), VL.end(), Scalars.begin()); 703 } 704 705 /// A vector of scalars. 706 ValueList Scalars; 707 708 /// The Scalars are vectorized into this value. It is initialized to Null. 709 Value *VectorizedValue = nullptr; 710 711 /// Do we need to gather this sequence ? 712 bool NeedToGather = false; 713 714 /// Points back to the VectorizableTree. 715 /// 716 /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has 717 /// to be a pointer and needs to be able to initialize the child iterator. 718 /// Thus we need a reference back to the container to translate the indices 719 /// to entries. 720 std::vector<TreeEntry> &Container; 721 722 /// The TreeEntry index containing the user of this entry. We can actually 723 /// have multiple users so the data structure is not truly a tree. 724 SmallVector<int, 1> UserTreeIndices; 725 }; 726 727 /// Create a new VectorizableTree entry. 728 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized, 729 int &UserTreeIdx) { 730 VectorizableTree.emplace_back(VectorizableTree); 731 int idx = VectorizableTree.size() - 1; 732 TreeEntry *Last = &VectorizableTree[idx]; 733 Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end()); 734 Last->NeedToGather = !Vectorized; 735 if (Vectorized) { 736 for (int i = 0, e = VL.size(); i != e; ++i) { 737 assert(!getTreeEntry(VL[i]) && "Scalar already in tree!"); 738 ScalarToTreeEntry[VL[i]] = idx; 739 } 740 } else { 741 MustGather.insert(VL.begin(), VL.end()); 742 } 743 744 if (UserTreeIdx >= 0) 745 Last->UserTreeIndices.push_back(UserTreeIdx); 746 UserTreeIdx = idx; 747 return Last; 748 } 749 750 /// -- Vectorization State -- 751 /// Holds all of the tree entries. 752 std::vector<TreeEntry> VectorizableTree; 753 754 TreeEntry *getTreeEntry(Value *V) { 755 auto I = ScalarToTreeEntry.find(V); 756 if (I != ScalarToTreeEntry.end()) 757 return &VectorizableTree[I->second]; 758 return nullptr; 759 } 760 761 const TreeEntry *getTreeEntry(Value *V) const { 762 auto I = ScalarToTreeEntry.find(V); 763 if (I != ScalarToTreeEntry.end()) 764 return &VectorizableTree[I->second]; 765 return nullptr; 766 } 767 768 /// Maps a specific scalar to its tree entry. 769 SmallDenseMap<Value*, int> ScalarToTreeEntry; 770 771 /// A list of scalars that we found that we need to keep as scalars. 772 ValueSet MustGather; 773 774 /// This POD struct describes one external user in the vectorized tree. 775 struct ExternalUser { 776 ExternalUser(Value *S, llvm::User *U, int L) 777 : Scalar(S), User(U), Lane(L) {} 778 779 // Which scalar in our function. 780 Value *Scalar; 781 782 // Which user that uses the scalar. 783 llvm::User *User; 784 785 // Which lane does the scalar belong to. 786 int Lane; 787 }; 788 using UserList = SmallVector<ExternalUser, 16>; 789 790 /// Checks if two instructions may access the same memory. 791 /// 792 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it 793 /// is invariant in the calling loop. 794 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1, 795 Instruction *Inst2) { 796 // First check if the result is already in the cache. 797 AliasCacheKey key = std::make_pair(Inst1, Inst2); 798 Optional<bool> &result = AliasCache[key]; 799 if (result.hasValue()) { 800 return result.getValue(); 801 } 802 MemoryLocation Loc2 = getLocation(Inst2, AA); 803 bool aliased = true; 804 if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) { 805 // Do the alias check. 806 aliased = AA->alias(Loc1, Loc2); 807 } 808 // Store the result in the cache. 809 result = aliased; 810 return aliased; 811 } 812 813 using AliasCacheKey = std::pair<Instruction *, Instruction *>; 814 815 /// Cache for alias results. 816 /// TODO: consider moving this to the AliasAnalysis itself. 817 DenseMap<AliasCacheKey, Optional<bool>> AliasCache; 818 819 /// Removes an instruction from its block and eventually deletes it. 820 /// It's like Instruction::eraseFromParent() except that the actual deletion 821 /// is delayed until BoUpSLP is destructed. 822 /// This is required to ensure that there are no incorrect collisions in the 823 /// AliasCache, which can happen if a new instruction is allocated at the 824 /// same address as a previously deleted instruction. 825 void eraseInstruction(Instruction *I) { 826 I->removeFromParent(); 827 I->dropAllReferences(); 828 DeletedInstructions.emplace_back(I); 829 } 830 831 /// Temporary store for deleted instructions. Instructions will be deleted 832 /// eventually when the BoUpSLP is destructed. 833 SmallVector<unique_value, 8> DeletedInstructions; 834 835 /// A list of values that need to extracted out of the tree. 836 /// This list holds pairs of (Internal Scalar : External User). External User 837 /// can be nullptr, it means that this Internal Scalar will be used later, 838 /// after vectorization. 839 UserList ExternalUses; 840 841 /// Values used only by @llvm.assume calls. 842 SmallPtrSet<const Value *, 32> EphValues; 843 844 /// Holds all of the instructions that we gathered. 845 SetVector<Instruction *> GatherSeq; 846 847 /// A list of blocks that we are going to CSE. 848 SetVector<BasicBlock *> CSEBlocks; 849 850 /// Contains all scheduling relevant data for an instruction. 851 /// A ScheduleData either represents a single instruction or a member of an 852 /// instruction bundle (= a group of instructions which is combined into a 853 /// vector instruction). 854 struct ScheduleData { 855 // The initial value for the dependency counters. It means that the 856 // dependencies are not calculated yet. 857 enum { InvalidDeps = -1 }; 858 859 ScheduleData() = default; 860 861 void init(int BlockSchedulingRegionID, Value *OpVal) { 862 FirstInBundle = this; 863 NextInBundle = nullptr; 864 NextLoadStore = nullptr; 865 IsScheduled = false; 866 SchedulingRegionID = BlockSchedulingRegionID; 867 UnscheduledDepsInBundle = UnscheduledDeps; 868 clearDependencies(); 869 OpValue = OpVal; 870 } 871 872 /// Returns true if the dependency information has been calculated. 873 bool hasValidDependencies() const { return Dependencies != InvalidDeps; } 874 875 /// Returns true for single instructions and for bundle representatives 876 /// (= the head of a bundle). 877 bool isSchedulingEntity() const { return FirstInBundle == this; } 878 879 /// Returns true if it represents an instruction bundle and not only a 880 /// single instruction. 881 bool isPartOfBundle() const { 882 return NextInBundle != nullptr || FirstInBundle != this; 883 } 884 885 /// Returns true if it is ready for scheduling, i.e. it has no more 886 /// unscheduled depending instructions/bundles. 887 bool isReady() const { 888 assert(isSchedulingEntity() && 889 "can't consider non-scheduling entity for ready list"); 890 return UnscheduledDepsInBundle == 0 && !IsScheduled; 891 } 892 893 /// Modifies the number of unscheduled dependencies, also updating it for 894 /// the whole bundle. 895 int incrementUnscheduledDeps(int Incr) { 896 UnscheduledDeps += Incr; 897 return FirstInBundle->UnscheduledDepsInBundle += Incr; 898 } 899 900 /// Sets the number of unscheduled dependencies to the number of 901 /// dependencies. 902 void resetUnscheduledDeps() { 903 incrementUnscheduledDeps(Dependencies - UnscheduledDeps); 904 } 905 906 /// Clears all dependency information. 907 void clearDependencies() { 908 Dependencies = InvalidDeps; 909 resetUnscheduledDeps(); 910 MemoryDependencies.clear(); 911 } 912 913 void dump(raw_ostream &os) const { 914 if (!isSchedulingEntity()) { 915 os << "/ " << *Inst; 916 } else if (NextInBundle) { 917 os << '[' << *Inst; 918 ScheduleData *SD = NextInBundle; 919 while (SD) { 920 os << ';' << *SD->Inst; 921 SD = SD->NextInBundle; 922 } 923 os << ']'; 924 } else { 925 os << *Inst; 926 } 927 } 928 929 Instruction *Inst = nullptr; 930 931 /// Points to the head in an instruction bundle (and always to this for 932 /// single instructions). 933 ScheduleData *FirstInBundle = nullptr; 934 935 /// Single linked list of all instructions in a bundle. Null if it is a 936 /// single instruction. 937 ScheduleData *NextInBundle = nullptr; 938 939 /// Single linked list of all memory instructions (e.g. load, store, call) 940 /// in the block - until the end of the scheduling region. 941 ScheduleData *NextLoadStore = nullptr; 942 943 /// The dependent memory instructions. 944 /// This list is derived on demand in calculateDependencies(). 945 SmallVector<ScheduleData *, 4> MemoryDependencies; 946 947 /// This ScheduleData is in the current scheduling region if this matches 948 /// the current SchedulingRegionID of BlockScheduling. 949 int SchedulingRegionID = 0; 950 951 /// Used for getting a "good" final ordering of instructions. 952 int SchedulingPriority = 0; 953 954 /// The number of dependencies. Constitutes of the number of users of the 955 /// instruction plus the number of dependent memory instructions (if any). 956 /// This value is calculated on demand. 957 /// If InvalidDeps, the number of dependencies is not calculated yet. 958 int Dependencies = InvalidDeps; 959 960 /// The number of dependencies minus the number of dependencies of scheduled 961 /// instructions. As soon as this is zero, the instruction/bundle gets ready 962 /// for scheduling. 963 /// Note that this is negative as long as Dependencies is not calculated. 964 int UnscheduledDeps = InvalidDeps; 965 966 /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for 967 /// single instructions. 968 int UnscheduledDepsInBundle = InvalidDeps; 969 970 /// True if this instruction is scheduled (or considered as scheduled in the 971 /// dry-run). 972 bool IsScheduled = false; 973 974 /// Opcode of the current instruction in the schedule data. 975 Value *OpValue = nullptr; 976 }; 977 978 #ifndef NDEBUG 979 friend inline raw_ostream &operator<<(raw_ostream &os, 980 const BoUpSLP::ScheduleData &SD) { 981 SD.dump(os); 982 return os; 983 } 984 #endif 985 986 friend struct GraphTraits<BoUpSLP *>; 987 friend struct DOTGraphTraits<BoUpSLP *>; 988 989 /// Contains all scheduling data for a basic block. 990 struct BlockScheduling { 991 BlockScheduling(BasicBlock *BB) 992 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {} 993 994 void clear() { 995 ReadyInsts.clear(); 996 ScheduleStart = nullptr; 997 ScheduleEnd = nullptr; 998 FirstLoadStoreInRegion = nullptr; 999 LastLoadStoreInRegion = nullptr; 1000 1001 // Reduce the maximum schedule region size by the size of the 1002 // previous scheduling run. 1003 ScheduleRegionSizeLimit -= ScheduleRegionSize; 1004 if (ScheduleRegionSizeLimit < MinScheduleRegionSize) 1005 ScheduleRegionSizeLimit = MinScheduleRegionSize; 1006 ScheduleRegionSize = 0; 1007 1008 // Make a new scheduling region, i.e. all existing ScheduleData is not 1009 // in the new region yet. 1010 ++SchedulingRegionID; 1011 } 1012 1013 ScheduleData *getScheduleData(Value *V) { 1014 ScheduleData *SD = ScheduleDataMap[V]; 1015 if (SD && SD->SchedulingRegionID == SchedulingRegionID) 1016 return SD; 1017 return nullptr; 1018 } 1019 1020 ScheduleData *getScheduleData(Value *V, Value *Key) { 1021 if (V == Key) 1022 return getScheduleData(V); 1023 auto I = ExtraScheduleDataMap.find(V); 1024 if (I != ExtraScheduleDataMap.end()) { 1025 ScheduleData *SD = I->second[Key]; 1026 if (SD && SD->SchedulingRegionID == SchedulingRegionID) 1027 return SD; 1028 } 1029 return nullptr; 1030 } 1031 1032 bool isInSchedulingRegion(ScheduleData *SD) { 1033 return SD->SchedulingRegionID == SchedulingRegionID; 1034 } 1035 1036 /// Marks an instruction as scheduled and puts all dependent ready 1037 /// instructions into the ready-list. 1038 template <typename ReadyListType> 1039 void schedule(ScheduleData *SD, ReadyListType &ReadyList) { 1040 SD->IsScheduled = true; 1041 DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); 1042 1043 ScheduleData *BundleMember = SD; 1044 while (BundleMember) { 1045 if (BundleMember->Inst != BundleMember->OpValue) { 1046 BundleMember = BundleMember->NextInBundle; 1047 continue; 1048 } 1049 // Handle the def-use chain dependencies. 1050 for (Use &U : BundleMember->Inst->operands()) { 1051 auto *I = dyn_cast<Instruction>(U.get()); 1052 if (!I) 1053 continue; 1054 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) { 1055 if (OpDef && OpDef->hasValidDependencies() && 1056 OpDef->incrementUnscheduledDeps(-1) == 0) { 1057 // There are no more unscheduled dependencies after 1058 // decrementing, so we can put the dependent instruction 1059 // into the ready list. 1060 ScheduleData *DepBundle = OpDef->FirstInBundle; 1061 assert(!DepBundle->IsScheduled && 1062 "already scheduled bundle gets ready"); 1063 ReadyList.insert(DepBundle); 1064 DEBUG(dbgs() 1065 << "SLP: gets ready (def): " << *DepBundle << "\n"); 1066 } 1067 }); 1068 } 1069 // Handle the memory dependencies. 1070 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) { 1071 if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) { 1072 // There are no more unscheduled dependencies after decrementing, 1073 // so we can put the dependent instruction into the ready list. 1074 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle; 1075 assert(!DepBundle->IsScheduled && 1076 "already scheduled bundle gets ready"); 1077 ReadyList.insert(DepBundle); 1078 DEBUG(dbgs() << "SLP: gets ready (mem): " << *DepBundle 1079 << "\n"); 1080 } 1081 } 1082 BundleMember = BundleMember->NextInBundle; 1083 } 1084 } 1085 1086 void doForAllOpcodes(Value *V, 1087 function_ref<void(ScheduleData *SD)> Action) { 1088 if (ScheduleData *SD = getScheduleData(V)) 1089 Action(SD); 1090 auto I = ExtraScheduleDataMap.find(V); 1091 if (I != ExtraScheduleDataMap.end()) 1092 for (auto &P : I->second) 1093 if (P.second->SchedulingRegionID == SchedulingRegionID) 1094 Action(P.second); 1095 } 1096 1097 /// Put all instructions into the ReadyList which are ready for scheduling. 1098 template <typename ReadyListType> 1099 void initialFillReadyList(ReadyListType &ReadyList) { 1100 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 1101 doForAllOpcodes(I, [&](ScheduleData *SD) { 1102 if (SD->isSchedulingEntity() && SD->isReady()) { 1103 ReadyList.insert(SD); 1104 DEBUG(dbgs() << "SLP: initially in ready list: " << *I << "\n"); 1105 } 1106 }); 1107 } 1108 } 1109 1110 /// Checks if a bundle of instructions can be scheduled, i.e. has no 1111 /// cyclic dependencies. This is only a dry-run, no instructions are 1112 /// actually moved at this stage. 1113 bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, Value *OpValue); 1114 1115 /// Un-bundles a group of instructions. 1116 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue); 1117 1118 /// Allocates schedule data chunk. 1119 ScheduleData *allocateScheduleDataChunks(); 1120 1121 /// Extends the scheduling region so that V is inside the region. 1122 /// \returns true if the region size is within the limit. 1123 bool extendSchedulingRegion(Value *V, Value *OpValue); 1124 1125 /// Initialize the ScheduleData structures for new instructions in the 1126 /// scheduling region. 1127 void initScheduleData(Instruction *FromI, Instruction *ToI, 1128 ScheduleData *PrevLoadStore, 1129 ScheduleData *NextLoadStore); 1130 1131 /// Updates the dependency information of a bundle and of all instructions/ 1132 /// bundles which depend on the original bundle. 1133 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList, 1134 BoUpSLP *SLP); 1135 1136 /// Sets all instruction in the scheduling region to un-scheduled. 1137 void resetSchedule(); 1138 1139 BasicBlock *BB; 1140 1141 /// Simple memory allocation for ScheduleData. 1142 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks; 1143 1144 /// The size of a ScheduleData array in ScheduleDataChunks. 1145 int ChunkSize; 1146 1147 /// The allocator position in the current chunk, which is the last entry 1148 /// of ScheduleDataChunks. 1149 int ChunkPos; 1150 1151 /// Attaches ScheduleData to Instruction. 1152 /// Note that the mapping survives during all vectorization iterations, i.e. 1153 /// ScheduleData structures are recycled. 1154 DenseMap<Value *, ScheduleData *> ScheduleDataMap; 1155 1156 /// Attaches ScheduleData to Instruction with the leading key. 1157 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>> 1158 ExtraScheduleDataMap; 1159 1160 struct ReadyList : SmallVector<ScheduleData *, 8> { 1161 void insert(ScheduleData *SD) { push_back(SD); } 1162 }; 1163 1164 /// The ready-list for scheduling (only used for the dry-run). 1165 ReadyList ReadyInsts; 1166 1167 /// The first instruction of the scheduling region. 1168 Instruction *ScheduleStart = nullptr; 1169 1170 /// The first instruction _after_ the scheduling region. 1171 Instruction *ScheduleEnd = nullptr; 1172 1173 /// The first memory accessing instruction in the scheduling region 1174 /// (can be null). 1175 ScheduleData *FirstLoadStoreInRegion = nullptr; 1176 1177 /// The last memory accessing instruction in the scheduling region 1178 /// (can be null). 1179 ScheduleData *LastLoadStoreInRegion = nullptr; 1180 1181 /// The current size of the scheduling region. 1182 int ScheduleRegionSize = 0; 1183 1184 /// The maximum size allowed for the scheduling region. 1185 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget; 1186 1187 /// The ID of the scheduling region. For a new vectorization iteration this 1188 /// is incremented which "removes" all ScheduleData from the region. 1189 // Make sure that the initial SchedulingRegionID is greater than the 1190 // initial SchedulingRegionID in ScheduleData (which is 0). 1191 int SchedulingRegionID = 1; 1192 }; 1193 1194 /// Attaches the BlockScheduling structures to basic blocks. 1195 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules; 1196 1197 /// Performs the "real" scheduling. Done before vectorization is actually 1198 /// performed in a basic block. 1199 void scheduleBlock(BlockScheduling *BS); 1200 1201 /// List of users to ignore during scheduling and that don't need extracting. 1202 ArrayRef<Value *> UserIgnoreList; 1203 1204 // Number of load bundles that contain consecutive loads. 1205 int NumLoadsWantToKeepOrder = 0; 1206 1207 // Number of load bundles that contain consecutive loads in reversed order. 1208 int NumLoadsWantToChangeOrder = 0; 1209 1210 // Analysis and block reference. 1211 Function *F; 1212 ScalarEvolution *SE; 1213 TargetTransformInfo *TTI; 1214 TargetLibraryInfo *TLI; 1215 AliasAnalysis *AA; 1216 LoopInfo *LI; 1217 DominatorTree *DT; 1218 AssumptionCache *AC; 1219 DemandedBits *DB; 1220 const DataLayout *DL; 1221 OptimizationRemarkEmitter *ORE; 1222 1223 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. 1224 unsigned MinVecRegSize; // Set by cl::opt (default: 128). 1225 1226 /// Instruction builder to construct the vectorized tree. 1227 IRBuilder<> Builder; 1228 1229 /// A map of scalar integer values to the smallest bit width with which they 1230 /// can legally be represented. The values map to (width, signed) pairs, 1231 /// where "width" indicates the minimum bit width and "signed" is True if the 1232 /// value must be signed-extended, rather than zero-extended, back to its 1233 /// original width. 1234 MapVector<Value *, std::pair<uint64_t, bool>> MinBWs; 1235 }; 1236 1237 } // end namespace slpvectorizer 1238 1239 template <> struct GraphTraits<BoUpSLP *> { 1240 using TreeEntry = BoUpSLP::TreeEntry; 1241 1242 /// NodeRef has to be a pointer per the GraphWriter. 1243 using NodeRef = TreeEntry *; 1244 1245 /// \brief Add the VectorizableTree to the index iterator to be able to return 1246 /// TreeEntry pointers. 1247 struct ChildIteratorType 1248 : public iterator_adaptor_base<ChildIteratorType, 1249 SmallVector<int, 1>::iterator> { 1250 std::vector<TreeEntry> &VectorizableTree; 1251 1252 ChildIteratorType(SmallVector<int, 1>::iterator W, 1253 std::vector<TreeEntry> &VT) 1254 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {} 1255 1256 NodeRef operator*() { return &VectorizableTree[*I]; } 1257 }; 1258 1259 static NodeRef getEntryNode(BoUpSLP &R) { return &R.VectorizableTree[0]; } 1260 1261 static ChildIteratorType child_begin(NodeRef N) { 1262 return {N->UserTreeIndices.begin(), N->Container}; 1263 } 1264 1265 static ChildIteratorType child_end(NodeRef N) { 1266 return {N->UserTreeIndices.end(), N->Container}; 1267 } 1268 1269 /// For the node iterator we just need to turn the TreeEntry iterator into a 1270 /// TreeEntry* iterator so that it dereferences to NodeRef. 1271 using nodes_iterator = pointer_iterator<std::vector<TreeEntry>::iterator>; 1272 1273 static nodes_iterator nodes_begin(BoUpSLP *R) { 1274 return nodes_iterator(R->VectorizableTree.begin()); 1275 } 1276 1277 static nodes_iterator nodes_end(BoUpSLP *R) { 1278 return nodes_iterator(R->VectorizableTree.end()); 1279 } 1280 1281 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); } 1282 }; 1283 1284 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits { 1285 using TreeEntry = BoUpSLP::TreeEntry; 1286 1287 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {} 1288 1289 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) { 1290 std::string Str; 1291 raw_string_ostream OS(Str); 1292 if (isSplat(Entry->Scalars)) { 1293 OS << "<splat> " << *Entry->Scalars[0]; 1294 return Str; 1295 } 1296 for (auto V : Entry->Scalars) { 1297 OS << *V; 1298 if (std::any_of( 1299 R->ExternalUses.begin(), R->ExternalUses.end(), 1300 [&](const BoUpSLP::ExternalUser &EU) { return EU.Scalar == V; })) 1301 OS << " <extract>"; 1302 OS << "\n"; 1303 } 1304 return Str; 1305 } 1306 1307 static std::string getNodeAttributes(const TreeEntry *Entry, 1308 const BoUpSLP *) { 1309 if (Entry->NeedToGather) 1310 return "color=red"; 1311 return ""; 1312 } 1313 }; 1314 1315 } // end namespace llvm 1316 1317 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 1318 ArrayRef<Value *> UserIgnoreLst) { 1319 ExtraValueToDebugLocsMap ExternallyUsedValues; 1320 buildTree(Roots, ExternallyUsedValues, UserIgnoreLst); 1321 } 1322 1323 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 1324 ExtraValueToDebugLocsMap &ExternallyUsedValues, 1325 ArrayRef<Value *> UserIgnoreLst) { 1326 deleteTree(); 1327 UserIgnoreList = UserIgnoreLst; 1328 if (!allSameType(Roots)) 1329 return; 1330 buildTree_rec(Roots, 0, -1); 1331 1332 // Collect the values that we need to extract from the tree. 1333 for (TreeEntry &EIdx : VectorizableTree) { 1334 TreeEntry *Entry = &EIdx; 1335 1336 // No need to handle users of gathered values. 1337 if (Entry->NeedToGather) 1338 continue; 1339 1340 // For each lane: 1341 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 1342 Value *Scalar = Entry->Scalars[Lane]; 1343 1344 // Check if the scalar is externally used as an extra arg. 1345 auto ExtI = ExternallyUsedValues.find(Scalar); 1346 if (ExtI != ExternallyUsedValues.end()) { 1347 DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " << 1348 Lane << " from " << *Scalar << ".\n"); 1349 ExternalUses.emplace_back(Scalar, nullptr, Lane); 1350 continue; 1351 } 1352 for (User *U : Scalar->users()) { 1353 DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); 1354 1355 Instruction *UserInst = dyn_cast<Instruction>(U); 1356 if (!UserInst) 1357 continue; 1358 1359 // Skip in-tree scalars that become vectors 1360 if (TreeEntry *UseEntry = getTreeEntry(U)) { 1361 Value *UseScalar = UseEntry->Scalars[0]; 1362 // Some in-tree scalars will remain as scalar in vectorized 1363 // instructions. If that is the case, the one in Lane 0 will 1364 // be used. 1365 if (UseScalar != U || 1366 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) { 1367 DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U 1368 << ".\n"); 1369 assert(!UseEntry->NeedToGather && "Bad state"); 1370 continue; 1371 } 1372 } 1373 1374 // Ignore users in the user ignore list. 1375 if (is_contained(UserIgnoreList, UserInst)) 1376 continue; 1377 1378 DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " << 1379 Lane << " from " << *Scalar << ".\n"); 1380 ExternalUses.push_back(ExternalUser(Scalar, U, Lane)); 1381 } 1382 } 1383 } 1384 } 1385 1386 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, 1387 int UserTreeIdx) { 1388 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!"); 1389 1390 InstructionsState S = getSameOpcode(VL); 1391 if (Depth == RecursionMaxDepth) { 1392 DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); 1393 newTreeEntry(VL, false, UserTreeIdx); 1394 return; 1395 } 1396 1397 // Don't handle vectors. 1398 if (S.OpValue->getType()->isVectorTy()) { 1399 DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); 1400 newTreeEntry(VL, false, UserTreeIdx); 1401 return; 1402 } 1403 1404 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 1405 if (SI->getValueOperand()->getType()->isVectorTy()) { 1406 DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); 1407 newTreeEntry(VL, false, UserTreeIdx); 1408 return; 1409 } 1410 1411 // If all of the operands are identical or constant we have a simple solution. 1412 if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.Opcode) { 1413 DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n"); 1414 newTreeEntry(VL, false, UserTreeIdx); 1415 return; 1416 } 1417 1418 // We now know that this is a vector of instructions of the same type from 1419 // the same block. 1420 1421 // Don't vectorize ephemeral values. 1422 for (unsigned i = 0, e = VL.size(); i != e; ++i) { 1423 if (EphValues.count(VL[i])) { 1424 DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] << 1425 ") is ephemeral.\n"); 1426 newTreeEntry(VL, false, UserTreeIdx); 1427 return; 1428 } 1429 } 1430 1431 // Check if this is a duplicate of another entry. 1432 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 1433 for (unsigned i = 0, e = VL.size(); i != e; ++i) { 1434 DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n"); 1435 if (E->Scalars[i] != VL[i]) { 1436 DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); 1437 newTreeEntry(VL, false, UserTreeIdx); 1438 return; 1439 } 1440 } 1441 // Record the reuse of the tree node. FIXME, currently this is only used to 1442 // properly draw the graph rather than for the actual vectorization. 1443 E->UserTreeIndices.push_back(UserTreeIdx); 1444 DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue << ".\n"); 1445 return; 1446 } 1447 1448 // Check that none of the instructions in the bundle are already in the tree. 1449 for (unsigned i = 0, e = VL.size(); i != e; ++i) { 1450 auto *I = dyn_cast<Instruction>(VL[i]); 1451 if (!I) 1452 continue; 1453 if (getTreeEntry(I)) { 1454 DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] << 1455 ") is already in tree.\n"); 1456 newTreeEntry(VL, false, UserTreeIdx); 1457 return; 1458 } 1459 } 1460 1461 // If any of the scalars is marked as a value that needs to stay scalar, then 1462 // we need to gather the scalars. 1463 for (unsigned i = 0, e = VL.size(); i != e; ++i) { 1464 if (MustGather.count(VL[i])) { 1465 DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); 1466 newTreeEntry(VL, false, UserTreeIdx); 1467 return; 1468 } 1469 } 1470 1471 // Check that all of the users of the scalars that we want to vectorize are 1472 // schedulable. 1473 auto *VL0 = cast<Instruction>(S.OpValue); 1474 BasicBlock *BB = VL0->getParent(); 1475 1476 if (!DT->isReachableFromEntry(BB)) { 1477 // Don't go into unreachable blocks. They may contain instructions with 1478 // dependency cycles which confuse the final scheduling. 1479 DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); 1480 newTreeEntry(VL, false, UserTreeIdx); 1481 return; 1482 } 1483 1484 // Check that every instruction appears once in this bundle. 1485 for (unsigned i = 0, e = VL.size(); i < e; ++i) 1486 for (unsigned j = i + 1; j < e; ++j) 1487 if (VL[i] == VL[j]) { 1488 DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 1489 newTreeEntry(VL, false, UserTreeIdx); 1490 return; 1491 } 1492 1493 auto &BSRef = BlocksSchedules[BB]; 1494 if (!BSRef) 1495 BSRef = llvm::make_unique<BlockScheduling>(BB); 1496 1497 BlockScheduling &BS = *BSRef.get(); 1498 1499 if (!BS.tryScheduleBundle(VL, this, S.OpValue)) { 1500 DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); 1501 assert((!BS.getScheduleData(VL0) || 1502 !BS.getScheduleData(VL0)->isPartOfBundle()) && 1503 "tryScheduleBundle should cancelScheduling on failure"); 1504 newTreeEntry(VL, false, UserTreeIdx); 1505 return; 1506 } 1507 DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 1508 1509 unsigned ShuffleOrOp = S.IsAltShuffle ? 1510 (unsigned) Instruction::ShuffleVector : S.Opcode; 1511 switch (ShuffleOrOp) { 1512 case Instruction::PHI: { 1513 PHINode *PH = dyn_cast<PHINode>(VL0); 1514 1515 // Check for terminator values (e.g. invoke). 1516 for (unsigned j = 0; j < VL.size(); ++j) 1517 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 1518 TerminatorInst *Term = dyn_cast<TerminatorInst>( 1519 cast<PHINode>(VL[j])->getIncomingValueForBlock(PH->getIncomingBlock(i))); 1520 if (Term) { 1521 DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n"); 1522 BS.cancelScheduling(VL, VL0); 1523 newTreeEntry(VL, false, UserTreeIdx); 1524 return; 1525 } 1526 } 1527 1528 newTreeEntry(VL, true, UserTreeIdx); 1529 DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 1530 1531 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 1532 ValueList Operands; 1533 // Prepare the operand vector. 1534 for (Value *j : VL) 1535 Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock( 1536 PH->getIncomingBlock(i))); 1537 1538 buildTree_rec(Operands, Depth + 1, UserTreeIdx); 1539 } 1540 return; 1541 } 1542 case Instruction::ExtractValue: 1543 case Instruction::ExtractElement: { 1544 bool Reuse = canReuseExtract(VL, VL0); 1545 if (Reuse) { 1546 DEBUG(dbgs() << "SLP: Reusing extract sequence.\n"); 1547 } else { 1548 BS.cancelScheduling(VL, VL0); 1549 } 1550 newTreeEntry(VL, Reuse, UserTreeIdx); 1551 return; 1552 } 1553 case Instruction::Load: { 1554 // Check that a vectorized load would load the same memory as a scalar 1555 // load. For example, we don't want to vectorize loads that are smaller 1556 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 1557 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 1558 // from such a struct, we read/write packed bits disagreeing with the 1559 // unvectorized version. 1560 Type *ScalarTy = VL0->getType(); 1561 1562 if (DL->getTypeSizeInBits(ScalarTy) != 1563 DL->getTypeAllocSizeInBits(ScalarTy)) { 1564 BS.cancelScheduling(VL, VL0); 1565 newTreeEntry(VL, false, UserTreeIdx); 1566 DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); 1567 return; 1568 } 1569 1570 // Make sure all loads in the bundle are simple - we can't vectorize 1571 // atomic or volatile loads. 1572 for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) { 1573 LoadInst *L = cast<LoadInst>(VL[i]); 1574 if (!L->isSimple()) { 1575 BS.cancelScheduling(VL, VL0); 1576 newTreeEntry(VL, false, UserTreeIdx); 1577 DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); 1578 return; 1579 } 1580 } 1581 1582 // Check if the loads are consecutive, reversed, or neither. 1583 // TODO: What we really want is to sort the loads, but for now, check 1584 // the two likely directions. 1585 bool Consecutive = true; 1586 bool ReverseConsecutive = true; 1587 for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) { 1588 if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) { 1589 Consecutive = false; 1590 break; 1591 } else { 1592 ReverseConsecutive = false; 1593 } 1594 } 1595 1596 if (Consecutive) { 1597 ++NumLoadsWantToKeepOrder; 1598 newTreeEntry(VL, true, UserTreeIdx); 1599 DEBUG(dbgs() << "SLP: added a vector of loads.\n"); 1600 return; 1601 } 1602 1603 // If none of the load pairs were consecutive when checked in order, 1604 // check the reverse order. 1605 if (ReverseConsecutive) 1606 for (unsigned i = VL.size() - 1; i > 0; --i) 1607 if (!isConsecutiveAccess(VL[i], VL[i - 1], *DL, *SE)) { 1608 ReverseConsecutive = false; 1609 break; 1610 } 1611 1612 BS.cancelScheduling(VL, VL0); 1613 newTreeEntry(VL, false, UserTreeIdx); 1614 1615 if (ReverseConsecutive) { 1616 ++NumLoadsWantToChangeOrder; 1617 DEBUG(dbgs() << "SLP: Gathering reversed loads.\n"); 1618 } else { 1619 DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); 1620 } 1621 return; 1622 } 1623 case Instruction::ZExt: 1624 case Instruction::SExt: 1625 case Instruction::FPToUI: 1626 case Instruction::FPToSI: 1627 case Instruction::FPExt: 1628 case Instruction::PtrToInt: 1629 case Instruction::IntToPtr: 1630 case Instruction::SIToFP: 1631 case Instruction::UIToFP: 1632 case Instruction::Trunc: 1633 case Instruction::FPTrunc: 1634 case Instruction::BitCast: { 1635 Type *SrcTy = VL0->getOperand(0)->getType(); 1636 for (unsigned i = 0; i < VL.size(); ++i) { 1637 Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType(); 1638 if (Ty != SrcTy || !isValidElementType(Ty)) { 1639 BS.cancelScheduling(VL, VL0); 1640 newTreeEntry(VL, false, UserTreeIdx); 1641 DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n"); 1642 return; 1643 } 1644 } 1645 newTreeEntry(VL, true, UserTreeIdx); 1646 DEBUG(dbgs() << "SLP: added a vector of casts.\n"); 1647 1648 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 1649 ValueList Operands; 1650 // Prepare the operand vector. 1651 for (Value *j : VL) 1652 Operands.push_back(cast<Instruction>(j)->getOperand(i)); 1653 1654 buildTree_rec(Operands, Depth + 1, UserTreeIdx); 1655 } 1656 return; 1657 } 1658 case Instruction::ICmp: 1659 case Instruction::FCmp: { 1660 // Check that all of the compares have the same predicate. 1661 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 1662 Type *ComparedTy = VL0->getOperand(0)->getType(); 1663 for (unsigned i = 1, e = VL.size(); i < e; ++i) { 1664 CmpInst *Cmp = cast<CmpInst>(VL[i]); 1665 if (Cmp->getPredicate() != P0 || 1666 Cmp->getOperand(0)->getType() != ComparedTy) { 1667 BS.cancelScheduling(VL, VL0); 1668 newTreeEntry(VL, false, UserTreeIdx); 1669 DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n"); 1670 return; 1671 } 1672 } 1673 1674 newTreeEntry(VL, true, UserTreeIdx); 1675 DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 1676 1677 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 1678 ValueList Operands; 1679 // Prepare the operand vector. 1680 for (Value *j : VL) 1681 Operands.push_back(cast<Instruction>(j)->getOperand(i)); 1682 1683 buildTree_rec(Operands, Depth + 1, UserTreeIdx); 1684 } 1685 return; 1686 } 1687 case Instruction::Select: 1688 case Instruction::Add: 1689 case Instruction::FAdd: 1690 case Instruction::Sub: 1691 case Instruction::FSub: 1692 case Instruction::Mul: 1693 case Instruction::FMul: 1694 case Instruction::UDiv: 1695 case Instruction::SDiv: 1696 case Instruction::FDiv: 1697 case Instruction::URem: 1698 case Instruction::SRem: 1699 case Instruction::FRem: 1700 case Instruction::Shl: 1701 case Instruction::LShr: 1702 case Instruction::AShr: 1703 case Instruction::And: 1704 case Instruction::Or: 1705 case Instruction::Xor: 1706 newTreeEntry(VL, true, UserTreeIdx); 1707 DEBUG(dbgs() << "SLP: added a vector of bin op.\n"); 1708 1709 // Sort operands of the instructions so that each side is more likely to 1710 // have the same opcode. 1711 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 1712 ValueList Left, Right; 1713 reorderInputsAccordingToOpcode(S.Opcode, VL, Left, Right); 1714 buildTree_rec(Left, Depth + 1, UserTreeIdx); 1715 buildTree_rec(Right, Depth + 1, UserTreeIdx); 1716 return; 1717 } 1718 1719 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 1720 ValueList Operands; 1721 // Prepare the operand vector. 1722 for (Value *j : VL) 1723 Operands.push_back(cast<Instruction>(j)->getOperand(i)); 1724 1725 buildTree_rec(Operands, Depth + 1, UserTreeIdx); 1726 } 1727 return; 1728 1729 case Instruction::GetElementPtr: { 1730 // We don't combine GEPs with complicated (nested) indexing. 1731 for (unsigned j = 0; j < VL.size(); ++j) { 1732 if (cast<Instruction>(VL[j])->getNumOperands() != 2) { 1733 DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 1734 BS.cancelScheduling(VL, VL0); 1735 newTreeEntry(VL, false, UserTreeIdx); 1736 return; 1737 } 1738 } 1739 1740 // We can't combine several GEPs into one vector if they operate on 1741 // different types. 1742 Type *Ty0 = VL0->getOperand(0)->getType(); 1743 for (unsigned j = 0; j < VL.size(); ++j) { 1744 Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType(); 1745 if (Ty0 != CurTy) { 1746 DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n"); 1747 BS.cancelScheduling(VL, VL0); 1748 newTreeEntry(VL, false, UserTreeIdx); 1749 return; 1750 } 1751 } 1752 1753 // We don't combine GEPs with non-constant indexes. 1754 for (unsigned j = 0; j < VL.size(); ++j) { 1755 auto Op = cast<Instruction>(VL[j])->getOperand(1); 1756 if (!isa<ConstantInt>(Op)) { 1757 DEBUG( 1758 dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n"); 1759 BS.cancelScheduling(VL, VL0); 1760 newTreeEntry(VL, false, UserTreeIdx); 1761 return; 1762 } 1763 } 1764 1765 newTreeEntry(VL, true, UserTreeIdx); 1766 DEBUG(dbgs() << "SLP: added a vector of GEPs.\n"); 1767 for (unsigned i = 0, e = 2; i < e; ++i) { 1768 ValueList Operands; 1769 // Prepare the operand vector. 1770 for (Value *j : VL) 1771 Operands.push_back(cast<Instruction>(j)->getOperand(i)); 1772 1773 buildTree_rec(Operands, Depth + 1, UserTreeIdx); 1774 } 1775 return; 1776 } 1777 case Instruction::Store: { 1778 // Check if the stores are consecutive or of we need to swizzle them. 1779 for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) 1780 if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) { 1781 BS.cancelScheduling(VL, VL0); 1782 newTreeEntry(VL, false, UserTreeIdx); 1783 DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); 1784 return; 1785 } 1786 1787 newTreeEntry(VL, true, UserTreeIdx); 1788 DEBUG(dbgs() << "SLP: added a vector of stores.\n"); 1789 1790 ValueList Operands; 1791 for (Value *j : VL) 1792 Operands.push_back(cast<Instruction>(j)->getOperand(0)); 1793 1794 buildTree_rec(Operands, Depth + 1, UserTreeIdx); 1795 return; 1796 } 1797 case Instruction::Call: { 1798 // Check if the calls are all to the same vectorizable intrinsic. 1799 CallInst *CI = cast<CallInst>(VL0); 1800 // Check if this is an Intrinsic call or something that can be 1801 // represented by an intrinsic call 1802 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 1803 if (!isTriviallyVectorizable(ID)) { 1804 BS.cancelScheduling(VL, VL0); 1805 newTreeEntry(VL, false, UserTreeIdx); 1806 DEBUG(dbgs() << "SLP: Non-vectorizable call.\n"); 1807 return; 1808 } 1809 Function *Int = CI->getCalledFunction(); 1810 Value *A1I = nullptr; 1811 if (hasVectorInstrinsicScalarOpd(ID, 1)) 1812 A1I = CI->getArgOperand(1); 1813 for (unsigned i = 1, e = VL.size(); i != e; ++i) { 1814 CallInst *CI2 = dyn_cast<CallInst>(VL[i]); 1815 if (!CI2 || CI2->getCalledFunction() != Int || 1816 getVectorIntrinsicIDForCall(CI2, TLI) != ID || 1817 !CI->hasIdenticalOperandBundleSchema(*CI2)) { 1818 BS.cancelScheduling(VL, VL0); 1819 newTreeEntry(VL, false, UserTreeIdx); 1820 DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i] 1821 << "\n"); 1822 return; 1823 } 1824 // ctlz,cttz and powi are special intrinsics whose second argument 1825 // should be same in order for them to be vectorized. 1826 if (hasVectorInstrinsicScalarOpd(ID, 1)) { 1827 Value *A1J = CI2->getArgOperand(1); 1828 if (A1I != A1J) { 1829 BS.cancelScheduling(VL, VL0); 1830 newTreeEntry(VL, false, UserTreeIdx); 1831 DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI 1832 << " argument "<< A1I<<"!=" << A1J 1833 << "\n"); 1834 return; 1835 } 1836 } 1837 // Verify that the bundle operands are identical between the two calls. 1838 if (CI->hasOperandBundles() && 1839 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(), 1840 CI->op_begin() + CI->getBundleOperandsEndIndex(), 1841 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) { 1842 BS.cancelScheduling(VL, VL0); 1843 newTreeEntry(VL, false, UserTreeIdx); 1844 DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI << "!=" 1845 << *VL[i] << '\n'); 1846 return; 1847 } 1848 } 1849 1850 newTreeEntry(VL, true, UserTreeIdx); 1851 for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) { 1852 ValueList Operands; 1853 // Prepare the operand vector. 1854 for (Value *j : VL) { 1855 CallInst *CI2 = dyn_cast<CallInst>(j); 1856 Operands.push_back(CI2->getArgOperand(i)); 1857 } 1858 buildTree_rec(Operands, Depth + 1, UserTreeIdx); 1859 } 1860 return; 1861 } 1862 case Instruction::ShuffleVector: 1863 // If this is not an alternate sequence of opcode like add-sub 1864 // then do not vectorize this instruction. 1865 if (!S.IsAltShuffle) { 1866 BS.cancelScheduling(VL, VL0); 1867 newTreeEntry(VL, false, UserTreeIdx); 1868 DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n"); 1869 return; 1870 } 1871 newTreeEntry(VL, true, UserTreeIdx); 1872 DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n"); 1873 1874 // Reorder operands if reordering would enable vectorization. 1875 if (isa<BinaryOperator>(VL0)) { 1876 ValueList Left, Right; 1877 reorderAltShuffleOperands(S.Opcode, VL, Left, Right); 1878 buildTree_rec(Left, Depth + 1, UserTreeIdx); 1879 buildTree_rec(Right, Depth + 1, UserTreeIdx); 1880 return; 1881 } 1882 1883 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 1884 ValueList Operands; 1885 // Prepare the operand vector. 1886 for (Value *j : VL) 1887 Operands.push_back(cast<Instruction>(j)->getOperand(i)); 1888 1889 buildTree_rec(Operands, Depth + 1, UserTreeIdx); 1890 } 1891 return; 1892 1893 default: 1894 BS.cancelScheduling(VL, VL0); 1895 newTreeEntry(VL, false, UserTreeIdx); 1896 DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); 1897 return; 1898 } 1899 } 1900 1901 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const { 1902 unsigned N; 1903 Type *EltTy; 1904 auto *ST = dyn_cast<StructType>(T); 1905 if (ST) { 1906 N = ST->getNumElements(); 1907 EltTy = *ST->element_begin(); 1908 } else { 1909 N = cast<ArrayType>(T)->getNumElements(); 1910 EltTy = cast<ArrayType>(T)->getElementType(); 1911 } 1912 if (!isValidElementType(EltTy)) 1913 return 0; 1914 uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N)); 1915 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T)) 1916 return 0; 1917 if (ST) { 1918 // Check that struct is homogeneous. 1919 for (const auto *Ty : ST->elements()) 1920 if (Ty != EltTy) 1921 return 0; 1922 } 1923 return N; 1924 } 1925 1926 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue) const { 1927 Instruction *E0 = cast<Instruction>(OpValue); 1928 assert(E0->getOpcode() == Instruction::ExtractElement || 1929 E0->getOpcode() == Instruction::ExtractValue); 1930 assert(E0->getOpcode() == getSameOpcode(VL).Opcode && "Invalid opcode"); 1931 // Check if all of the extracts come from the same vector and from the 1932 // correct offset. 1933 Value *Vec = E0->getOperand(0); 1934 1935 // We have to extract from a vector/aggregate with the same number of elements. 1936 unsigned NElts; 1937 if (E0->getOpcode() == Instruction::ExtractValue) { 1938 const DataLayout &DL = E0->getModule()->getDataLayout(); 1939 NElts = canMapToVector(Vec->getType(), DL); 1940 if (!NElts) 1941 return false; 1942 // Check if load can be rewritten as load of vector. 1943 LoadInst *LI = dyn_cast<LoadInst>(Vec); 1944 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 1945 return false; 1946 } else { 1947 NElts = Vec->getType()->getVectorNumElements(); 1948 } 1949 1950 if (NElts != VL.size()) 1951 return false; 1952 1953 // Check that all of the indices extract from the correct offset. 1954 for (unsigned I = 0, E = VL.size(); I < E; ++I) { 1955 Instruction *Inst = cast<Instruction>(VL[I]); 1956 if (!matchExtractIndex(Inst, I, Inst->getOpcode())) 1957 return false; 1958 if (Inst->getOperand(0) != Vec) 1959 return false; 1960 } 1961 1962 return true; 1963 } 1964 1965 bool BoUpSLP::areAllUsersVectorized(Instruction *I) const { 1966 return I->hasOneUse() || 1967 std::all_of(I->user_begin(), I->user_end(), [this](User *U) { 1968 return ScalarToTreeEntry.count(U) > 0; 1969 }); 1970 } 1971 1972 int BoUpSLP::getEntryCost(TreeEntry *E) { 1973 ArrayRef<Value*> VL = E->Scalars; 1974 1975 Type *ScalarTy = VL[0]->getType(); 1976 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 1977 ScalarTy = SI->getValueOperand()->getType(); 1978 else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0])) 1979 ScalarTy = CI->getOperand(0)->getType(); 1980 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 1981 1982 // If we have computed a smaller type for the expression, update VecTy so 1983 // that the costs will be accurate. 1984 if (MinBWs.count(VL[0])) 1985 VecTy = VectorType::get( 1986 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size()); 1987 1988 if (E->NeedToGather) { 1989 if (allConstant(VL)) 1990 return 0; 1991 if (isSplat(VL)) { 1992 return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0); 1993 } 1994 if (getSameOpcode(VL).Opcode == Instruction::ExtractElement) { 1995 Optional<TargetTransformInfo::ShuffleKind> ShuffleKind = isShuffle(VL); 1996 if (ShuffleKind.hasValue()) { 1997 int Cost = TTI->getShuffleCost(ShuffleKind.getValue(), VecTy); 1998 for (auto *V : VL) { 1999 // If all users of instruction are going to be vectorized and this 2000 // instruction itself is not going to be vectorized, consider this 2001 // instruction as dead and remove its cost from the final cost of the 2002 // vectorized tree. 2003 if (areAllUsersVectorized(cast<Instruction>(V)) && 2004 !ScalarToTreeEntry.count(V)) { 2005 auto *IO = cast<ConstantInt>( 2006 cast<ExtractElementInst>(V)->getIndexOperand()); 2007 Cost -= TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, 2008 IO->getZExtValue()); 2009 } 2010 } 2011 return Cost; 2012 } 2013 } 2014 return getGatherCost(E->Scalars); 2015 } 2016 InstructionsState S = getSameOpcode(VL); 2017 assert(S.Opcode && allSameType(VL) && allSameBlock(VL) && "Invalid VL"); 2018 Instruction *VL0 = cast<Instruction>(S.OpValue); 2019 unsigned ShuffleOrOp = S.IsAltShuffle ? 2020 (unsigned) Instruction::ShuffleVector : S.Opcode; 2021 switch (ShuffleOrOp) { 2022 case Instruction::PHI: 2023 return 0; 2024 2025 case Instruction::ExtractValue: 2026 case Instruction::ExtractElement: 2027 if (canReuseExtract(VL, S.OpValue)) { 2028 int DeadCost = 0; 2029 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 2030 Instruction *E = cast<Instruction>(VL[i]); 2031 // If all users are going to be vectorized, instruction can be 2032 // considered as dead. 2033 // The same, if have only one user, it will be vectorized for sure. 2034 if (areAllUsersVectorized(E)) 2035 // Take credit for instruction that will become dead. 2036 DeadCost += 2037 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i); 2038 } 2039 return -DeadCost; 2040 } 2041 return getGatherCost(VecTy); 2042 2043 case Instruction::ZExt: 2044 case Instruction::SExt: 2045 case Instruction::FPToUI: 2046 case Instruction::FPToSI: 2047 case Instruction::FPExt: 2048 case Instruction::PtrToInt: 2049 case Instruction::IntToPtr: 2050 case Instruction::SIToFP: 2051 case Instruction::UIToFP: 2052 case Instruction::Trunc: 2053 case Instruction::FPTrunc: 2054 case Instruction::BitCast: { 2055 Type *SrcTy = VL0->getOperand(0)->getType(); 2056 2057 // Calculate the cost of this instruction. 2058 int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(), 2059 VL0->getType(), SrcTy, VL0); 2060 2061 VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size()); 2062 int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy, VL0); 2063 return VecCost - ScalarCost; 2064 } 2065 case Instruction::FCmp: 2066 case Instruction::ICmp: 2067 case Instruction::Select: { 2068 // Calculate the cost of this instruction. 2069 VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size()); 2070 int ScalarCost = VecTy->getNumElements() * 2071 TTI->getCmpSelInstrCost(S.Opcode, ScalarTy, Builder.getInt1Ty(), VL0); 2072 int VecCost = TTI->getCmpSelInstrCost(S.Opcode, VecTy, MaskTy, VL0); 2073 return VecCost - ScalarCost; 2074 } 2075 case Instruction::Add: 2076 case Instruction::FAdd: 2077 case Instruction::Sub: 2078 case Instruction::FSub: 2079 case Instruction::Mul: 2080 case Instruction::FMul: 2081 case Instruction::UDiv: 2082 case Instruction::SDiv: 2083 case Instruction::FDiv: 2084 case Instruction::URem: 2085 case Instruction::SRem: 2086 case Instruction::FRem: 2087 case Instruction::Shl: 2088 case Instruction::LShr: 2089 case Instruction::AShr: 2090 case Instruction::And: 2091 case Instruction::Or: 2092 case Instruction::Xor: { 2093 // Certain instructions can be cheaper to vectorize if they have a 2094 // constant second vector operand. 2095 TargetTransformInfo::OperandValueKind Op1VK = 2096 TargetTransformInfo::OK_AnyValue; 2097 TargetTransformInfo::OperandValueKind Op2VK = 2098 TargetTransformInfo::OK_UniformConstantValue; 2099 TargetTransformInfo::OperandValueProperties Op1VP = 2100 TargetTransformInfo::OP_None; 2101 TargetTransformInfo::OperandValueProperties Op2VP = 2102 TargetTransformInfo::OP_None; 2103 2104 // If all operands are exactly the same ConstantInt then set the 2105 // operand kind to OK_UniformConstantValue. 2106 // If instead not all operands are constants, then set the operand kind 2107 // to OK_AnyValue. If all operands are constants but not the same, 2108 // then set the operand kind to OK_NonUniformConstantValue. 2109 ConstantInt *CInt = nullptr; 2110 for (unsigned i = 0; i < VL.size(); ++i) { 2111 const Instruction *I = cast<Instruction>(VL[i]); 2112 if (!isa<ConstantInt>(I->getOperand(1))) { 2113 Op2VK = TargetTransformInfo::OK_AnyValue; 2114 break; 2115 } 2116 if (i == 0) { 2117 CInt = cast<ConstantInt>(I->getOperand(1)); 2118 continue; 2119 } 2120 if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && 2121 CInt != cast<ConstantInt>(I->getOperand(1))) 2122 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 2123 } 2124 // FIXME: Currently cost of model modification for division by power of 2125 // 2 is handled for X86 and AArch64. Add support for other targets. 2126 if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt && 2127 CInt->getValue().isPowerOf2()) 2128 Op2VP = TargetTransformInfo::OP_PowerOf2; 2129 2130 SmallVector<const Value *, 4> Operands(VL0->operand_values()); 2131 int ScalarCost = 2132 VecTy->getNumElements() * 2133 TTI->getArithmeticInstrCost(S.Opcode, ScalarTy, Op1VK, Op2VK, Op1VP, 2134 Op2VP, Operands); 2135 int VecCost = TTI->getArithmeticInstrCost(S.Opcode, VecTy, Op1VK, Op2VK, 2136 Op1VP, Op2VP, Operands); 2137 return VecCost - ScalarCost; 2138 } 2139 case Instruction::GetElementPtr: { 2140 TargetTransformInfo::OperandValueKind Op1VK = 2141 TargetTransformInfo::OK_AnyValue; 2142 TargetTransformInfo::OperandValueKind Op2VK = 2143 TargetTransformInfo::OK_UniformConstantValue; 2144 2145 int ScalarCost = 2146 VecTy->getNumElements() * 2147 TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK); 2148 int VecCost = 2149 TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK); 2150 2151 return VecCost - ScalarCost; 2152 } 2153 case Instruction::Load: { 2154 // Cost of wide load - cost of scalar loads. 2155 unsigned alignment = dyn_cast<LoadInst>(VL0)->getAlignment(); 2156 int ScalarLdCost = VecTy->getNumElements() * 2157 TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0, VL0); 2158 int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, 2159 VecTy, alignment, 0, VL0); 2160 return VecLdCost - ScalarLdCost; 2161 } 2162 case Instruction::Store: { 2163 // We know that we can merge the stores. Calculate the cost. 2164 unsigned alignment = dyn_cast<StoreInst>(VL0)->getAlignment(); 2165 int ScalarStCost = VecTy->getNumElements() * 2166 TTI->getMemoryOpCost(Instruction::Store, ScalarTy, alignment, 0, VL0); 2167 int VecStCost = TTI->getMemoryOpCost(Instruction::Store, 2168 VecTy, alignment, 0, VL0); 2169 return VecStCost - ScalarStCost; 2170 } 2171 case Instruction::Call: { 2172 CallInst *CI = cast<CallInst>(VL0); 2173 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 2174 2175 // Calculate the cost of the scalar and vector calls. 2176 SmallVector<Type*, 4> ScalarTys; 2177 for (unsigned op = 0, opc = CI->getNumArgOperands(); op!= opc; ++op) 2178 ScalarTys.push_back(CI->getArgOperand(op)->getType()); 2179 2180 FastMathFlags FMF; 2181 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 2182 FMF = FPMO->getFastMathFlags(); 2183 2184 int ScalarCallCost = VecTy->getNumElements() * 2185 TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF); 2186 2187 SmallVector<Value *, 4> Args(CI->arg_operands()); 2188 int VecCallCost = TTI->getIntrinsicInstrCost(ID, CI->getType(), Args, FMF, 2189 VecTy->getNumElements()); 2190 2191 DEBUG(dbgs() << "SLP: Call cost "<< VecCallCost - ScalarCallCost 2192 << " (" << VecCallCost << "-" << ScalarCallCost << ")" 2193 << " for " << *CI << "\n"); 2194 2195 return VecCallCost - ScalarCallCost; 2196 } 2197 case Instruction::ShuffleVector: { 2198 TargetTransformInfo::OperandValueKind Op1VK = 2199 TargetTransformInfo::OK_AnyValue; 2200 TargetTransformInfo::OperandValueKind Op2VK = 2201 TargetTransformInfo::OK_AnyValue; 2202 int ScalarCost = 0; 2203 int VecCost = 0; 2204 for (Value *i : VL) { 2205 Instruction *I = cast<Instruction>(i); 2206 if (!I) 2207 break; 2208 ScalarCost += 2209 TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK); 2210 } 2211 // VecCost is equal to sum of the cost of creating 2 vectors 2212 // and the cost of creating shuffle. 2213 Instruction *I0 = cast<Instruction>(VL[0]); 2214 VecCost = 2215 TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK); 2216 Instruction *I1 = cast<Instruction>(VL[1]); 2217 VecCost += 2218 TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK); 2219 VecCost += 2220 TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0); 2221 return VecCost - ScalarCost; 2222 } 2223 default: 2224 llvm_unreachable("Unknown instruction"); 2225 } 2226 } 2227 2228 bool BoUpSLP::isFullyVectorizableTinyTree() { 2229 DEBUG(dbgs() << "SLP: Check whether the tree with height " << 2230 VectorizableTree.size() << " is fully vectorizable .\n"); 2231 2232 // We only handle trees of heights 1 and 2. 2233 if (VectorizableTree.size() == 1 && !VectorizableTree[0].NeedToGather) 2234 return true; 2235 2236 if (VectorizableTree.size() != 2) 2237 return false; 2238 2239 // Handle splat and all-constants stores. 2240 if (!VectorizableTree[0].NeedToGather && 2241 (allConstant(VectorizableTree[1].Scalars) || 2242 isSplat(VectorizableTree[1].Scalars))) 2243 return true; 2244 2245 // Gathering cost would be too much for tiny trees. 2246 if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather) 2247 return false; 2248 2249 return true; 2250 } 2251 2252 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() { 2253 // We can vectorize the tree if its size is greater than or equal to the 2254 // minimum size specified by the MinTreeSize command line option. 2255 if (VectorizableTree.size() >= MinTreeSize) 2256 return false; 2257 2258 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we 2259 // can vectorize it if we can prove it fully vectorizable. 2260 if (isFullyVectorizableTinyTree()) 2261 return false; 2262 2263 assert(VectorizableTree.empty() 2264 ? ExternalUses.empty() 2265 : true && "We shouldn't have any external users"); 2266 2267 // Otherwise, we can't vectorize the tree. It is both tiny and not fully 2268 // vectorizable. 2269 return true; 2270 } 2271 2272 int BoUpSLP::getSpillCost() { 2273 // Walk from the bottom of the tree to the top, tracking which values are 2274 // live. When we see a call instruction that is not part of our tree, 2275 // query TTI to see if there is a cost to keeping values live over it 2276 // (for example, if spills and fills are required). 2277 unsigned BundleWidth = VectorizableTree.front().Scalars.size(); 2278 int Cost = 0; 2279 2280 SmallPtrSet<Instruction*, 4> LiveValues; 2281 Instruction *PrevInst = nullptr; 2282 2283 for (const auto &N : VectorizableTree) { 2284 Instruction *Inst = dyn_cast<Instruction>(N.Scalars[0]); 2285 if (!Inst) 2286 continue; 2287 2288 if (!PrevInst) { 2289 PrevInst = Inst; 2290 continue; 2291 } 2292 2293 // Update LiveValues. 2294 LiveValues.erase(PrevInst); 2295 for (auto &J : PrevInst->operands()) { 2296 if (isa<Instruction>(&*J) && getTreeEntry(&*J)) 2297 LiveValues.insert(cast<Instruction>(&*J)); 2298 } 2299 2300 DEBUG( 2301 dbgs() << "SLP: #LV: " << LiveValues.size(); 2302 for (auto *X : LiveValues) 2303 dbgs() << " " << X->getName(); 2304 dbgs() << ", Looking at "; 2305 Inst->dump(); 2306 ); 2307 2308 // Now find the sequence of instructions between PrevInst and Inst. 2309 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(), 2310 PrevInstIt = 2311 PrevInst->getIterator().getReverse(); 2312 while (InstIt != PrevInstIt) { 2313 if (PrevInstIt == PrevInst->getParent()->rend()) { 2314 PrevInstIt = Inst->getParent()->rbegin(); 2315 continue; 2316 } 2317 2318 if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) { 2319 SmallVector<Type*, 4> V; 2320 for (auto *II : LiveValues) 2321 V.push_back(VectorType::get(II->getType(), BundleWidth)); 2322 Cost += TTI->getCostOfKeepingLiveOverCall(V); 2323 } 2324 2325 ++PrevInstIt; 2326 } 2327 2328 PrevInst = Inst; 2329 } 2330 2331 return Cost; 2332 } 2333 2334 int BoUpSLP::getTreeCost() { 2335 int Cost = 0; 2336 DEBUG(dbgs() << "SLP: Calculating cost for tree of size " << 2337 VectorizableTree.size() << ".\n"); 2338 2339 unsigned BundleWidth = VectorizableTree[0].Scalars.size(); 2340 2341 for (TreeEntry &TE : VectorizableTree) { 2342 int C = getEntryCost(&TE); 2343 DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with " 2344 << *TE.Scalars[0] << ".\n"); 2345 Cost += C; 2346 } 2347 2348 SmallSet<Value *, 16> ExtractCostCalculated; 2349 int ExtractCost = 0; 2350 for (ExternalUser &EU : ExternalUses) { 2351 // We only add extract cost once for the same scalar. 2352 if (!ExtractCostCalculated.insert(EU.Scalar).second) 2353 continue; 2354 2355 // Uses by ephemeral values are free (because the ephemeral value will be 2356 // removed prior to code generation, and so the extraction will be 2357 // removed as well). 2358 if (EphValues.count(EU.User)) 2359 continue; 2360 2361 // If we plan to rewrite the tree in a smaller type, we will need to sign 2362 // extend the extracted value back to the original type. Here, we account 2363 // for the extract and the added cost of the sign extend if needed. 2364 auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth); 2365 auto *ScalarRoot = VectorizableTree[0].Scalars[0]; 2366 if (MinBWs.count(ScalarRoot)) { 2367 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 2368 auto Extend = 2369 MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt; 2370 VecTy = VectorType::get(MinTy, BundleWidth); 2371 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), 2372 VecTy, EU.Lane); 2373 } else { 2374 ExtractCost += 2375 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane); 2376 } 2377 } 2378 2379 int SpillCost = getSpillCost(); 2380 Cost += SpillCost + ExtractCost; 2381 2382 std::string Str; 2383 { 2384 raw_string_ostream OS(Str); 2385 OS << "SLP: Spill Cost = " << SpillCost << ".\n" 2386 << "SLP: Extract Cost = " << ExtractCost << ".\n" 2387 << "SLP: Total Cost = " << Cost << ".\n"; 2388 } 2389 DEBUG(dbgs() << Str); 2390 2391 if (ViewSLPTree) 2392 ViewGraph(this, "SLP" + F->getName(), false, Str); 2393 2394 return Cost; 2395 } 2396 2397 int BoUpSLP::getGatherCost(Type *Ty) { 2398 int Cost = 0; 2399 for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i) 2400 Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i); 2401 return Cost; 2402 } 2403 2404 int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) { 2405 // Find the type of the operands in VL. 2406 Type *ScalarTy = VL[0]->getType(); 2407 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 2408 ScalarTy = SI->getValueOperand()->getType(); 2409 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 2410 // Find the cost of inserting/extracting values from the vector. 2411 return getGatherCost(VecTy); 2412 } 2413 2414 // Reorder commutative operations in alternate shuffle if the resulting vectors 2415 // are consecutive loads. This would allow us to vectorize the tree. 2416 // If we have something like- 2417 // load a[0] - load b[0] 2418 // load b[1] + load a[1] 2419 // load a[2] - load b[2] 2420 // load a[3] + load b[3] 2421 // Reordering the second load b[1] load a[1] would allow us to vectorize this 2422 // code. 2423 void BoUpSLP::reorderAltShuffleOperands(unsigned Opcode, ArrayRef<Value *> VL, 2424 SmallVectorImpl<Value *> &Left, 2425 SmallVectorImpl<Value *> &Right) { 2426 // Push left and right operands of binary operation into Left and Right 2427 unsigned AltOpcode = getAltOpcode(Opcode); 2428 (void)AltOpcode; 2429 for (Value *V : VL) { 2430 auto *I = cast<Instruction>(V); 2431 assert(sameOpcodeOrAlt(Opcode, AltOpcode, I->getOpcode()) && 2432 "Incorrect instruction in vector"); 2433 Left.push_back(I->getOperand(0)); 2434 Right.push_back(I->getOperand(1)); 2435 } 2436 2437 // Reorder if we have a commutative operation and consecutive access 2438 // are on either side of the alternate instructions. 2439 for (unsigned j = 0; j < VL.size() - 1; ++j) { 2440 if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) { 2441 if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) { 2442 Instruction *VL1 = cast<Instruction>(VL[j]); 2443 Instruction *VL2 = cast<Instruction>(VL[j + 1]); 2444 if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) { 2445 std::swap(Left[j], Right[j]); 2446 continue; 2447 } else if (VL2->isCommutative() && 2448 isConsecutiveAccess(L, L1, *DL, *SE)) { 2449 std::swap(Left[j + 1], Right[j + 1]); 2450 continue; 2451 } 2452 // else unchanged 2453 } 2454 } 2455 if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) { 2456 if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) { 2457 Instruction *VL1 = cast<Instruction>(VL[j]); 2458 Instruction *VL2 = cast<Instruction>(VL[j + 1]); 2459 if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) { 2460 std::swap(Left[j], Right[j]); 2461 continue; 2462 } else if (VL2->isCommutative() && 2463 isConsecutiveAccess(L, L1, *DL, *SE)) { 2464 std::swap(Left[j + 1], Right[j + 1]); 2465 continue; 2466 } 2467 // else unchanged 2468 } 2469 } 2470 } 2471 } 2472 2473 // Return true if I should be commuted before adding it's left and right 2474 // operands to the arrays Left and Right. 2475 // 2476 // The vectorizer is trying to either have all elements one side being 2477 // instruction with the same opcode to enable further vectorization, or having 2478 // a splat to lower the vectorizing cost. 2479 static bool shouldReorderOperands( 2480 int i, unsigned Opcode, Instruction &I, ArrayRef<Value *> Left, 2481 ArrayRef<Value *> Right, bool AllSameOpcodeLeft, bool AllSameOpcodeRight, 2482 bool SplatLeft, bool SplatRight, Value *&VLeft, Value *&VRight) { 2483 VLeft = I.getOperand(0); 2484 VRight = I.getOperand(1); 2485 // If we have "SplatRight", try to see if commuting is needed to preserve it. 2486 if (SplatRight) { 2487 if (VRight == Right[i - 1]) 2488 // Preserve SplatRight 2489 return false; 2490 if (VLeft == Right[i - 1]) { 2491 // Commuting would preserve SplatRight, but we don't want to break 2492 // SplatLeft either, i.e. preserve the original order if possible. 2493 // (FIXME: why do we care?) 2494 if (SplatLeft && VLeft == Left[i - 1]) 2495 return false; 2496 return true; 2497 } 2498 } 2499 // Symmetrically handle Right side. 2500 if (SplatLeft) { 2501 if (VLeft == Left[i - 1]) 2502 // Preserve SplatLeft 2503 return false; 2504 if (VRight == Left[i - 1]) 2505 return true; 2506 } 2507 2508 Instruction *ILeft = dyn_cast<Instruction>(VLeft); 2509 Instruction *IRight = dyn_cast<Instruction>(VRight); 2510 2511 // If we have "AllSameOpcodeRight", try to see if the left operands preserves 2512 // it and not the right, in this case we want to commute. 2513 if (AllSameOpcodeRight) { 2514 unsigned RightPrevOpcode = cast<Instruction>(Right[i - 1])->getOpcode(); 2515 if (IRight && RightPrevOpcode == IRight->getOpcode()) 2516 // Do not commute, a match on the right preserves AllSameOpcodeRight 2517 return false; 2518 if (ILeft && RightPrevOpcode == ILeft->getOpcode()) { 2519 // We have a match and may want to commute, but first check if there is 2520 // not also a match on the existing operands on the Left to preserve 2521 // AllSameOpcodeLeft, i.e. preserve the original order if possible. 2522 // (FIXME: why do we care?) 2523 if (AllSameOpcodeLeft && ILeft && 2524 cast<Instruction>(Left[i - 1])->getOpcode() == ILeft->getOpcode()) 2525 return false; 2526 return true; 2527 } 2528 } 2529 // Symmetrically handle Left side. 2530 if (AllSameOpcodeLeft) { 2531 unsigned LeftPrevOpcode = cast<Instruction>(Left[i - 1])->getOpcode(); 2532 if (ILeft && LeftPrevOpcode == ILeft->getOpcode()) 2533 return false; 2534 if (IRight && LeftPrevOpcode == IRight->getOpcode()) 2535 return true; 2536 } 2537 return false; 2538 } 2539 2540 void BoUpSLP::reorderInputsAccordingToOpcode(unsigned Opcode, 2541 ArrayRef<Value *> VL, 2542 SmallVectorImpl<Value *> &Left, 2543 SmallVectorImpl<Value *> &Right) { 2544 if (!VL.empty()) { 2545 // Peel the first iteration out of the loop since there's nothing 2546 // interesting to do anyway and it simplifies the checks in the loop. 2547 auto *I = cast<Instruction>(VL[0]); 2548 Value *VLeft = I->getOperand(0); 2549 Value *VRight = I->getOperand(1); 2550 if (!isa<Instruction>(VRight) && isa<Instruction>(VLeft)) 2551 // Favor having instruction to the right. FIXME: why? 2552 std::swap(VLeft, VRight); 2553 Left.push_back(VLeft); 2554 Right.push_back(VRight); 2555 } 2556 2557 // Keep track if we have instructions with all the same opcode on one side. 2558 bool AllSameOpcodeLeft = isa<Instruction>(Left[0]); 2559 bool AllSameOpcodeRight = isa<Instruction>(Right[0]); 2560 // Keep track if we have one side with all the same value (broadcast). 2561 bool SplatLeft = true; 2562 bool SplatRight = true; 2563 2564 for (unsigned i = 1, e = VL.size(); i != e; ++i) { 2565 Instruction *I = cast<Instruction>(VL[i]); 2566 assert(((I->getOpcode() == Opcode && I->isCommutative()) || 2567 (I->getOpcode() != Opcode && Instruction::isCommutative(Opcode))) && 2568 "Can only process commutative instruction"); 2569 // Commute to favor either a splat or maximizing having the same opcodes on 2570 // one side. 2571 Value *VLeft; 2572 Value *VRight; 2573 if (shouldReorderOperands(i, Opcode, *I, Left, Right, AllSameOpcodeLeft, 2574 AllSameOpcodeRight, SplatLeft, SplatRight, VLeft, 2575 VRight)) { 2576 Left.push_back(VRight); 2577 Right.push_back(VLeft); 2578 } else { 2579 Left.push_back(VLeft); 2580 Right.push_back(VRight); 2581 } 2582 // Update Splat* and AllSameOpcode* after the insertion. 2583 SplatRight = SplatRight && (Right[i - 1] == Right[i]); 2584 SplatLeft = SplatLeft && (Left[i - 1] == Left[i]); 2585 AllSameOpcodeLeft = AllSameOpcodeLeft && isa<Instruction>(Left[i]) && 2586 (cast<Instruction>(Left[i - 1])->getOpcode() == 2587 cast<Instruction>(Left[i])->getOpcode()); 2588 AllSameOpcodeRight = AllSameOpcodeRight && isa<Instruction>(Right[i]) && 2589 (cast<Instruction>(Right[i - 1])->getOpcode() == 2590 cast<Instruction>(Right[i])->getOpcode()); 2591 } 2592 2593 // If one operand end up being broadcast, return this operand order. 2594 if (SplatRight || SplatLeft) 2595 return; 2596 2597 // Finally check if we can get longer vectorizable chain by reordering 2598 // without breaking the good operand order detected above. 2599 // E.g. If we have something like- 2600 // load a[0] load b[0] 2601 // load b[1] load a[1] 2602 // load a[2] load b[2] 2603 // load a[3] load b[3] 2604 // Reordering the second load b[1] load a[1] would allow us to vectorize 2605 // this code and we still retain AllSameOpcode property. 2606 // FIXME: This load reordering might break AllSameOpcode in some rare cases 2607 // such as- 2608 // add a[0],c[0] load b[0] 2609 // add a[1],c[2] load b[1] 2610 // b[2] load b[2] 2611 // add a[3],c[3] load b[3] 2612 for (unsigned j = 0; j < VL.size() - 1; ++j) { 2613 if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) { 2614 if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) { 2615 if (isConsecutiveAccess(L, L1, *DL, *SE)) { 2616 std::swap(Left[j + 1], Right[j + 1]); 2617 continue; 2618 } 2619 } 2620 } 2621 if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) { 2622 if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) { 2623 if (isConsecutiveAccess(L, L1, *DL, *SE)) { 2624 std::swap(Left[j + 1], Right[j + 1]); 2625 continue; 2626 } 2627 } 2628 } 2629 // else unchanged 2630 } 2631 } 2632 2633 void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL, Value *OpValue) { 2634 // Get the basic block this bundle is in. All instructions in the bundle 2635 // should be in this block. 2636 auto *Front = cast<Instruction>(OpValue); 2637 auto *BB = Front->getParent(); 2638 const unsigned Opcode = cast<Instruction>(OpValue)->getOpcode(); 2639 const unsigned AltOpcode = getAltOpcode(Opcode); 2640 assert(llvm::all_of(make_range(VL.begin(), VL.end()), [=](Value *V) -> bool { 2641 return !sameOpcodeOrAlt(Opcode, AltOpcode, 2642 cast<Instruction>(V)->getOpcode()) || 2643 cast<Instruction>(V)->getParent() == BB; 2644 })); 2645 2646 // The last instruction in the bundle in program order. 2647 Instruction *LastInst = nullptr; 2648 2649 // Find the last instruction. The common case should be that BB has been 2650 // scheduled, and the last instruction is VL.back(). So we start with 2651 // VL.back() and iterate over schedule data until we reach the end of the 2652 // bundle. The end of the bundle is marked by null ScheduleData. 2653 if (BlocksSchedules.count(BB)) { 2654 auto *Bundle = 2655 BlocksSchedules[BB]->getScheduleData(isOneOf(OpValue, VL.back())); 2656 if (Bundle && Bundle->isPartOfBundle()) 2657 for (; Bundle; Bundle = Bundle->NextInBundle) 2658 if (Bundle->OpValue == Bundle->Inst) 2659 LastInst = Bundle->Inst; 2660 } 2661 2662 // LastInst can still be null at this point if there's either not an entry 2663 // for BB in BlocksSchedules or there's no ScheduleData available for 2664 // VL.back(). This can be the case if buildTree_rec aborts for various 2665 // reasons (e.g., the maximum recursion depth is reached, the maximum region 2666 // size is reached, etc.). ScheduleData is initialized in the scheduling 2667 // "dry-run". 2668 // 2669 // If this happens, we can still find the last instruction by brute force. We 2670 // iterate forwards from Front (inclusive) until we either see all 2671 // instructions in the bundle or reach the end of the block. If Front is the 2672 // last instruction in program order, LastInst will be set to Front, and we 2673 // will visit all the remaining instructions in the block. 2674 // 2675 // One of the reasons we exit early from buildTree_rec is to place an upper 2676 // bound on compile-time. Thus, taking an additional compile-time hit here is 2677 // not ideal. However, this should be exceedingly rare since it requires that 2678 // we both exit early from buildTree_rec and that the bundle be out-of-order 2679 // (causing us to iterate all the way to the end of the block). 2680 if (!LastInst) { 2681 SmallPtrSet<Value *, 16> Bundle(VL.begin(), VL.end()); 2682 for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) { 2683 if (Bundle.erase(&I) && sameOpcodeOrAlt(Opcode, AltOpcode, I.getOpcode())) 2684 LastInst = &I; 2685 if (Bundle.empty()) 2686 break; 2687 } 2688 } 2689 2690 // Set the insertion point after the last instruction in the bundle. Set the 2691 // debug location to Front. 2692 Builder.SetInsertPoint(BB, ++LastInst->getIterator()); 2693 Builder.SetCurrentDebugLocation(Front->getDebugLoc()); 2694 } 2695 2696 Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) { 2697 Value *Vec = UndefValue::get(Ty); 2698 // Generate the 'InsertElement' instruction. 2699 for (unsigned i = 0; i < Ty->getNumElements(); ++i) { 2700 Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i)); 2701 if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) { 2702 GatherSeq.insert(Insrt); 2703 CSEBlocks.insert(Insrt->getParent()); 2704 2705 // Add to our 'need-to-extract' list. 2706 if (TreeEntry *E = getTreeEntry(VL[i])) { 2707 // Find which lane we need to extract. 2708 int FoundLane = -1; 2709 for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) { 2710 // Is this the lane of the scalar that we are looking for ? 2711 if (E->Scalars[Lane] == VL[i]) { 2712 FoundLane = Lane; 2713 break; 2714 } 2715 } 2716 assert(FoundLane >= 0 && "Could not find the correct lane"); 2717 ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane)); 2718 } 2719 } 2720 } 2721 2722 return Vec; 2723 } 2724 2725 Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL, Value *OpValue) const { 2726 if (const TreeEntry *En = getTreeEntry(OpValue)) { 2727 if (En->isSame(VL) && En->VectorizedValue) 2728 return En->VectorizedValue; 2729 } 2730 return nullptr; 2731 } 2732 2733 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) { 2734 InstructionsState S = getSameOpcode(VL); 2735 if (S.Opcode) { 2736 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 2737 if (E->isSame(VL)) 2738 return vectorizeTree(E); 2739 } 2740 } 2741 2742 Type *ScalarTy = S.OpValue->getType(); 2743 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 2744 ScalarTy = SI->getValueOperand()->getType(); 2745 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 2746 2747 return Gather(VL, VecTy); 2748 } 2749 2750 Value *BoUpSLP::vectorizeTree(TreeEntry *E) { 2751 IRBuilder<>::InsertPointGuard Guard(Builder); 2752 2753 if (E->VectorizedValue) { 2754 DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); 2755 return E->VectorizedValue; 2756 } 2757 2758 InstructionsState S = getSameOpcode(E->Scalars); 2759 Instruction *VL0 = cast<Instruction>(E->Scalars[0]); 2760 Type *ScalarTy = VL0->getType(); 2761 if (StoreInst *SI = dyn_cast<StoreInst>(VL0)) 2762 ScalarTy = SI->getValueOperand()->getType(); 2763 VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size()); 2764 2765 if (E->NeedToGather) { 2766 setInsertPointAfterBundle(E->Scalars, VL0); 2767 auto *V = Gather(E->Scalars, VecTy); 2768 E->VectorizedValue = V; 2769 return V; 2770 } 2771 2772 unsigned ShuffleOrOp = S.IsAltShuffle ? 2773 (unsigned) Instruction::ShuffleVector : S.Opcode; 2774 switch (ShuffleOrOp) { 2775 case Instruction::PHI: { 2776 PHINode *PH = dyn_cast<PHINode>(VL0); 2777 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI()); 2778 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 2779 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 2780 E->VectorizedValue = NewPhi; 2781 2782 // PHINodes may have multiple entries from the same block. We want to 2783 // visit every block once. 2784 SmallSet<BasicBlock*, 4> VisitedBBs; 2785 2786 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 2787 ValueList Operands; 2788 BasicBlock *IBB = PH->getIncomingBlock(i); 2789 2790 if (!VisitedBBs.insert(IBB).second) { 2791 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 2792 continue; 2793 } 2794 2795 // Prepare the operand vector. 2796 for (Value *V : E->Scalars) 2797 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(IBB)); 2798 2799 Builder.SetInsertPoint(IBB->getTerminator()); 2800 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 2801 Value *Vec = vectorizeTree(Operands); 2802 NewPhi->addIncoming(Vec, IBB); 2803 } 2804 2805 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 2806 "Invalid number of incoming values"); 2807 return NewPhi; 2808 } 2809 2810 case Instruction::ExtractElement: { 2811 if (canReuseExtract(E->Scalars, VL0)) { 2812 Value *V = VL0->getOperand(0); 2813 E->VectorizedValue = V; 2814 return V; 2815 } 2816 setInsertPointAfterBundle(E->Scalars, VL0); 2817 auto *V = Gather(E->Scalars, VecTy); 2818 E->VectorizedValue = V; 2819 return V; 2820 } 2821 case Instruction::ExtractValue: { 2822 if (canReuseExtract(E->Scalars, VL0)) { 2823 LoadInst *LI = cast<LoadInst>(VL0->getOperand(0)); 2824 Builder.SetInsertPoint(LI); 2825 PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace()); 2826 Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy); 2827 LoadInst *V = Builder.CreateAlignedLoad(Ptr, LI->getAlignment()); 2828 E->VectorizedValue = V; 2829 return propagateMetadata(V, E->Scalars); 2830 } 2831 setInsertPointAfterBundle(E->Scalars, VL0); 2832 auto *V = Gather(E->Scalars, VecTy); 2833 E->VectorizedValue = V; 2834 return V; 2835 } 2836 case Instruction::ZExt: 2837 case Instruction::SExt: 2838 case Instruction::FPToUI: 2839 case Instruction::FPToSI: 2840 case Instruction::FPExt: 2841 case Instruction::PtrToInt: 2842 case Instruction::IntToPtr: 2843 case Instruction::SIToFP: 2844 case Instruction::UIToFP: 2845 case Instruction::Trunc: 2846 case Instruction::FPTrunc: 2847 case Instruction::BitCast: { 2848 ValueList INVL; 2849 for (Value *V : E->Scalars) 2850 INVL.push_back(cast<Instruction>(V)->getOperand(0)); 2851 2852 setInsertPointAfterBundle(E->Scalars, VL0); 2853 2854 Value *InVec = vectorizeTree(INVL); 2855 2856 if (Value *V = alreadyVectorized(E->Scalars, VL0)) 2857 return V; 2858 2859 CastInst *CI = dyn_cast<CastInst>(VL0); 2860 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy); 2861 E->VectorizedValue = V; 2862 ++NumVectorInstructions; 2863 return V; 2864 } 2865 case Instruction::FCmp: 2866 case Instruction::ICmp: { 2867 ValueList LHSV, RHSV; 2868 for (Value *V : E->Scalars) { 2869 LHSV.push_back(cast<Instruction>(V)->getOperand(0)); 2870 RHSV.push_back(cast<Instruction>(V)->getOperand(1)); 2871 } 2872 2873 setInsertPointAfterBundle(E->Scalars, VL0); 2874 2875 Value *L = vectorizeTree(LHSV); 2876 Value *R = vectorizeTree(RHSV); 2877 2878 if (Value *V = alreadyVectorized(E->Scalars, VL0)) 2879 return V; 2880 2881 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 2882 Value *V; 2883 if (S.Opcode == Instruction::FCmp) 2884 V = Builder.CreateFCmp(P0, L, R); 2885 else 2886 V = Builder.CreateICmp(P0, L, R); 2887 2888 E->VectorizedValue = V; 2889 propagateIRFlags(E->VectorizedValue, E->Scalars, VL0); 2890 ++NumVectorInstructions; 2891 return V; 2892 } 2893 case Instruction::Select: { 2894 ValueList TrueVec, FalseVec, CondVec; 2895 for (Value *V : E->Scalars) { 2896 CondVec.push_back(cast<Instruction>(V)->getOperand(0)); 2897 TrueVec.push_back(cast<Instruction>(V)->getOperand(1)); 2898 FalseVec.push_back(cast<Instruction>(V)->getOperand(2)); 2899 } 2900 2901 setInsertPointAfterBundle(E->Scalars, VL0); 2902 2903 Value *Cond = vectorizeTree(CondVec); 2904 Value *True = vectorizeTree(TrueVec); 2905 Value *False = vectorizeTree(FalseVec); 2906 2907 if (Value *V = alreadyVectorized(E->Scalars, VL0)) 2908 return V; 2909 2910 Value *V = Builder.CreateSelect(Cond, True, False); 2911 E->VectorizedValue = V; 2912 ++NumVectorInstructions; 2913 return V; 2914 } 2915 case Instruction::Add: 2916 case Instruction::FAdd: 2917 case Instruction::Sub: 2918 case Instruction::FSub: 2919 case Instruction::Mul: 2920 case Instruction::FMul: 2921 case Instruction::UDiv: 2922 case Instruction::SDiv: 2923 case Instruction::FDiv: 2924 case Instruction::URem: 2925 case Instruction::SRem: 2926 case Instruction::FRem: 2927 case Instruction::Shl: 2928 case Instruction::LShr: 2929 case Instruction::AShr: 2930 case Instruction::And: 2931 case Instruction::Or: 2932 case Instruction::Xor: { 2933 ValueList LHSVL, RHSVL; 2934 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) 2935 reorderInputsAccordingToOpcode(S.Opcode, E->Scalars, LHSVL, 2936 RHSVL); 2937 else 2938 for (Value *V : E->Scalars) { 2939 auto *I = cast<Instruction>(V); 2940 LHSVL.push_back(I->getOperand(0)); 2941 RHSVL.push_back(I->getOperand(1)); 2942 } 2943 2944 setInsertPointAfterBundle(E->Scalars, VL0); 2945 2946 Value *LHS = vectorizeTree(LHSVL); 2947 Value *RHS = vectorizeTree(RHSVL); 2948 2949 if (Value *V = alreadyVectorized(E->Scalars, VL0)) 2950 return V; 2951 2952 Value *V = Builder.CreateBinOp( 2953 static_cast<Instruction::BinaryOps>(S.Opcode), LHS, RHS); 2954 E->VectorizedValue = V; 2955 propagateIRFlags(E->VectorizedValue, E->Scalars, VL0); 2956 ++NumVectorInstructions; 2957 2958 if (Instruction *I = dyn_cast<Instruction>(V)) 2959 return propagateMetadata(I, E->Scalars); 2960 2961 return V; 2962 } 2963 case Instruction::Load: { 2964 // Loads are inserted at the head of the tree because we don't want to 2965 // sink them all the way down past store instructions. 2966 setInsertPointAfterBundle(E->Scalars, VL0); 2967 2968 LoadInst *LI = cast<LoadInst>(VL0); 2969 Type *ScalarLoadTy = LI->getType(); 2970 unsigned AS = LI->getPointerAddressSpace(); 2971 2972 Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(), 2973 VecTy->getPointerTo(AS)); 2974 2975 // The pointer operand uses an in-tree scalar so we add the new BitCast to 2976 // ExternalUses list to make sure that an extract will be generated in the 2977 // future. 2978 Value *PO = LI->getPointerOperand(); 2979 if (getTreeEntry(PO)) 2980 ExternalUses.push_back(ExternalUser(PO, cast<User>(VecPtr), 0)); 2981 2982 unsigned Alignment = LI->getAlignment(); 2983 LI = Builder.CreateLoad(VecPtr); 2984 if (!Alignment) { 2985 Alignment = DL->getABITypeAlignment(ScalarLoadTy); 2986 } 2987 LI->setAlignment(Alignment); 2988 E->VectorizedValue = LI; 2989 ++NumVectorInstructions; 2990 return propagateMetadata(LI, E->Scalars); 2991 } 2992 case Instruction::Store: { 2993 StoreInst *SI = cast<StoreInst>(VL0); 2994 unsigned Alignment = SI->getAlignment(); 2995 unsigned AS = SI->getPointerAddressSpace(); 2996 2997 ValueList ScalarStoreValues; 2998 for (Value *V : E->Scalars) 2999 ScalarStoreValues.push_back(cast<StoreInst>(V)->getValueOperand()); 3000 3001 setInsertPointAfterBundle(E->Scalars, VL0); 3002 3003 Value *VecValue = vectorizeTree(ScalarStoreValues); 3004 Value *ScalarPtr = SI->getPointerOperand(); 3005 Value *VecPtr = Builder.CreateBitCast(ScalarPtr, VecTy->getPointerTo(AS)); 3006 StoreInst *S = Builder.CreateStore(VecValue, VecPtr); 3007 3008 // The pointer operand uses an in-tree scalar, so add the new BitCast to 3009 // ExternalUses to make sure that an extract will be generated in the 3010 // future. 3011 if (getTreeEntry(ScalarPtr)) 3012 ExternalUses.push_back(ExternalUser(ScalarPtr, cast<User>(VecPtr), 0)); 3013 3014 if (!Alignment) 3015 Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType()); 3016 3017 S->setAlignment(Alignment); 3018 E->VectorizedValue = S; 3019 ++NumVectorInstructions; 3020 return propagateMetadata(S, E->Scalars); 3021 } 3022 case Instruction::GetElementPtr: { 3023 setInsertPointAfterBundle(E->Scalars, VL0); 3024 3025 ValueList Op0VL; 3026 for (Value *V : E->Scalars) 3027 Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0)); 3028 3029 Value *Op0 = vectorizeTree(Op0VL); 3030 3031 std::vector<Value *> OpVecs; 3032 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e; 3033 ++j) { 3034 ValueList OpVL; 3035 for (Value *V : E->Scalars) 3036 OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j)); 3037 3038 Value *OpVec = vectorizeTree(OpVL); 3039 OpVecs.push_back(OpVec); 3040 } 3041 3042 Value *V = Builder.CreateGEP( 3043 cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs); 3044 E->VectorizedValue = V; 3045 ++NumVectorInstructions; 3046 3047 if (Instruction *I = dyn_cast<Instruction>(V)) 3048 return propagateMetadata(I, E->Scalars); 3049 3050 return V; 3051 } 3052 case Instruction::Call: { 3053 CallInst *CI = cast<CallInst>(VL0); 3054 setInsertPointAfterBundle(E->Scalars, VL0); 3055 Function *FI; 3056 Intrinsic::ID IID = Intrinsic::not_intrinsic; 3057 Value *ScalarArg = nullptr; 3058 if (CI && (FI = CI->getCalledFunction())) { 3059 IID = FI->getIntrinsicID(); 3060 } 3061 std::vector<Value *> OpVecs; 3062 for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) { 3063 ValueList OpVL; 3064 // ctlz,cttz and powi are special intrinsics whose second argument is 3065 // a scalar. This argument should not be vectorized. 3066 if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) { 3067 CallInst *CEI = cast<CallInst>(VL0); 3068 ScalarArg = CEI->getArgOperand(j); 3069 OpVecs.push_back(CEI->getArgOperand(j)); 3070 continue; 3071 } 3072 for (Value *V : E->Scalars) { 3073 CallInst *CEI = cast<CallInst>(V); 3074 OpVL.push_back(CEI->getArgOperand(j)); 3075 } 3076 3077 Value *OpVec = vectorizeTree(OpVL); 3078 DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); 3079 OpVecs.push_back(OpVec); 3080 } 3081 3082 Module *M = F->getParent(); 3083 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3084 Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) }; 3085 Function *CF = Intrinsic::getDeclaration(M, ID, Tys); 3086 SmallVector<OperandBundleDef, 1> OpBundles; 3087 CI->getOperandBundlesAsDefs(OpBundles); 3088 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); 3089 3090 // The scalar argument uses an in-tree scalar so we add the new vectorized 3091 // call to ExternalUses list to make sure that an extract will be 3092 // generated in the future. 3093 if (ScalarArg && getTreeEntry(ScalarArg)) 3094 ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0)); 3095 3096 E->VectorizedValue = V; 3097 propagateIRFlags(E->VectorizedValue, E->Scalars, VL0); 3098 ++NumVectorInstructions; 3099 return V; 3100 } 3101 case Instruction::ShuffleVector: { 3102 ValueList LHSVL, RHSVL; 3103 assert(Instruction::isBinaryOp(S.Opcode) && 3104 "Invalid Shuffle Vector Operand"); 3105 reorderAltShuffleOperands(S.Opcode, E->Scalars, LHSVL, RHSVL); 3106 setInsertPointAfterBundle(E->Scalars, VL0); 3107 3108 Value *LHS = vectorizeTree(LHSVL); 3109 Value *RHS = vectorizeTree(RHSVL); 3110 3111 if (Value *V = alreadyVectorized(E->Scalars, VL0)) 3112 return V; 3113 3114 // Create a vector of LHS op1 RHS 3115 Value *V0 = Builder.CreateBinOp( 3116 static_cast<Instruction::BinaryOps>(S.Opcode), LHS, RHS); 3117 3118 unsigned AltOpcode = getAltOpcode(S.Opcode); 3119 // Create a vector of LHS op2 RHS 3120 Value *V1 = Builder.CreateBinOp( 3121 static_cast<Instruction::BinaryOps>(AltOpcode), LHS, RHS); 3122 3123 // Create shuffle to take alternate operations from the vector. 3124 // Also, gather up odd and even scalar ops to propagate IR flags to 3125 // each vector operation. 3126 ValueList OddScalars, EvenScalars; 3127 unsigned e = E->Scalars.size(); 3128 SmallVector<Constant *, 8> Mask(e); 3129 for (unsigned i = 0; i < e; ++i) { 3130 if (isOdd(i)) { 3131 Mask[i] = Builder.getInt32(e + i); 3132 OddScalars.push_back(E->Scalars[i]); 3133 } else { 3134 Mask[i] = Builder.getInt32(i); 3135 EvenScalars.push_back(E->Scalars[i]); 3136 } 3137 } 3138 3139 Value *ShuffleMask = ConstantVector::get(Mask); 3140 propagateIRFlags(V0, EvenScalars); 3141 propagateIRFlags(V1, OddScalars); 3142 3143 Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask); 3144 E->VectorizedValue = V; 3145 ++NumVectorInstructions; 3146 if (Instruction *I = dyn_cast<Instruction>(V)) 3147 return propagateMetadata(I, E->Scalars); 3148 3149 return V; 3150 } 3151 default: 3152 llvm_unreachable("unknown inst"); 3153 } 3154 return nullptr; 3155 } 3156 3157 Value *BoUpSLP::vectorizeTree() { 3158 ExtraValueToDebugLocsMap ExternallyUsedValues; 3159 return vectorizeTree(ExternallyUsedValues); 3160 } 3161 3162 Value * 3163 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { 3164 // All blocks must be scheduled before any instructions are inserted. 3165 for (auto &BSIter : BlocksSchedules) { 3166 scheduleBlock(BSIter.second.get()); 3167 } 3168 3169 Builder.SetInsertPoint(&F->getEntryBlock().front()); 3170 auto *VectorRoot = vectorizeTree(&VectorizableTree[0]); 3171 3172 // If the vectorized tree can be rewritten in a smaller type, we truncate the 3173 // vectorized root. InstCombine will then rewrite the entire expression. We 3174 // sign extend the extracted values below. 3175 auto *ScalarRoot = VectorizableTree[0].Scalars[0]; 3176 if (MinBWs.count(ScalarRoot)) { 3177 if (auto *I = dyn_cast<Instruction>(VectorRoot)) 3178 Builder.SetInsertPoint(&*++BasicBlock::iterator(I)); 3179 auto BundleWidth = VectorizableTree[0].Scalars.size(); 3180 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 3181 auto *VecTy = VectorType::get(MinTy, BundleWidth); 3182 auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy); 3183 VectorizableTree[0].VectorizedValue = Trunc; 3184 } 3185 3186 DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values .\n"); 3187 3188 // If necessary, sign-extend or zero-extend ScalarRoot to the larger type 3189 // specified by ScalarType. 3190 auto extend = [&](Value *ScalarRoot, Value *Ex, Type *ScalarType) { 3191 if (!MinBWs.count(ScalarRoot)) 3192 return Ex; 3193 if (MinBWs[ScalarRoot].second) 3194 return Builder.CreateSExt(Ex, ScalarType); 3195 return Builder.CreateZExt(Ex, ScalarType); 3196 }; 3197 3198 // Extract all of the elements with the external uses. 3199 for (const auto &ExternalUse : ExternalUses) { 3200 Value *Scalar = ExternalUse.Scalar; 3201 llvm::User *User = ExternalUse.User; 3202 3203 // Skip users that we already RAUW. This happens when one instruction 3204 // has multiple uses of the same value. 3205 if (User && !is_contained(Scalar->users(), User)) 3206 continue; 3207 TreeEntry *E = getTreeEntry(Scalar); 3208 assert(E && "Invalid scalar"); 3209 assert(!E->NeedToGather && "Extracting from a gather list"); 3210 3211 Value *Vec = E->VectorizedValue; 3212 assert(Vec && "Can't find vectorizable value"); 3213 3214 Value *Lane = Builder.getInt32(ExternalUse.Lane); 3215 // If User == nullptr, the Scalar is used as extra arg. Generate 3216 // ExtractElement instruction and update the record for this scalar in 3217 // ExternallyUsedValues. 3218 if (!User) { 3219 assert(ExternallyUsedValues.count(Scalar) && 3220 "Scalar with nullptr as an external user must be registered in " 3221 "ExternallyUsedValues map"); 3222 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 3223 Builder.SetInsertPoint(VecI->getParent(), 3224 std::next(VecI->getIterator())); 3225 } else { 3226 Builder.SetInsertPoint(&F->getEntryBlock().front()); 3227 } 3228 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 3229 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 3230 CSEBlocks.insert(cast<Instruction>(Scalar)->getParent()); 3231 auto &Locs = ExternallyUsedValues[Scalar]; 3232 ExternallyUsedValues.insert({Ex, Locs}); 3233 ExternallyUsedValues.erase(Scalar); 3234 continue; 3235 } 3236 3237 // Generate extracts for out-of-tree users. 3238 // Find the insertion point for the extractelement lane. 3239 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 3240 if (PHINode *PH = dyn_cast<PHINode>(User)) { 3241 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { 3242 if (PH->getIncomingValue(i) == Scalar) { 3243 TerminatorInst *IncomingTerminator = 3244 PH->getIncomingBlock(i)->getTerminator(); 3245 if (isa<CatchSwitchInst>(IncomingTerminator)) { 3246 Builder.SetInsertPoint(VecI->getParent(), 3247 std::next(VecI->getIterator())); 3248 } else { 3249 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); 3250 } 3251 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 3252 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 3253 CSEBlocks.insert(PH->getIncomingBlock(i)); 3254 PH->setOperand(i, Ex); 3255 } 3256 } 3257 } else { 3258 Builder.SetInsertPoint(cast<Instruction>(User)); 3259 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 3260 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 3261 CSEBlocks.insert(cast<Instruction>(User)->getParent()); 3262 User->replaceUsesOfWith(Scalar, Ex); 3263 } 3264 } else { 3265 Builder.SetInsertPoint(&F->getEntryBlock().front()); 3266 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 3267 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 3268 CSEBlocks.insert(&F->getEntryBlock()); 3269 User->replaceUsesOfWith(Scalar, Ex); 3270 } 3271 3272 DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 3273 } 3274 3275 // For each vectorized value: 3276 for (TreeEntry &EIdx : VectorizableTree) { 3277 TreeEntry *Entry = &EIdx; 3278 3279 // No need to handle users of gathered values. 3280 if (Entry->NeedToGather) 3281 continue; 3282 3283 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 3284 3285 // For each lane: 3286 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 3287 Value *Scalar = Entry->Scalars[Lane]; 3288 3289 Type *Ty = Scalar->getType(); 3290 if (!Ty->isVoidTy()) { 3291 #ifndef NDEBUG 3292 for (User *U : Scalar->users()) { 3293 DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 3294 3295 // It is legal to replace users in the ignorelist by undef. 3296 assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) && 3297 "Replacing out-of-tree value with undef"); 3298 } 3299 #endif 3300 Value *Undef = UndefValue::get(Ty); 3301 Scalar->replaceAllUsesWith(Undef); 3302 } 3303 DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 3304 eraseInstruction(cast<Instruction>(Scalar)); 3305 } 3306 } 3307 3308 Builder.ClearInsertionPoint(); 3309 3310 return VectorizableTree[0].VectorizedValue; 3311 } 3312 3313 void BoUpSLP::optimizeGatherSequence() { 3314 DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size() 3315 << " gather sequences instructions.\n"); 3316 // LICM InsertElementInst sequences. 3317 for (Instruction *it : GatherSeq) { 3318 InsertElementInst *Insert = dyn_cast<InsertElementInst>(it); 3319 3320 if (!Insert) 3321 continue; 3322 3323 // Check if this block is inside a loop. 3324 Loop *L = LI->getLoopFor(Insert->getParent()); 3325 if (!L) 3326 continue; 3327 3328 // Check if it has a preheader. 3329 BasicBlock *PreHeader = L->getLoopPreheader(); 3330 if (!PreHeader) 3331 continue; 3332 3333 // If the vector or the element that we insert into it are 3334 // instructions that are defined in this basic block then we can't 3335 // hoist this instruction. 3336 Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0)); 3337 Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1)); 3338 if (CurrVec && L->contains(CurrVec)) 3339 continue; 3340 if (NewElem && L->contains(NewElem)) 3341 continue; 3342 3343 // We can hoist this instruction. Move it to the pre-header. 3344 Insert->moveBefore(PreHeader->getTerminator()); 3345 } 3346 3347 // Make a list of all reachable blocks in our CSE queue. 3348 SmallVector<const DomTreeNode *, 8> CSEWorkList; 3349 CSEWorkList.reserve(CSEBlocks.size()); 3350 for (BasicBlock *BB : CSEBlocks) 3351 if (DomTreeNode *N = DT->getNode(BB)) { 3352 assert(DT->isReachableFromEntry(N)); 3353 CSEWorkList.push_back(N); 3354 } 3355 3356 // Sort blocks by domination. This ensures we visit a block after all blocks 3357 // dominating it are visited. 3358 std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(), 3359 [this](const DomTreeNode *A, const DomTreeNode *B) { 3360 return DT->properlyDominates(A, B); 3361 }); 3362 3363 // Perform O(N^2) search over the gather sequences and merge identical 3364 // instructions. TODO: We can further optimize this scan if we split the 3365 // instructions into different buckets based on the insert lane. 3366 SmallVector<Instruction *, 16> Visited; 3367 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 3368 assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 3369 "Worklist not sorted properly!"); 3370 BasicBlock *BB = (*I)->getBlock(); 3371 // For all instructions in blocks containing gather sequences: 3372 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) { 3373 Instruction *In = &*it++; 3374 if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In)) 3375 continue; 3376 3377 // Check if we can replace this instruction with any of the 3378 // visited instructions. 3379 for (Instruction *v : Visited) { 3380 if (In->isIdenticalTo(v) && 3381 DT->dominates(v->getParent(), In->getParent())) { 3382 In->replaceAllUsesWith(v); 3383 eraseInstruction(In); 3384 In = nullptr; 3385 break; 3386 } 3387 } 3388 if (In) { 3389 assert(!is_contained(Visited, In)); 3390 Visited.push_back(In); 3391 } 3392 } 3393 } 3394 CSEBlocks.clear(); 3395 GatherSeq.clear(); 3396 } 3397 3398 // Groups the instructions to a bundle (which is then a single scheduling entity) 3399 // and schedules instructions until the bundle gets ready. 3400 bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, 3401 BoUpSLP *SLP, Value *OpValue) { 3402 if (isa<PHINode>(OpValue)) 3403 return true; 3404 3405 // Initialize the instruction bundle. 3406 Instruction *OldScheduleEnd = ScheduleEnd; 3407 ScheduleData *PrevInBundle = nullptr; 3408 ScheduleData *Bundle = nullptr; 3409 bool ReSchedule = false; 3410 DEBUG(dbgs() << "SLP: bundle: " << *OpValue << "\n"); 3411 3412 // Make sure that the scheduling region contains all 3413 // instructions of the bundle. 3414 for (Value *V : VL) { 3415 if (!extendSchedulingRegion(V, OpValue)) 3416 return false; 3417 } 3418 3419 for (Value *V : VL) { 3420 ScheduleData *BundleMember = getScheduleData(V); 3421 assert(BundleMember && 3422 "no ScheduleData for bundle member (maybe not in same basic block)"); 3423 if (BundleMember->IsScheduled) { 3424 // A bundle member was scheduled as single instruction before and now 3425 // needs to be scheduled as part of the bundle. We just get rid of the 3426 // existing schedule. 3427 DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember 3428 << " was already scheduled\n"); 3429 ReSchedule = true; 3430 } 3431 assert(BundleMember->isSchedulingEntity() && 3432 "bundle member already part of other bundle"); 3433 if (PrevInBundle) { 3434 PrevInBundle->NextInBundle = BundleMember; 3435 } else { 3436 Bundle = BundleMember; 3437 } 3438 BundleMember->UnscheduledDepsInBundle = 0; 3439 Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps; 3440 3441 // Group the instructions to a bundle. 3442 BundleMember->FirstInBundle = Bundle; 3443 PrevInBundle = BundleMember; 3444 } 3445 if (ScheduleEnd != OldScheduleEnd) { 3446 // The scheduling region got new instructions at the lower end (or it is a 3447 // new region for the first bundle). This makes it necessary to 3448 // recalculate all dependencies. 3449 // It is seldom that this needs to be done a second time after adding the 3450 // initial bundle to the region. 3451 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3452 doForAllOpcodes(I, [](ScheduleData *SD) { 3453 SD->clearDependencies(); 3454 }); 3455 } 3456 ReSchedule = true; 3457 } 3458 if (ReSchedule) { 3459 resetSchedule(); 3460 initialFillReadyList(ReadyInsts); 3461 } 3462 3463 DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block " 3464 << BB->getName() << "\n"); 3465 3466 calculateDependencies(Bundle, true, SLP); 3467 3468 // Now try to schedule the new bundle. As soon as the bundle is "ready" it 3469 // means that there are no cyclic dependencies and we can schedule it. 3470 // Note that's important that we don't "schedule" the bundle yet (see 3471 // cancelScheduling). 3472 while (!Bundle->isReady() && !ReadyInsts.empty()) { 3473 3474 ScheduleData *pickedSD = ReadyInsts.back(); 3475 ReadyInsts.pop_back(); 3476 3477 if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) { 3478 schedule(pickedSD, ReadyInsts); 3479 } 3480 } 3481 if (!Bundle->isReady()) { 3482 cancelScheduling(VL, OpValue); 3483 return false; 3484 } 3485 return true; 3486 } 3487 3488 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL, 3489 Value *OpValue) { 3490 if (isa<PHINode>(OpValue)) 3491 return; 3492 3493 ScheduleData *Bundle = getScheduleData(OpValue); 3494 DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); 3495 assert(!Bundle->IsScheduled && 3496 "Can't cancel bundle which is already scheduled"); 3497 assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() && 3498 "tried to unbundle something which is not a bundle"); 3499 3500 // Un-bundle: make single instructions out of the bundle. 3501 ScheduleData *BundleMember = Bundle; 3502 while (BundleMember) { 3503 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); 3504 BundleMember->FirstInBundle = BundleMember; 3505 ScheduleData *Next = BundleMember->NextInBundle; 3506 BundleMember->NextInBundle = nullptr; 3507 BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps; 3508 if (BundleMember->UnscheduledDepsInBundle == 0) { 3509 ReadyInsts.insert(BundleMember); 3510 } 3511 BundleMember = Next; 3512 } 3513 } 3514 3515 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() { 3516 // Allocate a new ScheduleData for the instruction. 3517 if (ChunkPos >= ChunkSize) { 3518 ScheduleDataChunks.push_back(llvm::make_unique<ScheduleData[]>(ChunkSize)); 3519 ChunkPos = 0; 3520 } 3521 return &(ScheduleDataChunks.back()[ChunkPos++]); 3522 } 3523 3524 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, 3525 Value *OpValue) { 3526 if (getScheduleData(V, isOneOf(OpValue, V))) 3527 return true; 3528 Instruction *I = dyn_cast<Instruction>(V); 3529 assert(I && "bundle member must be an instruction"); 3530 assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled"); 3531 auto &&CheckSheduleForI = [this, OpValue](Instruction *I) -> bool { 3532 ScheduleData *ISD = getScheduleData(I); 3533 if (!ISD) 3534 return false; 3535 assert(isInSchedulingRegion(ISD) && 3536 "ScheduleData not in scheduling region"); 3537 ScheduleData *SD = allocateScheduleDataChunks(); 3538 SD->Inst = I; 3539 SD->init(SchedulingRegionID, OpValue); 3540 ExtraScheduleDataMap[I][OpValue] = SD; 3541 return true; 3542 }; 3543 if (CheckSheduleForI(I)) 3544 return true; 3545 if (!ScheduleStart) { 3546 // It's the first instruction in the new region. 3547 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 3548 ScheduleStart = I; 3549 ScheduleEnd = I->getNextNode(); 3550 if (isOneOf(OpValue, I) != I) 3551 CheckSheduleForI(I); 3552 assert(ScheduleEnd && "tried to vectorize a TerminatorInst?"); 3553 DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 3554 return true; 3555 } 3556 // Search up and down at the same time, because we don't know if the new 3557 // instruction is above or below the existing scheduling region. 3558 BasicBlock::reverse_iterator UpIter = 3559 ++ScheduleStart->getIterator().getReverse(); 3560 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 3561 BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); 3562 BasicBlock::iterator LowerEnd = BB->end(); 3563 while (true) { 3564 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 3565 DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 3566 return false; 3567 } 3568 3569 if (UpIter != UpperEnd) { 3570 if (&*UpIter == I) { 3571 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 3572 ScheduleStart = I; 3573 if (isOneOf(OpValue, I) != I) 3574 CheckSheduleForI(I); 3575 DEBUG(dbgs() << "SLP: extend schedule region start to " << *I << "\n"); 3576 return true; 3577 } 3578 UpIter++; 3579 } 3580 if (DownIter != LowerEnd) { 3581 if (&*DownIter == I) { 3582 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 3583 nullptr); 3584 ScheduleEnd = I->getNextNode(); 3585 if (isOneOf(OpValue, I) != I) 3586 CheckSheduleForI(I); 3587 assert(ScheduleEnd && "tried to vectorize a TerminatorInst?"); 3588 DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); 3589 return true; 3590 } 3591 DownIter++; 3592 } 3593 assert((UpIter != UpperEnd || DownIter != LowerEnd) && 3594 "instruction not found in block"); 3595 } 3596 return true; 3597 } 3598 3599 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 3600 Instruction *ToI, 3601 ScheduleData *PrevLoadStore, 3602 ScheduleData *NextLoadStore) { 3603 ScheduleData *CurrentLoadStore = PrevLoadStore; 3604 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 3605 ScheduleData *SD = ScheduleDataMap[I]; 3606 if (!SD) { 3607 SD = allocateScheduleDataChunks(); 3608 ScheduleDataMap[I] = SD; 3609 SD->Inst = I; 3610 } 3611 assert(!isInSchedulingRegion(SD) && 3612 "new ScheduleData already in scheduling region"); 3613 SD->init(SchedulingRegionID, I); 3614 3615 if (I->mayReadOrWriteMemory() && 3616 (!isa<IntrinsicInst>(I) || 3617 cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect)) { 3618 // Update the linked list of memory accessing instructions. 3619 if (CurrentLoadStore) { 3620 CurrentLoadStore->NextLoadStore = SD; 3621 } else { 3622 FirstLoadStoreInRegion = SD; 3623 } 3624 CurrentLoadStore = SD; 3625 } 3626 } 3627 if (NextLoadStore) { 3628 if (CurrentLoadStore) 3629 CurrentLoadStore->NextLoadStore = NextLoadStore; 3630 } else { 3631 LastLoadStoreInRegion = CurrentLoadStore; 3632 } 3633 } 3634 3635 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, 3636 bool InsertInReadyList, 3637 BoUpSLP *SLP) { 3638 assert(SD->isSchedulingEntity()); 3639 3640 SmallVector<ScheduleData *, 10> WorkList; 3641 WorkList.push_back(SD); 3642 3643 while (!WorkList.empty()) { 3644 ScheduleData *SD = WorkList.back(); 3645 WorkList.pop_back(); 3646 3647 ScheduleData *BundleMember = SD; 3648 while (BundleMember) { 3649 assert(isInSchedulingRegion(BundleMember)); 3650 if (!BundleMember->hasValidDependencies()) { 3651 3652 DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n"); 3653 BundleMember->Dependencies = 0; 3654 BundleMember->resetUnscheduledDeps(); 3655 3656 // Handle def-use chain dependencies. 3657 if (BundleMember->OpValue != BundleMember->Inst) { 3658 ScheduleData *UseSD = getScheduleData(BundleMember->Inst); 3659 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 3660 BundleMember->Dependencies++; 3661 ScheduleData *DestBundle = UseSD->FirstInBundle; 3662 if (!DestBundle->IsScheduled) 3663 BundleMember->incrementUnscheduledDeps(1); 3664 if (!DestBundle->hasValidDependencies()) 3665 WorkList.push_back(DestBundle); 3666 } 3667 } else { 3668 for (User *U : BundleMember->Inst->users()) { 3669 if (isa<Instruction>(U)) { 3670 ScheduleData *UseSD = getScheduleData(U); 3671 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 3672 BundleMember->Dependencies++; 3673 ScheduleData *DestBundle = UseSD->FirstInBundle; 3674 if (!DestBundle->IsScheduled) 3675 BundleMember->incrementUnscheduledDeps(1); 3676 if (!DestBundle->hasValidDependencies()) 3677 WorkList.push_back(DestBundle); 3678 } 3679 } else { 3680 // I'm not sure if this can ever happen. But we need to be safe. 3681 // This lets the instruction/bundle never be scheduled and 3682 // eventually disable vectorization. 3683 BundleMember->Dependencies++; 3684 BundleMember->incrementUnscheduledDeps(1); 3685 } 3686 } 3687 } 3688 3689 // Handle the memory dependencies. 3690 ScheduleData *DepDest = BundleMember->NextLoadStore; 3691 if (DepDest) { 3692 Instruction *SrcInst = BundleMember->Inst; 3693 MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA); 3694 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory(); 3695 unsigned numAliased = 0; 3696 unsigned DistToSrc = 1; 3697 3698 while (DepDest) { 3699 assert(isInSchedulingRegion(DepDest)); 3700 3701 // We have two limits to reduce the complexity: 3702 // 1) AliasedCheckLimit: It's a small limit to reduce calls to 3703 // SLP->isAliased (which is the expensive part in this loop). 3704 // 2) MaxMemDepDistance: It's for very large blocks and it aborts 3705 // the whole loop (even if the loop is fast, it's quadratic). 3706 // It's important for the loop break condition (see below) to 3707 // check this limit even between two read-only instructions. 3708 if (DistToSrc >= MaxMemDepDistance || 3709 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) && 3710 (numAliased >= AliasedCheckLimit || 3711 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) { 3712 3713 // We increment the counter only if the locations are aliased 3714 // (instead of counting all alias checks). This gives a better 3715 // balance between reduced runtime and accurate dependencies. 3716 numAliased++; 3717 3718 DepDest->MemoryDependencies.push_back(BundleMember); 3719 BundleMember->Dependencies++; 3720 ScheduleData *DestBundle = DepDest->FirstInBundle; 3721 if (!DestBundle->IsScheduled) { 3722 BundleMember->incrementUnscheduledDeps(1); 3723 } 3724 if (!DestBundle->hasValidDependencies()) { 3725 WorkList.push_back(DestBundle); 3726 } 3727 } 3728 DepDest = DepDest->NextLoadStore; 3729 3730 // Example, explaining the loop break condition: Let's assume our 3731 // starting instruction is i0 and MaxMemDepDistance = 3. 3732 // 3733 // +--------v--v--v 3734 // i0,i1,i2,i3,i4,i5,i6,i7,i8 3735 // +--------^--^--^ 3736 // 3737 // MaxMemDepDistance let us stop alias-checking at i3 and we add 3738 // dependencies from i0 to i3,i4,.. (even if they are not aliased). 3739 // Previously we already added dependencies from i3 to i6,i7,i8 3740 // (because of MaxMemDepDistance). As we added a dependency from 3741 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 3742 // and we can abort this loop at i6. 3743 if (DistToSrc >= 2 * MaxMemDepDistance) 3744 break; 3745 DistToSrc++; 3746 } 3747 } 3748 } 3749 BundleMember = BundleMember->NextInBundle; 3750 } 3751 if (InsertInReadyList && SD->isReady()) { 3752 ReadyInsts.push_back(SD); 3753 DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n"); 3754 } 3755 } 3756 } 3757 3758 void BoUpSLP::BlockScheduling::resetSchedule() { 3759 assert(ScheduleStart && 3760 "tried to reset schedule on block which has not been scheduled"); 3761 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3762 doForAllOpcodes(I, [&](ScheduleData *SD) { 3763 assert(isInSchedulingRegion(SD) && 3764 "ScheduleData not in scheduling region"); 3765 SD->IsScheduled = false; 3766 SD->resetUnscheduledDeps(); 3767 }); 3768 } 3769 ReadyInsts.clear(); 3770 } 3771 3772 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 3773 if (!BS->ScheduleStart) 3774 return; 3775 3776 DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 3777 3778 BS->resetSchedule(); 3779 3780 // For the real scheduling we use a more sophisticated ready-list: it is 3781 // sorted by the original instruction location. This lets the final schedule 3782 // be as close as possible to the original instruction order. 3783 struct ScheduleDataCompare { 3784 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const { 3785 return SD2->SchedulingPriority < SD1->SchedulingPriority; 3786 } 3787 }; 3788 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 3789 3790 // Ensure that all dependency data is updated and fill the ready-list with 3791 // initial instructions. 3792 int Idx = 0; 3793 int NumToSchedule = 0; 3794 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 3795 I = I->getNextNode()) { 3796 BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) { 3797 assert(SD->isPartOfBundle() == 3798 (getTreeEntry(SD->Inst) != nullptr) && 3799 "scheduler and vectorizer bundle mismatch"); 3800 SD->FirstInBundle->SchedulingPriority = Idx++; 3801 if (SD->isSchedulingEntity()) { 3802 BS->calculateDependencies(SD, false, this); 3803 NumToSchedule++; 3804 } 3805 }); 3806 } 3807 BS->initialFillReadyList(ReadyInsts); 3808 3809 Instruction *LastScheduledInst = BS->ScheduleEnd; 3810 3811 // Do the "real" scheduling. 3812 while (!ReadyInsts.empty()) { 3813 ScheduleData *picked = *ReadyInsts.begin(); 3814 ReadyInsts.erase(ReadyInsts.begin()); 3815 3816 // Move the scheduled instruction(s) to their dedicated places, if not 3817 // there yet. 3818 ScheduleData *BundleMember = picked; 3819 while (BundleMember) { 3820 Instruction *pickedInst = BundleMember->Inst; 3821 if (LastScheduledInst->getNextNode() != pickedInst) { 3822 BS->BB->getInstList().remove(pickedInst); 3823 BS->BB->getInstList().insert(LastScheduledInst->getIterator(), 3824 pickedInst); 3825 } 3826 LastScheduledInst = pickedInst; 3827 BundleMember = BundleMember->NextInBundle; 3828 } 3829 3830 BS->schedule(picked, ReadyInsts); 3831 NumToSchedule--; 3832 } 3833 assert(NumToSchedule == 0 && "could not schedule all instructions"); 3834 3835 // Avoid duplicate scheduling of the block. 3836 BS->ScheduleStart = nullptr; 3837 } 3838 3839 unsigned BoUpSLP::getVectorElementSize(Value *V) { 3840 // If V is a store, just return the width of the stored value without 3841 // traversing the expression tree. This is the common case. 3842 if (auto *Store = dyn_cast<StoreInst>(V)) 3843 return DL->getTypeSizeInBits(Store->getValueOperand()->getType()); 3844 3845 // If V is not a store, we can traverse the expression tree to find loads 3846 // that feed it. The type of the loaded value may indicate a more suitable 3847 // width than V's type. We want to base the vector element size on the width 3848 // of memory operations where possible. 3849 SmallVector<Instruction *, 16> Worklist; 3850 SmallPtrSet<Instruction *, 16> Visited; 3851 if (auto *I = dyn_cast<Instruction>(V)) 3852 Worklist.push_back(I); 3853 3854 // Traverse the expression tree in bottom-up order looking for loads. If we 3855 // encounter an instruciton we don't yet handle, we give up. 3856 auto MaxWidth = 0u; 3857 auto FoundUnknownInst = false; 3858 while (!Worklist.empty() && !FoundUnknownInst) { 3859 auto *I = Worklist.pop_back_val(); 3860 Visited.insert(I); 3861 3862 // We should only be looking at scalar instructions here. If the current 3863 // instruction has a vector type, give up. 3864 auto *Ty = I->getType(); 3865 if (isa<VectorType>(Ty)) 3866 FoundUnknownInst = true; 3867 3868 // If the current instruction is a load, update MaxWidth to reflect the 3869 // width of the loaded value. 3870 else if (isa<LoadInst>(I)) 3871 MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty)); 3872 3873 // Otherwise, we need to visit the operands of the instruction. We only 3874 // handle the interesting cases from buildTree here. If an operand is an 3875 // instruction we haven't yet visited, we add it to the worklist. 3876 else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 3877 isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) { 3878 for (Use &U : I->operands()) 3879 if (auto *J = dyn_cast<Instruction>(U.get())) 3880 if (!Visited.count(J)) 3881 Worklist.push_back(J); 3882 } 3883 3884 // If we don't yet handle the instruction, give up. 3885 else 3886 FoundUnknownInst = true; 3887 } 3888 3889 // If we didn't encounter a memory access in the expression tree, or if we 3890 // gave up for some reason, just return the width of V. 3891 if (!MaxWidth || FoundUnknownInst) 3892 return DL->getTypeSizeInBits(V->getType()); 3893 3894 // Otherwise, return the maximum width we found. 3895 return MaxWidth; 3896 } 3897 3898 // Determine if a value V in a vectorizable expression Expr can be demoted to a 3899 // smaller type with a truncation. We collect the values that will be demoted 3900 // in ToDemote and additional roots that require investigating in Roots. 3901 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr, 3902 SmallVectorImpl<Value *> &ToDemote, 3903 SmallVectorImpl<Value *> &Roots) { 3904 // We can always demote constants. 3905 if (isa<Constant>(V)) { 3906 ToDemote.push_back(V); 3907 return true; 3908 } 3909 3910 // If the value is not an instruction in the expression with only one use, it 3911 // cannot be demoted. 3912 auto *I = dyn_cast<Instruction>(V); 3913 if (!I || !I->hasOneUse() || !Expr.count(I)) 3914 return false; 3915 3916 switch (I->getOpcode()) { 3917 3918 // We can always demote truncations and extensions. Since truncations can 3919 // seed additional demotion, we save the truncated value. 3920 case Instruction::Trunc: 3921 Roots.push_back(I->getOperand(0)); 3922 case Instruction::ZExt: 3923 case Instruction::SExt: 3924 break; 3925 3926 // We can demote certain binary operations if we can demote both of their 3927 // operands. 3928 case Instruction::Add: 3929 case Instruction::Sub: 3930 case Instruction::Mul: 3931 case Instruction::And: 3932 case Instruction::Or: 3933 case Instruction::Xor: 3934 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) || 3935 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots)) 3936 return false; 3937 break; 3938 3939 // We can demote selects if we can demote their true and false values. 3940 case Instruction::Select: { 3941 SelectInst *SI = cast<SelectInst>(I); 3942 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) || 3943 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots)) 3944 return false; 3945 break; 3946 } 3947 3948 // We can demote phis if we can demote all their incoming operands. Note that 3949 // we don't need to worry about cycles since we ensure single use above. 3950 case Instruction::PHI: { 3951 PHINode *PN = cast<PHINode>(I); 3952 for (Value *IncValue : PN->incoming_values()) 3953 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots)) 3954 return false; 3955 break; 3956 } 3957 3958 // Otherwise, conservatively give up. 3959 default: 3960 return false; 3961 } 3962 3963 // Record the value that we can demote. 3964 ToDemote.push_back(V); 3965 return true; 3966 } 3967 3968 void BoUpSLP::computeMinimumValueSizes() { 3969 // If there are no external uses, the expression tree must be rooted by a 3970 // store. We can't demote in-memory values, so there is nothing to do here. 3971 if (ExternalUses.empty()) 3972 return; 3973 3974 // We only attempt to truncate integer expressions. 3975 auto &TreeRoot = VectorizableTree[0].Scalars; 3976 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 3977 if (!TreeRootIT) 3978 return; 3979 3980 // If the expression is not rooted by a store, these roots should have 3981 // external uses. We will rely on InstCombine to rewrite the expression in 3982 // the narrower type. However, InstCombine only rewrites single-use values. 3983 // This means that if a tree entry other than a root is used externally, it 3984 // must have multiple uses and InstCombine will not rewrite it. The code 3985 // below ensures that only the roots are used externally. 3986 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end()); 3987 for (auto &EU : ExternalUses) 3988 if (!Expr.erase(EU.Scalar)) 3989 return; 3990 if (!Expr.empty()) 3991 return; 3992 3993 // Collect the scalar values of the vectorizable expression. We will use this 3994 // context to determine which values can be demoted. If we see a truncation, 3995 // we mark it as seeding another demotion. 3996 for (auto &Entry : VectorizableTree) 3997 Expr.insert(Entry.Scalars.begin(), Entry.Scalars.end()); 3998 3999 // Ensure the roots of the vectorizable tree don't form a cycle. They must 4000 // have a single external user that is not in the vectorizable tree. 4001 for (auto *Root : TreeRoot) 4002 if (!Root->hasOneUse() || Expr.count(*Root->user_begin())) 4003 return; 4004 4005 // Conservatively determine if we can actually truncate the roots of the 4006 // expression. Collect the values that can be demoted in ToDemote and 4007 // additional roots that require investigating in Roots. 4008 SmallVector<Value *, 32> ToDemote; 4009 SmallVector<Value *, 4> Roots; 4010 for (auto *Root : TreeRoot) 4011 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots)) 4012 return; 4013 4014 // The maximum bit width required to represent all the values that can be 4015 // demoted without loss of precision. It would be safe to truncate the roots 4016 // of the expression to this width. 4017 auto MaxBitWidth = 8u; 4018 4019 // We first check if all the bits of the roots are demanded. If they're not, 4020 // we can truncate the roots to this narrower type. 4021 for (auto *Root : TreeRoot) { 4022 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 4023 MaxBitWidth = std::max<unsigned>( 4024 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth); 4025 } 4026 4027 // True if the roots can be zero-extended back to their original type, rather 4028 // than sign-extended. We know that if the leading bits are not demanded, we 4029 // can safely zero-extend. So we initialize IsKnownPositive to True. 4030 bool IsKnownPositive = true; 4031 4032 // If all the bits of the roots are demanded, we can try a little harder to 4033 // compute a narrower type. This can happen, for example, if the roots are 4034 // getelementptr indices. InstCombine promotes these indices to the pointer 4035 // width. Thus, all their bits are technically demanded even though the 4036 // address computation might be vectorized in a smaller type. 4037 // 4038 // We start by looking at each entry that can be demoted. We compute the 4039 // maximum bit width required to store the scalar by using ValueTracking to 4040 // compute the number of high-order bits we can truncate. 4041 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType())) { 4042 MaxBitWidth = 8u; 4043 4044 // Determine if the sign bit of all the roots is known to be zero. If not, 4045 // IsKnownPositive is set to False. 4046 IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) { 4047 KnownBits Known = computeKnownBits(R, *DL); 4048 return Known.isNonNegative(); 4049 }); 4050 4051 // Determine the maximum number of bits required to store the scalar 4052 // values. 4053 for (auto *Scalar : ToDemote) { 4054 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT); 4055 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType()); 4056 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth); 4057 } 4058 4059 // If we can't prove that the sign bit is zero, we must add one to the 4060 // maximum bit width to account for the unknown sign bit. This preserves 4061 // the existing sign bit so we can safely sign-extend the root back to the 4062 // original type. Otherwise, if we know the sign bit is zero, we will 4063 // zero-extend the root instead. 4064 // 4065 // FIXME: This is somewhat suboptimal, as there will be cases where adding 4066 // one to the maximum bit width will yield a larger-than-necessary 4067 // type. In general, we need to add an extra bit only if we can't 4068 // prove that the upper bit of the original type is equal to the 4069 // upper bit of the proposed smaller type. If these two bits are the 4070 // same (either zero or one) we know that sign-extending from the 4071 // smaller type will result in the same value. Here, since we can't 4072 // yet prove this, we are just making the proposed smaller type 4073 // larger to ensure correctness. 4074 if (!IsKnownPositive) 4075 ++MaxBitWidth; 4076 } 4077 4078 // Round MaxBitWidth up to the next power-of-two. 4079 if (!isPowerOf2_64(MaxBitWidth)) 4080 MaxBitWidth = NextPowerOf2(MaxBitWidth); 4081 4082 // If the maximum bit width we compute is less than the with of the roots' 4083 // type, we can proceed with the narrowing. Otherwise, do nothing. 4084 if (MaxBitWidth >= TreeRootIT->getBitWidth()) 4085 return; 4086 4087 // If we can truncate the root, we must collect additional values that might 4088 // be demoted as a result. That is, those seeded by truncations we will 4089 // modify. 4090 while (!Roots.empty()) 4091 collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots); 4092 4093 // Finally, map the values we can demote to the maximum bit with we computed. 4094 for (auto *Scalar : ToDemote) 4095 MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive); 4096 } 4097 4098 namespace { 4099 4100 /// The SLPVectorizer Pass. 4101 struct SLPVectorizer : public FunctionPass { 4102 SLPVectorizerPass Impl; 4103 4104 /// Pass identification, replacement for typeid 4105 static char ID; 4106 4107 explicit SLPVectorizer() : FunctionPass(ID) { 4108 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry()); 4109 } 4110 4111 bool doInitialization(Module &M) override { 4112 return false; 4113 } 4114 4115 bool runOnFunction(Function &F) override { 4116 if (skipFunction(F)) 4117 return false; 4118 4119 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 4120 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 4121 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 4122 auto *TLI = TLIP ? &TLIP->getTLI() : nullptr; 4123 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 4124 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 4125 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 4126 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 4127 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 4128 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 4129 4130 return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 4131 } 4132 4133 void getAnalysisUsage(AnalysisUsage &AU) const override { 4134 FunctionPass::getAnalysisUsage(AU); 4135 AU.addRequired<AssumptionCacheTracker>(); 4136 AU.addRequired<ScalarEvolutionWrapperPass>(); 4137 AU.addRequired<AAResultsWrapperPass>(); 4138 AU.addRequired<TargetTransformInfoWrapperPass>(); 4139 AU.addRequired<LoopInfoWrapperPass>(); 4140 AU.addRequired<DominatorTreeWrapperPass>(); 4141 AU.addRequired<DemandedBitsWrapperPass>(); 4142 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 4143 AU.addPreserved<LoopInfoWrapperPass>(); 4144 AU.addPreserved<DominatorTreeWrapperPass>(); 4145 AU.addPreserved<AAResultsWrapperPass>(); 4146 AU.addPreserved<GlobalsAAWrapperPass>(); 4147 AU.setPreservesCFG(); 4148 } 4149 }; 4150 4151 } // end anonymous namespace 4152 4153 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) { 4154 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); 4155 auto *TTI = &AM.getResult<TargetIRAnalysis>(F); 4156 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); 4157 auto *AA = &AM.getResult<AAManager>(F); 4158 auto *LI = &AM.getResult<LoopAnalysis>(F); 4159 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 4160 auto *AC = &AM.getResult<AssumptionAnalysis>(F); 4161 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F); 4162 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 4163 4164 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 4165 if (!Changed) 4166 return PreservedAnalyses::all(); 4167 4168 PreservedAnalyses PA; 4169 PA.preserveSet<CFGAnalyses>(); 4170 PA.preserve<AAManager>(); 4171 PA.preserve<GlobalsAA>(); 4172 return PA; 4173 } 4174 4175 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, 4176 TargetTransformInfo *TTI_, 4177 TargetLibraryInfo *TLI_, AliasAnalysis *AA_, 4178 LoopInfo *LI_, DominatorTree *DT_, 4179 AssumptionCache *AC_, DemandedBits *DB_, 4180 OptimizationRemarkEmitter *ORE_) { 4181 SE = SE_; 4182 TTI = TTI_; 4183 TLI = TLI_; 4184 AA = AA_; 4185 LI = LI_; 4186 DT = DT_; 4187 AC = AC_; 4188 DB = DB_; 4189 DL = &F.getParent()->getDataLayout(); 4190 4191 Stores.clear(); 4192 GEPs.clear(); 4193 bool Changed = false; 4194 4195 // If the target claims to have no vector registers don't attempt 4196 // vectorization. 4197 if (!TTI->getNumberOfRegisters(true)) 4198 return false; 4199 4200 // Don't vectorize when the attribute NoImplicitFloat is used. 4201 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 4202 return false; 4203 4204 DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 4205 4206 // Use the bottom up slp vectorizer to construct chains that start with 4207 // store instructions. 4208 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_); 4209 4210 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 4211 // delete instructions. 4212 4213 // Scan the blocks in the function in post order. 4214 for (auto BB : post_order(&F.getEntryBlock())) { 4215 collectSeedInstructions(BB); 4216 4217 // Vectorize trees that end at stores. 4218 if (!Stores.empty()) { 4219 DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() 4220 << " underlying objects.\n"); 4221 Changed |= vectorizeStoreChains(R); 4222 } 4223 4224 // Vectorize trees that end at reductions. 4225 Changed |= vectorizeChainsInBlock(BB, R); 4226 4227 // Vectorize the index computations of getelementptr instructions. This 4228 // is primarily intended to catch gather-like idioms ending at 4229 // non-consecutive loads. 4230 if (!GEPs.empty()) { 4231 DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() 4232 << " underlying objects.\n"); 4233 Changed |= vectorizeGEPIndices(BB, R); 4234 } 4235 } 4236 4237 if (Changed) { 4238 R.optimizeGatherSequence(); 4239 DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 4240 DEBUG(verifyFunction(F)); 4241 } 4242 return Changed; 4243 } 4244 4245 /// \brief Check that the Values in the slice in VL array are still existent in 4246 /// the WeakTrackingVH array. 4247 /// Vectorization of part of the VL array may cause later values in the VL array 4248 /// to become invalid. We track when this has happened in the WeakTrackingVH 4249 /// array. 4250 static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, 4251 ArrayRef<WeakTrackingVH> VH, unsigned SliceBegin, 4252 unsigned SliceSize) { 4253 VL = VL.slice(SliceBegin, SliceSize); 4254 VH = VH.slice(SliceBegin, SliceSize); 4255 return !std::equal(VL.begin(), VL.end(), VH.begin()); 4256 } 4257 4258 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, 4259 unsigned VecRegSize) { 4260 unsigned ChainLen = Chain.size(); 4261 DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen 4262 << "\n"); 4263 unsigned Sz = R.getVectorElementSize(Chain[0]); 4264 unsigned VF = VecRegSize / Sz; 4265 4266 if (!isPowerOf2_32(Sz) || VF < 2) 4267 return false; 4268 4269 // Keep track of values that were deleted by vectorizing in the loop below. 4270 SmallVector<WeakTrackingVH, 8> TrackValues(Chain.begin(), Chain.end()); 4271 4272 bool Changed = false; 4273 // Look for profitable vectorizable trees at all offsets, starting at zero. 4274 for (unsigned i = 0, e = ChainLen; i < e; ++i) { 4275 if (i + VF > e) 4276 break; 4277 4278 // Check that a previous iteration of this loop did not delete the Value. 4279 if (hasValueBeenRAUWed(Chain, TrackValues, i, VF)) 4280 continue; 4281 4282 DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i 4283 << "\n"); 4284 ArrayRef<Value *> Operands = Chain.slice(i, VF); 4285 4286 R.buildTree(Operands); 4287 if (R.isTreeTinyAndNotFullyVectorizable()) 4288 continue; 4289 4290 R.computeMinimumValueSizes(); 4291 4292 int Cost = R.getTreeCost(); 4293 4294 DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n"); 4295 if (Cost < -SLPCostThreshold) { 4296 DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n"); 4297 4298 using namespace ore; 4299 4300 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized", 4301 cast<StoreInst>(Chain[i])) 4302 << "Stores SLP vectorized with cost " << NV("Cost", Cost) 4303 << " and with tree size " 4304 << NV("TreeSize", R.getTreeSize())); 4305 4306 R.vectorizeTree(); 4307 4308 // Move to the next bundle. 4309 i += VF - 1; 4310 Changed = true; 4311 } 4312 } 4313 4314 return Changed; 4315 } 4316 4317 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores, 4318 BoUpSLP &R) { 4319 SetVector<StoreInst *> Heads; 4320 SmallDenseSet<StoreInst *> Tails; 4321 SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain; 4322 4323 // We may run into multiple chains that merge into a single chain. We mark the 4324 // stores that we vectorized so that we don't visit the same store twice. 4325 BoUpSLP::ValueSet VectorizedStores; 4326 bool Changed = false; 4327 4328 // Do a quadratic search on all of the given stores in reverse order and find 4329 // all of the pairs of stores that follow each other. 4330 SmallVector<unsigned, 16> IndexQueue; 4331 unsigned E = Stores.size(); 4332 IndexQueue.resize(E - 1); 4333 for (unsigned I = E; I > 0; --I) { 4334 unsigned Idx = I - 1; 4335 // If a store has multiple consecutive store candidates, search Stores 4336 // array according to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ... 4337 // This is because usually pairing with immediate succeeding or preceding 4338 // candidate create the best chance to find slp vectorization opportunity. 4339 unsigned Offset = 1; 4340 unsigned Cnt = 0; 4341 for (unsigned J = 0; J < E - 1; ++J, ++Offset) { 4342 if (Idx >= Offset) { 4343 IndexQueue[Cnt] = Idx - Offset; 4344 ++Cnt; 4345 } 4346 if (Idx + Offset < E) { 4347 IndexQueue[Cnt] = Idx + Offset; 4348 ++Cnt; 4349 } 4350 } 4351 4352 for (auto K : IndexQueue) { 4353 if (isConsecutiveAccess(Stores[K], Stores[Idx], *DL, *SE)) { 4354 Tails.insert(Stores[Idx]); 4355 Heads.insert(Stores[K]); 4356 ConsecutiveChain[Stores[K]] = Stores[Idx]; 4357 break; 4358 } 4359 } 4360 } 4361 4362 // For stores that start but don't end a link in the chain: 4363 for (auto *SI : llvm::reverse(Heads)) { 4364 if (Tails.count(SI)) 4365 continue; 4366 4367 // We found a store instr that starts a chain. Now follow the chain and try 4368 // to vectorize it. 4369 BoUpSLP::ValueList Operands; 4370 StoreInst *I = SI; 4371 // Collect the chain into a list. 4372 while ((Tails.count(I) || Heads.count(I)) && !VectorizedStores.count(I)) { 4373 Operands.push_back(I); 4374 // Move to the next value in the chain. 4375 I = ConsecutiveChain[I]; 4376 } 4377 4378 // FIXME: Is division-by-2 the correct step? Should we assert that the 4379 // register size is a power-of-2? 4380 for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize(); 4381 Size /= 2) { 4382 if (vectorizeStoreChain(Operands, R, Size)) { 4383 // Mark the vectorized stores so that we don't vectorize them again. 4384 VectorizedStores.insert(Operands.begin(), Operands.end()); 4385 Changed = true; 4386 break; 4387 } 4388 } 4389 } 4390 4391 return Changed; 4392 } 4393 4394 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) { 4395 // Initialize the collections. We will make a single pass over the block. 4396 Stores.clear(); 4397 GEPs.clear(); 4398 4399 // Visit the store and getelementptr instructions in BB and organize them in 4400 // Stores and GEPs according to the underlying objects of their pointer 4401 // operands. 4402 for (Instruction &I : *BB) { 4403 // Ignore store instructions that are volatile or have a pointer operand 4404 // that doesn't point to a scalar type. 4405 if (auto *SI = dyn_cast<StoreInst>(&I)) { 4406 if (!SI->isSimple()) 4407 continue; 4408 if (!isValidElementType(SI->getValueOperand()->getType())) 4409 continue; 4410 Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI); 4411 } 4412 4413 // Ignore getelementptr instructions that have more than one index, a 4414 // constant index, or a pointer operand that doesn't point to a scalar 4415 // type. 4416 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 4417 auto Idx = GEP->idx_begin()->get(); 4418 if (GEP->getNumIndices() > 1 || isa<Constant>(Idx)) 4419 continue; 4420 if (!isValidElementType(Idx->getType())) 4421 continue; 4422 if (GEP->getType()->isVectorTy()) 4423 continue; 4424 GEPs[GetUnderlyingObject(GEP->getPointerOperand(), *DL)].push_back(GEP); 4425 } 4426 } 4427 } 4428 4429 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) { 4430 if (!A || !B) 4431 return false; 4432 Value *VL[] = { A, B }; 4433 return tryToVectorizeList(VL, R, None, true); 4434 } 4435 4436 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 4437 ArrayRef<Value *> BuildVector, 4438 bool AllowReorder) { 4439 if (VL.size() < 2) 4440 return false; 4441 4442 DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " << VL.size() 4443 << ".\n"); 4444 4445 // Check that all of the parts are scalar instructions of the same type. 4446 Instruction *I0 = dyn_cast<Instruction>(VL[0]); 4447 if (!I0) 4448 return false; 4449 4450 unsigned Opcode0 = I0->getOpcode(); 4451 4452 unsigned Sz = R.getVectorElementSize(I0); 4453 unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz); 4454 unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF); 4455 if (MaxVF < 2) 4456 return false; 4457 4458 for (Value *V : VL) { 4459 Type *Ty = V->getType(); 4460 if (!isValidElementType(Ty)) 4461 return false; 4462 Instruction *Inst = dyn_cast<Instruction>(V); 4463 if (!Inst || Inst->getOpcode() != Opcode0) 4464 return false; 4465 } 4466 4467 bool Changed = false; 4468 4469 // Keep track of values that were deleted by vectorizing in the loop below. 4470 SmallVector<WeakTrackingVH, 8> TrackValues(VL.begin(), VL.end()); 4471 4472 unsigned NextInst = 0, MaxInst = VL.size(); 4473 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; 4474 VF /= 2) { 4475 // No actual vectorization should happen, if number of parts is the same as 4476 // provided vectorization factor (i.e. the scalar type is used for vector 4477 // code during codegen). 4478 auto *VecTy = VectorType::get(VL[0]->getType(), VF); 4479 if (TTI->getNumberOfParts(VecTy) == VF) 4480 continue; 4481 for (unsigned I = NextInst; I < MaxInst; ++I) { 4482 unsigned OpsWidth = 0; 4483 4484 if (I + VF > MaxInst) 4485 OpsWidth = MaxInst - I; 4486 else 4487 OpsWidth = VF; 4488 4489 if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2) 4490 break; 4491 4492 // Check that a previous iteration of this loop did not delete the Value. 4493 if (hasValueBeenRAUWed(VL, TrackValues, I, OpsWidth)) 4494 continue; 4495 4496 DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations " 4497 << "\n"); 4498 ArrayRef<Value *> Ops = VL.slice(I, OpsWidth); 4499 4500 ArrayRef<Value *> BuildVectorSlice; 4501 if (!BuildVector.empty()) 4502 BuildVectorSlice = BuildVector.slice(I, OpsWidth); 4503 4504 R.buildTree(Ops, BuildVectorSlice); 4505 // TODO: check if we can allow reordering for more cases. 4506 if (AllowReorder && R.shouldReorder()) { 4507 // Conceptually, there is nothing actually preventing us from trying to 4508 // reorder a larger list. In fact, we do exactly this when vectorizing 4509 // reductions. However, at this point, we only expect to get here when 4510 // there are exactly two operations. 4511 assert(Ops.size() == 2); 4512 assert(BuildVectorSlice.empty()); 4513 Value *ReorderedOps[] = {Ops[1], Ops[0]}; 4514 R.buildTree(ReorderedOps, None); 4515 } 4516 if (R.isTreeTinyAndNotFullyVectorizable()) 4517 continue; 4518 4519 R.computeMinimumValueSizes(); 4520 int Cost = R.getTreeCost(); 4521 4522 if (Cost < -SLPCostThreshold) { 4523 DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); 4524 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList", 4525 cast<Instruction>(Ops[0])) 4526 << "SLP vectorized with cost " << ore::NV("Cost", Cost) 4527 << " and with tree size " 4528 << ore::NV("TreeSize", R.getTreeSize())); 4529 4530 Value *VectorizedRoot = R.vectorizeTree(); 4531 4532 // Reconstruct the build vector by extracting the vectorized root. This 4533 // way we handle the case where some elements of the vector are 4534 // undefined. 4535 // (return (inserelt <4 xi32> (insertelt undef (opd0) 0) (opd1) 2)) 4536 if (!BuildVectorSlice.empty()) { 4537 // The insert point is the last build vector instruction. The 4538 // vectorized root will precede it. This guarantees that we get an 4539 // instruction. The vectorized tree could have been constant folded. 4540 Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back()); 4541 unsigned VecIdx = 0; 4542 for (auto &V : BuildVectorSlice) { 4543 IRBuilder<NoFolder> Builder(InsertAfter->getParent(), 4544 ++BasicBlock::iterator(InsertAfter)); 4545 Instruction *I = cast<Instruction>(V); 4546 assert(isa<InsertElementInst>(I) || isa<InsertValueInst>(I)); 4547 Instruction *Extract = 4548 cast<Instruction>(Builder.CreateExtractElement( 4549 VectorizedRoot, Builder.getInt32(VecIdx++))); 4550 I->setOperand(1, Extract); 4551 I->moveAfter(Extract); 4552 InsertAfter = I; 4553 } 4554 } 4555 // Move to the next bundle. 4556 I += VF - 1; 4557 NextInst = I + 1; 4558 Changed = true; 4559 } 4560 } 4561 } 4562 4563 return Changed; 4564 } 4565 4566 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) { 4567 if (!I) 4568 return false; 4569 4570 if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I)) 4571 return false; 4572 4573 Value *P = I->getParent(); 4574 4575 // Vectorize in current basic block only. 4576 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); 4577 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); 4578 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P) 4579 return false; 4580 4581 // Try to vectorize V. 4582 if (tryToVectorizePair(Op0, Op1, R)) 4583 return true; 4584 4585 auto *A = dyn_cast<BinaryOperator>(Op0); 4586 auto *B = dyn_cast<BinaryOperator>(Op1); 4587 // Try to skip B. 4588 if (B && B->hasOneUse()) { 4589 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); 4590 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); 4591 if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R)) 4592 return true; 4593 if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R)) 4594 return true; 4595 } 4596 4597 // Try to skip A. 4598 if (A && A->hasOneUse()) { 4599 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 4600 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 4601 if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R)) 4602 return true; 4603 if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R)) 4604 return true; 4605 } 4606 return false; 4607 } 4608 4609 /// \brief Generate a shuffle mask to be used in a reduction tree. 4610 /// 4611 /// \param VecLen The length of the vector to be reduced. 4612 /// \param NumEltsToRdx The number of elements that should be reduced in the 4613 /// vector. 4614 /// \param IsPairwise Whether the reduction is a pairwise or splitting 4615 /// reduction. A pairwise reduction will generate a mask of 4616 /// <0,2,...> or <1,3,..> while a splitting reduction will generate 4617 /// <2,3, undef,undef> for a vector of 4 and NumElts = 2. 4618 /// \param IsLeft True will generate a mask of even elements, odd otherwise. 4619 static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx, 4620 bool IsPairwise, bool IsLeft, 4621 IRBuilder<> &Builder) { 4622 assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask"); 4623 4624 SmallVector<Constant *, 32> ShuffleMask( 4625 VecLen, UndefValue::get(Builder.getInt32Ty())); 4626 4627 if (IsPairwise) 4628 // Build a mask of 0, 2, ... (left) or 1, 3, ... (right). 4629 for (unsigned i = 0; i != NumEltsToRdx; ++i) 4630 ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft); 4631 else 4632 // Move the upper half of the vector to the lower half. 4633 for (unsigned i = 0; i != NumEltsToRdx; ++i) 4634 ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i); 4635 4636 return ConstantVector::get(ShuffleMask); 4637 } 4638 4639 namespace { 4640 4641 /// Model horizontal reductions. 4642 /// 4643 /// A horizontal reduction is a tree of reduction operations (currently add and 4644 /// fadd) that has operations that can be put into a vector as its leaf. 4645 /// For example, this tree: 4646 /// 4647 /// mul mul mul mul 4648 /// \ / \ / 4649 /// + + 4650 /// \ / 4651 /// + 4652 /// This tree has "mul" as its reduced values and "+" as its reduction 4653 /// operations. A reduction might be feeding into a store or a binary operation 4654 /// feeding a phi. 4655 /// ... 4656 /// \ / 4657 /// + 4658 /// | 4659 /// phi += 4660 /// 4661 /// Or: 4662 /// ... 4663 /// \ / 4664 /// + 4665 /// | 4666 /// *p = 4667 /// 4668 class HorizontalReduction { 4669 using ReductionOpsType = SmallVector<Value *, 16>; 4670 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>; 4671 ReductionOpsListType ReductionOps; 4672 SmallVector<Value *, 32> ReducedVals; 4673 // Use map vector to make stable output. 4674 MapVector<Instruction *, Value *> ExtraArgs; 4675 4676 /// Kind of the reduction data. 4677 enum ReductionKind { 4678 RK_None, /// Not a reduction. 4679 RK_Arithmetic, /// Binary reduction data. 4680 RK_Min, /// Minimum reduction data. 4681 RK_UMin, /// Unsigned minimum reduction data. 4682 RK_Max, /// Maximum reduction data. 4683 RK_UMax, /// Unsigned maximum reduction data. 4684 }; 4685 4686 /// Contains info about operation, like its opcode, left and right operands. 4687 class OperationData { 4688 /// Opcode of the instruction. 4689 unsigned Opcode = 0; 4690 4691 /// Left operand of the reduction operation. 4692 Value *LHS = nullptr; 4693 4694 /// Right operand of the reduction operation. 4695 Value *RHS = nullptr; 4696 4697 /// Kind of the reduction operation. 4698 ReductionKind Kind = RK_None; 4699 4700 /// True if float point min/max reduction has no NaNs. 4701 bool NoNaN = false; 4702 4703 /// Checks if the reduction operation can be vectorized. 4704 bool isVectorizable() const { 4705 return LHS && RHS && 4706 // We currently only support adds && min/max reductions. 4707 ((Kind == RK_Arithmetic && 4708 (Opcode == Instruction::Add || Opcode == Instruction::FAdd)) || 4709 ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) && 4710 (Kind == RK_Min || Kind == RK_Max)) || 4711 (Opcode == Instruction::ICmp && 4712 (Kind == RK_UMin || Kind == RK_UMax))); 4713 } 4714 4715 /// Creates reduction operation with the current opcode. 4716 Value *createOp(IRBuilder<> &Builder, const Twine &Name) const { 4717 assert(isVectorizable() && 4718 "Expected add|fadd or min/max reduction operation."); 4719 Value *Cmp; 4720 switch (Kind) { 4721 case RK_Arithmetic: 4722 return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, LHS, RHS, 4723 Name); 4724 case RK_Min: 4725 Cmp = Opcode == Instruction::ICmp ? Builder.CreateICmpSLT(LHS, RHS) 4726 : Builder.CreateFCmpOLT(LHS, RHS); 4727 break; 4728 case RK_Max: 4729 Cmp = Opcode == Instruction::ICmp ? Builder.CreateICmpSGT(LHS, RHS) 4730 : Builder.CreateFCmpOGT(LHS, RHS); 4731 break; 4732 case RK_UMin: 4733 assert(Opcode == Instruction::ICmp && "Expected integer types."); 4734 Cmp = Builder.CreateICmpULT(LHS, RHS); 4735 break; 4736 case RK_UMax: 4737 assert(Opcode == Instruction::ICmp && "Expected integer types."); 4738 Cmp = Builder.CreateICmpUGT(LHS, RHS); 4739 break; 4740 case RK_None: 4741 llvm_unreachable("Unknown reduction operation."); 4742 } 4743 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 4744 } 4745 4746 public: 4747 explicit OperationData() = default; 4748 4749 /// Construction for reduced values. They are identified by opcode only and 4750 /// don't have associated LHS/RHS values. 4751 explicit OperationData(Value *V) { 4752 if (auto *I = dyn_cast<Instruction>(V)) 4753 Opcode = I->getOpcode(); 4754 } 4755 4756 /// Constructor for reduction operations with opcode and its left and 4757 /// right operands. 4758 OperationData(unsigned Opcode, Value *LHS, Value *RHS, ReductionKind Kind, 4759 bool NoNaN = false) 4760 : Opcode(Opcode), LHS(LHS), RHS(RHS), Kind(Kind), NoNaN(NoNaN) { 4761 assert(Kind != RK_None && "One of the reduction operations is expected."); 4762 } 4763 4764 explicit operator bool() const { return Opcode; } 4765 4766 /// Get the index of the first operand. 4767 unsigned getFirstOperandIndex() const { 4768 assert(!!*this && "The opcode is not set."); 4769 switch (Kind) { 4770 case RK_Min: 4771 case RK_UMin: 4772 case RK_Max: 4773 case RK_UMax: 4774 return 1; 4775 case RK_Arithmetic: 4776 case RK_None: 4777 break; 4778 } 4779 return 0; 4780 } 4781 4782 /// Total number of operands in the reduction operation. 4783 unsigned getNumberOfOperands() const { 4784 assert(Kind != RK_None && !!*this && LHS && RHS && 4785 "Expected reduction operation."); 4786 switch (Kind) { 4787 case RK_Arithmetic: 4788 return 2; 4789 case RK_Min: 4790 case RK_UMin: 4791 case RK_Max: 4792 case RK_UMax: 4793 return 3; 4794 case RK_None: 4795 break; 4796 } 4797 llvm_unreachable("Reduction kind is not set"); 4798 } 4799 4800 /// Checks if the operation has the same parent as \p P. 4801 bool hasSameParent(Instruction *I, Value *P, bool IsRedOp) const { 4802 assert(Kind != RK_None && !!*this && LHS && RHS && 4803 "Expected reduction operation."); 4804 if (!IsRedOp) 4805 return I->getParent() == P; 4806 switch (Kind) { 4807 case RK_Arithmetic: 4808 // Arithmetic reduction operation must be used once only. 4809 return I->getParent() == P; 4810 case RK_Min: 4811 case RK_UMin: 4812 case RK_Max: 4813 case RK_UMax: { 4814 // SelectInst must be used twice while the condition op must have single 4815 // use only. 4816 auto *Cmp = cast<Instruction>(cast<SelectInst>(I)->getCondition()); 4817 return I->getParent() == P && Cmp && Cmp->getParent() == P; 4818 } 4819 case RK_None: 4820 break; 4821 } 4822 llvm_unreachable("Reduction kind is not set"); 4823 } 4824 /// Expected number of uses for reduction operations/reduced values. 4825 bool hasRequiredNumberOfUses(Instruction *I, bool IsReductionOp) const { 4826 assert(Kind != RK_None && !!*this && LHS && RHS && 4827 "Expected reduction operation."); 4828 switch (Kind) { 4829 case RK_Arithmetic: 4830 return I->hasOneUse(); 4831 case RK_Min: 4832 case RK_UMin: 4833 case RK_Max: 4834 case RK_UMax: 4835 return I->hasNUses(2) && 4836 (!IsReductionOp || 4837 cast<SelectInst>(I)->getCondition()->hasOneUse()); 4838 case RK_None: 4839 break; 4840 } 4841 llvm_unreachable("Reduction kind is not set"); 4842 } 4843 4844 /// Initializes the list of reduction operations. 4845 void initReductionOps(ReductionOpsListType &ReductionOps) { 4846 assert(Kind != RK_None && !!*this && LHS && RHS && 4847 "Expected reduction operation."); 4848 switch (Kind) { 4849 case RK_Arithmetic: 4850 ReductionOps.assign(1, ReductionOpsType()); 4851 break; 4852 case RK_Min: 4853 case RK_UMin: 4854 case RK_Max: 4855 case RK_UMax: 4856 ReductionOps.assign(2, ReductionOpsType()); 4857 break; 4858 case RK_None: 4859 llvm_unreachable("Reduction kind is not set"); 4860 } 4861 } 4862 /// Add all reduction operations for the reduction instruction \p I. 4863 void addReductionOps(Instruction *I, ReductionOpsListType &ReductionOps) { 4864 assert(Kind != RK_None && !!*this && LHS && RHS && 4865 "Expected reduction operation."); 4866 switch (Kind) { 4867 case RK_Arithmetic: 4868 ReductionOps[0].emplace_back(I); 4869 break; 4870 case RK_Min: 4871 case RK_UMin: 4872 case RK_Max: 4873 case RK_UMax: 4874 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition()); 4875 ReductionOps[1].emplace_back(I); 4876 break; 4877 case RK_None: 4878 llvm_unreachable("Reduction kind is not set"); 4879 } 4880 } 4881 4882 /// Checks if instruction is associative and can be vectorized. 4883 bool isAssociative(Instruction *I) const { 4884 assert(Kind != RK_None && *this && LHS && RHS && 4885 "Expected reduction operation."); 4886 switch (Kind) { 4887 case RK_Arithmetic: 4888 return I->isAssociative(); 4889 case RK_Min: 4890 case RK_Max: 4891 return Opcode == Instruction::ICmp || 4892 cast<Instruction>(I->getOperand(0))->isFast(); 4893 case RK_UMin: 4894 case RK_UMax: 4895 assert(Opcode == Instruction::ICmp && 4896 "Only integer compare operation is expected."); 4897 return true; 4898 case RK_None: 4899 break; 4900 } 4901 llvm_unreachable("Reduction kind is not set"); 4902 } 4903 4904 /// Checks if the reduction operation can be vectorized. 4905 bool isVectorizable(Instruction *I) const { 4906 return isVectorizable() && isAssociative(I); 4907 } 4908 4909 /// Checks if two operation data are both a reduction op or both a reduced 4910 /// value. 4911 bool operator==(const OperationData &OD) { 4912 assert(((Kind != OD.Kind) || ((!LHS == !OD.LHS) && (!RHS == !OD.RHS))) && 4913 "One of the comparing operations is incorrect."); 4914 return this == &OD || (Kind == OD.Kind && Opcode == OD.Opcode); 4915 } 4916 bool operator!=(const OperationData &OD) { return !(*this == OD); } 4917 void clear() { 4918 Opcode = 0; 4919 LHS = nullptr; 4920 RHS = nullptr; 4921 Kind = RK_None; 4922 NoNaN = false; 4923 } 4924 4925 /// Get the opcode of the reduction operation. 4926 unsigned getOpcode() const { 4927 assert(isVectorizable() && "Expected vectorizable operation."); 4928 return Opcode; 4929 } 4930 4931 /// Get kind of reduction data. 4932 ReductionKind getKind() const { return Kind; } 4933 Value *getLHS() const { return LHS; } 4934 Value *getRHS() const { return RHS; } 4935 Type *getConditionType() const { 4936 switch (Kind) { 4937 case RK_Arithmetic: 4938 return nullptr; 4939 case RK_Min: 4940 case RK_Max: 4941 case RK_UMin: 4942 case RK_UMax: 4943 return CmpInst::makeCmpResultType(LHS->getType()); 4944 case RK_None: 4945 break; 4946 } 4947 llvm_unreachable("Reduction kind is not set"); 4948 } 4949 4950 /// Creates reduction operation with the current opcode with the IR flags 4951 /// from \p ReductionOps. 4952 Value *createOp(IRBuilder<> &Builder, const Twine &Name, 4953 const ReductionOpsListType &ReductionOps) const { 4954 assert(isVectorizable() && 4955 "Expected add|fadd or min/max reduction operation."); 4956 auto *Op = createOp(Builder, Name); 4957 switch (Kind) { 4958 case RK_Arithmetic: 4959 propagateIRFlags(Op, ReductionOps[0]); 4960 return Op; 4961 case RK_Min: 4962 case RK_Max: 4963 case RK_UMin: 4964 case RK_UMax: 4965 if (auto *SI = dyn_cast<SelectInst>(Op)) 4966 propagateIRFlags(SI->getCondition(), ReductionOps[0]); 4967 propagateIRFlags(Op, ReductionOps[1]); 4968 return Op; 4969 case RK_None: 4970 break; 4971 } 4972 llvm_unreachable("Unknown reduction operation."); 4973 } 4974 /// Creates reduction operation with the current opcode with the IR flags 4975 /// from \p I. 4976 Value *createOp(IRBuilder<> &Builder, const Twine &Name, 4977 Instruction *I) const { 4978 assert(isVectorizable() && 4979 "Expected add|fadd or min/max reduction operation."); 4980 auto *Op = createOp(Builder, Name); 4981 switch (Kind) { 4982 case RK_Arithmetic: 4983 propagateIRFlags(Op, I); 4984 return Op; 4985 case RK_Min: 4986 case RK_Max: 4987 case RK_UMin: 4988 case RK_UMax: 4989 if (auto *SI = dyn_cast<SelectInst>(Op)) { 4990 propagateIRFlags(SI->getCondition(), 4991 cast<SelectInst>(I)->getCondition()); 4992 } 4993 propagateIRFlags(Op, I); 4994 return Op; 4995 case RK_None: 4996 break; 4997 } 4998 llvm_unreachable("Unknown reduction operation."); 4999 } 5000 5001 TargetTransformInfo::ReductionFlags getFlags() const { 5002 TargetTransformInfo::ReductionFlags Flags; 5003 Flags.NoNaN = NoNaN; 5004 switch (Kind) { 5005 case RK_Arithmetic: 5006 break; 5007 case RK_Min: 5008 Flags.IsSigned = Opcode == Instruction::ICmp; 5009 Flags.IsMaxOp = false; 5010 break; 5011 case RK_Max: 5012 Flags.IsSigned = Opcode == Instruction::ICmp; 5013 Flags.IsMaxOp = true; 5014 break; 5015 case RK_UMin: 5016 Flags.IsSigned = false; 5017 Flags.IsMaxOp = false; 5018 break; 5019 case RK_UMax: 5020 Flags.IsSigned = false; 5021 Flags.IsMaxOp = true; 5022 break; 5023 case RK_None: 5024 llvm_unreachable("Reduction kind is not set"); 5025 } 5026 return Flags; 5027 } 5028 }; 5029 5030 Instruction *ReductionRoot = nullptr; 5031 5032 /// The operation data of the reduction operation. 5033 OperationData ReductionData; 5034 5035 /// The operation data of the values we perform a reduction on. 5036 OperationData ReducedValueData; 5037 5038 /// Should we model this reduction as a pairwise reduction tree or a tree that 5039 /// splits the vector in halves and adds those halves. 5040 bool IsPairwiseReduction = false; 5041 5042 /// Checks if the ParentStackElem.first should be marked as a reduction 5043 /// operation with an extra argument or as extra argument itself. 5044 void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem, 5045 Value *ExtraArg) { 5046 if (ExtraArgs.count(ParentStackElem.first)) { 5047 ExtraArgs[ParentStackElem.first] = nullptr; 5048 // We ran into something like: 5049 // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg. 5050 // The whole ParentStackElem.first should be considered as an extra value 5051 // in this case. 5052 // Do not perform analysis of remaining operands of ParentStackElem.first 5053 // instruction, this whole instruction is an extra argument. 5054 ParentStackElem.second = ParentStackElem.first->getNumOperands(); 5055 } else { 5056 // We ran into something like: 5057 // ParentStackElem.first += ... + ExtraArg + ... 5058 ExtraArgs[ParentStackElem.first] = ExtraArg; 5059 } 5060 } 5061 5062 static OperationData getOperationData(Value *V) { 5063 if (!V) 5064 return OperationData(); 5065 5066 Value *LHS; 5067 Value *RHS; 5068 if (m_BinOp(m_Value(LHS), m_Value(RHS)).match(V)) { 5069 return OperationData(cast<BinaryOperator>(V)->getOpcode(), LHS, RHS, 5070 RK_Arithmetic); 5071 } 5072 if (auto *Select = dyn_cast<SelectInst>(V)) { 5073 // Look for a min/max pattern. 5074 if (m_UMin(m_Value(LHS), m_Value(RHS)).match(Select)) { 5075 return OperationData(Instruction::ICmp, LHS, RHS, RK_UMin); 5076 } else if (m_SMin(m_Value(LHS), m_Value(RHS)).match(Select)) { 5077 return OperationData(Instruction::ICmp, LHS, RHS, RK_Min); 5078 } else if (m_OrdFMin(m_Value(LHS), m_Value(RHS)).match(Select) || 5079 m_UnordFMin(m_Value(LHS), m_Value(RHS)).match(Select)) { 5080 return OperationData( 5081 Instruction::FCmp, LHS, RHS, RK_Min, 5082 cast<Instruction>(Select->getCondition())->hasNoNaNs()); 5083 } else if (m_UMax(m_Value(LHS), m_Value(RHS)).match(Select)) { 5084 return OperationData(Instruction::ICmp, LHS, RHS, RK_UMax); 5085 } else if (m_SMax(m_Value(LHS), m_Value(RHS)).match(Select)) { 5086 return OperationData(Instruction::ICmp, LHS, RHS, RK_Max); 5087 } else if (m_OrdFMax(m_Value(LHS), m_Value(RHS)).match(Select) || 5088 m_UnordFMax(m_Value(LHS), m_Value(RHS)).match(Select)) { 5089 return OperationData( 5090 Instruction::FCmp, LHS, RHS, RK_Max, 5091 cast<Instruction>(Select->getCondition())->hasNoNaNs()); 5092 } 5093 } 5094 return OperationData(V); 5095 } 5096 5097 public: 5098 HorizontalReduction() = default; 5099 5100 /// \brief Try to find a reduction tree. 5101 bool matchAssociativeReduction(PHINode *Phi, Instruction *B) { 5102 assert((!Phi || is_contained(Phi->operands(), B)) && 5103 "Thi phi needs to use the binary operator"); 5104 5105 ReductionData = getOperationData(B); 5106 5107 // We could have a initial reductions that is not an add. 5108 // r *= v1 + v2 + v3 + v4 5109 // In such a case start looking for a tree rooted in the first '+'. 5110 if (Phi) { 5111 if (ReductionData.getLHS() == Phi) { 5112 Phi = nullptr; 5113 B = dyn_cast<Instruction>(ReductionData.getRHS()); 5114 ReductionData = getOperationData(B); 5115 } else if (ReductionData.getRHS() == Phi) { 5116 Phi = nullptr; 5117 B = dyn_cast<Instruction>(ReductionData.getLHS()); 5118 ReductionData = getOperationData(B); 5119 } 5120 } 5121 5122 if (!ReductionData.isVectorizable(B)) 5123 return false; 5124 5125 Type *Ty = B->getType(); 5126 if (!isValidElementType(Ty)) 5127 return false; 5128 5129 ReducedValueData.clear(); 5130 ReductionRoot = B; 5131 5132 // Post order traverse the reduction tree starting at B. We only handle true 5133 // trees containing only binary operators. 5134 SmallVector<std::pair<Instruction *, unsigned>, 32> Stack; 5135 Stack.push_back(std::make_pair(B, ReductionData.getFirstOperandIndex())); 5136 ReductionData.initReductionOps(ReductionOps); 5137 while (!Stack.empty()) { 5138 Instruction *TreeN = Stack.back().first; 5139 unsigned EdgeToVist = Stack.back().second++; 5140 OperationData OpData = getOperationData(TreeN); 5141 bool IsReducedValue = OpData != ReductionData; 5142 5143 // Postorder vist. 5144 if (IsReducedValue || EdgeToVist == OpData.getNumberOfOperands()) { 5145 if (IsReducedValue) 5146 ReducedVals.push_back(TreeN); 5147 else { 5148 auto I = ExtraArgs.find(TreeN); 5149 if (I != ExtraArgs.end() && !I->second) { 5150 // Check if TreeN is an extra argument of its parent operation. 5151 if (Stack.size() <= 1) { 5152 // TreeN can't be an extra argument as it is a root reduction 5153 // operation. 5154 return false; 5155 } 5156 // Yes, TreeN is an extra argument, do not add it to a list of 5157 // reduction operations. 5158 // Stack[Stack.size() - 2] always points to the parent operation. 5159 markExtraArg(Stack[Stack.size() - 2], TreeN); 5160 ExtraArgs.erase(TreeN); 5161 } else 5162 ReductionData.addReductionOps(TreeN, ReductionOps); 5163 } 5164 // Retract. 5165 Stack.pop_back(); 5166 continue; 5167 } 5168 5169 // Visit left or right. 5170 Value *NextV = TreeN->getOperand(EdgeToVist); 5171 if (NextV != Phi) { 5172 auto *I = dyn_cast<Instruction>(NextV); 5173 OpData = getOperationData(I); 5174 // Continue analysis if the next operand is a reduction operation or 5175 // (possibly) a reduced value. If the reduced value opcode is not set, 5176 // the first met operation != reduction operation is considered as the 5177 // reduced value class. 5178 if (I && (!ReducedValueData || OpData == ReducedValueData || 5179 OpData == ReductionData)) { 5180 const bool IsReductionOperation = OpData == ReductionData; 5181 // Only handle trees in the current basic block. 5182 if (!ReductionData.hasSameParent(I, B->getParent(), 5183 IsReductionOperation)) { 5184 // I is an extra argument for TreeN (its parent operation). 5185 markExtraArg(Stack.back(), I); 5186 continue; 5187 } 5188 5189 // Each tree node needs to have minimal number of users except for the 5190 // ultimate reduction. 5191 if (!ReductionData.hasRequiredNumberOfUses(I, 5192 OpData == ReductionData) && 5193 I != B) { 5194 // I is an extra argument for TreeN (its parent operation). 5195 markExtraArg(Stack.back(), I); 5196 continue; 5197 } 5198 5199 if (IsReductionOperation) { 5200 // We need to be able to reassociate the reduction operations. 5201 if (!OpData.isAssociative(I)) { 5202 // I is an extra argument for TreeN (its parent operation). 5203 markExtraArg(Stack.back(), I); 5204 continue; 5205 } 5206 } else if (ReducedValueData && 5207 ReducedValueData != OpData) { 5208 // Make sure that the opcodes of the operations that we are going to 5209 // reduce match. 5210 // I is an extra argument for TreeN (its parent operation). 5211 markExtraArg(Stack.back(), I); 5212 continue; 5213 } else if (!ReducedValueData) 5214 ReducedValueData = OpData; 5215 5216 Stack.push_back(std::make_pair(I, OpData.getFirstOperandIndex())); 5217 continue; 5218 } 5219 } 5220 // NextV is an extra argument for TreeN (its parent operation). 5221 markExtraArg(Stack.back(), NextV); 5222 } 5223 return true; 5224 } 5225 5226 /// \brief Attempt to vectorize the tree found by 5227 /// matchAssociativeReduction. 5228 bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) { 5229 if (ReducedVals.empty()) 5230 return false; 5231 5232 // If there is a sufficient number of reduction values, reduce 5233 // to a nearby power-of-2. Can safely generate oversized 5234 // vectors and rely on the backend to split them to legal sizes. 5235 unsigned NumReducedVals = ReducedVals.size(); 5236 if (NumReducedVals < 4) 5237 return false; 5238 5239 unsigned ReduxWidth = PowerOf2Floor(NumReducedVals); 5240 5241 Value *VectorizedTree = nullptr; 5242 IRBuilder<> Builder(ReductionRoot); 5243 FastMathFlags Unsafe; 5244 Unsafe.setFast(); 5245 Builder.setFastMathFlags(Unsafe); 5246 unsigned i = 0; 5247 5248 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues; 5249 // The same extra argument may be used several time, so log each attempt 5250 // to use it. 5251 for (auto &Pair : ExtraArgs) 5252 ExternallyUsedValues[Pair.second].push_back(Pair.first); 5253 SmallVector<Value *, 16> IgnoreList; 5254 for (auto &V : ReductionOps) 5255 IgnoreList.append(V.begin(), V.end()); 5256 while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) { 5257 auto VL = makeArrayRef(&ReducedVals[i], ReduxWidth); 5258 V.buildTree(VL, ExternallyUsedValues, IgnoreList); 5259 if (V.shouldReorder()) { 5260 SmallVector<Value *, 8> Reversed(VL.rbegin(), VL.rend()); 5261 V.buildTree(Reversed, ExternallyUsedValues, IgnoreList); 5262 } 5263 if (V.isTreeTinyAndNotFullyVectorizable()) 5264 break; 5265 5266 V.computeMinimumValueSizes(); 5267 5268 // Estimate cost. 5269 int Cost = 5270 V.getTreeCost() + getReductionCost(TTI, ReducedVals[i], ReduxWidth); 5271 if (Cost >= -SLPCostThreshold) 5272 break; 5273 5274 DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost 5275 << ". (HorRdx)\n"); 5276 auto *I0 = cast<Instruction>(VL[0]); 5277 V.getORE()->emit( 5278 OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction", I0) 5279 << "Vectorized horizontal reduction with cost " 5280 << ore::NV("Cost", Cost) << " and with tree size " 5281 << ore::NV("TreeSize", V.getTreeSize())); 5282 5283 // Vectorize a tree. 5284 DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc(); 5285 Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues); 5286 5287 // Emit a reduction. 5288 Value *ReducedSubTree = 5289 emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI); 5290 if (VectorizedTree) { 5291 Builder.SetCurrentDebugLocation(Loc); 5292 OperationData VectReductionData(ReductionData.getOpcode(), 5293 VectorizedTree, ReducedSubTree, 5294 ReductionData.getKind()); 5295 VectorizedTree = 5296 VectReductionData.createOp(Builder, "op.rdx", ReductionOps); 5297 } else 5298 VectorizedTree = ReducedSubTree; 5299 i += ReduxWidth; 5300 ReduxWidth = PowerOf2Floor(NumReducedVals - i); 5301 } 5302 5303 if (VectorizedTree) { 5304 // Finish the reduction. 5305 for (; i < NumReducedVals; ++i) { 5306 auto *I = cast<Instruction>(ReducedVals[i]); 5307 Builder.SetCurrentDebugLocation(I->getDebugLoc()); 5308 OperationData VectReductionData(ReductionData.getOpcode(), 5309 VectorizedTree, I, 5310 ReductionData.getKind()); 5311 VectorizedTree = VectReductionData.createOp(Builder, "", ReductionOps); 5312 } 5313 for (auto &Pair : ExternallyUsedValues) { 5314 assert(!Pair.second.empty() && 5315 "At least one DebugLoc must be inserted"); 5316 // Add each externally used value to the final reduction. 5317 for (auto *I : Pair.second) { 5318 Builder.SetCurrentDebugLocation(I->getDebugLoc()); 5319 OperationData VectReductionData(ReductionData.getOpcode(), 5320 VectorizedTree, Pair.first, 5321 ReductionData.getKind()); 5322 VectorizedTree = VectReductionData.createOp(Builder, "op.extra", I); 5323 } 5324 } 5325 // Update users. 5326 ReductionRoot->replaceAllUsesWith(VectorizedTree); 5327 } 5328 return VectorizedTree != nullptr; 5329 } 5330 5331 unsigned numReductionValues() const { 5332 return ReducedVals.size(); 5333 } 5334 5335 private: 5336 /// \brief Calculate the cost of a reduction. 5337 int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal, 5338 unsigned ReduxWidth) { 5339 Type *ScalarTy = FirstReducedVal->getType(); 5340 Type *VecTy = VectorType::get(ScalarTy, ReduxWidth); 5341 5342 int PairwiseRdxCost; 5343 int SplittingRdxCost; 5344 switch (ReductionData.getKind()) { 5345 case RK_Arithmetic: 5346 PairwiseRdxCost = 5347 TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy, 5348 /*IsPairwiseForm=*/true); 5349 SplittingRdxCost = 5350 TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy, 5351 /*IsPairwiseForm=*/false); 5352 break; 5353 case RK_Min: 5354 case RK_Max: 5355 case RK_UMin: 5356 case RK_UMax: { 5357 Type *VecCondTy = CmpInst::makeCmpResultType(VecTy); 5358 bool IsUnsigned = ReductionData.getKind() == RK_UMin || 5359 ReductionData.getKind() == RK_UMax; 5360 PairwiseRdxCost = 5361 TTI->getMinMaxReductionCost(VecTy, VecCondTy, 5362 /*IsPairwiseForm=*/true, IsUnsigned); 5363 SplittingRdxCost = 5364 TTI->getMinMaxReductionCost(VecTy, VecCondTy, 5365 /*IsPairwiseForm=*/false, IsUnsigned); 5366 break; 5367 } 5368 case RK_None: 5369 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 5370 } 5371 5372 IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost; 5373 int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost; 5374 5375 int ScalarReduxCost; 5376 switch (ReductionData.getKind()) { 5377 case RK_Arithmetic: 5378 ScalarReduxCost = 5379 TTI->getArithmeticInstrCost(ReductionData.getOpcode(), ScalarTy); 5380 break; 5381 case RK_Min: 5382 case RK_Max: 5383 case RK_UMin: 5384 case RK_UMax: 5385 ScalarReduxCost = 5386 TTI->getCmpSelInstrCost(ReductionData.getOpcode(), ScalarTy) + 5387 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 5388 CmpInst::makeCmpResultType(ScalarTy)); 5389 break; 5390 case RK_None: 5391 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 5392 } 5393 ScalarReduxCost *= (ReduxWidth - 1); 5394 5395 DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost 5396 << " for reduction that starts with " << *FirstReducedVal 5397 << " (It is a " 5398 << (IsPairwiseReduction ? "pairwise" : "splitting") 5399 << " reduction)\n"); 5400 5401 return VecReduxCost - ScalarReduxCost; 5402 } 5403 5404 /// \brief Emit a horizontal reduction of the vectorized value. 5405 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 5406 unsigned ReduxWidth, const TargetTransformInfo *TTI) { 5407 assert(VectorizedValue && "Need to have a vectorized tree node"); 5408 assert(isPowerOf2_32(ReduxWidth) && 5409 "We only handle power-of-two reductions for now"); 5410 5411 if (!IsPairwiseReduction) 5412 return createSimpleTargetReduction( 5413 Builder, TTI, ReductionData.getOpcode(), VectorizedValue, 5414 ReductionData.getFlags(), ReductionOps.back()); 5415 5416 Value *TmpVec = VectorizedValue; 5417 for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) { 5418 Value *LeftMask = 5419 createRdxShuffleMask(ReduxWidth, i, true, true, Builder); 5420 Value *RightMask = 5421 createRdxShuffleMask(ReduxWidth, i, true, false, Builder); 5422 5423 Value *LeftShuf = Builder.CreateShuffleVector( 5424 TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l"); 5425 Value *RightShuf = Builder.CreateShuffleVector( 5426 TmpVec, UndefValue::get(TmpVec->getType()), (RightMask), 5427 "rdx.shuf.r"); 5428 OperationData VectReductionData(ReductionData.getOpcode(), LeftShuf, 5429 RightShuf, ReductionData.getKind()); 5430 TmpVec = VectReductionData.createOp(Builder, "op.rdx", ReductionOps); 5431 } 5432 5433 // The result is in the first element of the vector. 5434 return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 5435 } 5436 }; 5437 5438 } // end anonymous namespace 5439 5440 /// \brief Recognize construction of vectors like 5441 /// %ra = insertelement <4 x float> undef, float %s0, i32 0 5442 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 5443 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 5444 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 5445 /// starting from the last insertelement instruction. 5446 /// 5447 /// Returns true if it matches 5448 static bool findBuildVector(InsertElementInst *LastInsertElem, 5449 SmallVectorImpl<Value *> &BuildVector, 5450 SmallVectorImpl<Value *> &BuildVectorOpds) { 5451 Value *V = nullptr; 5452 do { 5453 BuildVector.push_back(LastInsertElem); 5454 BuildVectorOpds.push_back(LastInsertElem->getOperand(1)); 5455 V = LastInsertElem->getOperand(0); 5456 if (isa<UndefValue>(V)) 5457 break; 5458 LastInsertElem = dyn_cast<InsertElementInst>(V); 5459 if (!LastInsertElem || !LastInsertElem->hasOneUse()) 5460 return false; 5461 } while (true); 5462 std::reverse(BuildVector.begin(), BuildVector.end()); 5463 std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end()); 5464 return true; 5465 } 5466 5467 /// \brief Like findBuildVector, but looks for construction of aggregate. 5468 /// 5469 /// \return true if it matches. 5470 static bool findBuildAggregate(InsertValueInst *IV, 5471 SmallVectorImpl<Value *> &BuildVector, 5472 SmallVectorImpl<Value *> &BuildVectorOpds) { 5473 Value *V; 5474 do { 5475 BuildVector.push_back(IV); 5476 BuildVectorOpds.push_back(IV->getInsertedValueOperand()); 5477 V = IV->getAggregateOperand(); 5478 if (isa<UndefValue>(V)) 5479 break; 5480 IV = dyn_cast<InsertValueInst>(V); 5481 if (!IV || !IV->hasOneUse()) 5482 return false; 5483 } while (true); 5484 std::reverse(BuildVector.begin(), BuildVector.end()); 5485 std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end()); 5486 return true; 5487 } 5488 5489 static bool PhiTypeSorterFunc(Value *V, Value *V2) { 5490 return V->getType() < V2->getType(); 5491 } 5492 5493 /// \brief Try and get a reduction value from a phi node. 5494 /// 5495 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 5496 /// if they come from either \p ParentBB or a containing loop latch. 5497 /// 5498 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 5499 /// if not possible. 5500 static Value *getReductionValue(const DominatorTree *DT, PHINode *P, 5501 BasicBlock *ParentBB, LoopInfo *LI) { 5502 // There are situations where the reduction value is not dominated by the 5503 // reduction phi. Vectorizing such cases has been reported to cause 5504 // miscompiles. See PR25787. 5505 auto DominatedReduxValue = [&](Value *R) { 5506 return ( 5507 dyn_cast<Instruction>(R) && 5508 DT->dominates(P->getParent(), dyn_cast<Instruction>(R)->getParent())); 5509 }; 5510 5511 Value *Rdx = nullptr; 5512 5513 // Return the incoming value if it comes from the same BB as the phi node. 5514 if (P->getIncomingBlock(0) == ParentBB) { 5515 Rdx = P->getIncomingValue(0); 5516 } else if (P->getIncomingBlock(1) == ParentBB) { 5517 Rdx = P->getIncomingValue(1); 5518 } 5519 5520 if (Rdx && DominatedReduxValue(Rdx)) 5521 return Rdx; 5522 5523 // Otherwise, check whether we have a loop latch to look at. 5524 Loop *BBL = LI->getLoopFor(ParentBB); 5525 if (!BBL) 5526 return nullptr; 5527 BasicBlock *BBLatch = BBL->getLoopLatch(); 5528 if (!BBLatch) 5529 return nullptr; 5530 5531 // There is a loop latch, return the incoming value if it comes from 5532 // that. This reduction pattern occasionally turns up. 5533 if (P->getIncomingBlock(0) == BBLatch) { 5534 Rdx = P->getIncomingValue(0); 5535 } else if (P->getIncomingBlock(1) == BBLatch) { 5536 Rdx = P->getIncomingValue(1); 5537 } 5538 5539 if (Rdx && DominatedReduxValue(Rdx)) 5540 return Rdx; 5541 5542 return nullptr; 5543 } 5544 5545 /// Attempt to reduce a horizontal reduction. 5546 /// If it is legal to match a horizontal reduction feeding the phi node \a P 5547 /// with reduction operators \a Root (or one of its operands) in a basic block 5548 /// \a BB, then check if it can be done. If horizontal reduction is not found 5549 /// and root instruction is a binary operation, vectorization of the operands is 5550 /// attempted. 5551 /// \returns true if a horizontal reduction was matched and reduced or operands 5552 /// of one of the binary instruction were vectorized. 5553 /// \returns false if a horizontal reduction was not matched (or not possible) 5554 /// or no vectorization of any binary operation feeding \a Root instruction was 5555 /// performed. 5556 static bool tryToVectorizeHorReductionOrInstOperands( 5557 PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R, 5558 TargetTransformInfo *TTI, 5559 const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) { 5560 if (!ShouldVectorizeHor) 5561 return false; 5562 5563 if (!Root) 5564 return false; 5565 5566 if (Root->getParent() != BB || isa<PHINode>(Root)) 5567 return false; 5568 // Start analysis starting from Root instruction. If horizontal reduction is 5569 // found, try to vectorize it. If it is not a horizontal reduction or 5570 // vectorization is not possible or not effective, and currently analyzed 5571 // instruction is a binary operation, try to vectorize the operands, using 5572 // pre-order DFS traversal order. If the operands were not vectorized, repeat 5573 // the same procedure considering each operand as a possible root of the 5574 // horizontal reduction. 5575 // Interrupt the process if the Root instruction itself was vectorized or all 5576 // sub-trees not higher that RecursionMaxDepth were analyzed/vectorized. 5577 SmallVector<std::pair<WeakTrackingVH, unsigned>, 8> Stack(1, {Root, 0}); 5578 SmallSet<Value *, 8> VisitedInstrs; 5579 bool Res = false; 5580 while (!Stack.empty()) { 5581 Value *V; 5582 unsigned Level; 5583 std::tie(V, Level) = Stack.pop_back_val(); 5584 if (!V) 5585 continue; 5586 auto *Inst = dyn_cast<Instruction>(V); 5587 if (!Inst) 5588 continue; 5589 auto *BI = dyn_cast<BinaryOperator>(Inst); 5590 auto *SI = dyn_cast<SelectInst>(Inst); 5591 if (BI || SI) { 5592 HorizontalReduction HorRdx; 5593 if (HorRdx.matchAssociativeReduction(P, Inst)) { 5594 if (HorRdx.tryToReduce(R, TTI)) { 5595 Res = true; 5596 // Set P to nullptr to avoid re-analysis of phi node in 5597 // matchAssociativeReduction function unless this is the root node. 5598 P = nullptr; 5599 continue; 5600 } 5601 } 5602 if (P && BI) { 5603 Inst = dyn_cast<Instruction>(BI->getOperand(0)); 5604 if (Inst == P) 5605 Inst = dyn_cast<Instruction>(BI->getOperand(1)); 5606 if (!Inst) { 5607 // Set P to nullptr to avoid re-analysis of phi node in 5608 // matchAssociativeReduction function unless this is the root node. 5609 P = nullptr; 5610 continue; 5611 } 5612 } 5613 } 5614 // Set P to nullptr to avoid re-analysis of phi node in 5615 // matchAssociativeReduction function unless this is the root node. 5616 P = nullptr; 5617 if (Vectorize(Inst, R)) { 5618 Res = true; 5619 continue; 5620 } 5621 5622 // Try to vectorize operands. 5623 // Continue analysis for the instruction from the same basic block only to 5624 // save compile time. 5625 if (++Level < RecursionMaxDepth) 5626 for (auto *Op : Inst->operand_values()) 5627 if (VisitedInstrs.insert(Op).second) 5628 if (auto *I = dyn_cast<Instruction>(Op)) 5629 if (!isa<PHINode>(I) && I->getParent() == BB) 5630 Stack.emplace_back(Op, Level); 5631 } 5632 return Res; 5633 } 5634 5635 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V, 5636 BasicBlock *BB, BoUpSLP &R, 5637 TargetTransformInfo *TTI) { 5638 if (!V) 5639 return false; 5640 auto *I = dyn_cast<Instruction>(V); 5641 if (!I) 5642 return false; 5643 5644 if (!isa<BinaryOperator>(I)) 5645 P = nullptr; 5646 // Try to match and vectorize a horizontal reduction. 5647 auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool { 5648 return tryToVectorize(I, R); 5649 }; 5650 return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI, 5651 ExtraVectorization); 5652 } 5653 5654 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI, 5655 BasicBlock *BB, BoUpSLP &R) { 5656 const DataLayout &DL = BB->getModule()->getDataLayout(); 5657 if (!R.canMapToVector(IVI->getType(), DL)) 5658 return false; 5659 5660 SmallVector<Value *, 16> BuildVector; 5661 SmallVector<Value *, 16> BuildVectorOpds; 5662 if (!findBuildAggregate(IVI, BuildVector, BuildVectorOpds)) 5663 return false; 5664 5665 DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n"); 5666 return tryToVectorizeList(BuildVectorOpds, R, BuildVector, false); 5667 } 5668 5669 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI, 5670 BasicBlock *BB, BoUpSLP &R) { 5671 SmallVector<Value *, 16> BuildVector; 5672 SmallVector<Value *, 16> BuildVectorOpds; 5673 if (!findBuildVector(IEI, BuildVector, BuildVectorOpds)) 5674 return false; 5675 5676 // Vectorize starting with the build vector operands ignoring the BuildVector 5677 // instructions for the purpose of scheduling and user extraction. 5678 return tryToVectorizeList(BuildVectorOpds, R, BuildVector); 5679 } 5680 5681 bool SLPVectorizerPass::vectorizeCmpInst(CmpInst *CI, BasicBlock *BB, 5682 BoUpSLP &R) { 5683 if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) 5684 return true; 5685 5686 bool OpsChanged = false; 5687 for (int Idx = 0; Idx < 2; ++Idx) { 5688 OpsChanged |= 5689 vectorizeRootInstruction(nullptr, CI->getOperand(Idx), BB, R, TTI); 5690 } 5691 return OpsChanged; 5692 } 5693 5694 bool SLPVectorizerPass::vectorizeSimpleInstructions( 5695 SmallVectorImpl<WeakVH> &Instructions, BasicBlock *BB, BoUpSLP &R) { 5696 bool OpsChanged = false; 5697 for (auto &VH : reverse(Instructions)) { 5698 auto *I = dyn_cast_or_null<Instruction>(VH); 5699 if (!I) 5700 continue; 5701 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) 5702 OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R); 5703 else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) 5704 OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R); 5705 else if (auto *CI = dyn_cast<CmpInst>(I)) 5706 OpsChanged |= vectorizeCmpInst(CI, BB, R); 5707 } 5708 Instructions.clear(); 5709 return OpsChanged; 5710 } 5711 5712 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { 5713 bool Changed = false; 5714 SmallVector<Value *, 4> Incoming; 5715 SmallSet<Value *, 16> VisitedInstrs; 5716 5717 bool HaveVectorizedPhiNodes = true; 5718 while (HaveVectorizedPhiNodes) { 5719 HaveVectorizedPhiNodes = false; 5720 5721 // Collect the incoming values from the PHIs. 5722 Incoming.clear(); 5723 for (Instruction &I : *BB) { 5724 PHINode *P = dyn_cast<PHINode>(&I); 5725 if (!P) 5726 break; 5727 5728 if (!VisitedInstrs.count(P)) 5729 Incoming.push_back(P); 5730 } 5731 5732 // Sort by type. 5733 std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc); 5734 5735 // Try to vectorize elements base on their type. 5736 for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(), 5737 E = Incoming.end(); 5738 IncIt != E;) { 5739 5740 // Look for the next elements with the same type. 5741 SmallVector<Value *, 4>::iterator SameTypeIt = IncIt; 5742 while (SameTypeIt != E && 5743 (*SameTypeIt)->getType() == (*IncIt)->getType()) { 5744 VisitedInstrs.insert(*SameTypeIt); 5745 ++SameTypeIt; 5746 } 5747 5748 // Try to vectorize them. 5749 unsigned NumElts = (SameTypeIt - IncIt); 5750 DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts << ")\n"); 5751 // The order in which the phi nodes appear in the program does not matter. 5752 // So allow tryToVectorizeList to reorder them if it is beneficial. This 5753 // is done when there are exactly two elements since tryToVectorizeList 5754 // asserts that there are only two values when AllowReorder is true. 5755 bool AllowReorder = NumElts == 2; 5756 if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R, 5757 None, AllowReorder)) { 5758 // Success start over because instructions might have been changed. 5759 HaveVectorizedPhiNodes = true; 5760 Changed = true; 5761 break; 5762 } 5763 5764 // Start over at the next instruction of a different type (or the end). 5765 IncIt = SameTypeIt; 5766 } 5767 } 5768 5769 VisitedInstrs.clear(); 5770 5771 SmallVector<WeakVH, 8> PostProcessInstructions; 5772 SmallDenseSet<Instruction *, 4> KeyNodes; 5773 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) { 5774 // We may go through BB multiple times so skip the one we have checked. 5775 if (!VisitedInstrs.insert(&*it).second) { 5776 if (it->use_empty() && KeyNodes.count(&*it) > 0 && 5777 vectorizeSimpleInstructions(PostProcessInstructions, BB, R)) { 5778 // We would like to start over since some instructions are deleted 5779 // and the iterator may become invalid value. 5780 Changed = true; 5781 it = BB->begin(); 5782 e = BB->end(); 5783 } 5784 continue; 5785 } 5786 5787 if (isa<DbgInfoIntrinsic>(it)) 5788 continue; 5789 5790 // Try to vectorize reductions that use PHINodes. 5791 if (PHINode *P = dyn_cast<PHINode>(it)) { 5792 // Check that the PHI is a reduction PHI. 5793 if (P->getNumIncomingValues() != 2) 5794 return Changed; 5795 5796 // Try to match and vectorize a horizontal reduction. 5797 if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R, 5798 TTI)) { 5799 Changed = true; 5800 it = BB->begin(); 5801 e = BB->end(); 5802 continue; 5803 } 5804 continue; 5805 } 5806 5807 // Ran into an instruction without users, like terminator, or function call 5808 // with ignored return value, store. Ignore unused instructions (basing on 5809 // instruction type, except for CallInst and InvokeInst). 5810 if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) || 5811 isa<InvokeInst>(it))) { 5812 KeyNodes.insert(&*it); 5813 bool OpsChanged = false; 5814 if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) { 5815 for (auto *V : it->operand_values()) { 5816 // Try to match and vectorize a horizontal reduction. 5817 OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI); 5818 } 5819 } 5820 // Start vectorization of post-process list of instructions from the 5821 // top-tree instructions to try to vectorize as many instructions as 5822 // possible. 5823 OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R); 5824 if (OpsChanged) { 5825 // We would like to start over since some instructions are deleted 5826 // and the iterator may become invalid value. 5827 Changed = true; 5828 it = BB->begin(); 5829 e = BB->end(); 5830 continue; 5831 } 5832 } 5833 5834 if (isa<InsertElementInst>(it) || isa<CmpInst>(it) || 5835 isa<InsertValueInst>(it)) 5836 PostProcessInstructions.push_back(&*it); 5837 5838 } 5839 5840 return Changed; 5841 } 5842 5843 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) { 5844 auto Changed = false; 5845 for (auto &Entry : GEPs) { 5846 // If the getelementptr list has fewer than two elements, there's nothing 5847 // to do. 5848 if (Entry.second.size() < 2) 5849 continue; 5850 5851 DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length " 5852 << Entry.second.size() << ".\n"); 5853 5854 // We process the getelementptr list in chunks of 16 (like we do for 5855 // stores) to minimize compile-time. 5856 for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += 16) { 5857 auto Len = std::min<unsigned>(BE - BI, 16); 5858 auto GEPList = makeArrayRef(&Entry.second[BI], Len); 5859 5860 // Initialize a set a candidate getelementptrs. Note that we use a 5861 // SetVector here to preserve program order. If the index computations 5862 // are vectorizable and begin with loads, we want to minimize the chance 5863 // of having to reorder them later. 5864 SetVector<Value *> Candidates(GEPList.begin(), GEPList.end()); 5865 5866 // Some of the candidates may have already been vectorized after we 5867 // initially collected them. If so, the WeakTrackingVHs will have 5868 // nullified the 5869 // values, so remove them from the set of candidates. 5870 Candidates.remove(nullptr); 5871 5872 // Remove from the set of candidates all pairs of getelementptrs with 5873 // constant differences. Such getelementptrs are likely not good 5874 // candidates for vectorization in a bottom-up phase since one can be 5875 // computed from the other. We also ensure all candidate getelementptr 5876 // indices are unique. 5877 for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) { 5878 auto *GEPI = cast<GetElementPtrInst>(GEPList[I]); 5879 if (!Candidates.count(GEPI)) 5880 continue; 5881 auto *SCEVI = SE->getSCEV(GEPList[I]); 5882 for (int J = I + 1; J < E && Candidates.size() > 1; ++J) { 5883 auto *GEPJ = cast<GetElementPtrInst>(GEPList[J]); 5884 auto *SCEVJ = SE->getSCEV(GEPList[J]); 5885 if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) { 5886 Candidates.remove(GEPList[I]); 5887 Candidates.remove(GEPList[J]); 5888 } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) { 5889 Candidates.remove(GEPList[J]); 5890 } 5891 } 5892 } 5893 5894 // We break out of the above computation as soon as we know there are 5895 // fewer than two candidates remaining. 5896 if (Candidates.size() < 2) 5897 continue; 5898 5899 // Add the single, non-constant index of each candidate to the bundle. We 5900 // ensured the indices met these constraints when we originally collected 5901 // the getelementptrs. 5902 SmallVector<Value *, 16> Bundle(Candidates.size()); 5903 auto BundleIndex = 0u; 5904 for (auto *V : Candidates) { 5905 auto *GEP = cast<GetElementPtrInst>(V); 5906 auto *GEPIdx = GEP->idx_begin()->get(); 5907 assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx)); 5908 Bundle[BundleIndex++] = GEPIdx; 5909 } 5910 5911 // Try and vectorize the indices. We are currently only interested in 5912 // gather-like cases of the form: 5913 // 5914 // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ... 5915 // 5916 // where the loads of "a", the loads of "b", and the subtractions can be 5917 // performed in parallel. It's likely that detecting this pattern in a 5918 // bottom-up phase will be simpler and less costly than building a 5919 // full-blown top-down phase beginning at the consecutive loads. 5920 Changed |= tryToVectorizeList(Bundle, R); 5921 } 5922 } 5923 return Changed; 5924 } 5925 5926 bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) { 5927 bool Changed = false; 5928 // Attempt to sort and vectorize each of the store-groups. 5929 for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e; 5930 ++it) { 5931 if (it->second.size() < 2) 5932 continue; 5933 5934 DEBUG(dbgs() << "SLP: Analyzing a store chain of length " 5935 << it->second.size() << ".\n"); 5936 5937 // Process the stores in chunks of 16. 5938 // TODO: The limit of 16 inhibits greater vectorization factors. 5939 // For example, AVX2 supports v32i8. Increasing this limit, however, 5940 // may cause a significant compile-time increase. 5941 for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI+=16) { 5942 unsigned Len = std::min<unsigned>(CE - CI, 16); 5943 Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len), R); 5944 } 5945 } 5946 return Changed; 5947 } 5948 5949 char SLPVectorizer::ID = 0; 5950 5951 static const char lv_name[] = "SLP Vectorizer"; 5952 5953 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false) 5954 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 5955 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 5956 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 5957 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 5958 INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 5959 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 5960 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 5961 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false) 5962 5963 Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); } 5964