//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
// "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
    cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// \brief Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
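/// Non-instruction values (e.g. constants or arguments) make this return
/// false.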
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return false;

    if (BB != I->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (Value *i : VL)
    if (!isa<Constant>(i))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> undef, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> undef, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> undef, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul.
static Optional<TargetTransformInfo::ShuffleKind>
isShuffle(ArrayRef<Value *> VL) {
  auto *EI0 = cast<ExtractElementInst>(VL[0]);
  unsigned Size = EI0->getVectorOperandType()->getVectorNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, FirstAlternate, SecondAlternate, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    auto *EI = cast<ExtractElementInst>(VL[I]);
    auto *Vec = EI->getVectorOperand();
    // All vector operands must have the same number of vector elements.
    if (Vec->getType()->getVectorNumElements() != Size)
      return None;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
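    // Such an element does not constrain the shuffle kind, so skip it.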
    if (Idx->getValue().uge(Size))
      continue;
    unsigned IntIdx = Idx->getValue().getZExtValue();
    // We can extractelement from undef vector.
    if (isa<UndefValue>(Vec))
      continue;
    // For correct shuffling we must have at most 2 different vector operands
    // in all extractelement instructions.
    if (Vec1 && Vec2 && Vec != Vec1 && Vec != Vec2)
      return None;
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    // Check the shuffle mode for the current operation.
    if (!Vec1)
      Vec1 = Vec;
    else if (Vec != Vec1)
      Vec2 = Vec;
    // Example: shufflevector A, B, <0,5,2,7>
    // I is even and IntIdx for A == I - FirstAlternate shuffle.
    // I is odd and IntIdx for B == I - FirstAlternate shuffle.
    // Example: shufflevector A, B, <4,1,6,3>
    // I is even and IntIdx for B == I - SecondAlternate shuffle.
    // I is odd and IntIdx for A == I - SecondAlternate shuffle.
    const bool IIsOdd = I & 1;
    const bool IIsEven = !IIsOdd;
    const bool CurrVecIsA = Vec == Vec1;
    const bool CurrVecIsB = !CurrVecIsA;
    ShuffleMode CurrentShuffleMode =
        ((IIsEven && CurrVecIsA) || (IIsOdd && CurrVecIsB)) ? FirstAlternate
                                                            : SecondAlternate;
    // Common mode is not set or the same as the shuffle mode of the current
    // operation - alternate.
    if (CommonShuffleMode == Unknown)
      CommonShuffleMode = CurrentShuffleMode;
    // Common shuffle mode is not the same as the shuffle mode of the current
    // operation - permutation.
    if (CommonShuffleMode != CurrentShuffleMode)
      CommonShuffleMode = Permute;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if ((CommonShuffleMode == FirstAlternate ||
       CommonShuffleMode == SecondAlternate) &&
      Vec2)
    return TargetTransformInfo::SK_Alternate;
  // If Vec2 was never used, we have a permutation of a single vector, otherwise
  // we have permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}

///\returns Opcode that can be clubbed with \p Op to create an alternate
/// sequence which can later be merged as a ShuffleVector instruction.
static unsigned getAltOpcode(unsigned Op) {
  switch (Op) {
  case Instruction::FAdd:
    return Instruction::FSub;
  case Instruction::FSub:
    return Instruction::FAdd;
  case Instruction::Add:
    return Instruction::Sub;
  case Instruction::Sub:
    return Instruction::Add;
  default:
    return 0;
  }
}

static bool isOdd(unsigned Value) {
  return Value & 1;
}

static bool sameOpcodeOrAlt(unsigned Opcode, unsigned AltOpcode,
                            unsigned CheckedOpcode) {
  return Opcode == CheckedOpcode || AltOpcode == CheckedOpcode;
}

/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
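/// For example, if \p OpValue is an 'add' and \p Op is a 'sub', 'sub' is the
/// alternate opcode of 'add', so the key is \p Op.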
static Value *isOneOf(Value *OpValue, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (!I)
    return OpValue;
  auto *OpInst = cast<Instruction>(OpValue);
  unsigned OpInstOpcode = OpInst->getOpcode();
  unsigned IOpcode = I->getOpcode();
  if (sameOpcodeOrAlt(OpInstOpcode, getAltOpcode(OpInstOpcode), IOpcode))
    return Op;
  return OpValue;
}

namespace {

/// Contains data for the instructions going to be vectorized.
struct RawInstructionsData {
  /// Main Opcode of the instructions going to be vectorized.
  unsigned Opcode = 0;

  /// True if the list of instructions contains some with alternate opcodes.
  bool HasAltOpcodes = false;
};

} // namespace

/// Checks the list of instructions \p VL to be vectorized and returns info
/// about this list.
static RawInstructionsData getMainOpcode(ArrayRef<Value *> VL) {
  auto *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return {};
  RawInstructionsData Res;
  unsigned Opcode = I0->getOpcode();
  // Walk through the list of the vectorized instructions
  // in order to check its structure described by RawInstructionsData.
  for (unsigned Cnt = 0, E = VL.size(); Cnt != E; ++Cnt) {
    auto *I = dyn_cast<Instruction>(VL[Cnt]);
    if (!I)
      return {};
    if (Opcode != I->getOpcode())
      Res.HasAltOpcodes = true;
  }
  Res.Opcode = Opcode;
  return Res;
}

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main opcode for the list of instructions.
  unsigned Opcode = 0;

  /// Some of the instructions in the list have alternate opcodes.
  bool IsAltShuffle = false;

  InstructionsState() = default;
  InstructionsState(Value *OpValue, unsigned Opcode, bool IsAltShuffle)
      : OpValue(OpValue), Opcode(Opcode), IsAltShuffle(IsAltShuffle) {}
};

} // namespace

/// \returns analysis of the instructions in \p VL described in
/// InstructionsState: the opcode with which we assume the whole list
/// can be vectorized, even if its structure is diverse.
static InstructionsState getSameOpcode(ArrayRef<Value *> VL) {
  auto Res = getMainOpcode(VL);
  unsigned Opcode = Res.Opcode;
  if (!Res.HasAltOpcodes)
    return InstructionsState(VL[0], Opcode, false);
  auto *OpInst = cast<Instruction>(VL[0]);
  unsigned AltOpcode = getAltOpcode(Opcode);
  // Examine each element in the list of instructions VL to determine
  // if some operations there could be considered as an alternative
  // (for example as subtraction relates to addition operation).
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    auto *I = cast<Instruction>(VL[Cnt]);
    unsigned InstOpcode = I->getOpcode();
    // HasAltOpcodes is known to be true here, so instructions at odd
    // positions must have the alternate opcode and instructions at even
    // positions must have the main opcode.
    if (InstOpcode != (isOdd(Cnt) ? AltOpcode : Opcode))
      return InstructionsState(OpInst, 0, false);
  }
  return InstructionsState(OpInst, Opcode, Res.HasAltOpcodes);
}

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
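/// For example, '%e = extractelement <4 x i32> %v, i32 2' extracts element 2,
/// so it matches only \p Idx == 2.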
static bool matchExtractIndex(Instruction *E, unsigned Idx, unsigned Opcode) {
  assert(Opcode == Instruction::ExtractElement ||
         Opcode == Instruction::ExtractValue);
  if (Opcode == Instruction::ExtractElement) {
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    return CI && CI->getZExtValue() == Idx;
  } else {
    ExtractValueInst *EI = cast<ExtractValueInst>(E);
    return EI->getNumIndices() == 1 && *EI->idx_begin() == Idx;
  }
}

/// \returns True if in-tree use also needs extract. This refers to
/// possible scalar operand in vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    if (hasVectorInstrinsicScalarOpd(ID, 1)) {
      return (CI->getArgOperand(1) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

namespace llvm {

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC),
        DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
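    // A size given explicitly on the command line takes precedence over the
    // target's reported register width.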
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize = TTI->getRegisterBitWidth(true);

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// \brief Vectorize the tree built by buildTree().
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractelement instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  int getSpillCost();

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst, taking
  /// into account (and updating, if required) the list of externally used
  /// values stored in \p ExternallyUsedValues.
  void buildTree(ArrayRef<Value *> Roots,
                 ExtraValueToDebugLocsMap &ExternallyUsedValues,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumLoadsWantToKeepOrder = 0;
    NumLoadsWantToChangeOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// \returns true if it is beneficial to reverse the vector order.
  bool shouldReorder() const {
    return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
  }

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  /// \brief Check if ArrayType or StructType is isomorphic to some VectorType.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
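  /// For example, a homogeneous struct of four floats is isomorphic to
  /// <4 x float>, while a struct mixing i32 and float fields is not.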
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable();

  OptimizationRemarkEmitter *getORE() { return ORE; }

private:
  struct TreeEntry;

  /// Checks if all users of \p I are part of the vectorization tree.
  bool areAllUsersVectorized(Instruction *I) const;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, int UserTreeIdx);

  /// \returns True if the ExtractElement/ExtractValue instructions in VL can
  /// be vectorized to use the original vector (or aggregate "bitcast" to a
  /// vector).
  bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue) const;

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. This can happen in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL, Value *OpValue) const;

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL, Value *OpValue);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even if the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  /// Reorder commutative operands in an alt shuffle if that results in
  /// vectorized code.
  void reorderAltShuffleOperands(unsigned Opcode, ArrayRef<Value *> VL,
                                 SmallVectorImpl<Value *> &Left,
                                 SmallVectorImpl<Value *> &Right);

  /// Reorder commutative operands to get a better probability of
  /// generating vectorized code.
  void reorderInputsAccordingToOpcode(unsigned Opcode, ArrayRef<Value *> VL,
                                      SmallVectorImpl<Value *> &Left,
                                      SmallVectorImpl<Value *> &Right);

  struct TreeEntry {
    TreeEntry(std::vector<TreeEntry> &Container) : Container(Container) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue = nullptr;

    /// Do we need to gather this sequence?
    bool NeedToGather = false;

    /// Points back to the VectorizableTree.
    ///
    /// Only used for Graphviz right now. Unfortunately GraphTraits::NodeRef has
    /// to be a pointer and needs to be able to initialize the child iterator.
    /// Thus we need a reference back to the container to translate the indices
    /// to entries.
    std::vector<TreeEntry> &Container;

    /// The TreeEntry index containing the user of this entry. We can actually
    /// have multiple users so the data structure is not truly a tree.
    SmallVector<int, 1> UserTreeIndices;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized,
                          int &UserTreeIdx) {
    VectorizableTree.emplace_back(VectorizableTree);
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!getTreeEntry(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      MustGather.insert(VL.begin(), VL.end());
    }

    if (UserTreeIdx >= 0)
      Last->UserTreeIndices.push_back(UserTreeIdx);
    UserTreeIdx = idx;
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  TreeEntry *getTreeEntry(Value *V) {
    auto I = ScalarToTreeEntry.find(V);
    if (I != ScalarToTreeEntry.end())
      return &VectorizableTree[I->second];
    return nullptr;
  }

  const TreeEntry *getTreeEntry(Value *V) const {
    auto I = ScalarToTreeEntry.find(V);
    if (I != ScalarToTreeEntry.end())
      return &VectorizableTree[I->second];
    return nullptr;
  }

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value *, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L)
        : Scalar(S), User(U), Lane(L) {}

    // Which scalar in our function.
    Value *Scalar;

    // Which user uses the scalar.
    llvm::User *User;

    // Which lane the scalar belongs to.
    int Lane;
  };
  using UserList = SmallVector<ExternalUser, 16>;

  /// Checks if two instructions may access the same memory.
  ///
  /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
  /// is invariant in the calling loop.
  bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
                 Instruction *Inst2) {
    // First check if the result is already in the cache.
    AliasCacheKey key = std::make_pair(Inst1, Inst2);
    Optional<bool> &result = AliasCache[key];
    if (result.hasValue()) {
      return result.getValue();
    }
    MemoryLocation Loc2 = getLocation(Inst2, AA);
    bool aliased = true;
    if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
      // Do the alias check.
      aliased = AA->alias(Loc1, Loc2);
    }
    // Store the result in the cache.
    result = aliased;
    return aliased;
  }

  using AliasCacheKey = std::pair<Instruction *, Instruction *>;

  /// Cache for alias results.
  /// TODO: consider moving this to the AliasAnalysis itself.
  DenseMap<AliasCacheKey, Optional<bool>> AliasCache;

  /// Removes an instruction from its block and eventually deletes it.
  /// It's like Instruction::eraseFromParent() except that the actual deletion
  /// is delayed until BoUpSLP is destructed.
  /// This is required to ensure that there are no incorrect collisions in the
  /// AliasCache, which can happen if a new instruction is allocated at the
  /// same address as a previously deleted instruction.
  void eraseInstruction(Instruction *I) {
    I->removeFromParent();
    I->dropAllReferences();
    DeletedInstructions.emplace_back(I);
  }

  /// Temporary store for deleted instructions. Instructions will be deleted
  /// eventually when the BoUpSLP is destructed.
  SmallVector<unique_value, 8> DeletedInstructions;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User). External User
  /// can be nullptr, which means that this Internal Scalar will be used later,
  /// after vectorization.
  UserList ExternalUses;

  /// Values used only by @llvm.assume calls.
  SmallPtrSet<const Value *, 32> EphValues;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;

  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;

  /// Contains all scheduling relevant data for an instruction.
  /// A ScheduleData either represents a single instruction or a member of an
  /// instruction bundle (= a group of instructions which is combined into a
  /// vector instruction).
  struct ScheduleData {
    // The initial value for the dependency counters. It means that the
    // dependencies are not calculated yet.
    enum { InvalidDeps = -1 };

    ScheduleData() = default;

    void init(int BlockSchedulingRegionID, Value *OpVal) {
      FirstInBundle = this;
      NextInBundle = nullptr;
      NextLoadStore = nullptr;
      IsScheduled = false;
      SchedulingRegionID = BlockSchedulingRegionID;
      UnscheduledDepsInBundle = UnscheduledDeps;
      clearDependencies();
      OpValue = OpVal;
    }

    /// Returns true if the dependency information has been calculated.
    bool hasValidDependencies() const { return Dependencies != InvalidDeps; }

    /// Returns true for single instructions and for bundle representatives
    /// (= the head of a bundle).
    bool isSchedulingEntity() const { return FirstInBundle == this; }

    /// Returns true if it represents an instruction bundle and not only a
    /// single instruction.
    bool isPartOfBundle() const {
      return NextInBundle != nullptr || FirstInBundle != this;
    }

    /// Returns true if it is ready for scheduling, i.e. it has no more
    /// unscheduled depending instructions/bundles.
    bool isReady() const {
      assert(isSchedulingEntity() &&
             "can't consider non-scheduling entity for ready list");
      return UnscheduledDepsInBundle == 0 && !IsScheduled;
    }

    /// Modifies the number of unscheduled dependencies, also updating it for
    /// the whole bundle.
    int incrementUnscheduledDeps(int Incr) {
      UnscheduledDeps += Incr;
      return FirstInBundle->UnscheduledDepsInBundle += Incr;
    }

    /// Sets the number of unscheduled dependencies to the number of
    /// dependencies.
    void resetUnscheduledDeps() {
      incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
    }

    /// Clears all dependency information.
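    /// The dependencies are recomputed on demand by calculateDependencies().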
    void clearDependencies() {
      Dependencies = InvalidDeps;
      resetUnscheduledDeps();
      MemoryDependencies.clear();
    }

    void dump(raw_ostream &os) const {
      if (!isSchedulingEntity()) {
        os << "/ " << *Inst;
      } else if (NextInBundle) {
        os << '[' << *Inst;
        ScheduleData *SD = NextInBundle;
        while (SD) {
          os << ';' << *SD->Inst;
          SD = SD->NextInBundle;
        }
        os << ']';
      } else {
        os << *Inst;
      }
    }

    Instruction *Inst = nullptr;

    /// Points to the head in an instruction bundle (and always to this for
    /// single instructions).
    ScheduleData *FirstInBundle = nullptr;

    /// Singly linked list of all instructions in a bundle. Null if it is a
    /// single instruction.
    ScheduleData *NextInBundle = nullptr;

    /// Singly linked list of all memory instructions (e.g. load, store, call)
    /// in the block - until the end of the scheduling region.
    ScheduleData *NextLoadStore = nullptr;

    /// The dependent memory instructions.
    /// This list is derived on demand in calculateDependencies().
    SmallVector<ScheduleData *, 4> MemoryDependencies;

    /// This ScheduleData is in the current scheduling region if this matches
    /// the current SchedulingRegionID of BlockScheduling.
    int SchedulingRegionID = 0;

    /// Used for getting a "good" final ordering of instructions.
    int SchedulingPriority = 0;

    /// The number of dependencies. Consists of the number of users of the
    /// instruction plus the number of dependent memory instructions (if any).
    /// This value is calculated on demand.
    /// If InvalidDeps, the number of dependencies is not calculated yet.
    int Dependencies = InvalidDeps;

    /// The number of dependencies minus the number of dependencies of scheduled
    /// instructions. As soon as this is zero, the instruction/bundle gets ready
    /// for scheduling.
    /// Note that this is negative as long as Dependencies is not calculated.
    int UnscheduledDeps = InvalidDeps;

    /// The sum of UnscheduledDeps in a bundle. Equals UnscheduledDeps for
    /// single instructions.
    int UnscheduledDepsInBundle = InvalidDeps;

    /// True if this instruction is scheduled (or considered as scheduled in
    /// the dry-run).
    bool IsScheduled = false;

    /// The value that serves as the opcode key for this schedule data.
    Value *OpValue = nullptr;
  };

#ifndef NDEBUG
  friend inline raw_ostream &operator<<(raw_ostream &os,
                                        const BoUpSLP::ScheduleData &SD) {
    SD.dump(os);
    return os;
  }
#endif

  friend struct GraphTraits<BoUpSLP *>;
  friend struct DOTGraphTraits<BoUpSLP *>;

  /// Contains all scheduling data for a basic block.
  struct BlockScheduling {
    BlockScheduling(BasicBlock *BB)
        : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}

    void clear() {
      ReadyInsts.clear();
      ScheduleStart = nullptr;
      ScheduleEnd = nullptr;
      FirstLoadStoreInRegion = nullptr;
      LastLoadStoreInRegion = nullptr;

      // Reduce the maximum schedule region size by the size of the
      // previous scheduling run.
      ScheduleRegionSizeLimit -= ScheduleRegionSize;
      if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
        ScheduleRegionSizeLimit = MinScheduleRegionSize;
      ScheduleRegionSize = 0;

      // Make a new scheduling region, i.e. all existing ScheduleData is not
      // in the new region yet.
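      // Bumping the ID lazily invalidates old entries: getScheduleData() only
      // returns ScheduleData whose SchedulingRegionID matches the current one.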
      ++SchedulingRegionID;
    }

    ScheduleData *getScheduleData(Value *V) {
      ScheduleData *SD = ScheduleDataMap[V];
      if (SD && SD->SchedulingRegionID == SchedulingRegionID)
        return SD;
      return nullptr;
    }

    ScheduleData *getScheduleData(Value *V, Value *Key) {
      if (V == Key)
        return getScheduleData(V);
      auto I = ExtraScheduleDataMap.find(V);
      if (I != ExtraScheduleDataMap.end()) {
        ScheduleData *SD = I->second[Key];
        if (SD && SD->SchedulingRegionID == SchedulingRegionID)
          return SD;
      }
      return nullptr;
    }

    bool isInSchedulingRegion(ScheduleData *SD) {
      return SD->SchedulingRegionID == SchedulingRegionID;
    }

    /// Marks an instruction as scheduled and puts all dependent ready
    /// instructions into the ready-list.
    template <typename ReadyListType>
    void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
      SD->IsScheduled = true;
      DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");

      ScheduleData *BundleMember = SD;
      while (BundleMember) {
        if (BundleMember->Inst != BundleMember->OpValue) {
          BundleMember = BundleMember->NextInBundle;
          continue;
        }
        // Handle the def-use chain dependencies.
        for (Use &U : BundleMember->Inst->operands()) {
          auto *I = dyn_cast<Instruction>(U.get());
          if (!I)
            continue;
          doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
            if (OpDef && OpDef->hasValidDependencies() &&
                OpDef->incrementUnscheduledDeps(-1) == 0) {
              // There are no more unscheduled dependencies after
              // decrementing, so we can put the dependent instruction
              // into the ready list.
              ScheduleData *DepBundle = OpDef->FirstInBundle;
              assert(!DepBundle->IsScheduled &&
                     "already scheduled bundle gets ready");
              ReadyList.insert(DepBundle);
              DEBUG(dbgs()
                    << "SLP: gets ready (def): " << *DepBundle << "\n");
            }
          });
        }
        // Handle the memory dependencies.
        for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
          if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (mem): " << *DepBundle
                         << "\n");
          }
        }
        BundleMember = BundleMember->NextInBundle;
      }
    }

    void doForAllOpcodes(Value *V,
                         function_ref<void(ScheduleData *SD)> Action) {
      if (ScheduleData *SD = getScheduleData(V))
        Action(SD);
      auto I = ExtraScheduleDataMap.find(V);
      if (I != ExtraScheduleDataMap.end())
        for (auto &P : I->second)
          if (P.second->SchedulingRegionID == SchedulingRegionID)
            Action(P.second);
    }

    /// Put all instructions into the ReadyList which are ready for scheduling.
    template <typename ReadyListType>
    void initialFillReadyList(ReadyListType &ReadyList) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
        doForAllOpcodes(I, [&](ScheduleData *SD) {
          if (SD->isSchedulingEntity() && SD->isReady()) {
            ReadyList.insert(SD);
            DEBUG(dbgs() << "SLP: initially in ready list: " << *I << "\n");
          }
        });
      }
    }

    /// Checks if a bundle of instructions can be scheduled, i.e. has no
    /// cyclic dependencies. This is only a dry-run, no instructions are
    /// actually moved at this stage.
    bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, Value *OpValue);

    /// Un-bundles a group of instructions.
    void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);

    /// Allocates a schedule data chunk.
    ScheduleData *allocateScheduleDataChunks();

    /// Extends the scheduling region so that V is inside the region.
    /// \returns true if the region size is within the limit.
    bool extendSchedulingRegion(Value *V, Value *OpValue);

    /// Initialize the ScheduleData structures for new instructions in the
    /// scheduling region.
    void initScheduleData(Instruction *FromI, Instruction *ToI,
                          ScheduleData *PrevLoadStore,
                          ScheduleData *NextLoadStore);

    /// Updates the dependency information of a bundle and of all instructions/
    /// bundles which depend on the original bundle.
    void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
                               BoUpSLP *SLP);

    /// Sets all instructions in the scheduling region to un-scheduled.
    void resetSchedule();

    BasicBlock *BB;

    /// Simple memory allocation for ScheduleData.
    std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;

    /// The size of a ScheduleData array in ScheduleDataChunks.
    int ChunkSize;

    /// The allocator position in the current chunk, which is the last entry
    /// of ScheduleDataChunks.
    int ChunkPos;

    /// Attaches ScheduleData to Instruction.
    /// Note that the mapping survives during all vectorization iterations, i.e.
    /// ScheduleData structures are recycled.
    DenseMap<Value *, ScheduleData *> ScheduleDataMap;

    /// Attaches ScheduleData to Instruction with the leading key.
    DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>>
        ExtraScheduleDataMap;

    struct ReadyList : SmallVector<ScheduleData *, 8> {
      void insert(ScheduleData *SD) { push_back(SD); }
    };

    /// The ready-list for scheduling (only used for the dry-run).
    ReadyList ReadyInsts;

    /// The first instruction of the scheduling region.
    Instruction *ScheduleStart = nullptr;

    /// The first instruction _after_ the scheduling region.
    Instruction *ScheduleEnd = nullptr;

    /// The first memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *FirstLoadStoreInRegion = nullptr;

    /// The last memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *LastLoadStoreInRegion = nullptr;

    /// The current size of the scheduling region.
    int ScheduleRegionSize = 0;

    /// The maximum size allowed for the scheduling region.
    int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget;

    /// The ID of the scheduling region. For a new vectorization iteration this
    /// is incremented which "removes" all ScheduleData from the region.
    // Make sure that the initial SchedulingRegionID is greater than the
    // initial SchedulingRegionID in ScheduleData (which is 0).
    int SchedulingRegionID = 1;
  };

  /// Attaches the BlockScheduling structures to basic blocks.
  MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;

  /// Performs the "real" scheduling. Done before vectorization is actually
  /// performed in a basic block.
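  /// As a side effect, the instructions of each scheduled bundle end up next
  /// to each other, in bundle order.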
  void scheduleBlock(BlockScheduling *BS);

  /// List of users to ignore during scheduling and that don't need extracting.
  ArrayRef<Value *> UserIgnoreList;

  // Number of load bundles that contain consecutive loads.
  int NumLoadsWantToKeepOrder = 0;

  // Number of load bundles that contain consecutive loads in reversed order.
  int NumLoadsWantToChangeOrder = 0;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  AssumptionCache *AC;
  DemandedBits *DB;
  const DataLayout *DL;
  OptimizationRemarkEmitter *ORE;

  unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
  unsigned MinVecRegSize; // Set by cl::opt (default: 128).

  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;

  /// A map of scalar integer values to the smallest bit width with which they
  /// can legally be represented. The values map to (width, signed) pairs,
  /// where "width" indicates the minimum bit width and "signed" is True if the
  /// value must be sign-extended, rather than zero-extended, back to its
  /// original width.
  MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;
};

} // end namespace slpvectorizer

template <> struct GraphTraits<BoUpSLP *> {
  using TreeEntry = BoUpSLP::TreeEntry;

  /// NodeRef has to be a pointer per the GraphWriter.
  using NodeRef = TreeEntry *;

  /// \brief Add the VectorizableTree to the index iterator to be able to
  /// return TreeEntry pointers.
  struct ChildIteratorType
      : public iterator_adaptor_base<ChildIteratorType,
                                     SmallVector<int, 1>::iterator> {
    std::vector<TreeEntry> &VectorizableTree;

    ChildIteratorType(SmallVector<int, 1>::iterator W,
                      std::vector<TreeEntry> &VT)
        : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}

    NodeRef operator*() { return &VectorizableTree[*I]; }
  };

  static NodeRef getEntryNode(BoUpSLP &R) { return &R.VectorizableTree[0]; }

  static ChildIteratorType child_begin(NodeRef N) {
    return {N->UserTreeIndices.begin(), N->Container};
  }

  static ChildIteratorType child_end(NodeRef N) {
    return {N->UserTreeIndices.end(), N->Container};
  }

  /// For the node iterator we just need to turn the TreeEntry iterator into a
  /// TreeEntry* iterator so that it dereferences to NodeRef.
  using nodes_iterator = pointer_iterator<std::vector<TreeEntry>::iterator>;

  static nodes_iterator nodes_begin(BoUpSLP *R) {
    return nodes_iterator(R->VectorizableTree.begin());
  }

  static nodes_iterator nodes_end(BoUpSLP *R) {
    return nodes_iterator(R->VectorizableTree.end());
  }

  static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
};

template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
  using TreeEntry = BoUpSLP::TreeEntry;

  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
    std::string Str;
    raw_string_ostream OS(Str);
    if (isSplat(Entry->Scalars)) {
      OS << "<splat> " << *Entry->Scalars[0];
      return Str;
    }
    for (auto V : Entry->Scalars) {
      OS << *V;
      if (std::any_of(
              R->ExternalUses.begin(), R->ExternalUses.end(),
              [&](const BoUpSLP::ExternalUser &EU) { return EU.Scalar == V; }))
        OS << " <extract>";
      OS << "\n";
    }
    return Str;
  }

  static std::string getNodeAttributes(const TreeEntry *Entry,
                                       const BoUpSLP *) {
    if (Entry->NeedToGather)
      return "color=red";
    return "";
  }
};

} // end namespace llvm

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ArrayRef<Value *> UserIgnoreLst) {
  ExtraValueToDebugLocsMap ExternallyUsedValues;
  buildTree(Roots, ExternallyUsedValues, UserIgnoreLst);
}

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ExtraValueToDebugLocsMap &ExternallyUsedValues,
                        ArrayRef<Value *> UserIgnoreLst) {
  deleteTree();
  UserIgnoreList = UserIgnoreLst;
  if (!allSameType(Roots))
    return;
  buildTree_rec(Roots, 0, -1);

  // Collect the values that we need to extract from the tree.
  for (TreeEntry &EIdx : VectorizableTree) {
    TreeEntry *Entry = &EIdx;

    // No need to handle users of gathered values.
    if (Entry->NeedToGather)
      continue;

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // Check if the scalar is externally used as an extra arg.
      auto ExtI = ExternallyUsedValues.find(Scalar);
      if (ExtI != ExternallyUsedValues.end()) {
        DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane "
                     << Lane << " from " << *Scalar << ".\n");
        ExternalUses.emplace_back(Scalar, nullptr, Lane);
        continue;
      }
      for (User *U : Scalar->users()) {
        DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        Instruction *UserInst = dyn_cast<Instruction>(U);
        if (!UserInst)
          continue;

        // Skip in-tree scalars that become vectors.
        if (TreeEntry *UseEntry = getTreeEntry(U)) {
          Value *UseScalar = UseEntry->Scalars[0];
          // Some in-tree scalars will remain as scalar in vectorized
          // instructions. If that is the case, the one in Lane 0 will
          // be used.
          if (UseScalar != U ||
              !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
            DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
                         << ".\n");
            assert(!UseEntry->NeedToGather && "Bad state");
            continue;
          }
        }

        // Ignore users in the user ignore list.
        if (is_contained(UserIgnoreList, UserInst))
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane "
                     << Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
      }
    }
  }
}

void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
                            int UserTreeIdx) {
  assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");

  InstructionsState S = getSameOpcode(VL);
  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  // Don't handle vectors.
  if (S.OpValue->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }

  // If all of the operands are identical or constant we have a simple
  // solution.
  if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.Opcode) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Don't vectorize ephemeral values.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (EphValues.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i]
                   << ") is ephemeral.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }

  // Check if this is a duplicate of another entry.
  if (TreeEntry *E = getTreeEntry(S.OpValue)) {
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }
    }
    // Record the reuse of the tree node. FIXME, currently this is only used
    // to properly draw the graph rather than for the actual vectorization.
    E->UserTreeIndices.push_back(UserTreeIdx);
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue << ".\n");
    return;
  }

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    auto *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      continue;
    if (getTreeEntry(I)) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i]
                   << ") is already in tree.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }

  // If any of the scalars is marked as a value that needs to stay scalar, then
  // we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  auto *VL0 = cast<Instruction>(S.OpValue);
  BasicBlock *BB = VL0->getParent();

  if (!DT->isReachableFromEntry(BB)) {
    // Don't go into unreachable blocks. They may contain instructions with
    // dependency cycles which confuse the final scheduling.
    DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }

  // Check that every instruction appears once in this bundle.
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    for (unsigned j = i + 1; j < e; ++j)
      if (VL[i] == VL[j]) {
        DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
        newTreeEntry(VL, false, UserTreeIdx);
        return;
      }

  auto &BSRef = BlocksSchedules[BB];
  if (!BSRef)
    BSRef = llvm::make_unique<BlockScheduling>(BB);

  BlockScheduling &BS = *BSRef.get();

  if (!BS.tryScheduleBundle(VL, this, S.OpValue)) {
    DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
    assert((!BS.getScheduleData(VL0) ||
            !BS.getScheduleData(VL0)->isPartOfBundle()) &&
           "tryScheduleBundle should cancelScheduling on failure");
    newTreeEntry(VL, false, UserTreeIdx);
    return;
  }
  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  unsigned ShuffleOrOp = S.IsAltShuffle ?
                         (unsigned) Instruction::ShuffleVector : S.Opcode;
  switch (ShuffleOrOp) {
    case Instruction::PHI: {
      PHINode *PH = dyn_cast<PHINode>(VL0);

      // Check for terminator values (e.g. invoke).
      for (unsigned j = 0; j < VL.size(); ++j)
        for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
          TerminatorInst *Term = dyn_cast<TerminatorInst>(
              cast<PHINode>(VL[j])->getIncomingValueForBlock(
                  PH->getIncomingBlock(i)));
          if (Term) {
            DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
            BS.cancelScheduling(VL, VL0);
            newTreeEntry(VL, false, UserTreeIdx);
            return;
          }
        }

      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL)
          Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock(
              PH->getIncomingBlock(i)));

        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      }
      return;
    }
    case Instruction::ExtractValue:
    case Instruction::ExtractElement: {
      bool Reuse = canReuseExtract(VL, VL0);
      if (Reuse) {
        DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
      } else {
        BS.cancelScheduling(VL, VL0);
      }
      newTreeEntry(VL, Reuse, UserTreeIdx);
      return;
    }
    case Instruction::Load: {
      // Check that a vectorized load would load the same memory as a scalar
      // load. For example, we don't want to vectorize loads that are smaller
      // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
      // treats loading/storing it as an i8 struct. If we vectorize loads/stores
      // from such a struct, we read/write packed bits disagreeing with the
      // unvectorized version.
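      // For example, for i2, DL->getTypeSizeInBits() is 2 while
      // DL->getTypeAllocSizeInBits() is 8, so such loads are gathered below.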
      Type *ScalarTy = VL0->getType();

      if (DL->getTypeSizeInBits(ScalarTy) !=
          DL->getTypeAllocSizeInBits(ScalarTy)) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
        return;
      }

      // Make sure all loads in the bundle are simple - we can't vectorize
      // atomic or volatile loads.
      for (unsigned i = 0, e = VL.size(); i < e; ++i) {
        LoadInst *L = cast<LoadInst>(VL[i]);
        if (!L->isSimple()) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx);
          DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
          return;
        }
      }

      // Check if the loads are consecutive, reversed, or neither.
      // TODO: What we really want is to sort the loads, but for now, check
      // the two likely directions.
      bool Consecutive = true;
      bool ReverseConsecutive = true;
      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
        if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
          Consecutive = false;
          break;
        } else {
          ReverseConsecutive = false;
        }
      }

      if (Consecutive) {
        ++NumLoadsWantToKeepOrder;
        newTreeEntry(VL, true, UserTreeIdx);
        DEBUG(dbgs() << "SLP: added a vector of loads.\n");
        return;
      }

      // If none of the load pairs were consecutive when checked in order,
      // check the reverse order.
      if (ReverseConsecutive)
        for (unsigned i = VL.size() - 1; i > 0; --i)
          if (!isConsecutiveAccess(VL[i], VL[i - 1], *DL, *SE)) {
            ReverseConsecutive = false;
            break;
          }

      BS.cancelScheduling(VL, VL0);
      newTreeEntry(VL, false, UserTreeIdx);

      if (ReverseConsecutive) {
        ++NumLoadsWantToChangeOrder;
        DEBUG(dbgs() << "SLP: Gathering reversed loads.\n");
      } else {
        DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
      }
      return;
    }
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      Type *SrcTy = VL0->getOperand(0)->getType();
      for (unsigned i = 0; i < VL.size(); ++i) {
        Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
        if (Ty != SrcTy || !isValidElementType(Ty)) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx);
          DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
          return;
        }
      }
      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a vector of casts.\n");

      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));

        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      }
      return;
    }
    case Instruction::ICmp:
    case Instruction::FCmp: {
      // Check that all of the compares have the same predicate.
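      // A vector compare has one predicate for all lanes, so a bundle mixing
      // e.g. 'icmp slt' with 'icmp sgt', or comparing operands of different
      // types, cannot be fused into one instruction and must be gathered.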
1650 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 1651 Type *ComparedTy = VL0->getOperand(0)->getType(); 1652 for (unsigned i = 1, e = VL.size(); i < e; ++i) { 1653 CmpInst *Cmp = cast<CmpInst>(VL[i]); 1654 if (Cmp->getPredicate() != P0 || 1655 Cmp->getOperand(0)->getType() != ComparedTy) { 1656 BS.cancelScheduling(VL, VL0); 1657 newTreeEntry(VL, false, UserTreeIdx); 1658 DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n"); 1659 return; 1660 } 1661 } 1662 1663 newTreeEntry(VL, true, UserTreeIdx); 1664 DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 1665 1666 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 1667 ValueList Operands; 1668 // Prepare the operand vector. 1669 for (Value *j : VL) 1670 Operands.push_back(cast<Instruction>(j)->getOperand(i)); 1671 1672 buildTree_rec(Operands, Depth + 1, UserTreeIdx); 1673 } 1674 return; 1675 } 1676 case Instruction::Select: 1677 case Instruction::Add: 1678 case Instruction::FAdd: 1679 case Instruction::Sub: 1680 case Instruction::FSub: 1681 case Instruction::Mul: 1682 case Instruction::FMul: 1683 case Instruction::UDiv: 1684 case Instruction::SDiv: 1685 case Instruction::FDiv: 1686 case Instruction::URem: 1687 case Instruction::SRem: 1688 case Instruction::FRem: 1689 case Instruction::Shl: 1690 case Instruction::LShr: 1691 case Instruction::AShr: 1692 case Instruction::And: 1693 case Instruction::Or: 1694 case Instruction::Xor: 1695 newTreeEntry(VL, true, UserTreeIdx); 1696 DEBUG(dbgs() << "SLP: added a vector of bin op.\n"); 1697 1698 // Sort operands of the instructions so that each side is more likely to 1699 // have the same opcode. 1700 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 1701 ValueList Left, Right; 1702 reorderInputsAccordingToOpcode(S.Opcode, VL, Left, Right); 1703 buildTree_rec(Left, Depth + 1, UserTreeIdx); 1704 buildTree_rec(Right, Depth + 1, UserTreeIdx); 1705 return; 1706 } 1707 1708 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 1709 ValueList Operands; 1710 // Prepare the operand vector. 1711 for (Value *j : VL) 1712 Operands.push_back(cast<Instruction>(j)->getOperand(i)); 1713 1714 buildTree_rec(Operands, Depth + 1, UserTreeIdx); 1715 } 1716 return; 1717 1718 case Instruction::GetElementPtr: { 1719 // We don't combine GEPs with complicated (nested) indexing. 1720 for (unsigned j = 0; j < VL.size(); ++j) { 1721 if (cast<Instruction>(VL[j])->getNumOperands() != 2) { 1722 DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 1723 BS.cancelScheduling(VL, VL0); 1724 newTreeEntry(VL, false, UserTreeIdx); 1725 return; 1726 } 1727 } 1728 1729 // We can't combine several GEPs into one vector if they operate on 1730 // different types. 1731 Type *Ty0 = VL0->getOperand(0)->getType(); 1732 for (unsigned j = 0; j < VL.size(); ++j) { 1733 Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType(); 1734 if (Ty0 != CurTy) { 1735 DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n"); 1736 BS.cancelScheduling(VL, VL0); 1737 newTreeEntry(VL, false, UserTreeIdx); 1738 return; 1739 } 1740 } 1741 1742 // We don't combine GEPs with non-constant indexes. 
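      // E.g. 'getelementptr %p, i64 1' can be bundled with
      // 'getelementptr %q, i64 2', but not with 'getelementptr %r, i64 %i',
      // whose index is not a ConstantInt.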
      for (unsigned j = 0; j < VL.size(); ++j) {
        auto Op = cast<Instruction>(VL[j])->getOperand(1);
        if (!isa<ConstantInt>(Op)) {
          DEBUG(
              dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx);
          return;
        }
      }

      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
      for (unsigned i = 0, e = 2; i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));

        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      }
      return;
    }
    case Instruction::Store: {
      // Check if the stores are consecutive or if we need to swizzle them.
      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
        if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx);
          DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
          return;
        }

      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a vector of stores.\n");

      ValueList Operands;
      for (Value *j : VL)
        Operands.push_back(cast<Instruction>(j)->getOperand(0));

      buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      return;
    }
    case Instruction::Call: {
      // Check if the calls are all to the same vectorizable intrinsic.
      CallInst *CI = cast<CallInst>(VL0);
      // Check if this is an Intrinsic call or something that can be
      // represented by an intrinsic call.
      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      if (!isTriviallyVectorizable(ID)) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
        return;
      }
      Function *Int = CI->getCalledFunction();
      Value *A1I = nullptr;
      if (hasVectorInstrinsicScalarOpd(ID, 1))
        A1I = CI->getArgOperand(1);
      for (unsigned i = 1, e = VL.size(); i != e; ++i) {
        CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
        if (!CI2 || CI2->getCalledFunction() != Int ||
            getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
            !CI->hasIdenticalOperandBundleSchema(*CI2)) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx);
          DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
                       << "\n");
          return;
        }
        // ctlz, cttz and powi are special intrinsics whose second argument
        // must be the same for them to be vectorized.
        if (hasVectorInstrinsicScalarOpd(ID, 1)) {
          Value *A1J = CI2->getArgOperand(1);
          if (A1I != A1J) {
            BS.cancelScheduling(VL, VL0);
            newTreeEntry(VL, false, UserTreeIdx);
            DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
                         << " argument " << A1I << "!=" << A1J << "\n");
            return;
          }
        }
        // Verify that the bundle operands are identical between the two calls.
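        // Operand bundles (e.g. "deopt" state attached to a call) carry extra
        // semantics, so two calls are only merged when their bundle operands
        // match exactly.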
        if (CI->hasOperandBundles() &&
            !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
                        CI->op_begin() + CI->getBundleOperandsEndIndex(),
                        CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx);
          DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI
                       << "!=" << *VL[i] << '\n');
          return;
        }
      }

      newTreeEntry(VL, true, UserTreeIdx);
      for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL) {
          CallInst *CI2 = cast<CallInst>(j);
          Operands.push_back(CI2->getArgOperand(i));
        }
        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      }
      return;
    }
    case Instruction::ShuffleVector:
      // If this is not an alternate sequence of opcodes like add-sub
      // then do not vectorize this instruction.
      if (!S.IsAltShuffle) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx);
        DEBUG(dbgs() << "SLP: ShuffleVector instructions are not vectorized.\n");
        return;
      }
      newTreeEntry(VL, true, UserTreeIdx);
      DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");

      // Reorder operands if reordering would enable vectorization.
      if (isa<BinaryOperator>(VL0)) {
        ValueList Left, Right;
        reorderAltShuffleOperands(S.Opcode, VL, Left, Right);
        buildTree_rec(Left, Depth + 1, UserTreeIdx);
        buildTree_rec(Right, Depth + 1, UserTreeIdx);
        return;
      }

      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (Value *j : VL)
          Operands.push_back(cast<Instruction>(j)->getOperand(i));

        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
      }
      return;

    default:
      BS.cancelScheduling(VL, VL0);
      newTreeEntry(VL, false, UserTreeIdx);
      DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
      return;
  }
}

unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
  unsigned N;
  Type *EltTy;
  auto *ST = dyn_cast<StructType>(T);
  if (ST) {
    N = ST->getNumElements();
    EltTy = *ST->element_begin();
  } else {
    N = cast<ArrayType>(T)->getNumElements();
    EltTy = cast<ArrayType>(T)->getElementType();
  }
  if (!isValidElementType(EltTy))
    return 0;
  uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N));
  if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize ||
      VTSize != DL.getTypeStoreSizeInBits(T))
    return 0;
  if (ST) {
    // Check that the struct is homogeneous.
    for (const auto *Ty : ST->elements())
      if (Ty != EltTy)
        return 0;
  }
  return N;
}

bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue) const {
  Instruction *E0 = cast<Instruction>(OpValue);
  assert(E0->getOpcode() == Instruction::ExtractElement ||
         E0->getOpcode() == Instruction::ExtractValue);
  assert(E0->getOpcode() == getSameOpcode(VL).Opcode && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *Vec = E0->getOperand(0);

  // We have to extract from a vector/aggregate with the same number of
  // elements.
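  // E.g. four extractvalues from one loaded {i32, i32, i32, i32} struct can be
  // served by a single <4 x i32> load, provided the aggregate maps to a vector
  // (see canMapToVector) and the element count matches the bundle width.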
1925 unsigned NElts; 1926 if (E0->getOpcode() == Instruction::ExtractValue) { 1927 const DataLayout &DL = E0->getModule()->getDataLayout(); 1928 NElts = canMapToVector(Vec->getType(), DL); 1929 if (!NElts) 1930 return false; 1931 // Check if load can be rewritten as load of vector. 1932 LoadInst *LI = dyn_cast<LoadInst>(Vec); 1933 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 1934 return false; 1935 } else { 1936 NElts = Vec->getType()->getVectorNumElements(); 1937 } 1938 1939 if (NElts != VL.size()) 1940 return false; 1941 1942 // Check that all of the indices extract from the correct offset. 1943 for (unsigned I = 0, E = VL.size(); I < E; ++I) { 1944 Instruction *Inst = cast<Instruction>(VL[I]); 1945 if (!matchExtractIndex(Inst, I, Inst->getOpcode())) 1946 return false; 1947 if (Inst->getOperand(0) != Vec) 1948 return false; 1949 } 1950 1951 return true; 1952 } 1953 1954 bool BoUpSLP::areAllUsersVectorized(Instruction *I) const { 1955 return I->hasOneUse() || 1956 std::all_of(I->user_begin(), I->user_end(), [this](User *U) { 1957 return ScalarToTreeEntry.count(U) > 0; 1958 }); 1959 } 1960 1961 int BoUpSLP::getEntryCost(TreeEntry *E) { 1962 ArrayRef<Value*> VL = E->Scalars; 1963 1964 Type *ScalarTy = VL[0]->getType(); 1965 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 1966 ScalarTy = SI->getValueOperand()->getType(); 1967 else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0])) 1968 ScalarTy = CI->getOperand(0)->getType(); 1969 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 1970 1971 // If we have computed a smaller type for the expression, update VecTy so 1972 // that the costs will be accurate. 1973 if (MinBWs.count(VL[0])) 1974 VecTy = VectorType::get( 1975 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size()); 1976 1977 if (E->NeedToGather) { 1978 if (allConstant(VL)) 1979 return 0; 1980 if (isSplat(VL)) { 1981 return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0); 1982 } 1983 if (getSameOpcode(VL).Opcode == Instruction::ExtractElement) { 1984 Optional<TargetTransformInfo::ShuffleKind> ShuffleKind = isShuffle(VL); 1985 if (ShuffleKind.hasValue()) { 1986 int Cost = TTI->getShuffleCost(ShuffleKind.getValue(), VecTy); 1987 for (auto *V : VL) { 1988 // If all users of instruction are going to be vectorized and this 1989 // instruction itself is not going to be vectorized, consider this 1990 // instruction as dead and remove its cost from the final cost of the 1991 // vectorized tree. 1992 if (areAllUsersVectorized(cast<Instruction>(V)) && 1993 !ScalarToTreeEntry.count(V)) { 1994 auto *IO = cast<ConstantInt>( 1995 cast<ExtractElementInst>(V)->getIndexOperand()); 1996 Cost -= TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, 1997 IO->getZExtValue()); 1998 } 1999 } 2000 return Cost; 2001 } 2002 } 2003 return getGatherCost(E->Scalars); 2004 } 2005 InstructionsState S = getSameOpcode(VL); 2006 assert(S.Opcode && allSameType(VL) && allSameBlock(VL) && "Invalid VL"); 2007 Instruction *VL0 = cast<Instruction>(S.OpValue); 2008 unsigned ShuffleOrOp = S.IsAltShuffle ? 2009 (unsigned) Instruction::ShuffleVector : S.Opcode; 2010 switch (ShuffleOrOp) { 2011 case Instruction::PHI: 2012 return 0; 2013 2014 case Instruction::ExtractValue: 2015 case Instruction::ExtractElement: 2016 if (canReuseExtract(VL, S.OpValue)) { 2017 int DeadCost = 0; 2018 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 2019 Instruction *E = cast<Instruction>(VL[i]); 2020 // If all users are going to be vectorized, instruction can be 2021 // considered as dead. 
          // Similarly, if the instruction has only one user, it is certain
          // to be vectorized.
          if (areAllUsersVectorized(E))
            // Take credit for instruction that will become dead.
            DeadCost +=
                TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
        }
        return -DeadCost;
      }
      return getGatherCost(VecTy);

    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      Type *SrcTy = VL0->getOperand(0)->getType();

      // Calculate the cost of this instruction.
      int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
                                                         VL0->getType(),
                                                         SrcTy, VL0);

      VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
      int VecCost =
          TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy, VL0);
      return VecCost - ScalarCost;
    }
    case Instruction::FCmp:
    case Instruction::ICmp:
    case Instruction::Select: {
      // Calculate the cost of this instruction.
      VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
      int ScalarCost = VecTy->getNumElements() *
          TTI->getCmpSelInstrCost(S.Opcode, ScalarTy, Builder.getInt1Ty(), VL0);
      int VecCost = TTI->getCmpSelInstrCost(S.Opcode, VecTy, MaskTy, VL0);
      return VecCost - ScalarCost;
    }
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      // Certain instructions can be cheaper to vectorize if they have a
      // constant second vector operand.
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;
      TargetTransformInfo::OperandValueProperties Op1VP =
          TargetTransformInfo::OP_None;
      TargetTransformInfo::OperandValueProperties Op2VP =
          TargetTransformInfo::OP_None;

      // If all operands are exactly the same ConstantInt then set the
      // operand kind to OK_UniformConstantValue.
      // If instead not all operands are constants, then set the operand kind
      // to OK_AnyValue. If all operands are constants but not the same,
      // then set the operand kind to OK_NonUniformConstantValue.
      ConstantInt *CInt = nullptr;
      for (unsigned i = 0; i < VL.size(); ++i) {
        const Instruction *I = cast<Instruction>(VL[i]);
        if (!isa<ConstantInt>(I->getOperand(1))) {
          Op2VK = TargetTransformInfo::OK_AnyValue;
          break;
        }
        if (i == 0) {
          CInt = cast<ConstantInt>(I->getOperand(1));
          continue;
        }
        if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
            CInt != cast<ConstantInt>(I->getOperand(1)))
          Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
      }
      // FIXME: Currently the cost model modification for division by a power
      // of 2 is handled only for X86 and AArch64. Add support for other
      // targets.
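      // E.g. a bundle of 'sdiv %x, 8' instructions has a uniform power-of-two
      // divisor, so targets whose cost model understands OP_PowerOf2 can price
      // the vector division as a cheaper shift-based sequence.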
      if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt &&
          CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_PowerOf2;

      SmallVector<const Value *, 4> Operands(VL0->operand_values());
      int ScalarCost =
          VecTy->getNumElements() *
          TTI->getArithmeticInstrCost(S.Opcode, ScalarTy, Op1VK, Op2VK, Op1VP,
                                      Op2VP, Operands);
      int VecCost = TTI->getArithmeticInstrCost(S.Opcode, VecTy, Op1VK, Op2VK,
                                                Op1VP, Op2VP, Operands);
      return VecCost - ScalarCost;
    }
    case Instruction::GetElementPtr: {
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;

      int ScalarCost =
          VecTy->getNumElements() *
          TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
      int VecCost =
          TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);

      return VecCost - ScalarCost;
    }
    case Instruction::Load: {
      // Cost of wide load - cost of scalar loads.
      unsigned alignment = cast<LoadInst>(VL0)->getAlignment();
      int ScalarLdCost = VecTy->getNumElements() *
          TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0, VL0);
      int VecLdCost = TTI->getMemoryOpCost(Instruction::Load,
                                           VecTy, alignment, 0, VL0);
      return VecLdCost - ScalarLdCost;
    }
    case Instruction::Store: {
      // We know that we can merge the stores. Calculate the cost.
      unsigned alignment = cast<StoreInst>(VL0)->getAlignment();
      int ScalarStCost = VecTy->getNumElements() *
          TTI->getMemoryOpCost(Instruction::Store, ScalarTy, alignment, 0, VL0);
      int VecStCost = TTI->getMemoryOpCost(Instruction::Store,
                                           VecTy, alignment, 0, VL0);
      return VecStCost - ScalarStCost;
    }
    case Instruction::Call: {
      CallInst *CI = cast<CallInst>(VL0);
      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

      // Calculate the cost of the scalar and vector calls.
      SmallVector<Type*, 4> ScalarTys;
      for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op)
        ScalarTys.push_back(CI->getArgOperand(op)->getType());

      FastMathFlags FMF;
      if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
        FMF = FPMO->getFastMathFlags();

      int ScalarCallCost = VecTy->getNumElements() *
          TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF);

      SmallVector<Value *, 4> Args(CI->arg_operands());
      int VecCallCost = TTI->getIntrinsicInstrCost(ID, CI->getType(), Args, FMF,
                                                   VecTy->getNumElements());

      DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
                   << " (" << VecCallCost << "-" << ScalarCallCost << ")"
                   << " for " << *CI << "\n");

      return VecCallCost - ScalarCallCost;
    }
    case Instruction::ShuffleVector: {
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_AnyValue;
      int ScalarCost = 0;
      int VecCost = 0;
      for (Value *i : VL) {
        Instruction *I = cast<Instruction>(i);
        ScalarCost +=
            TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
      }
      // VecCost is equal to the sum of the cost of creating the two vector
      // operations and the cost of creating the shuffle.
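      // E.g. for a 4-wide add/sub bundle this prices:
      //   %v0 = add <4 x i32> %L, %R
      //   %v1 = sub <4 x i32> %L, %R
      //   %v  = shufflevector <4 x i32> %v0, <4 x i32> %v1,
      //                       <4 x i32> <i32 0, i32 5, i32 2, i32 7>
      // i.e. two vector binary operations plus one SK_Alternate shuffle.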
      Instruction *I0 = cast<Instruction>(VL[0]);
      VecCost =
          TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK);
      Instruction *I1 = cast<Instruction>(VL[1]);
      VecCost +=
          TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK);
      VecCost +=
          TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0);
      return VecCost - ScalarCost;
    }
    default:
      llvm_unreachable("Unknown instruction");
  }
}

bool BoUpSLP::isFullyVectorizableTinyTree() {
  DEBUG(dbgs() << "SLP: Check whether the tree with height "
               << VectorizableTree.size() << " is fully vectorizable.\n");

  // We only handle trees of heights 1 and 2.
  if (VectorizableTree.size() == 1 && !VectorizableTree[0].NeedToGather)
    return true;

  if (VectorizableTree.size() != 2)
    return false;

  // Handle splat and all-constants stores.
  if (!VectorizableTree[0].NeedToGather &&
      (allConstant(VectorizableTree[1].Scalars) ||
       isSplat(VectorizableTree[1].Scalars)))
    return true;

  // Gathering cost would be too much for tiny trees.
  if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
    return false;

  return true;
}

bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() {
  // We can vectorize the tree if its size is greater than or equal to the
  // minimum size specified by the MinTreeSize command line option.
  if (VectorizableTree.size() >= MinTreeSize)
    return false;

  // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
  // can vectorize it if we can prove it fully vectorizable.
  if (isFullyVectorizableTinyTree())
    return false;

  assert((VectorizableTree.empty() ? ExternalUses.empty() : true) &&
         "We shouldn't have any external users");

  // Otherwise, we can't vectorize the tree. It is both tiny and not fully
  // vectorizable.
  return true;
}

int BoUpSLP::getSpillCost() {
  // Walk from the bottom of the tree to the top, tracking which values are
  // live. When we see a call instruction that is not part of our tree,
  // query TTI to see if there is a cost to keeping values live over it
  // (for example, if spills and fills are required).
  unsigned BundleWidth = VectorizableTree.front().Scalars.size();
  int Cost = 0;

  SmallPtrSet<Instruction *, 4> LiveValues;
  Instruction *PrevInst = nullptr;

  for (const auto &N : VectorizableTree) {
    Instruction *Inst = dyn_cast<Instruction>(N.Scalars[0]);
    if (!Inst)
      continue;

    if (!PrevInst) {
      PrevInst = Inst;
      continue;
    }

    // Update LiveValues.
    LiveValues.erase(PrevInst);
    for (auto &J : PrevInst->operands()) {
      if (isa<Instruction>(&*J) && getTreeEntry(&*J))
        LiveValues.insert(cast<Instruction>(&*J));
    }

    DEBUG(
      dbgs() << "SLP: #LV: " << LiveValues.size();
      for (auto *X : LiveValues)
        dbgs() << " " << X->getName();
      dbgs() << ", Looking at ";
      Inst->dump();
    );

    // Now find the sequence of instructions between PrevInst and Inst.
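    // We walk backwards from PrevInst toward Inst, wrapping into Inst's block
    // when the two bundles live in different blocks; each intervening call
    // charges the TTI cost of keeping every currently live bundle in
    // registers across it.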
2298 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(), 2299 PrevInstIt = 2300 PrevInst->getIterator().getReverse(); 2301 while (InstIt != PrevInstIt) { 2302 if (PrevInstIt == PrevInst->getParent()->rend()) { 2303 PrevInstIt = Inst->getParent()->rbegin(); 2304 continue; 2305 } 2306 2307 if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) { 2308 SmallVector<Type*, 4> V; 2309 for (auto *II : LiveValues) 2310 V.push_back(VectorType::get(II->getType(), BundleWidth)); 2311 Cost += TTI->getCostOfKeepingLiveOverCall(V); 2312 } 2313 2314 ++PrevInstIt; 2315 } 2316 2317 PrevInst = Inst; 2318 } 2319 2320 return Cost; 2321 } 2322 2323 int BoUpSLP::getTreeCost() { 2324 int Cost = 0; 2325 DEBUG(dbgs() << "SLP: Calculating cost for tree of size " << 2326 VectorizableTree.size() << ".\n"); 2327 2328 unsigned BundleWidth = VectorizableTree[0].Scalars.size(); 2329 2330 for (TreeEntry &TE : VectorizableTree) { 2331 int C = getEntryCost(&TE); 2332 DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with " 2333 << *TE.Scalars[0] << ".\n"); 2334 Cost += C; 2335 } 2336 2337 SmallSet<Value *, 16> ExtractCostCalculated; 2338 int ExtractCost = 0; 2339 for (ExternalUser &EU : ExternalUses) { 2340 // We only add extract cost once for the same scalar. 2341 if (!ExtractCostCalculated.insert(EU.Scalar).second) 2342 continue; 2343 2344 // Uses by ephemeral values are free (because the ephemeral value will be 2345 // removed prior to code generation, and so the extraction will be 2346 // removed as well). 2347 if (EphValues.count(EU.User)) 2348 continue; 2349 2350 // If we plan to rewrite the tree in a smaller type, we will need to sign 2351 // extend the extracted value back to the original type. Here, we account 2352 // for the extract and the added cost of the sign extend if needed. 2353 auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth); 2354 auto *ScalarRoot = VectorizableTree[0].Scalars[0]; 2355 if (MinBWs.count(ScalarRoot)) { 2356 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 2357 auto Extend = 2358 MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt; 2359 VecTy = VectorType::get(MinTy, BundleWidth); 2360 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), 2361 VecTy, EU.Lane); 2362 } else { 2363 ExtractCost += 2364 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane); 2365 } 2366 } 2367 2368 int SpillCost = getSpillCost(); 2369 Cost += SpillCost + ExtractCost; 2370 2371 std::string Str; 2372 { 2373 raw_string_ostream OS(Str); 2374 OS << "SLP: Spill Cost = " << SpillCost << ".\n" 2375 << "SLP: Extract Cost = " << ExtractCost << ".\n" 2376 << "SLP: Total Cost = " << Cost << ".\n"; 2377 } 2378 DEBUG(dbgs() << Str); 2379 2380 if (ViewSLPTree) 2381 ViewGraph(this, "SLP" + F->getName(), false, Str); 2382 2383 return Cost; 2384 } 2385 2386 int BoUpSLP::getGatherCost(Type *Ty) { 2387 int Cost = 0; 2388 for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i) 2389 Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i); 2390 return Cost; 2391 } 2392 2393 int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) { 2394 // Find the type of the operands in VL. 2395 Type *ScalarTy = VL[0]->getType(); 2396 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 2397 ScalarTy = SI->getValueOperand()->getType(); 2398 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 2399 // Find the cost of inserting/extracting values from the vector. 
2400 return getGatherCost(VecTy); 2401 } 2402 2403 // Reorder commutative operations in alternate shuffle if the resulting vectors 2404 // are consecutive loads. This would allow us to vectorize the tree. 2405 // If we have something like- 2406 // load a[0] - load b[0] 2407 // load b[1] + load a[1] 2408 // load a[2] - load b[2] 2409 // load a[3] + load b[3] 2410 // Reordering the second load b[1] load a[1] would allow us to vectorize this 2411 // code. 2412 void BoUpSLP::reorderAltShuffleOperands(unsigned Opcode, ArrayRef<Value *> VL, 2413 SmallVectorImpl<Value *> &Left, 2414 SmallVectorImpl<Value *> &Right) { 2415 // Push left and right operands of binary operation into Left and Right 2416 unsigned AltOpcode = getAltOpcode(Opcode); 2417 (void)AltOpcode; 2418 for (Value *V : VL) { 2419 auto *I = cast<Instruction>(V); 2420 assert(sameOpcodeOrAlt(Opcode, AltOpcode, I->getOpcode()) && 2421 "Incorrect instruction in vector"); 2422 Left.push_back(I->getOperand(0)); 2423 Right.push_back(I->getOperand(1)); 2424 } 2425 2426 // Reorder if we have a commutative operation and consecutive access 2427 // are on either side of the alternate instructions. 2428 for (unsigned j = 0; j < VL.size() - 1; ++j) { 2429 if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) { 2430 if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) { 2431 Instruction *VL1 = cast<Instruction>(VL[j]); 2432 Instruction *VL2 = cast<Instruction>(VL[j + 1]); 2433 if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) { 2434 std::swap(Left[j], Right[j]); 2435 continue; 2436 } else if (VL2->isCommutative() && 2437 isConsecutiveAccess(L, L1, *DL, *SE)) { 2438 std::swap(Left[j + 1], Right[j + 1]); 2439 continue; 2440 } 2441 // else unchanged 2442 } 2443 } 2444 if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) { 2445 if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) { 2446 Instruction *VL1 = cast<Instruction>(VL[j]); 2447 Instruction *VL2 = cast<Instruction>(VL[j + 1]); 2448 if (VL1->isCommutative() && isConsecutiveAccess(L, L1, *DL, *SE)) { 2449 std::swap(Left[j], Right[j]); 2450 continue; 2451 } else if (VL2->isCommutative() && 2452 isConsecutiveAccess(L, L1, *DL, *SE)) { 2453 std::swap(Left[j + 1], Right[j + 1]); 2454 continue; 2455 } 2456 // else unchanged 2457 } 2458 } 2459 } 2460 } 2461 2462 // Return true if I should be commuted before adding it's left and right 2463 // operands to the arrays Left and Right. 2464 // 2465 // The vectorizer is trying to either have all elements one side being 2466 // instruction with the same opcode to enable further vectorization, or having 2467 // a splat to lower the vectorizing cost. 2468 static bool shouldReorderOperands( 2469 int i, unsigned Opcode, Instruction &I, ArrayRef<Value *> Left, 2470 ArrayRef<Value *> Right, bool AllSameOpcodeLeft, bool AllSameOpcodeRight, 2471 bool SplatLeft, bool SplatRight, Value *&VLeft, Value *&VRight) { 2472 VLeft = I.getOperand(0); 2473 VRight = I.getOperand(1); 2474 // If we have "SplatRight", try to see if commuting is needed to preserve it. 2475 if (SplatRight) { 2476 if (VRight == Right[i - 1]) 2477 // Preserve SplatRight 2478 return false; 2479 if (VLeft == Right[i - 1]) { 2480 // Commuting would preserve SplatRight, but we don't want to break 2481 // SplatLeft either, i.e. preserve the original order if possible. 2482 // (FIXME: why do we care?) 2483 if (SplatLeft && VLeft == Left[i - 1]) 2484 return false; 2485 return true; 2486 } 2487 } 2488 // Symmetrically handle Right side. 
2489 if (SplatLeft) { 2490 if (VLeft == Left[i - 1]) 2491 // Preserve SplatLeft 2492 return false; 2493 if (VRight == Left[i - 1]) 2494 return true; 2495 } 2496 2497 Instruction *ILeft = dyn_cast<Instruction>(VLeft); 2498 Instruction *IRight = dyn_cast<Instruction>(VRight); 2499 2500 // If we have "AllSameOpcodeRight", try to see if the left operands preserves 2501 // it and not the right, in this case we want to commute. 2502 if (AllSameOpcodeRight) { 2503 unsigned RightPrevOpcode = cast<Instruction>(Right[i - 1])->getOpcode(); 2504 if (IRight && RightPrevOpcode == IRight->getOpcode()) 2505 // Do not commute, a match on the right preserves AllSameOpcodeRight 2506 return false; 2507 if (ILeft && RightPrevOpcode == ILeft->getOpcode()) { 2508 // We have a match and may want to commute, but first check if there is 2509 // not also a match on the existing operands on the Left to preserve 2510 // AllSameOpcodeLeft, i.e. preserve the original order if possible. 2511 // (FIXME: why do we care?) 2512 if (AllSameOpcodeLeft && ILeft && 2513 cast<Instruction>(Left[i - 1])->getOpcode() == ILeft->getOpcode()) 2514 return false; 2515 return true; 2516 } 2517 } 2518 // Symmetrically handle Left side. 2519 if (AllSameOpcodeLeft) { 2520 unsigned LeftPrevOpcode = cast<Instruction>(Left[i - 1])->getOpcode(); 2521 if (ILeft && LeftPrevOpcode == ILeft->getOpcode()) 2522 return false; 2523 if (IRight && LeftPrevOpcode == IRight->getOpcode()) 2524 return true; 2525 } 2526 return false; 2527 } 2528 2529 void BoUpSLP::reorderInputsAccordingToOpcode(unsigned Opcode, 2530 ArrayRef<Value *> VL, 2531 SmallVectorImpl<Value *> &Left, 2532 SmallVectorImpl<Value *> &Right) { 2533 if (!VL.empty()) { 2534 // Peel the first iteration out of the loop since there's nothing 2535 // interesting to do anyway and it simplifies the checks in the loop. 2536 auto *I = cast<Instruction>(VL[0]); 2537 Value *VLeft = I->getOperand(0); 2538 Value *VRight = I->getOperand(1); 2539 if (!isa<Instruction>(VRight) && isa<Instruction>(VLeft)) 2540 // Favor having instruction to the right. FIXME: why? 2541 std::swap(VLeft, VRight); 2542 Left.push_back(VLeft); 2543 Right.push_back(VRight); 2544 } 2545 2546 // Keep track if we have instructions with all the same opcode on one side. 2547 bool AllSameOpcodeLeft = isa<Instruction>(Left[0]); 2548 bool AllSameOpcodeRight = isa<Instruction>(Right[0]); 2549 // Keep track if we have one side with all the same value (broadcast). 2550 bool SplatLeft = true; 2551 bool SplatRight = true; 2552 2553 for (unsigned i = 1, e = VL.size(); i != e; ++i) { 2554 Instruction *I = cast<Instruction>(VL[i]); 2555 assert(((I->getOpcode() == Opcode && I->isCommutative()) || 2556 (I->getOpcode() != Opcode && Instruction::isCommutative(Opcode))) && 2557 "Can only process commutative instruction"); 2558 // Commute to favor either a splat or maximizing having the same opcodes on 2559 // one side. 2560 Value *VLeft; 2561 Value *VRight; 2562 if (shouldReorderOperands(i, Opcode, *I, Left, Right, AllSameOpcodeLeft, 2563 AllSameOpcodeRight, SplatLeft, SplatRight, VLeft, 2564 VRight)) { 2565 Left.push_back(VRight); 2566 Right.push_back(VLeft); 2567 } else { 2568 Left.push_back(VLeft); 2569 Right.push_back(VRight); 2570 } 2571 // Update Splat* and AllSameOpcode* after the insertion. 
2572 SplatRight = SplatRight && (Right[i - 1] == Right[i]); 2573 SplatLeft = SplatLeft && (Left[i - 1] == Left[i]); 2574 AllSameOpcodeLeft = AllSameOpcodeLeft && isa<Instruction>(Left[i]) && 2575 (cast<Instruction>(Left[i - 1])->getOpcode() == 2576 cast<Instruction>(Left[i])->getOpcode()); 2577 AllSameOpcodeRight = AllSameOpcodeRight && isa<Instruction>(Right[i]) && 2578 (cast<Instruction>(Right[i - 1])->getOpcode() == 2579 cast<Instruction>(Right[i])->getOpcode()); 2580 } 2581 2582 // If one operand end up being broadcast, return this operand order. 2583 if (SplatRight || SplatLeft) 2584 return; 2585 2586 // Finally check if we can get longer vectorizable chain by reordering 2587 // without breaking the good operand order detected above. 2588 // E.g. If we have something like- 2589 // load a[0] load b[0] 2590 // load b[1] load a[1] 2591 // load a[2] load b[2] 2592 // load a[3] load b[3] 2593 // Reordering the second load b[1] load a[1] would allow us to vectorize 2594 // this code and we still retain AllSameOpcode property. 2595 // FIXME: This load reordering might break AllSameOpcode in some rare cases 2596 // such as- 2597 // add a[0],c[0] load b[0] 2598 // add a[1],c[2] load b[1] 2599 // b[2] load b[2] 2600 // add a[3],c[3] load b[3] 2601 for (unsigned j = 0; j < VL.size() - 1; ++j) { 2602 if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) { 2603 if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) { 2604 if (isConsecutiveAccess(L, L1, *DL, *SE)) { 2605 std::swap(Left[j + 1], Right[j + 1]); 2606 continue; 2607 } 2608 } 2609 } 2610 if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) { 2611 if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) { 2612 if (isConsecutiveAccess(L, L1, *DL, *SE)) { 2613 std::swap(Left[j + 1], Right[j + 1]); 2614 continue; 2615 } 2616 } 2617 } 2618 // else unchanged 2619 } 2620 } 2621 2622 void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL, Value *OpValue) { 2623 // Get the basic block this bundle is in. All instructions in the bundle 2624 // should be in this block. 2625 auto *Front = cast<Instruction>(OpValue); 2626 auto *BB = Front->getParent(); 2627 const unsigned Opcode = cast<Instruction>(OpValue)->getOpcode(); 2628 const unsigned AltOpcode = getAltOpcode(Opcode); 2629 assert(llvm::all_of(make_range(VL.begin(), VL.end()), [=](Value *V) -> bool { 2630 return !sameOpcodeOrAlt(Opcode, AltOpcode, 2631 cast<Instruction>(V)->getOpcode()) || 2632 cast<Instruction>(V)->getParent() == BB; 2633 })); 2634 2635 // The last instruction in the bundle in program order. 2636 Instruction *LastInst = nullptr; 2637 2638 // Find the last instruction. The common case should be that BB has been 2639 // scheduled, and the last instruction is VL.back(). So we start with 2640 // VL.back() and iterate over schedule data until we reach the end of the 2641 // bundle. The end of the bundle is marked by null ScheduleData. 2642 if (BlocksSchedules.count(BB)) { 2643 auto *Bundle = 2644 BlocksSchedules[BB]->getScheduleData(isOneOf(OpValue, VL.back())); 2645 if (Bundle && Bundle->isPartOfBundle()) 2646 for (; Bundle; Bundle = Bundle->NextInBundle) 2647 if (Bundle->OpValue == Bundle->Inst) 2648 LastInst = Bundle->Inst; 2649 } 2650 2651 // LastInst can still be null at this point if there's either not an entry 2652 // for BB in BlocksSchedules or there's no ScheduleData available for 2653 // VL.back(). This can be the case if buildTree_rec aborts for various 2654 // reasons (e.g., the maximum recursion depth is reached, the maximum region 2655 // size is reached, etc.). 
// ScheduleData is initialized in the scheduling "dry-run".
  //
  // If this happens, we can still find the last instruction by brute force. We
  // iterate forwards from Front (inclusive) until we either see all
  // instructions in the bundle or reach the end of the block. If Front is the
  // last instruction in program order, LastInst will be set to Front, and we
  // will visit all the remaining instructions in the block.
  //
  // One of the reasons we exit early from buildTree_rec is to place an upper
  // bound on compile-time. Thus, taking an additional compile-time hit here is
  // not ideal. However, this should be exceedingly rare since it requires that
  // we both exit early from buildTree_rec and that the bundle be out-of-order
  // (causing us to iterate all the way to the end of the block).
  if (!LastInst) {
    SmallPtrSet<Value *, 16> Bundle(VL.begin(), VL.end());
    for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
      if (Bundle.erase(&I) && sameOpcodeOrAlt(Opcode, AltOpcode, I.getOpcode()))
        LastInst = &I;
      if (Bundle.empty())
        break;
    }
  }

  // Set the insertion point after the last instruction in the bundle. Set the
  // debug location to Front.
  Builder.SetInsertPoint(BB, ++LastInst->getIterator());
  Builder.SetCurrentDebugLocation(Front->getDebugLoc());
}

Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
  Value *Vec = UndefValue::get(Ty);
  // Generate the 'InsertElement' instruction.
  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
    if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(Insrt);
      CSEBlocks.insert(Insrt->getParent());

      // Add to our 'need-to-extract' list.
      if (TreeEntry *E = getTreeEntry(VL[i])) {
        // Find which lane we need to extract.
        int FoundLane = -1;
        for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for?
          if (E->Scalars[Lane] == VL[i]) {
            FoundLane = Lane;
            break;
          }
        }
        assert(FoundLane >= 0 && "Could not find the correct lane");
        ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
      }
    }
  }

  return Vec;
}

Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL, Value *OpValue) const {
  if (const TreeEntry *En = getTreeEntry(OpValue)) {
    if (En->isSame(VL) && En->VectorizedValue)
      return En->VectorizedValue;
  }
  return nullptr;
}

Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  InstructionsState S = getSameOpcode(VL);
  if (S.Opcode) {
    if (TreeEntry *E = getTreeEntry(S.OpValue)) {
      if (E->isSame(VL))
        return vectorizeTree(E);
    }
  }

  Type *ScalarTy = S.OpValue->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  return Gather(VL, VecTy);
}

Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  InstructionsState S = getSameOpcode(E->Scalars);
  Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
  Type *ScalarTy = VL0->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());

  if (E->NeedToGather) {
    setInsertPointAfterBundle(E->Scalars, VL0);
    auto *V = Gather(E->Scalars, VecTy);
    E->VectorizedValue = V;
    return V;
  }

  unsigned ShuffleOrOp = S.IsAltShuffle ?
                         (unsigned) Instruction::ShuffleVector : S.Opcode;
  switch (ShuffleOrOp) {
    case Instruction::PHI: {
      PHINode *PH = cast<PHINode>(VL0);
      Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
      E->VectorizedValue = NewPhi;

      // PHINodes may have multiple entries from the same block. We want to
      // visit every block once.
      SmallSet<BasicBlock*, 4> VisitedBBs;

      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        ValueList Operands;
        BasicBlock *IBB = PH->getIncomingBlock(i);

        if (!VisitedBBs.insert(IBB).second) {
          NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
          continue;
        }

        // Prepare the operand vector.
2785 for (Value *V : E->Scalars) 2786 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(IBB)); 2787 2788 Builder.SetInsertPoint(IBB->getTerminator()); 2789 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 2790 Value *Vec = vectorizeTree(Operands); 2791 NewPhi->addIncoming(Vec, IBB); 2792 } 2793 2794 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 2795 "Invalid number of incoming values"); 2796 return NewPhi; 2797 } 2798 2799 case Instruction::ExtractElement: { 2800 if (canReuseExtract(E->Scalars, VL0)) { 2801 Value *V = VL0->getOperand(0); 2802 E->VectorizedValue = V; 2803 return V; 2804 } 2805 setInsertPointAfterBundle(E->Scalars, VL0); 2806 auto *V = Gather(E->Scalars, VecTy); 2807 E->VectorizedValue = V; 2808 return V; 2809 } 2810 case Instruction::ExtractValue: { 2811 if (canReuseExtract(E->Scalars, VL0)) { 2812 LoadInst *LI = cast<LoadInst>(VL0->getOperand(0)); 2813 Builder.SetInsertPoint(LI); 2814 PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace()); 2815 Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy); 2816 LoadInst *V = Builder.CreateAlignedLoad(Ptr, LI->getAlignment()); 2817 E->VectorizedValue = V; 2818 return propagateMetadata(V, E->Scalars); 2819 } 2820 setInsertPointAfterBundle(E->Scalars, VL0); 2821 auto *V = Gather(E->Scalars, VecTy); 2822 E->VectorizedValue = V; 2823 return V; 2824 } 2825 case Instruction::ZExt: 2826 case Instruction::SExt: 2827 case Instruction::FPToUI: 2828 case Instruction::FPToSI: 2829 case Instruction::FPExt: 2830 case Instruction::PtrToInt: 2831 case Instruction::IntToPtr: 2832 case Instruction::SIToFP: 2833 case Instruction::UIToFP: 2834 case Instruction::Trunc: 2835 case Instruction::FPTrunc: 2836 case Instruction::BitCast: { 2837 ValueList INVL; 2838 for (Value *V : E->Scalars) 2839 INVL.push_back(cast<Instruction>(V)->getOperand(0)); 2840 2841 setInsertPointAfterBundle(E->Scalars, VL0); 2842 2843 Value *InVec = vectorizeTree(INVL); 2844 2845 if (Value *V = alreadyVectorized(E->Scalars, VL0)) 2846 return V; 2847 2848 CastInst *CI = dyn_cast<CastInst>(VL0); 2849 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy); 2850 E->VectorizedValue = V; 2851 ++NumVectorInstructions; 2852 return V; 2853 } 2854 case Instruction::FCmp: 2855 case Instruction::ICmp: { 2856 ValueList LHSV, RHSV; 2857 for (Value *V : E->Scalars) { 2858 LHSV.push_back(cast<Instruction>(V)->getOperand(0)); 2859 RHSV.push_back(cast<Instruction>(V)->getOperand(1)); 2860 } 2861 2862 setInsertPointAfterBundle(E->Scalars, VL0); 2863 2864 Value *L = vectorizeTree(LHSV); 2865 Value *R = vectorizeTree(RHSV); 2866 2867 if (Value *V = alreadyVectorized(E->Scalars, VL0)) 2868 return V; 2869 2870 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 2871 Value *V; 2872 if (S.Opcode == Instruction::FCmp) 2873 V = Builder.CreateFCmp(P0, L, R); 2874 else 2875 V = Builder.CreateICmp(P0, L, R); 2876 2877 E->VectorizedValue = V; 2878 propagateIRFlags(E->VectorizedValue, E->Scalars, VL0); 2879 ++NumVectorInstructions; 2880 return V; 2881 } 2882 case Instruction::Select: { 2883 ValueList TrueVec, FalseVec, CondVec; 2884 for (Value *V : E->Scalars) { 2885 CondVec.push_back(cast<Instruction>(V)->getOperand(0)); 2886 TrueVec.push_back(cast<Instruction>(V)->getOperand(1)); 2887 FalseVec.push_back(cast<Instruction>(V)->getOperand(2)); 2888 } 2889 2890 setInsertPointAfterBundle(E->Scalars, VL0); 2891 2892 Value *Cond = vectorizeTree(CondVec); 2893 Value *True = vectorizeTree(TrueVec); 2894 Value *False = 
vectorizeTree(FalseVec); 2895 2896 if (Value *V = alreadyVectorized(E->Scalars, VL0)) 2897 return V; 2898 2899 Value *V = Builder.CreateSelect(Cond, True, False); 2900 E->VectorizedValue = V; 2901 ++NumVectorInstructions; 2902 return V; 2903 } 2904 case Instruction::Add: 2905 case Instruction::FAdd: 2906 case Instruction::Sub: 2907 case Instruction::FSub: 2908 case Instruction::Mul: 2909 case Instruction::FMul: 2910 case Instruction::UDiv: 2911 case Instruction::SDiv: 2912 case Instruction::FDiv: 2913 case Instruction::URem: 2914 case Instruction::SRem: 2915 case Instruction::FRem: 2916 case Instruction::Shl: 2917 case Instruction::LShr: 2918 case Instruction::AShr: 2919 case Instruction::And: 2920 case Instruction::Or: 2921 case Instruction::Xor: { 2922 ValueList LHSVL, RHSVL; 2923 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) 2924 reorderInputsAccordingToOpcode(S.Opcode, E->Scalars, LHSVL, 2925 RHSVL); 2926 else 2927 for (Value *V : E->Scalars) { 2928 auto *I = cast<Instruction>(V); 2929 LHSVL.push_back(I->getOperand(0)); 2930 RHSVL.push_back(I->getOperand(1)); 2931 } 2932 2933 setInsertPointAfterBundle(E->Scalars, VL0); 2934 2935 Value *LHS = vectorizeTree(LHSVL); 2936 Value *RHS = vectorizeTree(RHSVL); 2937 2938 if (Value *V = alreadyVectorized(E->Scalars, VL0)) 2939 return V; 2940 2941 Value *V = Builder.CreateBinOp( 2942 static_cast<Instruction::BinaryOps>(S.Opcode), LHS, RHS); 2943 E->VectorizedValue = V; 2944 propagateIRFlags(E->VectorizedValue, E->Scalars, VL0); 2945 ++NumVectorInstructions; 2946 2947 if (Instruction *I = dyn_cast<Instruction>(V)) 2948 return propagateMetadata(I, E->Scalars); 2949 2950 return V; 2951 } 2952 case Instruction::Load: { 2953 // Loads are inserted at the head of the tree because we don't want to 2954 // sink them all the way down past store instructions. 2955 setInsertPointAfterBundle(E->Scalars, VL0); 2956 2957 LoadInst *LI = cast<LoadInst>(VL0); 2958 Type *ScalarLoadTy = LI->getType(); 2959 unsigned AS = LI->getPointerAddressSpace(); 2960 2961 Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(), 2962 VecTy->getPointerTo(AS)); 2963 2964 // The pointer operand uses an in-tree scalar so we add the new BitCast to 2965 // ExternalUses list to make sure that an extract will be generated in the 2966 // future. 
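      // In other words, the new BitCast becomes an out-of-tree user of the
      // scalar pointer: if that pointer is itself one of the vectorized
      // scalars, an extractelement must be emitted for it later.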
2967 Value *PO = LI->getPointerOperand(); 2968 if (getTreeEntry(PO)) 2969 ExternalUses.push_back(ExternalUser(PO, cast<User>(VecPtr), 0)); 2970 2971 unsigned Alignment = LI->getAlignment(); 2972 LI = Builder.CreateLoad(VecPtr); 2973 if (!Alignment) { 2974 Alignment = DL->getABITypeAlignment(ScalarLoadTy); 2975 } 2976 LI->setAlignment(Alignment); 2977 E->VectorizedValue = LI; 2978 ++NumVectorInstructions; 2979 return propagateMetadata(LI, E->Scalars); 2980 } 2981 case Instruction::Store: { 2982 StoreInst *SI = cast<StoreInst>(VL0); 2983 unsigned Alignment = SI->getAlignment(); 2984 unsigned AS = SI->getPointerAddressSpace(); 2985 2986 ValueList ScalarStoreValues; 2987 for (Value *V : E->Scalars) 2988 ScalarStoreValues.push_back(cast<StoreInst>(V)->getValueOperand()); 2989 2990 setInsertPointAfterBundle(E->Scalars, VL0); 2991 2992 Value *VecValue = vectorizeTree(ScalarStoreValues); 2993 Value *ScalarPtr = SI->getPointerOperand(); 2994 Value *VecPtr = Builder.CreateBitCast(ScalarPtr, VecTy->getPointerTo(AS)); 2995 StoreInst *S = Builder.CreateStore(VecValue, VecPtr); 2996 2997 // The pointer operand uses an in-tree scalar, so add the new BitCast to 2998 // ExternalUses to make sure that an extract will be generated in the 2999 // future. 3000 if (getTreeEntry(ScalarPtr)) 3001 ExternalUses.push_back(ExternalUser(ScalarPtr, cast<User>(VecPtr), 0)); 3002 3003 if (!Alignment) 3004 Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType()); 3005 3006 S->setAlignment(Alignment); 3007 E->VectorizedValue = S; 3008 ++NumVectorInstructions; 3009 return propagateMetadata(S, E->Scalars); 3010 } 3011 case Instruction::GetElementPtr: { 3012 setInsertPointAfterBundle(E->Scalars, VL0); 3013 3014 ValueList Op0VL; 3015 for (Value *V : E->Scalars) 3016 Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0)); 3017 3018 Value *Op0 = vectorizeTree(Op0VL); 3019 3020 std::vector<Value *> OpVecs; 3021 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e; 3022 ++j) { 3023 ValueList OpVL; 3024 for (Value *V : E->Scalars) 3025 OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j)); 3026 3027 Value *OpVec = vectorizeTree(OpVL); 3028 OpVecs.push_back(OpVec); 3029 } 3030 3031 Value *V = Builder.CreateGEP( 3032 cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs); 3033 E->VectorizedValue = V; 3034 ++NumVectorInstructions; 3035 3036 if (Instruction *I = dyn_cast<Instruction>(V)) 3037 return propagateMetadata(I, E->Scalars); 3038 3039 return V; 3040 } 3041 case Instruction::Call: { 3042 CallInst *CI = cast<CallInst>(VL0); 3043 setInsertPointAfterBundle(E->Scalars, VL0); 3044 Function *FI; 3045 Intrinsic::ID IID = Intrinsic::not_intrinsic; 3046 Value *ScalarArg = nullptr; 3047 if (CI && (FI = CI->getCalledFunction())) { 3048 IID = FI->getIntrinsicID(); 3049 } 3050 std::vector<Value *> OpVecs; 3051 for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) { 3052 ValueList OpVL; 3053 // ctlz,cttz and powi are special intrinsics whose second argument is 3054 // a scalar. This argument should not be vectorized. 
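        // E.g. for @llvm.powi.f32(float %x, i32 %n), only the float operand is
        // widened to a vector; the i32 exponent %n is passed through as-is.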
3055 if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) { 3056 CallInst *CEI = cast<CallInst>(VL0); 3057 ScalarArg = CEI->getArgOperand(j); 3058 OpVecs.push_back(CEI->getArgOperand(j)); 3059 continue; 3060 } 3061 for (Value *V : E->Scalars) { 3062 CallInst *CEI = cast<CallInst>(V); 3063 OpVL.push_back(CEI->getArgOperand(j)); 3064 } 3065 3066 Value *OpVec = vectorizeTree(OpVL); 3067 DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); 3068 OpVecs.push_back(OpVec); 3069 } 3070 3071 Module *M = F->getParent(); 3072 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3073 Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) }; 3074 Function *CF = Intrinsic::getDeclaration(M, ID, Tys); 3075 SmallVector<OperandBundleDef, 1> OpBundles; 3076 CI->getOperandBundlesAsDefs(OpBundles); 3077 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); 3078 3079 // The scalar argument uses an in-tree scalar so we add the new vectorized 3080 // call to ExternalUses list to make sure that an extract will be 3081 // generated in the future. 3082 if (ScalarArg && getTreeEntry(ScalarArg)) 3083 ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0)); 3084 3085 E->VectorizedValue = V; 3086 propagateIRFlags(E->VectorizedValue, E->Scalars, VL0); 3087 ++NumVectorInstructions; 3088 return V; 3089 } 3090 case Instruction::ShuffleVector: { 3091 ValueList LHSVL, RHSVL; 3092 assert(Instruction::isBinaryOp(S.Opcode) && 3093 "Invalid Shuffle Vector Operand"); 3094 reorderAltShuffleOperands(S.Opcode, E->Scalars, LHSVL, RHSVL); 3095 setInsertPointAfterBundle(E->Scalars, VL0); 3096 3097 Value *LHS = vectorizeTree(LHSVL); 3098 Value *RHS = vectorizeTree(RHSVL); 3099 3100 if (Value *V = alreadyVectorized(E->Scalars, VL0)) 3101 return V; 3102 3103 // Create a vector of LHS op1 RHS 3104 Value *V0 = Builder.CreateBinOp( 3105 static_cast<Instruction::BinaryOps>(S.Opcode), LHS, RHS); 3106 3107 unsigned AltOpcode = getAltOpcode(S.Opcode); 3108 // Create a vector of LHS op2 RHS 3109 Value *V1 = Builder.CreateBinOp( 3110 static_cast<Instruction::BinaryOps>(AltOpcode), LHS, RHS); 3111 3112 // Create shuffle to take alternate operations from the vector. 3113 // Also, gather up odd and even scalar ops to propagate IR flags to 3114 // each vector operation. 3115 ValueList OddScalars, EvenScalars; 3116 unsigned e = E->Scalars.size(); 3117 SmallVector<Constant *, 8> Mask(e); 3118 for (unsigned i = 0; i < e; ++i) { 3119 if (isOdd(i)) { 3120 Mask[i] = Builder.getInt32(e + i); 3121 OddScalars.push_back(E->Scalars[i]); 3122 } else { 3123 Mask[i] = Builder.getInt32(i); 3124 EvenScalars.push_back(E->Scalars[i]); 3125 } 3126 } 3127 3128 Value *ShuffleMask = ConstantVector::get(Mask); 3129 propagateIRFlags(V0, EvenScalars); 3130 propagateIRFlags(V1, OddScalars); 3131 3132 Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask); 3133 E->VectorizedValue = V; 3134 ++NumVectorInstructions; 3135 if (Instruction *I = dyn_cast<Instruction>(V)) 3136 return propagateMetadata(I, E->Scalars); 3137 3138 return V; 3139 } 3140 default: 3141 llvm_unreachable("unknown inst"); 3142 } 3143 return nullptr; 3144 } 3145 3146 Value *BoUpSLP::vectorizeTree() { 3147 ExtraValueToDebugLocsMap ExternallyUsedValues; 3148 return vectorizeTree(ExternallyUsedValues); 3149 } 3150 3151 Value * 3152 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { 3153 // All blocks must be scheduled before any instructions are inserted. 
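  // Scheduling may reorder the scalar instructions within each scheduling
  // region, so running it for every block up front keeps the insertion points
  // chosen during code generation stable.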
3154 for (auto &BSIter : BlocksSchedules) {
3155 scheduleBlock(BSIter.second.get());
3156 }
3157
3158 Builder.SetInsertPoint(&F->getEntryBlock().front());
3159 auto *VectorRoot = vectorizeTree(&VectorizableTree[0]);
3160
3161 // If the vectorized tree can be rewritten in a smaller type, we truncate the
3162 // vectorized root. InstCombine will then rewrite the entire expression. We
3163 // sign extend the extracted values below.
3164 auto *ScalarRoot = VectorizableTree[0].Scalars[0];
3165 if (MinBWs.count(ScalarRoot)) {
3166 if (auto *I = dyn_cast<Instruction>(VectorRoot))
3167 Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
3168 auto BundleWidth = VectorizableTree[0].Scalars.size();
3169 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
3170 auto *VecTy = VectorType::get(MinTy, BundleWidth);
3171 auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
3172 VectorizableTree[0].VectorizedValue = Trunc;
3173 }
3174
3175 DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values.\n");
3176
3177 // If necessary, sign-extend or zero-extend ScalarRoot to the larger type
3178 // specified by ScalarType.
3179 auto extend = [&](Value *ScalarRoot, Value *Ex, Type *ScalarType) {
3180 if (!MinBWs.count(ScalarRoot))
3181 return Ex;
3182 if (MinBWs[ScalarRoot].second)
3183 return Builder.CreateSExt(Ex, ScalarType);
3184 return Builder.CreateZExt(Ex, ScalarType);
3185 };
3186
3187 // Extract all of the elements with the external uses.
3188 for (const auto &ExternalUse : ExternalUses) {
3189 Value *Scalar = ExternalUse.Scalar;
3190 llvm::User *User = ExternalUse.User;
3191
3192 // Skip users that we have already RAUWed. This happens when one instruction
3193 // has multiple uses of the same value.
3194 if (User && !is_contained(Scalar->users(), User))
3195 continue;
3196 TreeEntry *E = getTreeEntry(Scalar);
3197 assert(E && "Invalid scalar");
3198 assert(!E->NeedToGather && "Extracting from a gather list");
3199
3200 Value *Vec = E->VectorizedValue;
3201 assert(Vec && "Can't find vectorizable value");
3202
3203 Value *Lane = Builder.getInt32(ExternalUse.Lane);
3204 // If User == nullptr, the Scalar is used as an extra argument. Generate
3205 // an ExtractElement instruction and update the record for this scalar in
3206 // ExternallyUsedValues.
3207 if (!User) {
3208 assert(ExternallyUsedValues.count(Scalar) &&
3209 "Scalar with nullptr as an external user must be registered in "
3210 "ExternallyUsedValues map");
3211 if (auto *VecI = dyn_cast<Instruction>(Vec)) {
3212 Builder.SetInsertPoint(VecI->getParent(),
3213 std::next(VecI->getIterator()));
3214 } else {
3215 Builder.SetInsertPoint(&F->getEntryBlock().front());
3216 }
3217 Value *Ex = Builder.CreateExtractElement(Vec, Lane);
3218 Ex = extend(ScalarRoot, Ex, Scalar->getType());
3219 CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
3220 auto &Locs = ExternallyUsedValues[Scalar];
3221 ExternallyUsedValues.insert({Ex, Locs});
3222 ExternallyUsedValues.erase(Scalar);
3223 continue;
3224 }
3225
3226 // Generate extracts for out-of-tree users.
3227 // Find the insertion point for the extractelement lane.
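// For example (illustrative IR), if lane 1 of a vectorized bundle is
// still needed by an out-of-tree user, an extract such as
//   %ex = extractelement <4 x i32> %vec, i32 1
// is emitted at a point that dominates the user, and the scalar use is
// rewritten to %ex.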
3228 if (auto *VecI = dyn_cast<Instruction>(Vec)) {
3229 if (PHINode *PH = dyn_cast<PHINode>(User)) {
3230 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
3231 if (PH->getIncomingValue(i) == Scalar) {
3232 TerminatorInst *IncomingTerminator =
3233 PH->getIncomingBlock(i)->getTerminator();
3234 if (isa<CatchSwitchInst>(IncomingTerminator)) {
3235 Builder.SetInsertPoint(VecI->getParent(),
3236 std::next(VecI->getIterator()));
3237 } else {
3238 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
3239 }
3240 Value *Ex = Builder.CreateExtractElement(Vec, Lane);
3241 Ex = extend(ScalarRoot, Ex, Scalar->getType());
3242 CSEBlocks.insert(PH->getIncomingBlock(i));
3243 PH->setOperand(i, Ex);
3244 }
3245 }
3246 } else {
3247 Builder.SetInsertPoint(cast<Instruction>(User));
3248 Value *Ex = Builder.CreateExtractElement(Vec, Lane);
3249 Ex = extend(ScalarRoot, Ex, Scalar->getType());
3250 CSEBlocks.insert(cast<Instruction>(User)->getParent());
3251 User->replaceUsesOfWith(Scalar, Ex);
3252 }
3253 } else {
3254 Builder.SetInsertPoint(&F->getEntryBlock().front());
3255 Value *Ex = Builder.CreateExtractElement(Vec, Lane);
3256 Ex = extend(ScalarRoot, Ex, Scalar->getType());
3257 CSEBlocks.insert(&F->getEntryBlock());
3258 User->replaceUsesOfWith(Scalar, Ex);
3259 }
3260
3261 DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
3262 }
3263
3264 // For each vectorized value:
3265 for (TreeEntry &EIdx : VectorizableTree) {
3266 TreeEntry *Entry = &EIdx;
3267
3268 // No need to handle users of gathered values.
3269 if (Entry->NeedToGather)
3270 continue;
3271
3272 assert(Entry->VectorizedValue && "Can't find vectorizable value");
3273
3274 // For each lane:
3275 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
3276 Value *Scalar = Entry->Scalars[Lane];
3277
3278 Type *Ty = Scalar->getType();
3279 if (!Ty->isVoidTy()) {
3280 #ifndef NDEBUG
3281 for (User *U : Scalar->users()) {
3282 DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
3283
3284 // It is legal to replace users in the ignorelist by undef.
3285 assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) &&
3286 "Replacing out-of-tree value with undef");
3287 }
3288 #endif
3289 Value *Undef = UndefValue::get(Ty);
3290 Scalar->replaceAllUsesWith(Undef);
3291 }
3292 DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
3293 eraseInstruction(cast<Instruction>(Scalar));
3294 }
3295 }
3296
3297 Builder.ClearInsertionPoint();
3298
3299 return VectorizableTree[0].VectorizedValue;
3300 }
3301
3302 void BoUpSLP::optimizeGatherSequence() {
3303 DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
3304 << " gather sequence instructions.\n");
3305 // LICM InsertElementInst sequences.
3306 for (Instruction *it : GatherSeq) {
3307 InsertElementInst *Insert = dyn_cast<InsertElementInst>(it);
3308
3309 if (!Insert)
3310 continue;
3311
3312 // Check if this block is inside a loop.
3313 Loop *L = LI->getLoopFor(Insert->getParent());
3314 if (!L)
3315 continue;
3316
3317 // Check if it has a preheader.
3318 BasicBlock *PreHeader = L->getLoopPreheader();
3319 if (!PreHeader)
3320 continue;
3321
3322 // If the vector we insert into, or the element being inserted, is an
3323 // instruction defined inside the loop, then we can't hoist the
3324 // insertelement out of it.
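// For example (illustrative IR), a gather sequence built from values
// that are available before the loop,
//   %v0 = insertelement <2 x float> undef, float %a, i32 0
//   %v1 = insertelement <2 x float> %v0, float %b, i32 1
// only needs to execute once and can be moved to the preheader.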
3325 Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0)); 3326 Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1)); 3327 if (CurrVec && L->contains(CurrVec)) 3328 continue; 3329 if (NewElem && L->contains(NewElem)) 3330 continue; 3331 3332 // We can hoist this instruction. Move it to the pre-header. 3333 Insert->moveBefore(PreHeader->getTerminator()); 3334 } 3335 3336 // Make a list of all reachable blocks in our CSE queue. 3337 SmallVector<const DomTreeNode *, 8> CSEWorkList; 3338 CSEWorkList.reserve(CSEBlocks.size()); 3339 for (BasicBlock *BB : CSEBlocks) 3340 if (DomTreeNode *N = DT->getNode(BB)) { 3341 assert(DT->isReachableFromEntry(N)); 3342 CSEWorkList.push_back(N); 3343 } 3344 3345 // Sort blocks by domination. This ensures we visit a block after all blocks 3346 // dominating it are visited. 3347 std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(), 3348 [this](const DomTreeNode *A, const DomTreeNode *B) { 3349 return DT->properlyDominates(A, B); 3350 }); 3351 3352 // Perform O(N^2) search over the gather sequences and merge identical 3353 // instructions. TODO: We can further optimize this scan if we split the 3354 // instructions into different buckets based on the insert lane. 3355 SmallVector<Instruction *, 16> Visited; 3356 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 3357 assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 3358 "Worklist not sorted properly!"); 3359 BasicBlock *BB = (*I)->getBlock(); 3360 // For all instructions in blocks containing gather sequences: 3361 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) { 3362 Instruction *In = &*it++; 3363 if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In)) 3364 continue; 3365 3366 // Check if we can replace this instruction with any of the 3367 // visited instructions. 3368 for (Instruction *v : Visited) { 3369 if (In->isIdenticalTo(v) && 3370 DT->dominates(v->getParent(), In->getParent())) { 3371 In->replaceAllUsesWith(v); 3372 eraseInstruction(In); 3373 In = nullptr; 3374 break; 3375 } 3376 } 3377 if (In) { 3378 assert(!is_contained(Visited, In)); 3379 Visited.push_back(In); 3380 } 3381 } 3382 } 3383 CSEBlocks.clear(); 3384 GatherSeq.clear(); 3385 } 3386 3387 // Groups the instructions to a bundle (which is then a single scheduling entity) 3388 // and schedules instructions until the bundle gets ready. 3389 bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, 3390 BoUpSLP *SLP, Value *OpValue) { 3391 if (isa<PHINode>(OpValue)) 3392 return true; 3393 3394 // Initialize the instruction bundle. 3395 Instruction *OldScheduleEnd = ScheduleEnd; 3396 ScheduleData *PrevInBundle = nullptr; 3397 ScheduleData *Bundle = nullptr; 3398 bool ReSchedule = false; 3399 DEBUG(dbgs() << "SLP: bundle: " << *OpValue << "\n"); 3400 3401 // Make sure that the scheduling region contains all 3402 // instructions of the bundle. 3403 for (Value *V : VL) { 3404 if (!extendSchedulingRegion(V, OpValue)) 3405 return false; 3406 } 3407 3408 for (Value *V : VL) { 3409 ScheduleData *BundleMember = getScheduleData(V); 3410 assert(BundleMember && 3411 "no ScheduleData for bundle member (maybe not in same basic block)"); 3412 if (BundleMember->IsScheduled) { 3413 // A bundle member was scheduled as single instruction before and now 3414 // needs to be scheduled as part of the bundle. We just get rid of the 3415 // existing schedule. 
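// (Re-scheduling resets every ScheduleData in the region and refills
// the ready list below, so the previously scheduled instruction is
// treated as unscheduled again once it becomes part of this bundle.)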
3416 DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
3417 << " was already scheduled\n");
3418 ReSchedule = true;
3419 }
3420 assert(BundleMember->isSchedulingEntity() &&
3421 "bundle member already part of other bundle");
3422 if (PrevInBundle) {
3423 PrevInBundle->NextInBundle = BundleMember;
3424 } else {
3425 Bundle = BundleMember;
3426 }
3427 BundleMember->UnscheduledDepsInBundle = 0;
3428 Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;
3429
3430 // Group the instructions to a bundle.
3431 BundleMember->FirstInBundle = Bundle;
3432 PrevInBundle = BundleMember;
3433 }
3434 if (ScheduleEnd != OldScheduleEnd) {
3435 // The scheduling region got new instructions at the lower end (or it is a
3436 // new region for the first bundle). This makes it necessary to
3437 // recalculate all dependencies.
3438 // It is seldom that this needs to be done a second time after adding the
3439 // initial bundle to the region.
3440 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
3441 doForAllOpcodes(I, [](ScheduleData *SD) {
3442 SD->clearDependencies();
3443 });
3444 }
3445 ReSchedule = true;
3446 }
3447 if (ReSchedule) {
3448 resetSchedule();
3449 initialFillReadyList(ReadyInsts);
3450 }
3451
3452 DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
3453 << BB->getName() << "\n");
3454
3455 calculateDependencies(Bundle, true, SLP);
3456
3457 // Now try to schedule the new bundle. As soon as the bundle is "ready" it
3458 // means that there are no cyclic dependencies and we can schedule it.
3459 // Note that it's important that we don't "schedule" the bundle yet (see
3460 // cancelScheduling).
3461 while (!Bundle->isReady() && !ReadyInsts.empty()) {
3462
3463 ScheduleData *pickedSD = ReadyInsts.back();
3464 ReadyInsts.pop_back();
3465
3466 if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) {
3467 schedule(pickedSD, ReadyInsts);
3468 }
3469 }
3470 if (!Bundle->isReady()) {
3471 cancelScheduling(VL, OpValue);
3472 return false;
3473 }
3474 return true;
3475 }
3476
3477 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
3478 Value *OpValue) {
3479 if (isa<PHINode>(OpValue))
3480 return;
3481
3482 ScheduleData *Bundle = getScheduleData(OpValue);
3483 DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
3484 assert(!Bundle->IsScheduled &&
3485 "Can't cancel bundle which is already scheduled");
3486 assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
3487 "tried to unbundle something which is not a bundle");
3488
3489 // Un-bundle: make single instructions out of the bundle.
3490 ScheduleData *BundleMember = Bundle;
3491 while (BundleMember) {
3492 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
3493 BundleMember->FirstInBundle = BundleMember;
3494 ScheduleData *Next = BundleMember->NextInBundle;
3495 BundleMember->NextInBundle = nullptr;
3496 BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
3497 if (BundleMember->UnscheduledDepsInBundle == 0) {
3498 ReadyInsts.insert(BundleMember);
3499 }
3500 BundleMember = Next;
3501 }
3502 }
3503
3504 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
3505 // Allocate a new ScheduleData for the instruction.
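// ScheduleData objects are handed out from fixed-size arrays so that
// pointers returned earlier stay valid as more data is allocated;
// growing a single flat vector instead could reallocate and invalidate
// them.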
3506 if (ChunkPos >= ChunkSize) { 3507 ScheduleDataChunks.push_back(llvm::make_unique<ScheduleData[]>(ChunkSize)); 3508 ChunkPos = 0; 3509 } 3510 return &(ScheduleDataChunks.back()[ChunkPos++]); 3511 } 3512 3513 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, 3514 Value *OpValue) { 3515 if (getScheduleData(V, isOneOf(OpValue, V))) 3516 return true; 3517 Instruction *I = dyn_cast<Instruction>(V); 3518 assert(I && "bundle member must be an instruction"); 3519 assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled"); 3520 auto &&CheckSheduleForI = [this, OpValue](Instruction *I) -> bool { 3521 ScheduleData *ISD = getScheduleData(I); 3522 if (!ISD) 3523 return false; 3524 assert(isInSchedulingRegion(ISD) && 3525 "ScheduleData not in scheduling region"); 3526 ScheduleData *SD = allocateScheduleDataChunks(); 3527 SD->Inst = I; 3528 SD->init(SchedulingRegionID, OpValue); 3529 ExtraScheduleDataMap[I][OpValue] = SD; 3530 return true; 3531 }; 3532 if (CheckSheduleForI(I)) 3533 return true; 3534 if (!ScheduleStart) { 3535 // It's the first instruction in the new region. 3536 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 3537 ScheduleStart = I; 3538 ScheduleEnd = I->getNextNode(); 3539 if (isOneOf(OpValue, I) != I) 3540 CheckSheduleForI(I); 3541 assert(ScheduleEnd && "tried to vectorize a TerminatorInst?"); 3542 DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 3543 return true; 3544 } 3545 // Search up and down at the same time, because we don't know if the new 3546 // instruction is above or below the existing scheduling region. 3547 BasicBlock::reverse_iterator UpIter = 3548 ++ScheduleStart->getIterator().getReverse(); 3549 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 3550 BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); 3551 BasicBlock::iterator LowerEnd = BB->end(); 3552 while (true) { 3553 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 3554 DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 3555 return false; 3556 } 3557 3558 if (UpIter != UpperEnd) { 3559 if (&*UpIter == I) { 3560 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 3561 ScheduleStart = I; 3562 if (isOneOf(OpValue, I) != I) 3563 CheckSheduleForI(I); 3564 DEBUG(dbgs() << "SLP: extend schedule region start to " << *I << "\n"); 3565 return true; 3566 } 3567 UpIter++; 3568 } 3569 if (DownIter != LowerEnd) { 3570 if (&*DownIter == I) { 3571 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 3572 nullptr); 3573 ScheduleEnd = I->getNextNode(); 3574 if (isOneOf(OpValue, I) != I) 3575 CheckSheduleForI(I); 3576 assert(ScheduleEnd && "tried to vectorize a TerminatorInst?"); 3577 DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); 3578 return true; 3579 } 3580 DownIter++; 3581 } 3582 assert((UpIter != UpperEnd || DownIter != LowerEnd) && 3583 "instruction not found in block"); 3584 } 3585 return true; 3586 } 3587 3588 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 3589 Instruction *ToI, 3590 ScheduleData *PrevLoadStore, 3591 ScheduleData *NextLoadStore) { 3592 ScheduleData *CurrentLoadStore = PrevLoadStore; 3593 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 3594 ScheduleData *SD = ScheduleDataMap[I]; 3595 if (!SD) { 3596 SD = allocateScheduleDataChunks(); 3597 ScheduleDataMap[I] = SD; 3598 SD->Inst = I; 3599 } 3600 assert(!isInSchedulingRegion(SD) && 3601 "new ScheduleData already in scheduling region"); 3602 SD->init(SchedulingRegionID, I); 3603 3604 if 
(I->mayReadOrWriteMemory()) { 3605 // Update the linked list of memory accessing instructions. 3606 if (CurrentLoadStore) { 3607 CurrentLoadStore->NextLoadStore = SD; 3608 } else { 3609 FirstLoadStoreInRegion = SD; 3610 } 3611 CurrentLoadStore = SD; 3612 } 3613 } 3614 if (NextLoadStore) { 3615 if (CurrentLoadStore) 3616 CurrentLoadStore->NextLoadStore = NextLoadStore; 3617 } else { 3618 LastLoadStoreInRegion = CurrentLoadStore; 3619 } 3620 } 3621 3622 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, 3623 bool InsertInReadyList, 3624 BoUpSLP *SLP) { 3625 assert(SD->isSchedulingEntity()); 3626 3627 SmallVector<ScheduleData *, 10> WorkList; 3628 WorkList.push_back(SD); 3629 3630 while (!WorkList.empty()) { 3631 ScheduleData *SD = WorkList.back(); 3632 WorkList.pop_back(); 3633 3634 ScheduleData *BundleMember = SD; 3635 while (BundleMember) { 3636 assert(isInSchedulingRegion(BundleMember)); 3637 if (!BundleMember->hasValidDependencies()) { 3638 3639 DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n"); 3640 BundleMember->Dependencies = 0; 3641 BundleMember->resetUnscheduledDeps(); 3642 3643 // Handle def-use chain dependencies. 3644 if (BundleMember->OpValue != BundleMember->Inst) { 3645 ScheduleData *UseSD = getScheduleData(BundleMember->Inst); 3646 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 3647 BundleMember->Dependencies++; 3648 ScheduleData *DestBundle = UseSD->FirstInBundle; 3649 if (!DestBundle->IsScheduled) 3650 BundleMember->incrementUnscheduledDeps(1); 3651 if (!DestBundle->hasValidDependencies()) 3652 WorkList.push_back(DestBundle); 3653 } 3654 } else { 3655 for (User *U : BundleMember->Inst->users()) { 3656 if (isa<Instruction>(U)) { 3657 ScheduleData *UseSD = getScheduleData(U); 3658 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 3659 BundleMember->Dependencies++; 3660 ScheduleData *DestBundle = UseSD->FirstInBundle; 3661 if (!DestBundle->IsScheduled) 3662 BundleMember->incrementUnscheduledDeps(1); 3663 if (!DestBundle->hasValidDependencies()) 3664 WorkList.push_back(DestBundle); 3665 } 3666 } else { 3667 // I'm not sure if this can ever happen. But we need to be safe. 3668 // This lets the instruction/bundle never be scheduled and 3669 // eventually disable vectorization. 3670 BundleMember->Dependencies++; 3671 BundleMember->incrementUnscheduledDeps(1); 3672 } 3673 } 3674 } 3675 3676 // Handle the memory dependencies. 3677 ScheduleData *DepDest = BundleMember->NextLoadStore; 3678 if (DepDest) { 3679 Instruction *SrcInst = BundleMember->Inst; 3680 MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA); 3681 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory(); 3682 unsigned numAliased = 0; 3683 unsigned DistToSrc = 1; 3684 3685 while (DepDest) { 3686 assert(isInSchedulingRegion(DepDest)); 3687 3688 // We have two limits to reduce the complexity: 3689 // 1) AliasedCheckLimit: It's a small limit to reduce calls to 3690 // SLP->isAliased (which is the expensive part in this loop). 3691 // 2) MaxMemDepDistance: It's for very large blocks and it aborts 3692 // the whole loop (even if the loop is fast, it's quadratic). 3693 // It's important for the loop break condition (see below) to 3694 // check this limit even between two read-only instructions. 
3695 if (DistToSrc >= MaxMemDepDistance ||
3696 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
3697 (numAliased >= AliasedCheckLimit ||
3698 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
3699
3700 // We increment the counter only if the locations are aliased
3701 // (instead of counting all alias checks). This gives a better
3702 // balance between reduced runtime and accurate dependencies.
3703 numAliased++;
3704
3705 DepDest->MemoryDependencies.push_back(BundleMember);
3706 BundleMember->Dependencies++;
3707 ScheduleData *DestBundle = DepDest->FirstInBundle;
3708 if (!DestBundle->IsScheduled) {
3709 BundleMember->incrementUnscheduledDeps(1);
3710 }
3711 if (!DestBundle->hasValidDependencies()) {
3712 WorkList.push_back(DestBundle);
3713 }
3714 }
3715 DepDest = DepDest->NextLoadStore;
3716
3717 // An example explaining the loop break condition: Let's assume our
3718 // starting instruction is i0 and MaxMemDepDistance = 3.
3719 //
3720 // +--------v--v--v
3721 // i0,i1,i2,i3,i4,i5,i6,i7,i8
3722 // +--------^--^--^
3723 //
3724 // MaxMemDepDistance lets us stop alias-checking at i3 and we add
3725 // dependencies from i0 to i3,i4,.. (even if they are not aliased).
3726 // Previously we already added dependencies from i3 to i6,i7,i8
3727 // (because of MaxMemDepDistance). As we added a dependency from
3728 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
3729 // and we can abort this loop at i6.
3730 if (DistToSrc >= 2 * MaxMemDepDistance)
3731 break;
3732 DistToSrc++;
3733 }
3734 }
3735 }
3736 BundleMember = BundleMember->NextInBundle;
3737 }
3738 if (InsertInReadyList && SD->isReady()) {
3739 ReadyInsts.push_back(SD);
3740 DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n");
3741 }
3742 }
3743 }
3744
3745 void BoUpSLP::BlockScheduling::resetSchedule() {
3746 assert(ScheduleStart &&
3747 "tried to reset schedule on block which has not been scheduled");
3748 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
3749 doForAllOpcodes(I, [&](ScheduleData *SD) {
3750 assert(isInSchedulingRegion(SD) &&
3751 "ScheduleData not in scheduling region");
3752 SD->IsScheduled = false;
3753 SD->resetUnscheduledDeps();
3754 });
3755 }
3756 ReadyInsts.clear();
3757 }
3758
3759 void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
3760 if (!BS->ScheduleStart)
3761 return;
3762
3763 DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
3764
3765 BS->resetSchedule();
3766
3767 // For the real scheduling we use a more sophisticated ready-list: it is
3768 // sorted by the original instruction location. This lets the final schedule
3769 // be as close as possible to the original instruction order.
3770 struct ScheduleDataCompare {
3771 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
3772 return SD2->SchedulingPriority < SD1->SchedulingPriority;
3773 }
3774 };
3775 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
3776
3777 // Ensure that all dependency data is updated and fill the ready-list with
3778 // initial instructions.
3779 int Idx = 0;
3780 int NumToSchedule = 0;
3781 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
3782 I = I->getNextNode()) {
3783 BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) {
3784 assert(SD->isPartOfBundle() ==
3785 (getTreeEntry(SD->Inst) != nullptr) &&
3786 "scheduler and vectorizer bundle mismatch");
3787 SD->FirstInBundle->SchedulingPriority = Idx++;
3788 if (SD->isSchedulingEntity()) {
3789 BS->calculateDependencies(SD, false, this);
3790 NumToSchedule++;
3791 }
3792 });
3793 }
3794 BS->initialFillReadyList(ReadyInsts);
3795
3796 Instruction *LastScheduledInst = BS->ScheduleEnd;
3797
3798 // Do the "real" scheduling.
3799 while (!ReadyInsts.empty()) {
3800 ScheduleData *picked = *ReadyInsts.begin();
3801 ReadyInsts.erase(ReadyInsts.begin());
3802
3803 // Move the scheduled instruction(s) to their dedicated places, if not
3804 // there yet.
3805 ScheduleData *BundleMember = picked;
3806 while (BundleMember) {
3807 Instruction *pickedInst = BundleMember->Inst;
3808 if (LastScheduledInst->getNextNode() != pickedInst) {
3809 BS->BB->getInstList().remove(pickedInst);
3810 BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
3811 pickedInst);
3812 }
3813 LastScheduledInst = pickedInst;
3814 BundleMember = BundleMember->NextInBundle;
3815 }
3816
3817 BS->schedule(picked, ReadyInsts);
3818 NumToSchedule--;
3819 }
3820 assert(NumToSchedule == 0 && "could not schedule all instructions");
3821
3822 // Avoid duplicate scheduling of the block.
3823 BS->ScheduleStart = nullptr;
3824 }
3825
3826 unsigned BoUpSLP::getVectorElementSize(Value *V) {
3827 // If V is a store, just return the width of the stored value without
3828 // traversing the expression tree. This is the common case.
3829 if (auto *Store = dyn_cast<StoreInst>(V))
3830 return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
3831
3832 // If V is not a store, we can traverse the expression tree to find loads
3833 // that feed it. The type of the loaded value may indicate a more suitable
3834 // width than V's type. We want to base the vector element size on the width
3835 // of memory operations where possible.
3836 SmallVector<Instruction *, 16> Worklist;
3837 SmallPtrSet<Instruction *, 16> Visited;
3838 if (auto *I = dyn_cast<Instruction>(V))
3839 Worklist.push_back(I);
3840
3841 // Traverse the expression tree in bottom-up order looking for loads. If we
3842 // encounter an instruction we don't yet handle, we give up.
3843 auto MaxWidth = 0u;
3844 auto FoundUnknownInst = false;
3845 while (!Worklist.empty() && !FoundUnknownInst) {
3846 auto *I = Worklist.pop_back_val();
3847 Visited.insert(I);
3848
3849 // We should only be looking at scalar instructions here. If the current
3850 // instruction has a vector type, give up.
3851 auto *Ty = I->getType();
3852 if (isa<VectorType>(Ty))
3853 FoundUnknownInst = true;
3854
3855 // If the current instruction is a load, update MaxWidth to reflect the
3856 // width of the loaded value.
3857 else if (isa<LoadInst>(I))
3858 MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty));
3859
3860 // Otherwise, we need to visit the operands of the instruction. We only
3861 // handle the interesting cases from buildTree here. If an operand is an
3862 // instruction we haven't yet visited, we add it to the worklist.
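// For instance (illustrative IR), starting from
//   %w = sext i16 %l to i64
// where %l is "load i16, i16* %p", the walk reaches the load and the
// function returns 16 rather than 64, allowing a wider VF.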
3863 else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 3864 isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) { 3865 for (Use &U : I->operands()) 3866 if (auto *J = dyn_cast<Instruction>(U.get())) 3867 if (!Visited.count(J)) 3868 Worklist.push_back(J); 3869 } 3870 3871 // If we don't yet handle the instruction, give up. 3872 else 3873 FoundUnknownInst = true; 3874 } 3875 3876 // If we didn't encounter a memory access in the expression tree, or if we 3877 // gave up for some reason, just return the width of V. 3878 if (!MaxWidth || FoundUnknownInst) 3879 return DL->getTypeSizeInBits(V->getType()); 3880 3881 // Otherwise, return the maximum width we found. 3882 return MaxWidth; 3883 } 3884 3885 // Determine if a value V in a vectorizable expression Expr can be demoted to a 3886 // smaller type with a truncation. We collect the values that will be demoted 3887 // in ToDemote and additional roots that require investigating in Roots. 3888 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr, 3889 SmallVectorImpl<Value *> &ToDemote, 3890 SmallVectorImpl<Value *> &Roots) { 3891 // We can always demote constants. 3892 if (isa<Constant>(V)) { 3893 ToDemote.push_back(V); 3894 return true; 3895 } 3896 3897 // If the value is not an instruction in the expression with only one use, it 3898 // cannot be demoted. 3899 auto *I = dyn_cast<Instruction>(V); 3900 if (!I || !I->hasOneUse() || !Expr.count(I)) 3901 return false; 3902 3903 switch (I->getOpcode()) { 3904 3905 // We can always demote truncations and extensions. Since truncations can 3906 // seed additional demotion, we save the truncated value. 3907 case Instruction::Trunc: 3908 Roots.push_back(I->getOperand(0)); 3909 case Instruction::ZExt: 3910 case Instruction::SExt: 3911 break; 3912 3913 // We can demote certain binary operations if we can demote both of their 3914 // operands. 3915 case Instruction::Add: 3916 case Instruction::Sub: 3917 case Instruction::Mul: 3918 case Instruction::And: 3919 case Instruction::Or: 3920 case Instruction::Xor: 3921 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) || 3922 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots)) 3923 return false; 3924 break; 3925 3926 // We can demote selects if we can demote their true and false values. 3927 case Instruction::Select: { 3928 SelectInst *SI = cast<SelectInst>(I); 3929 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) || 3930 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots)) 3931 return false; 3932 break; 3933 } 3934 3935 // We can demote phis if we can demote all their incoming operands. Note that 3936 // we don't need to worry about cycles since we ensure single use above. 3937 case Instruction::PHI: { 3938 PHINode *PN = cast<PHINode>(I); 3939 for (Value *IncValue : PN->incoming_values()) 3940 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots)) 3941 return false; 3942 break; 3943 } 3944 3945 // Otherwise, conservatively give up. 3946 default: 3947 return false; 3948 } 3949 3950 // Record the value that we can demote. 3951 ToDemote.push_back(V); 3952 return true; 3953 } 3954 3955 void BoUpSLP::computeMinimumValueSizes() { 3956 // If there are no external uses, the expression tree must be rooted by a 3957 // store. We can't demote in-memory values, so there is nothing to do here. 3958 if (ExternalUses.empty()) 3959 return; 3960 3961 // We only attempt to truncate integer expressions. 
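// For example (illustrative IR), in a tree like
//   %z = zext i8 %a to i32
//   %s = add i32 %z, 17
//   %t = trunc i32 %s to i8
// the add only needs 8 bits of precision, so the bundle can be
// vectorized on i8 lanes and the extracted roots extended back to i32.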
3962 auto &TreeRoot = VectorizableTree[0].Scalars; 3963 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 3964 if (!TreeRootIT) 3965 return; 3966 3967 // If the expression is not rooted by a store, these roots should have 3968 // external uses. We will rely on InstCombine to rewrite the expression in 3969 // the narrower type. However, InstCombine only rewrites single-use values. 3970 // This means that if a tree entry other than a root is used externally, it 3971 // must have multiple uses and InstCombine will not rewrite it. The code 3972 // below ensures that only the roots are used externally. 3973 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end()); 3974 for (auto &EU : ExternalUses) 3975 if (!Expr.erase(EU.Scalar)) 3976 return; 3977 if (!Expr.empty()) 3978 return; 3979 3980 // Collect the scalar values of the vectorizable expression. We will use this 3981 // context to determine which values can be demoted. If we see a truncation, 3982 // we mark it as seeding another demotion. 3983 for (auto &Entry : VectorizableTree) 3984 Expr.insert(Entry.Scalars.begin(), Entry.Scalars.end()); 3985 3986 // Ensure the roots of the vectorizable tree don't form a cycle. They must 3987 // have a single external user that is not in the vectorizable tree. 3988 for (auto *Root : TreeRoot) 3989 if (!Root->hasOneUse() || Expr.count(*Root->user_begin())) 3990 return; 3991 3992 // Conservatively determine if we can actually truncate the roots of the 3993 // expression. Collect the values that can be demoted in ToDemote and 3994 // additional roots that require investigating in Roots. 3995 SmallVector<Value *, 32> ToDemote; 3996 SmallVector<Value *, 4> Roots; 3997 for (auto *Root : TreeRoot) 3998 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots)) 3999 return; 4000 4001 // The maximum bit width required to represent all the values that can be 4002 // demoted without loss of precision. It would be safe to truncate the roots 4003 // of the expression to this width. 4004 auto MaxBitWidth = 8u; 4005 4006 // We first check if all the bits of the roots are demanded. If they're not, 4007 // we can truncate the roots to this narrower type. 4008 for (auto *Root : TreeRoot) { 4009 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 4010 MaxBitWidth = std::max<unsigned>( 4011 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth); 4012 } 4013 4014 // True if the roots can be zero-extended back to their original type, rather 4015 // than sign-extended. We know that if the leading bits are not demanded, we 4016 // can safely zero-extend. So we initialize IsKnownPositive to True. 4017 bool IsKnownPositive = true; 4018 4019 // If all the bits of the roots are demanded, we can try a little harder to 4020 // compute a narrower type. This can happen, for example, if the roots are 4021 // getelementptr indices. InstCombine promotes these indices to the pointer 4022 // width. Thus, all their bits are technically demanded even though the 4023 // address computation might be vectorized in a smaller type. 4024 // 4025 // We start by looking at each entry that can be demoted. We compute the 4026 // maximum bit width required to store the scalar by using ValueTracking to 4027 // compute the number of high-order bits we can truncate. 4028 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType())) { 4029 MaxBitWidth = 8u; 4030 4031 // Determine if the sign bit of all the roots is known to be zero. If not, 4032 // IsKnownPositive is set to False. 
4033 IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
4034 KnownBits Known = computeKnownBits(R, *DL);
4035 return Known.isNonNegative();
4036 });
4037
4038 // Determine the maximum number of bits required to store the scalar
4039 // values.
4040 for (auto *Scalar : ToDemote) {
4041 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
4042 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
4043 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
4044 }
4045
4046 // If we can't prove that the sign bit is zero, we must add one to the
4047 // maximum bit width to account for the unknown sign bit. This preserves
4048 // the existing sign bit so we can safely sign-extend the root back to the
4049 // original type. Otherwise, if we know the sign bit is zero, we will
4050 // zero-extend the root instead.
4051 //
4052 // FIXME: This is somewhat suboptimal, as there will be cases where adding
4053 // one to the maximum bit width will yield a larger-than-necessary
4054 // type. In general, we need to add an extra bit only if we can't
4055 // prove that the upper bit of the original type is equal to the
4056 // upper bit of the proposed smaller type. If these two bits are the
4057 // same (either zero or one) we know that sign-extending from the
4058 // smaller type will result in the same value. Here, since we can't
4059 // yet prove this, we are just making the proposed smaller type
4060 // larger to ensure correctness.
4061 if (!IsKnownPositive)
4062 ++MaxBitWidth;
4063 }
4064
4065 // Round MaxBitWidth up to the next power-of-two.
4066 if (!isPowerOf2_64(MaxBitWidth))
4067 MaxBitWidth = NextPowerOf2(MaxBitWidth);
4068
4069 // If the maximum bit width we compute is less than the width of the roots'
4070 // type, we can proceed with the narrowing. Otherwise, do nothing.
4071 if (MaxBitWidth >= TreeRootIT->getBitWidth())
4072 return;
4073
4074 // If we can truncate the root, we must collect additional values that might
4075 // be demoted as a result. That is, those seeded by truncations we will
4076 // modify.
4077 while (!Roots.empty())
4078 collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
4079
4080 // Finally, map the values we can demote to the maximum bit width we computed.
4081 for (auto *Scalar : ToDemote)
4082 MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
4083 }
4084
4085 namespace {
4086
4087 /// The SLPVectorizer Pass.
4088 struct SLPVectorizer : public FunctionPass {
4089 SLPVectorizerPass Impl;
4090
4091 /// Pass identification, replacement for typeid
4092 static char ID;
4093
4094 explicit SLPVectorizer() : FunctionPass(ID) {
4095 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
4096 }
4097
4098 bool doInitialization(Module &M) override {
4099 return false;
4100 }
4101
4102 bool runOnFunction(Function &F) override {
4103 if (skipFunction(F))
4104 return false;
4105
4106 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
4107 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
4108 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
4109 auto *TLI = TLIP ?
&TLIP->getTLI() : nullptr; 4110 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 4111 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 4112 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 4113 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 4114 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 4115 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 4116 4117 return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 4118 } 4119 4120 void getAnalysisUsage(AnalysisUsage &AU) const override { 4121 FunctionPass::getAnalysisUsage(AU); 4122 AU.addRequired<AssumptionCacheTracker>(); 4123 AU.addRequired<ScalarEvolutionWrapperPass>(); 4124 AU.addRequired<AAResultsWrapperPass>(); 4125 AU.addRequired<TargetTransformInfoWrapperPass>(); 4126 AU.addRequired<LoopInfoWrapperPass>(); 4127 AU.addRequired<DominatorTreeWrapperPass>(); 4128 AU.addRequired<DemandedBitsWrapperPass>(); 4129 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 4130 AU.addPreserved<LoopInfoWrapperPass>(); 4131 AU.addPreserved<DominatorTreeWrapperPass>(); 4132 AU.addPreserved<AAResultsWrapperPass>(); 4133 AU.addPreserved<GlobalsAAWrapperPass>(); 4134 AU.setPreservesCFG(); 4135 } 4136 }; 4137 4138 } // end anonymous namespace 4139 4140 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) { 4141 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); 4142 auto *TTI = &AM.getResult<TargetIRAnalysis>(F); 4143 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); 4144 auto *AA = &AM.getResult<AAManager>(F); 4145 auto *LI = &AM.getResult<LoopAnalysis>(F); 4146 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 4147 auto *AC = &AM.getResult<AssumptionAnalysis>(F); 4148 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F); 4149 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 4150 4151 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 4152 if (!Changed) 4153 return PreservedAnalyses::all(); 4154 4155 PreservedAnalyses PA; 4156 PA.preserveSet<CFGAnalyses>(); 4157 PA.preserve<AAManager>(); 4158 PA.preserve<GlobalsAA>(); 4159 return PA; 4160 } 4161 4162 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, 4163 TargetTransformInfo *TTI_, 4164 TargetLibraryInfo *TLI_, AliasAnalysis *AA_, 4165 LoopInfo *LI_, DominatorTree *DT_, 4166 AssumptionCache *AC_, DemandedBits *DB_, 4167 OptimizationRemarkEmitter *ORE_) { 4168 SE = SE_; 4169 TTI = TTI_; 4170 TLI = TLI_; 4171 AA = AA_; 4172 LI = LI_; 4173 DT = DT_; 4174 AC = AC_; 4175 DB = DB_; 4176 DL = &F.getParent()->getDataLayout(); 4177 4178 Stores.clear(); 4179 GEPs.clear(); 4180 bool Changed = false; 4181 4182 // If the target claims to have no vector registers don't attempt 4183 // vectorization. 4184 if (!TTI->getNumberOfRegisters(true)) 4185 return false; 4186 4187 // Don't vectorize when the attribute NoImplicitFloat is used. 4188 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 4189 return false; 4190 4191 DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 4192 4193 // Use the bottom up slp vectorizer to construct chains that start with 4194 // store instructions. 4195 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_); 4196 4197 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 4198 // delete instructions. 4199 4200 // Scan the blocks in the function in post order. 
4201 for (auto BB : post_order(&F.getEntryBlock())) { 4202 collectSeedInstructions(BB); 4203 4204 // Vectorize trees that end at stores. 4205 if (!Stores.empty()) { 4206 DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() 4207 << " underlying objects.\n"); 4208 Changed |= vectorizeStoreChains(R); 4209 } 4210 4211 // Vectorize trees that end at reductions. 4212 Changed |= vectorizeChainsInBlock(BB, R); 4213 4214 // Vectorize the index computations of getelementptr instructions. This 4215 // is primarily intended to catch gather-like idioms ending at 4216 // non-consecutive loads. 4217 if (!GEPs.empty()) { 4218 DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() 4219 << " underlying objects.\n"); 4220 Changed |= vectorizeGEPIndices(BB, R); 4221 } 4222 } 4223 4224 if (Changed) { 4225 R.optimizeGatherSequence(); 4226 DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 4227 DEBUG(verifyFunction(F)); 4228 } 4229 return Changed; 4230 } 4231 4232 /// \brief Check that the Values in the slice in VL array are still existent in 4233 /// the WeakTrackingVH array. 4234 /// Vectorization of part of the VL array may cause later values in the VL array 4235 /// to become invalid. We track when this has happened in the WeakTrackingVH 4236 /// array. 4237 static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, 4238 ArrayRef<WeakTrackingVH> VH, unsigned SliceBegin, 4239 unsigned SliceSize) { 4240 VL = VL.slice(SliceBegin, SliceSize); 4241 VH = VH.slice(SliceBegin, SliceSize); 4242 return !std::equal(VL.begin(), VL.end(), VH.begin()); 4243 } 4244 4245 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, 4246 unsigned VecRegSize) { 4247 unsigned ChainLen = Chain.size(); 4248 DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen 4249 << "\n"); 4250 unsigned Sz = R.getVectorElementSize(Chain[0]); 4251 unsigned VF = VecRegSize / Sz; 4252 4253 if (!isPowerOf2_32(Sz) || VF < 2) 4254 return false; 4255 4256 // Keep track of values that were deleted by vectorizing in the loop below. 4257 SmallVector<WeakTrackingVH, 8> TrackValues(Chain.begin(), Chain.end()); 4258 4259 bool Changed = false; 4260 // Look for profitable vectorizable trees at all offsets, starting at zero. 4261 for (unsigned i = 0, e = ChainLen; i < e; ++i) { 4262 if (i + VF > e) 4263 break; 4264 4265 // Check that a previous iteration of this loop did not delete the Value. 4266 if (hasValueBeenRAUWed(Chain, TrackValues, i, VF)) 4267 continue; 4268 4269 DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i 4270 << "\n"); 4271 ArrayRef<Value *> Operands = Chain.slice(i, VF); 4272 4273 R.buildTree(Operands); 4274 if (R.isTreeTinyAndNotFullyVectorizable()) 4275 continue; 4276 4277 R.computeMinimumValueSizes(); 4278 4279 int Cost = R.getTreeCost(); 4280 4281 DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n"); 4282 if (Cost < -SLPCostThreshold) { 4283 DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n"); 4284 4285 using namespace ore; 4286 4287 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized", 4288 cast<StoreInst>(Chain[i])) 4289 << "Stores SLP vectorized with cost " << NV("Cost", Cost) 4290 << " and with tree size " 4291 << NV("TreeSize", R.getTreeSize())); 4292 4293 R.vectorizeTree(); 4294 4295 // Move to the next bundle. 
4296 i += VF - 1;
4297 Changed = true;
4298 }
4299 }
4300
4301 return Changed;
4302 }
4303
4304 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
4305 BoUpSLP &R) {
4306 SetVector<StoreInst *> Heads, Tails;
4307 SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
4308
4309 // We may run into multiple chains that merge into a single chain. We mark the
4310 // stores that we vectorized so that we don't visit the same store twice.
4311 BoUpSLP::ValueSet VectorizedStores;
4312 bool Changed = false;
4313
4314 // Do a quadratic search on all of the given stores and find
4315 // all of the pairs of stores that follow each other.
4316 SmallVector<unsigned, 16> IndexQueue;
4317 for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
4318 IndexQueue.clear();
4319 // If a store has multiple consecutive store candidates, search the Stores
4320 // array according to the sequence: from i+1 to e, then from i-1 to 0.
4321 // This is because pairing with the immediately succeeding or preceding
4322 // candidate usually creates the best chance of finding an SLP
4323 // vectorization opportunity.
4324 unsigned j = 0;
4325 for (j = i + 1; j < e; ++j)
4326 IndexQueue.push_back(j);
4327 for (j = i; j > 0; --j)
4328 IndexQueue.push_back(j - 1);
4329
4330 for (auto &k : IndexQueue) {
4331 if (isConsecutiveAccess(Stores[i], Stores[k], *DL, *SE)) {
4332 Tails.insert(Stores[k]);
4333 Heads.insert(Stores[i]);
4334 ConsecutiveChain[Stores[i]] = Stores[k];
4335 break;
4336 }
4337 }
4338 }
4339
4340 // For stores that start but don't end a link in the chain:
4341 for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
4342 it != e; ++it) {
4343 if (Tails.count(*it))
4344 continue;
4345
4346 // We found a store instruction that starts a chain. Now follow the chain
4347 // and try to vectorize it.
4348 BoUpSLP::ValueList Operands;
4349 StoreInst *I = *it;
4350 // Collect the chain into a list.
4351 while (Tails.count(I) || Heads.count(I)) {
4352 if (VectorizedStores.count(I))
4353 break;
4354 Operands.push_back(I);
4355 // Move to the next value in the chain.
4356 I = ConsecutiveChain[I];
4357 }
4358
4359 // FIXME: Is division-by-2 the correct step? Should we assert that the
4360 // register size is a power-of-2?
4361 for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize();
4362 Size /= 2) {
4363 if (vectorizeStoreChain(Operands, R, Size)) {
4364 // Mark the vectorized stores so that we don't vectorize them again.
4365 VectorizedStores.insert(Operands.begin(), Operands.end());
4366 Changed = true;
4367 break;
4368 }
4369 }
4370 }
4371
4372 return Changed;
4373 }
4374
4375 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
4376 // Initialize the collections. We will make a single pass over the block.
4377 Stores.clear();
4378 GEPs.clear();
4379
4380 // Visit the store and getelementptr instructions in BB and organize them in
4381 // Stores and GEPs according to the underlying objects of their pointer
4382 // operands.
4383 for (Instruction &I : *BB) {
4384 // Ignore store instructions that are volatile or have a pointer operand
4385 // that doesn't point to a scalar type.
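// Grouping by GetUnderlyingObject means that, for example, stores to
// a[i] and a[i+1] land in the same bucket keyed by `a`, which is where
// vectorizeStoreChains later looks for consecutive pairs.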
4385 if (auto *SI = dyn_cast<StoreInst>(&I)) {
4386 if (!SI->isSimple())
4387 continue;
4388 if (!isValidElementType(SI->getValueOperand()->getType()))
4389 continue;
4390 Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
4391 }
4392
4393 // Ignore getelementptr instructions that have more than one index, a
4394 // constant index, or a pointer operand that doesn't point to a scalar
4395 // type.
4396 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
4397 auto Idx = GEP->idx_begin()->get();
4398 if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
4399 continue;
4400 if (!isValidElementType(Idx->getType()))
4401 continue;
4402 if (GEP->getType()->isVectorTy())
4403 continue;
4404 GEPs[GetUnderlyingObject(GEP->getPointerOperand(), *DL)].push_back(GEP);
4405 }
4406 }
4407 }
4408
4409 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
4410 if (!A || !B)
4411 return false;
4412 Value *VL[] = { A, B };
4413 return tryToVectorizeList(VL, R, None, true);
4414 }
4415
4416 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
4417 ArrayRef<Value *> BuildVector,
4418 bool AllowReorder) {
4419 if (VL.size() < 2)
4420 return false;
4421
4422 DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " << VL.size()
4423 << ".\n");
4424
4425 // Check that all of the parts are scalar instructions of the same type.
4426 Instruction *I0 = dyn_cast<Instruction>(VL[0]);
4427 if (!I0)
4428 return false;
4429
4430 unsigned Opcode0 = I0->getOpcode();
4431
4432 unsigned Sz = R.getVectorElementSize(I0);
4433 unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz);
4434 unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
4435 if (MaxVF < 2)
4436 return false;
4437
4438 for (Value *V : VL) {
4439 Type *Ty = V->getType();
4440 if (!isValidElementType(Ty))
4441 return false;
4442 Instruction *Inst = dyn_cast<Instruction>(V);
4443 if (!Inst || Inst->getOpcode() != Opcode0)
4444 return false;
4445 }
4446
4447 bool Changed = false;
4448
4449 // Keep track of values that were deleted by vectorizing in the loop below.
4450 SmallVector<WeakTrackingVH, 8> TrackValues(VL.begin(), VL.end());
4451
4452 unsigned NextInst = 0, MaxInst = VL.size();
4453 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF;
4454 VF /= 2) {
4455 // No actual vectorization should happen if the number of parts is the
4456 // same as the provided vectorization factor (i.e. the scalar type is used
4457 // for vector code during codegen).
4458 auto *VecTy = VectorType::get(VL[0]->getType(), VF);
4459 if (TTI->getNumberOfParts(VecTy) == VF)
4460 continue;
4461 for (unsigned I = NextInst; I < MaxInst; ++I) {
4462 unsigned OpsWidth = 0;
4463
4464 if (I + VF > MaxInst)
4465 OpsWidth = MaxInst - I;
4466 else
4467 OpsWidth = VF;
4468
4469 if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
4470 break;
4471
4472 // Check that a previous iteration of this loop did not delete the Value.
4473 if (hasValueBeenRAUWed(VL, TrackValues, I, OpsWidth))
4474 continue;
4475
4476 DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
4477 << "\n");
4478 ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
4479
4480 ArrayRef<Value *> BuildVectorSlice;
4481 if (!BuildVector.empty())
4482 BuildVectorSlice = BuildVector.slice(I, OpsWidth);
4483
4484 R.buildTree(Ops, BuildVectorSlice);
4485 // TODO: check if we can allow reordering for more cases.
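// Reordering swaps the two roots; e.g. (hypothetical) if the list is
// {x = a[1] + 1, y = a[0] + 2}, rebuilding the tree as {y, x} lets the
// loads of a[0] and a[1] form a consecutive, vectorizable pair.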
4486 if (AllowReorder && R.shouldReorder()) {
4487 // Conceptually, there is nothing actually preventing us from trying to
4488 // reorder a larger list. In fact, we do exactly this when vectorizing
4489 // reductions. However, at this point, we only expect to get here when
4490 // there are exactly two operations.
4491 assert(Ops.size() == 2);
4492 assert(BuildVectorSlice.empty());
4493 Value *ReorderedOps[] = {Ops[1], Ops[0]};
4494 R.buildTree(ReorderedOps, None);
4495 }
4496 if (R.isTreeTinyAndNotFullyVectorizable())
4497 continue;
4498
4499 R.computeMinimumValueSizes();
4500 int Cost = R.getTreeCost();
4501
4502 if (Cost < -SLPCostThreshold) {
4503 DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
4504 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
4505 cast<Instruction>(Ops[0]))
4506 << "SLP vectorized with cost " << ore::NV("Cost", Cost)
4507 << " and with tree size "
4508 << ore::NV("TreeSize", R.getTreeSize()));
4509
4510 Value *VectorizedRoot = R.vectorizeTree();
4511
4512 // Reconstruct the build vector by extracting the vectorized root. This
4513 // way we handle the case where some elements of the vector are
4514 // undefined.
4515 // (return (insertelt <4 x i32> (insertelt undef (opd0) 0) (opd1) 2))
4516 if (!BuildVectorSlice.empty()) {
4517 // The insert point is the last build vector instruction. The
4518 // vectorized root will precede it. This guarantees that we get an
4519 // instruction. The vectorized tree could have been constant folded.
4520 Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back());
4521 unsigned VecIdx = 0;
4522 for (auto &V : BuildVectorSlice) {
4523 IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
4524 ++BasicBlock::iterator(InsertAfter));
4525 Instruction *I = cast<Instruction>(V);
4526 assert(isa<InsertElementInst>(I) || isa<InsertValueInst>(I));
4527 Instruction *Extract =
4528 cast<Instruction>(Builder.CreateExtractElement(
4529 VectorizedRoot, Builder.getInt32(VecIdx++)));
4530 I->setOperand(1, Extract);
4531 I->moveAfter(Extract);
4532 InsertAfter = I;
4533 }
4534 }
4535 // Move to the next bundle.
4536 I += VF - 1;
4537 NextInst = I + 1;
4538 Changed = true;
4539 }
4540 }
4541 }
4542
4543 return Changed;
4544 }
4545
4546 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
4547 if (!I)
4548 return false;
4549
4550 if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I))
4551 return false;
4552
4553 Value *P = I->getParent();
4554
4555 // Vectorize in current basic block only.
4556 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
4557 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
4558 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
4559 return false;
4560
4561 // Try to vectorize the pair of operands.
4562 if (tryToVectorizePair(Op0, Op1, R))
4563 return true;
4564
4565 auto *A = dyn_cast<BinaryOperator>(Op0);
4566 auto *B = dyn_cast<BinaryOperator>(Op1);
4567 // Try to skip B.
4568 if (B && B->hasOneUse()) {
4569 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
4570 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
4571 if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R))
4572 return true;
4573 if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R))
4574 return true;
4575 }
4576
4577 // Try to skip A.
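// For example (hypothetical), for I = (A0 * A1) + B, the pair {A, B}
// may not be vectorizable, but descending into A's operands and trying
// {A0, B} or {A1, B} can still expose a profitable two-wide bundle.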
4578 if (A && A->hasOneUse()) { 4579 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 4580 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 4581 if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R)) 4582 return true; 4583 if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R)) 4584 return true; 4585 } 4586 return false; 4587 } 4588 4589 /// \brief Generate a shuffle mask to be used in a reduction tree. 4590 /// 4591 /// \param VecLen The length of the vector to be reduced. 4592 /// \param NumEltsToRdx The number of elements that should be reduced in the 4593 /// vector. 4594 /// \param IsPairwise Whether the reduction is a pairwise or splitting 4595 /// reduction. A pairwise reduction will generate a mask of 4596 /// <0,2,...> or <1,3,..> while a splitting reduction will generate 4597 /// <2,3, undef,undef> for a vector of 4 and NumElts = 2. 4598 /// \param IsLeft True will generate a mask of even elements, odd otherwise. 4599 static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx, 4600 bool IsPairwise, bool IsLeft, 4601 IRBuilder<> &Builder) { 4602 assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask"); 4603 4604 SmallVector<Constant *, 32> ShuffleMask( 4605 VecLen, UndefValue::get(Builder.getInt32Ty())); 4606 4607 if (IsPairwise) 4608 // Build a mask of 0, 2, ... (left) or 1, 3, ... (right). 4609 for (unsigned i = 0; i != NumEltsToRdx; ++i) 4610 ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft); 4611 else 4612 // Move the upper half of the vector to the lower half. 4613 for (unsigned i = 0; i != NumEltsToRdx; ++i) 4614 ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i); 4615 4616 return ConstantVector::get(ShuffleMask); 4617 } 4618 4619 namespace { 4620 4621 /// Model horizontal reductions. 4622 /// 4623 /// A horizontal reduction is a tree of reduction operations (currently add and 4624 /// fadd) that has operations that can be put into a vector as its leaf. 4625 /// For example, this tree: 4626 /// 4627 /// mul mul mul mul 4628 /// \ / \ / 4629 /// + + 4630 /// \ / 4631 /// + 4632 /// This tree has "mul" as its reduced values and "+" as its reduction 4633 /// operations. A reduction might be feeding into a store or a binary operation 4634 /// feeding a phi. 4635 /// ... 4636 /// \ / 4637 /// + 4638 /// | 4639 /// phi += 4640 /// 4641 /// Or: 4642 /// ... 4643 /// \ / 4644 /// + 4645 /// | 4646 /// *p = 4647 /// 4648 class HorizontalReduction { 4649 using ReductionOpsType = SmallVector<Value *, 16>; 4650 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>; 4651 ReductionOpsListType ReductionOps; 4652 SmallVector<Value *, 32> ReducedVals; 4653 // Use map vector to make stable output. 4654 MapVector<Instruction *, Value *> ExtraArgs; 4655 4656 /// Kind of the reduction data. 4657 enum ReductionKind { 4658 RK_None, /// Not a reduction. 4659 RK_Arithmetic, /// Binary reduction data. 4660 RK_Min, /// Minimum reduction data. 4661 RK_UMin, /// Unsigned minimum reduction data. 4662 RK_Max, /// Maximum reduction data. 4663 RK_UMax, /// Unsigned maximum reduction data. 4664 }; 4665 /// Contains info about operation, like its opcode, left and right operands. 4666 class OperationData { 4667 /// Opcode of the instruction. 4668 unsigned Opcode = 0; 4669 4670 /// Left operand of the reduction operation. 4671 Value *LHS = nullptr; 4672 4673 /// Right operand of the reduction operation. 4674 Value *RHS = nullptr; 4675 /// Kind of the reduction operation. 
4676 ReductionKind Kind = RK_None;
4677 /// True if a floating-point min/max reduction has no NaNs.
4678 bool NoNaN = false;
4679
4680 /// Checks if the reduction operation can be vectorized.
4681 bool isVectorizable() const {
4682 return LHS && RHS &&
4683 // We currently only support adds and min/max reductions.
4684 ((Kind == RK_Arithmetic &&
4685 (Opcode == Instruction::Add || Opcode == Instruction::FAdd)) ||
4686 ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
4687 (Kind == RK_Min || Kind == RK_Max)) ||
4688 (Opcode == Instruction::ICmp &&
4689 (Kind == RK_UMin || Kind == RK_UMax)));
4690 }
4691
4692 /// Creates a reduction operation with the current opcode.
4693 Value *createOp(IRBuilder<> &Builder, const Twine &Name) const {
4694 assert(isVectorizable() &&
4695 "Expected add|fadd or min/max reduction operation.");
4696 Value *Cmp;
4697 switch (Kind) {
4698 case RK_Arithmetic:
4699 return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, LHS, RHS,
4700 Name);
4701 case RK_Min:
4702 Cmp = Opcode == Instruction::ICmp ? Builder.CreateICmpSLT(LHS, RHS)
4703 : Builder.CreateFCmpOLT(LHS, RHS);
4704 break;
4705 case RK_Max:
4706 Cmp = Opcode == Instruction::ICmp ? Builder.CreateICmpSGT(LHS, RHS)
4707 : Builder.CreateFCmpOGT(LHS, RHS);
4708 break;
4709 case RK_UMin:
4710 assert(Opcode == Instruction::ICmp && "Expected integer types.");
4711 Cmp = Builder.CreateICmpULT(LHS, RHS);
4712 break;
4713 case RK_UMax:
4714 assert(Opcode == Instruction::ICmp && "Expected integer types.");
4715 Cmp = Builder.CreateICmpUGT(LHS, RHS);
4716 break;
4717 case RK_None:
4718 llvm_unreachable("Unknown reduction operation.");
4719 }
4720 return Builder.CreateSelect(Cmp, LHS, RHS, Name);
4721 }
4722
4723 public:
4724 explicit OperationData() = default;
4725
4726 /// Constructor for reduced values. They are identified by opcode only and
4727 /// don't have associated LHS/RHS values.
4728 explicit OperationData(Value *V) : Kind(RK_None) {
4729 if (auto *I = dyn_cast<Instruction>(V))
4730 Opcode = I->getOpcode();
4731 }
4732
4733 /// Constructor for reduction operations with the opcode and its left and
4734 /// right operands.
4735 OperationData(unsigned Opcode, Value *LHS, Value *RHS, ReductionKind Kind,
4736 bool NoNaN = false)
4737 : Opcode(Opcode), LHS(LHS), RHS(RHS), Kind(Kind), NoNaN(NoNaN) {
4738 assert(Kind != RK_None && "One of the reduction operations is expected.");
4739 }
4740 explicit operator bool() const { return Opcode; }
4741
4742 /// Get the index of the first operand.
4743 unsigned getFirstOperandIndex() const {
4744 assert(!!*this && "The opcode is not set.");
4745 switch (Kind) {
4746 case RK_Min:
4747 case RK_UMin:
4748 case RK_Max:
4749 case RK_UMax:
4750 return 1;
4751 case RK_Arithmetic:
4752 case RK_None:
4753 break;
4754 }
4755 return 0;
4756 }
4757
4758 /// Total number of operands in the reduction operation.
4759 unsigned getNumberOfOperands() const {
4760 assert(Kind != RK_None && !!*this && LHS && RHS &&
4761 "Expected reduction operation.");
4762 switch (Kind) {
4763 case RK_Arithmetic:
4764 return 2;
4765 case RK_Min:
4766 case RK_UMin:
4767 case RK_Max:
4768 case RK_UMax:
4769 return 3;
4770 case RK_None:
4771 break;
4772 }
4773 llvm_unreachable("Reduction kind is not set");
4774 }
4775
4776 /// Checks if the operation has the same parent as \p P.
    bool hasSameParent(Instruction *I, Value *P, bool IsRedOp) const {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      if (!IsRedOp)
        return I->getParent() == P;
      switch (Kind) {
      case RK_Arithmetic:
        // Arithmetic reduction operation must be used once only.
        return I->getParent() == P;
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax: {
        // The select must be used twice, while its condition must have a
        // single use only.
        auto *Cmp = cast<Instruction>(cast<SelectInst>(I)->getCondition());
        return I->getParent() == P && Cmp->getParent() == P;
      }
      case RK_None:
        break;
      }
      llvm_unreachable("Reduction kind is not set");
    }

    /// Expected number of uses for reduction operations/reduced values.
    bool hasRequiredNumberOfUses(Instruction *I, bool IsReductionOp) const {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        return I->hasOneUse();
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        return I->hasNUses(2) &&
               (!IsReductionOp ||
                cast<SelectInst>(I)->getCondition()->hasOneUse());
      case RK_None:
        break;
      }
      llvm_unreachable("Reduction kind is not set");
    }

    /// Initializes the list of reduction operations.
    void initReductionOps(ReductionOpsListType &ReductionOps) {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        ReductionOps.assign(1, ReductionOpsType());
        break;
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        ReductionOps.assign(2, ReductionOpsType());
        break;
      case RK_None:
        llvm_unreachable("Reduction kind is not set");
      }
    }

    /// Add all reduction operations for the reduction instruction \p I.
    void addReductionOps(Instruction *I, ReductionOpsListType &ReductionOps) {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        ReductionOps[0].emplace_back(I);
        break;
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition());
        ReductionOps[1].emplace_back(I);
        break;
      case RK_None:
        llvm_unreachable("Reduction kind is not set");
      }
    }

    /// Checks if the instruction is associative and can be vectorized.
    bool isAssociative(Instruction *I) const {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        return I->isAssociative();
      case RK_Min:
      case RK_Max:
        return Opcode == Instruction::ICmp ||
               cast<Instruction>(I->getOperand(0))->hasUnsafeAlgebra();
      case RK_UMin:
      case RK_UMax:
        assert(Opcode == Instruction::ICmp &&
               "Only integer compare operation is expected.");
        return true;
      case RK_None:
        break;
      }
      llvm_unreachable("Reduction kind is not set");
    }

    /// Checks if the reduction operation can be vectorized.
    bool isVectorizable(Instruction *I) const {
      return isVectorizable() && isAssociative(I);
    }

    /// Checks if two operation data are both a reduction op or both a reduced
    /// value.
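    /// Only the kind and opcode are compared; e.g. two RK_Arithmetic adds
    /// compare equal even when their LHS/RHS values differ.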
    bool operator==(const OperationData &OD) const {
      assert(((Kind != OD.Kind) || ((!LHS == !OD.LHS) && (!RHS == !OD.RHS))) &&
             "One of the comparing operations is incorrect.");
      return this == &OD || (Kind == OD.Kind && Opcode == OD.Opcode);
    }
    bool operator!=(const OperationData &OD) const { return !(*this == OD); }

    void clear() {
      Opcode = 0;
      LHS = nullptr;
      RHS = nullptr;
      Kind = RK_None;
      NoNaN = false;
    }

    /// Get the opcode of the reduction operation.
    unsigned getOpcode() const {
      assert(isVectorizable() && "Expected vectorizable operation.");
      return Opcode;
    }

    /// Get kind of reduction data.
    ReductionKind getKind() const { return Kind; }
    Value *getLHS() const { return LHS; }
    Value *getRHS() const { return RHS; }

    Type *getConditionType() const {
      switch (Kind) {
      case RK_Arithmetic:
        return nullptr;
      case RK_Min:
      case RK_Max:
      case RK_UMin:
      case RK_UMax:
        return CmpInst::makeCmpResultType(LHS->getType());
      case RK_None:
        break;
      }
      llvm_unreachable("Reduction kind is not set");
    }

    /// Creates a reduction operation with the current opcode with the IR
    /// flags from \p ReductionOps.
    Value *createOp(IRBuilder<> &Builder, const Twine &Name,
                    const ReductionOpsListType &ReductionOps) const {
      assert(isVectorizable() &&
             "Expected add|fadd or min/max reduction operation.");
      auto *Op = createOp(Builder, Name);
      switch (Kind) {
      case RK_Arithmetic:
        propagateIRFlags(Op, ReductionOps[0]);
        return Op;
      case RK_Min:
      case RK_Max:
      case RK_UMin:
      case RK_UMax:
        if (auto *SI = dyn_cast<SelectInst>(Op))
          propagateIRFlags(SI->getCondition(), ReductionOps[0]);
        propagateIRFlags(Op, ReductionOps[1]);
        return Op;
      case RK_None:
        break;
      }
      llvm_unreachable("Unknown reduction operation.");
    }

    /// Creates a reduction operation with the current opcode with the IR
    /// flags from \p I.
    Value *createOp(IRBuilder<> &Builder, const Twine &Name,
                    Instruction *I) const {
      assert(isVectorizable() &&
             "Expected add|fadd or min/max reduction operation.");
      auto *Op = createOp(Builder, Name);
      switch (Kind) {
      case RK_Arithmetic:
        propagateIRFlags(Op, I);
        return Op;
      case RK_Min:
      case RK_Max:
      case RK_UMin:
      case RK_UMax:
        if (auto *SI = dyn_cast<SelectInst>(Op)) {
          propagateIRFlags(SI->getCondition(),
                           cast<SelectInst>(I)->getCondition());
        }
        propagateIRFlags(Op, I);
        return Op;
      case RK_None:
        break;
      }
      llvm_unreachable("Unknown reduction operation.");
    }

    TargetTransformInfo::ReductionFlags getFlags() const {
      TargetTransformInfo::ReductionFlags Flags;
      Flags.NoNaN = NoNaN;
      switch (Kind) {
      case RK_Arithmetic:
        break;
      case RK_Min:
        Flags.IsSigned = Opcode == Instruction::ICmp;
        Flags.IsMaxOp = false;
        break;
      case RK_Max:
        Flags.IsSigned = Opcode == Instruction::ICmp;
        Flags.IsMaxOp = true;
        break;
      case RK_UMin:
        Flags.IsSigned = false;
        Flags.IsMaxOp = false;
        break;
      case RK_UMax:
        Flags.IsSigned = false;
        Flags.IsMaxOp = true;
        break;
      case RK_None:
        llvm_unreachable("Reduction kind is not set");
      }
      return Flags;
    }
  };

  Instruction *ReductionRoot = nullptr;

  /// The operation data of the reduction operation.
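  /// In the example tree in the class comment these are the '+' operations.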
  OperationData ReductionData;

  /// The operation data of the values we perform a reduction on.
  OperationData ReducedValueData;

  /// Should we model this reduction as a pairwise reduction tree or a tree
  /// that splits the vector in halves and adds those halves.
  bool IsPairwiseReduction = false;

  /// Checks if the ParentStackElem.first should be marked as a reduction
  /// operation with an extra argument or as extra argument itself.
  void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem,
                    Value *ExtraArg) {
    if (ExtraArgs.count(ParentStackElem.first)) {
      ExtraArgs[ParentStackElem.first] = nullptr;
      // We ran into something like:
      // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg.
      // The whole ParentStackElem.first should be considered as an extra value
      // in this case.
      // Do not perform analysis of remaining operands of ParentStackElem.first
      // instruction; this whole instruction is an extra argument.
      ParentStackElem.second = ParentStackElem.first->getNumOperands();
    } else {
      // We ran into something like:
      // ParentStackElem.first += ... + ExtraArg + ...
      ExtraArgs[ParentStackElem.first] = ExtraArg;
    }
  }

  static OperationData getOperationData(Value *V) {
    if (!V)
      return OperationData();

    Value *LHS;
    Value *RHS;
    if (m_BinOp(m_Value(LHS), m_Value(RHS)).match(V)) {
      return OperationData(cast<BinaryOperator>(V)->getOpcode(), LHS, RHS,
                           RK_Arithmetic);
    }
    if (auto *Select = dyn_cast<SelectInst>(V)) {
      // Look for a min/max pattern.
      if (m_UMin(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(Instruction::ICmp, LHS, RHS, RK_UMin);
      } else if (m_SMin(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(Instruction::ICmp, LHS, RHS, RK_Min);
      } else if (m_OrdFMin(m_Value(LHS), m_Value(RHS)).match(Select) ||
                 m_UnordFMin(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(
            Instruction::FCmp, LHS, RHS, RK_Min,
            cast<Instruction>(Select->getCondition())->hasNoNaNs());
      } else if (m_UMax(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(Instruction::ICmp, LHS, RHS, RK_UMax);
      } else if (m_SMax(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(Instruction::ICmp, LHS, RHS, RK_Max);
      } else if (m_OrdFMax(m_Value(LHS), m_Value(RHS)).match(Select) ||
                 m_UnordFMax(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(
            Instruction::FCmp, LHS, RHS, RK_Max,
            cast<Instruction>(Select->getCondition())->hasNoNaNs());
      }
    }
    return OperationData(V);
  }

public:
  HorizontalReduction() = default;

  /// \brief Try to find a reduction tree.
  bool matchAssociativeReduction(PHINode *Phi, Instruction *B) {
    assert((!Phi || is_contained(Phi->operands(), B)) &&
           "The phi needs to use the binary operator");

    ReductionData = getOperationData(B);

    // We could have an initial reduction that is not an add.
    //  r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted in the first '+'.
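    // For example (illustrative IR):
    //   %sum = fadd fast float %v12, %v34 ; root of the actual '+' tree
    //   %r = fmul fast float %phi, %sum   ; initial reduction matched first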
    if (Phi) {
      if (ReductionData.getLHS() == Phi) {
        Phi = nullptr;
        B = dyn_cast<Instruction>(ReductionData.getRHS());
        ReductionData = getOperationData(B);
      } else if (ReductionData.getRHS() == Phi) {
        Phi = nullptr;
        B = dyn_cast<Instruction>(ReductionData.getLHS());
        ReductionData = getOperationData(B);
      }
    }

    if (!ReductionData.isVectorizable(B))
      return false;

    Type *Ty = B->getType();
    if (!isValidElementType(Ty))
      return false;

    ReducedValueData.clear();
    ReductionRoot = B;

    // Post-order traverse the reduction tree starting at B. We only handle
    // true trees containing only binary operators.
    SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
    Stack.push_back(std::make_pair(B, ReductionData.getFirstOperandIndex()));
    ReductionData.initReductionOps(ReductionOps);
    while (!Stack.empty()) {
      Instruction *TreeN = Stack.back().first;
      unsigned EdgeToVisit = Stack.back().second++;
      OperationData OpData = getOperationData(TreeN);
      bool IsReducedValue = OpData != ReductionData;

      // Postorder visit.
      if (IsReducedValue || EdgeToVisit == OpData.getNumberOfOperands()) {
        if (IsReducedValue)
          ReducedVals.push_back(TreeN);
        else {
          auto I = ExtraArgs.find(TreeN);
          if (I != ExtraArgs.end() && !I->second) {
            // Check if TreeN is an extra argument of its parent operation.
            if (Stack.size() <= 1) {
              // TreeN can't be an extra argument as it is a root reduction
              // operation.
              return false;
            }
            // Yes, TreeN is an extra argument, do not add it to a list of
            // reduction operations.
            // Stack[Stack.size() - 2] always points to the parent operation.
            markExtraArg(Stack[Stack.size() - 2], TreeN);
            ExtraArgs.erase(TreeN);
          } else
            ReductionData.addReductionOps(TreeN, ReductionOps);
        }
        // Retract.
        Stack.pop_back();
        continue;
      }

      // Visit left or right.
      Value *NextV = TreeN->getOperand(EdgeToVisit);
      if (NextV != Phi) {
        auto *I = dyn_cast<Instruction>(NextV);
        OpData = getOperationData(I);
        // Continue analysis if the next operand is a reduction operation or
        // (possibly) a reduced value. If the reduced value opcode is not set,
        // the first operation encountered that differs from the reduction
        // operation is taken as the reduced value class.
        if (I && (!ReducedValueData || OpData == ReducedValueData ||
                  OpData == ReductionData)) {
          const bool IsReductionOperation = OpData == ReductionData;
          // Only handle trees in the current basic block.
          if (!ReductionData.hasSameParent(I, B->getParent(),
                                           IsReductionOperation)) {
            // I is an extra argument for TreeN (its parent operation).
            markExtraArg(Stack.back(), I);
            continue;
          }

          // Each tree node needs to have the minimal number of uses except
          // for the ultimate reduction.
          if (!ReductionData.hasRequiredNumberOfUses(I,
                                                     OpData == ReductionData) &&
              I != B) {
            // I is an extra argument for TreeN (its parent operation).
            markExtraArg(Stack.back(), I);
            continue;
          }

          if (IsReductionOperation) {
            // We need to be able to reassociate the reduction operations.
            if (!OpData.isAssociative(I)) {
              // I is an extra argument for TreeN (its parent operation).
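              // (For example, an fadd without reassociation-enabling
              // fast-math flags is not associative and ends up here.)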
              markExtraArg(Stack.back(), I);
              continue;
            }
          } else if (ReducedValueData && ReducedValueData != OpData) {
            // Make sure that the opcodes of the operations that we are going
            // to reduce match.
            // I is an extra argument for TreeN (its parent operation).
            markExtraArg(Stack.back(), I);
            continue;
          } else if (!ReducedValueData)
            ReducedValueData = OpData;

          Stack.push_back(std::make_pair(I, OpData.getFirstOperandIndex()));
          continue;
        }
      }
      // NextV is an extra argument for TreeN (its parent operation).
      markExtraArg(Stack.back(), NextV);
    }
    return true;
  }

  /// \brief Attempt to vectorize the tree found by
  /// matchAssociativeReduction.
  bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
    if (ReducedVals.empty())
      return false;

    // If there is a sufficient number of reduction values, reduce
    // to a nearby power-of-2. We can safely generate oversized
    // vectors and rely on the backend to split them to legal sizes.
    unsigned NumReducedVals = ReducedVals.size();
    if (NumReducedVals < 4)
      return false;

    unsigned ReduxWidth = PowerOf2Floor(NumReducedVals);

    Value *VectorizedTree = nullptr;
    IRBuilder<> Builder(ReductionRoot);
    FastMathFlags Unsafe;
    Unsafe.setUnsafeAlgebra();
    Builder.setFastMathFlags(Unsafe);
    unsigned i = 0;

    BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
    // The same extra argument may be used several times, so log each attempt
    // to use it.
    for (auto &Pair : ExtraArgs)
      ExternallyUsedValues[Pair.second].push_back(Pair.first);
    SmallVector<Value *, 16> IgnoreList;
    for (auto &RdxOps : ReductionOps)
      IgnoreList.append(RdxOps.begin(), RdxOps.end());
    while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) {
      auto VL = makeArrayRef(&ReducedVals[i], ReduxWidth);
      V.buildTree(VL, ExternallyUsedValues, IgnoreList);
      if (V.shouldReorder()) {
        SmallVector<Value *, 8> Reversed(VL.rbegin(), VL.rend());
        V.buildTree(Reversed, ExternallyUsedValues, IgnoreList);
      }
      if (V.isTreeTinyAndNotFullyVectorizable())
        break;

      V.computeMinimumValueSizes();

      // Estimate cost.
      int Cost =
          V.getTreeCost() + getReductionCost(TTI, ReducedVals[i], ReduxWidth);
      if (Cost >= -SLPCostThreshold)
        break;

      DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
                   << ". (HorRdx)\n");
      auto *I0 = cast<Instruction>(VL[0]);
      V.getORE()->emit(
          OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction", I0)
          << "Vectorized horizontal reduction with cost "
          << ore::NV("Cost", Cost) << " and with tree size "
          << ore::NV("TreeSize", V.getTreeSize()));

      // Vectorize a tree.
      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
      Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);

      // Emit a reduction.
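      // emitReduction reduces the current ReduxWidth-wide chunk to a single
      // scalar; the per-chunk results are chained together below.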
      Value *ReducedSubTree =
          emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);
      if (VectorizedTree) {
        Builder.SetCurrentDebugLocation(Loc);
        OperationData VectReductionData(ReductionData.getOpcode(),
                                        VectorizedTree, ReducedSubTree,
                                        ReductionData.getKind());
        VectorizedTree =
            VectReductionData.createOp(Builder, "op.rdx", ReductionOps);
      } else
        VectorizedTree = ReducedSubTree;
      i += ReduxWidth;
      ReduxWidth = PowerOf2Floor(NumReducedVals - i);
    }

    if (VectorizedTree) {
      // Finish the reduction.
      for (; i < NumReducedVals; ++i) {
        auto *I = cast<Instruction>(ReducedVals[i]);
        Builder.SetCurrentDebugLocation(I->getDebugLoc());
        OperationData VectReductionData(ReductionData.getOpcode(),
                                        VectorizedTree, I,
                                        ReductionData.getKind());
        VectorizedTree = VectReductionData.createOp(Builder, "", ReductionOps);
      }
      for (auto &Pair : ExternallyUsedValues) {
        assert(!Pair.second.empty() &&
               "At least one DebugLoc must be inserted");
        // Add each externally used value to the final reduction.
        for (auto *I : Pair.second) {
          Builder.SetCurrentDebugLocation(I->getDebugLoc());
          OperationData VectReductionData(ReductionData.getOpcode(),
                                          VectorizedTree, Pair.first,
                                          ReductionData.getKind());
          VectorizedTree = VectReductionData.createOp(Builder, "op.extra", I);
        }
      }
      // Update users.
      ReductionRoot->replaceAllUsesWith(VectorizedTree);
    }
    return VectorizedTree != nullptr;
  }

  unsigned numReductionValues() const { return ReducedVals.size(); }

private:
  /// \brief Calculate the cost of a reduction.
  int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal,
                       unsigned ReduxWidth) {
    Type *ScalarTy = FirstReducedVal->getType();
    Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);

    int PairwiseRdxCost;
    int SplittingRdxCost;
    switch (ReductionData.getKind()) {
    case RK_Arithmetic:
      PairwiseRdxCost =
          TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy,
                                          /*IsPairwiseForm=*/true);
      SplittingRdxCost =
          TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy,
                                          /*IsPairwiseForm=*/false);
      break;
    case RK_Min:
    case RK_Max:
    case RK_UMin:
    case RK_UMax: {
      Type *VecCondTy = CmpInst::makeCmpResultType(VecTy);
      bool IsUnsigned = ReductionData.getKind() == RK_UMin ||
                        ReductionData.getKind() == RK_UMax;
      PairwiseRdxCost =
          TTI->getMinMaxReductionCost(VecTy, VecCondTy,
                                      /*IsPairwiseForm=*/true, IsUnsigned);
      SplittingRdxCost =
          TTI->getMinMaxReductionCost(VecTy, VecCondTy,
                                      /*IsPairwiseForm=*/false, IsUnsigned);
      break;
    }
    case RK_None:
      llvm_unreachable("Expected arithmetic or min/max reduction operation");
    }

    IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
    int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;

    int ScalarReduxCost;
    switch (ReductionData.getKind()) {
    case RK_Arithmetic:
      ScalarReduxCost =
          TTI->getArithmeticInstrCost(ReductionData.getOpcode(), ScalarTy);
      break;
    case RK_Min:
    case RK_Max:
    case RK_UMin:
    case RK_UMax:
      ScalarReduxCost =
          TTI->getCmpSelInstrCost(ReductionData.getOpcode(), ScalarTy) +
          TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
                                  CmpInst::makeCmpResultType(ScalarTy));
      break;
    case RK_None:
      llvm_unreachable("Expected arithmetic or min/max reduction operation");
    }
    ScalarReduxCost *= (ReduxWidth - 1);

    DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
                 << " for reduction that starts with " << *FirstReducedVal
                 << " (It is a "
                 << (IsPairwiseReduction ? "pairwise" : "splitting")
                 << " reduction)\n");

    return VecReduxCost - ScalarReduxCost;
  }

  /// \brief Emit a horizontal reduction of the vectorized value.
  Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder,
                       unsigned ReduxWidth, const TargetTransformInfo *TTI) {
    assert(VectorizedValue && "Need to have a vectorized tree node");
    assert(isPowerOf2_32(ReduxWidth) &&
           "We only handle power-of-two reductions for now");

    if (!IsPairwiseReduction)
      return createSimpleTargetReduction(
          Builder, TTI, ReductionData.getOpcode(), VectorizedValue,
          ReductionData.getFlags(), ReductionOps.back());

    Value *TmpVec = VectorizedValue;
    for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
      Value *LeftMask =
          createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
      Value *RightMask =
          createRdxShuffleMask(ReduxWidth, i, true, false, Builder);

      Value *LeftShuf = Builder.CreateShuffleVector(
          TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l");
      Value *RightShuf = Builder.CreateShuffleVector(
          TmpVec, UndefValue::get(TmpVec->getType()), RightMask,
          "rdx.shuf.r");
      OperationData VectReductionData(ReductionData.getOpcode(), LeftShuf,
                                      RightShuf, ReductionData.getKind());
      TmpVec = VectReductionData.createOp(Builder, "op.rdx", ReductionOps);
    }

    // The result is in the first element of the vector.
    return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
  }
};

} // end anonymous namespace

/// \brief Recognize construction of vectors like
///  %ra = insertelement <4 x float> undef, float %s0, i32 0
///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
/// starting from the last insertelement instruction.
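/// Each intermediate insertelement in the chain must have exactly one use,
/// and the chain must start at an undef vector.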
///
/// Returns true if it matches.
///
static bool findBuildVector(InsertElementInst *LastInsertElem,
                            SmallVectorImpl<Value *> &BuildVector,
                            SmallVectorImpl<Value *> &BuildVectorOpds) {
  Value *V = nullptr;
  do {
    BuildVector.push_back(LastInsertElem);
    BuildVectorOpds.push_back(LastInsertElem->getOperand(1));
    V = LastInsertElem->getOperand(0);
    if (isa<UndefValue>(V))
      break;
    LastInsertElem = dyn_cast<InsertElementInst>(V);
    if (!LastInsertElem || !LastInsertElem->hasOneUse())
      return false;
  } while (true);
  std::reverse(BuildVector.begin(), BuildVector.end());
  std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end());
  return true;
}

/// \brief Like findBuildVector, but looks for construction of an aggregate.
///
/// \return true if it matches.
static bool findBuildAggregate(InsertValueInst *IV,
                               SmallVectorImpl<Value *> &BuildVector,
                               SmallVectorImpl<Value *> &BuildVectorOpds) {
  Value *V;
  do {
    BuildVector.push_back(IV);
    BuildVectorOpds.push_back(IV->getInsertedValueOperand());
    V = IV->getAggregateOperand();
    if (isa<UndefValue>(V))
      break;
    IV = dyn_cast<InsertValueInst>(V);
    if (!IV || !IV->hasOneUse())
      return false;
  } while (true);
  std::reverse(BuildVector.begin(), BuildVector.end());
  std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end());
  return true;
}

static bool PhiTypeSorterFunc(Value *V, Value *V2) {
  return V->getType() < V2->getType();
}

/// \brief Try to get a reduction value from a phi node.
///
/// Given a phi node \p P in a block \p ParentBB, consider possible reductions
/// if they come from either \p ParentBB or a containing loop latch.
///
/// \returns A candidate reduction value if possible, or \code nullptr \endcode
/// if not possible.
static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
                                BasicBlock *ParentBB, LoopInfo *LI) {
  // There are situations where the reduction value is not dominated by the
  // reduction phi. Vectorizing such cases has been reported to cause
  // miscompiles. See PR25787.
  auto DominatedReduxValue = [&](Value *R) {
    auto *I = dyn_cast<Instruction>(R);
    return I && DT->dominates(P->getParent(), I->getParent());
  };

  Value *Rdx = nullptr;

  // Return the incoming value if it comes from the same BB as the phi node.
  if (P->getIncomingBlock(0) == ParentBB) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == ParentBB) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  // Otherwise, check whether we have a loop latch to look at.
  Loop *BBL = LI->getLoopFor(ParentBB);
  if (!BBL)
    return nullptr;
  BasicBlock *BBLatch = BBL->getLoopLatch();
  if (!BBLatch)
    return nullptr;

  // There is a loop latch, return the incoming value if it comes from
  // that. This reduction pattern occasionally turns up.
  if (P->getIncomingBlock(0) == BBLatch) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == BBLatch) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  return nullptr;
}

/// Attempt to reduce a horizontal reduction.
/// If it is legal to match a horizontal reduction feeding the phi node \a P
/// with reduction operators \a Root (or one of its operands) in a basic block
/// \a BB, then check if it can be done. If a horizontal reduction is not
/// found and the root instruction is a binary operation, vectorization of the
/// operands is attempted.
/// \returns true if a horizontal reduction was matched and reduced or the
/// operands of one of the binary instructions were vectorized.
/// \returns false if a horizontal reduction was not matched (or not possible)
/// or no vectorization of any binary operation feeding \a Root instruction was
/// performed.
static bool tryToVectorizeHorReductionOrInstOperands(
    PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
    TargetTransformInfo *TTI,
    const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
  if (!ShouldVectorizeHor)
    return false;

  if (!Root)
    return false;

  if (Root->getParent() != BB || isa<PHINode>(Root))
    return false;
  // Start analysis from the Root instruction. If a horizontal reduction is
  // found, try to vectorize it. If it is not a horizontal reduction or
  // vectorization is not possible or not effective, and the currently analyzed
  // instruction is a binary operation, try to vectorize the operands, using
  // pre-order DFS traversal order. If the operands were not vectorized, repeat
  // the same procedure considering each operand as a possible root of the
  // horizontal reduction.
  // Interrupt the process if the Root instruction itself was vectorized or all
  // sub-trees no deeper than RecursionMaxDepth were analyzed/vectorized.
  SmallVector<std::pair<WeakTrackingVH, unsigned>, 8> Stack(1, {Root, 0});
  SmallSet<Value *, 8> VisitedInstrs;
  bool Res = false;
  while (!Stack.empty()) {
    Value *V;
    unsigned Level;
    std::tie(V, Level) = Stack.pop_back_val();
    if (!V)
      continue;
    auto *Inst = dyn_cast<Instruction>(V);
    if (!Inst)
      continue;
    auto *BI = dyn_cast<BinaryOperator>(Inst);
    auto *SI = dyn_cast<SelectInst>(Inst);
    if (BI || SI) {
      HorizontalReduction HorRdx;
      if (HorRdx.matchAssociativeReduction(P, Inst)) {
        if (HorRdx.tryToReduce(R, TTI)) {
          Res = true;
          // Set P to nullptr to avoid re-analysis of phi node in
          // matchAssociativeReduction function unless this is the root node.
          P = nullptr;
          continue;
        }
      }
      if (P && BI) {
        Inst = dyn_cast<Instruction>(BI->getOperand(0));
        if (Inst == P)
          Inst = dyn_cast<Instruction>(BI->getOperand(1));
        if (!Inst) {
          // Set P to nullptr to avoid re-analysis of phi node in
          // matchAssociativeReduction function unless this is the root node.
          P = nullptr;
          continue;
        }
      }
    }
    // Set P to nullptr to avoid re-analysis of phi node in
    // matchAssociativeReduction function unless this is the root node.
    P = nullptr;
    if (Vectorize(Inst, R)) {
      Res = true;
      continue;
    }

    // Try to vectorize operands.
    // Continue analysis for the instruction from the same basic block only to
    // save compile time.
    if (++Level < RecursionMaxDepth)
      for (auto *Op : Inst->operand_values())
        if (VisitedInstrs.insert(Op).second)
          if (auto *I = dyn_cast<Instruction>(Op))
            if (!isa<PHINode>(I) && I->getParent() == BB)
              Stack.emplace_back(Op, Level);
  }
  return Res;
}

bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
                                                 BasicBlock *BB, BoUpSLP &R,
                                                 TargetTransformInfo *TTI) {
  if (!V)
    return false;
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  if (!isa<BinaryOperator>(I))
    P = nullptr;
  // Try to match and vectorize a horizontal reduction.
  auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool {
    return tryToVectorize(I, R);
  };
  return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI,
                                                  ExtraVectorization);
}

bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
                                                 BasicBlock *BB, BoUpSLP &R) {
  const DataLayout &DL = BB->getModule()->getDataLayout();
  if (!R.canMapToVector(IVI->getType(), DL))
    return false;

  SmallVector<Value *, 16> BuildVector;
  SmallVector<Value *, 16> BuildVectorOpds;
  if (!findBuildAggregate(IVI, BuildVector, BuildVectorOpds))
    return false;

  DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
  return tryToVectorizeList(BuildVectorOpds, R, BuildVector, false);
}

bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
                                                   BasicBlock *BB, BoUpSLP &R) {
  SmallVector<Value *, 16> BuildVector;
  SmallVector<Value *, 16> BuildVectorOpds;
  if (!findBuildVector(IEI, BuildVector, BuildVectorOpds))
    return false;

  // Vectorize starting with the build vector operands, ignoring the
  // BuildVector instructions for the purpose of scheduling and user
  // extraction.
  return tryToVectorizeList(BuildVectorOpds, R, BuildVector);
}

bool SLPVectorizerPass::vectorizeCmpInst(CmpInst *CI, BasicBlock *BB,
                                         BoUpSLP &R) {
  if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R))
    return true;

  bool OpsChanged = false;
  for (int Idx = 0; Idx < 2; ++Idx) {
    OpsChanged |=
        vectorizeRootInstruction(nullptr, CI->getOperand(Idx), BB, R, TTI);
  }
  return OpsChanged;
}

bool SLPVectorizerPass::vectorizeSimpleInstructions(
    SmallVectorImpl<WeakVH> &Instructions, BasicBlock *BB, BoUpSLP &R) {
  bool OpsChanged = false;
  for (auto &VH : reverse(Instructions)) {
    auto *I = dyn_cast_or_null<Instruction>(VH);
    if (!I)
      continue;
    if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I))
      OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
    else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I))
      OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
    else if (auto *CI = dyn_cast<CmpInst>(I))
      OpsChanged |= vectorizeCmpInst(CI, BB, R);
  }
  Instructions.clear();
  return OpsChanged;
}

bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallSet<Value *, 16> VisitedInstrs;

  bool HaveVectorizedPhiNodes = true;
  while (HaveVectorizedPhiNodes) {
    HaveVectorizedPhiNodes = false;

    // Collect the incoming values from the PHIs.
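    // PHIs are clustered at the top of the block, so we stop scanning at the
    // first non-PHI instruction.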
    Incoming.clear();
    for (Instruction &I : *BB) {
      PHINode *P = dyn_cast<PHINode>(&I);
      if (!P)
        break;

      if (!VisitedInstrs.count(P))
        Incoming.push_back(P);
    }

    // Sort by type.
    std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);

    // Try to vectorize elements based on their type.
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
                                           E = Incoming.end();
         IncIt != E;) {

      // Look for the next elements with the same type.
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts
                   << ")\n");
      // The order in which the phi nodes appear in the program does not
      // matter. So allow tryToVectorizeList to reorder them if it is
      // beneficial. This is done when there are exactly two elements since
      // tryToVectorizeList asserts that there are only two values when
      // AllowReorder is true.
      bool AllowReorder = NumElts == 2;
      if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R,
                                            None, AllowReorder)) {
        // Success, start over because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  SmallVector<WeakVH, 8> PostProcessInstructions;
  SmallDenseSet<Instruction *, 4> KeyNodes;
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // We may go through BB multiple times, so skip the ones we have already
    // checked.
    if (!VisitedInstrs.insert(&*it).second) {
      if (it->use_empty() && KeyNodes.count(&*it) > 0 &&
          vectorizeSimpleInstructions(PostProcessInstructions, BB, R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;

      // Try to match and vectorize a horizontal reduction.
      if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
                                   TTI)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
      continue;
    }

    // Ran into an instruction without users, like a terminator or a function
    // call with an ignored return value, or a store. Ignore unused
    // instructions (based on the instruction type, except for CallInst and
    // InvokeInst).
    if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
                            isa<InvokeInst>(it))) {
      KeyNodes.insert(&*it);
      bool OpsChanged = false;
      if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
        for (auto *V : it->operand_values()) {
          // Try to match and vectorize a horizontal reduction.
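          // The reduction here is rooted at an operand of the key node, e.g.
          // at the value operand of a store.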
          OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
        }
      }
      // Start vectorization of the post-process list of instructions from the
      // top-tree instructions to try to vectorize as many instructions as
      // possible.
      OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R);
      if (OpsChanged) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
    }

    if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
        isa<InsertValueInst>(it))
      PostProcessInstructions.push_back(&*it);
  }

  return Changed;
}

bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
  auto Changed = false;
  for (auto &Entry : GEPs) {
    // If the getelementptr list has fewer than two elements, there's nothing
    // to do.
    if (Entry.second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
                 << Entry.second.size() << ".\n");

    // We process the getelementptr list in chunks of 16 (like we do for
    // stores) to minimize compile-time.
    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += 16) {
      auto Len = std::min<unsigned>(BE - BI, 16);
      auto GEPList = makeArrayRef(&Entry.second[BI], Len);

      // Initialize a set of candidate getelementptrs. Note that we use a
      // SetVector here to preserve program order. If the index computations
      // are vectorizable and begin with loads, we want to minimize the chance
      // of having to reorder them later.
      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

      // Some of the candidates may have already been vectorized after we
      // initially collected them. If so, the WeakTrackingVHs will have
      // nullified the values, so remove them from the set of candidates.
      Candidates.remove(nullptr);

      // Remove from the set of candidates all pairs of getelementptrs with
      // constant differences. Such getelementptrs are likely not good
      // candidates for vectorization in a bottom-up phase since one can be
      // computed from the other. We also ensure all candidate getelementptr
      // indices are unique.
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
        auto *GEPI = cast<GetElementPtrInst>(GEPList[I]);
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = cast<GetElementPtrInst>(GEPList[J]);
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPList[I]);
            Candidates.remove(GEPList[J]);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPList[J]);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;

      // Add the single, non-constant index of each candidate to the bundle. We
      // ensured the indices met these constraints when we originally collected
      // the getelementptrs.
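      // For example (illustrative), given the candidates
      //   %g0 = getelementptr inbounds i32, i32* %p, i64 %idx0
      //   %g1 = getelementptr inbounds i32, i32* %q, i64 %idx1
      // the bundle becomes {%idx0, %idx1}.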
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try to vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e;
       ++it) {
    if (it->second.size() < 2)
      continue;

    DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                 << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
    // TODO: The limit of 16 inhibits greater vectorization factors.
    //       For example, AVX2 supports v32i8. Increasing this limit, however,
    //       may cause a significant compile-time increase.
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len), R);
    }
  }
  return Changed;
}

char SLPVectorizer::ID = 0;

static const char lv_name[] = "SLP Vectorizer";

INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); }