//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Vectorize.h"
"llvm/Transforms/Vectorize.h" 88 #include <algorithm> 89 #include <cassert> 90 #include <cstdint> 91 #include <iterator> 92 #include <memory> 93 #include <set> 94 #include <string> 95 #include <tuple> 96 #include <utility> 97 #include <vector> 98 99 using namespace llvm; 100 using namespace llvm::PatternMatch; 101 using namespace slpvectorizer; 102 103 #define SV_NAME "slp-vectorizer" 104 #define DEBUG_TYPE "SLP" 105 106 STATISTIC(NumVectorInstructions, "Number of vector instructions generated"); 107 108 static cl::opt<int> 109 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden, 110 cl::desc("Only vectorize if you gain more than this " 111 "number ")); 112 113 static cl::opt<bool> 114 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden, 115 cl::desc("Attempt to vectorize horizontal reductions")); 116 117 static cl::opt<bool> ShouldStartVectorizeHorAtStore( 118 "slp-vectorize-hor-store", cl::init(false), cl::Hidden, 119 cl::desc( 120 "Attempt to vectorize horizontal reductions feeding into a store")); 121 122 static cl::opt<int> 123 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden, 124 cl::desc("Attempt to vectorize for this register size in bits")); 125 126 /// Limits the size of scheduling regions in a block. 127 /// It avoid long compile times for _very_ large blocks where vector 128 /// instructions are spread over a wide range. 129 /// This limit is way higher than needed by real-world functions. 130 static cl::opt<int> 131 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden, 132 cl::desc("Limit the size of the SLP scheduling region per block")); 133 134 static cl::opt<int> MinVectorRegSizeOption( 135 "slp-min-reg-size", cl::init(128), cl::Hidden, 136 cl::desc("Attempt to vectorize for this register size in bits")); 137 138 static cl::opt<unsigned> RecursionMaxDepth( 139 "slp-recursion-max-depth", cl::init(12), cl::Hidden, 140 cl::desc("Limit the recursion depth when building a vectorizable tree")); 141 142 static cl::opt<unsigned> MinTreeSize( 143 "slp-min-tree-size", cl::init(3), cl::Hidden, 144 cl::desc("Only vectorize small trees if they are fully vectorizable")); 145 146 static cl::opt<bool> 147 ViewSLPTree("view-slp-tree", cl::Hidden, 148 cl::desc("Display the SLP trees with Graphviz")); 149 150 // Limit the number of alias checks. The limit is chosen so that 151 // it has no negative effect on the llvm benchmarks. 152 static const unsigned AliasedCheckLimit = 10; 153 154 // Another limit for the alias checks: The maximum distance between load/store 155 // instructions where alias checks are done. 156 // This limit is useful for very large basic blocks. 157 static const unsigned MaxMemDepDistance = 160; 158 159 /// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling 160 /// regions to be handled. 161 static const int MinScheduleRegionSize = 16; 162 163 /// Predicate for the element types that the SLP vectorizer supports. 164 /// 165 /// The most important thing to filter here are types which are invalid in LLVM 166 /// vectors. We also filter target specific types which have absolutely no 167 /// meaningful vectorization path such as x86_fp80 and ppc_f128. This just 168 /// avoids spending time checking the cost model and realizing that they will 169 /// be inevitably scalarized. 
/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important things to filter here are types which are invalid in
/// LLVM vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_fp128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return false;

    if (BB != I->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (Value *i : VL)
    if (!isa<Constant>(i))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns True if \p I is commutative; handles CmpInst as well as plain
/// Instruction.
static bool isCommutative(Instruction *I) {
  if (auto *IC = dyn_cast<CmpInst>(I))
    return IC->isCommutative();
  return I->isCommutative();
}

/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> undef, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> undef, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> undef, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul.
/// TODO: Can we split off and reuse the shuffle mask detection from
/// TargetTransformInfo::getInstructionThroughput?
static Optional<TargetTransformInfo::ShuffleKind>
isShuffle(ArrayRef<Value *> VL) {
  auto *EI0 = cast<ExtractElementInst>(VL[0]);
  unsigned Size = EI0->getVectorOperandType()->getVectorNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, Select, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    auto *EI = cast<ExtractElementInst>(VL[I]);
    auto *Vec = EI->getVectorOperand();
    // All vector operands must have the same number of vector elements.
    if (Vec->getType()->getVectorNumElements() != Size)
      return None;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size))
      continue;
    unsigned IntIdx = Idx->getValue().getZExtValue();
    // We can extractelement from undef vector.
    if (isa<UndefValue>(Vec))
      continue;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
    if (!Vec1 || Vec1 == Vec)
      Vec1 = Vec;
    else if (!Vec2 || Vec2 == Vec)
      Vec2 = Vec;
    else
      return None;
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector, otherwise
  // we have permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
  bool isAltShuffle() const { return getOpcode() != getAltOpcode(); }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace
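
// Illustrative example (not from the original source): for a list of scalars
// that alternates between two binary opcodes, such as
//
//   %a0 = add i32 %x0, %y0
//   %s1 = sub i32 %x1, %y1
//   %a2 = add i32 %x2, %y2
//   %s3 = sub i32 %x3, %y3
//
// getSameOpcode() below returns an InstructionsState with MainOp == add and
// AltOp == sub, so isAltShuffle() is true and the list can still be treated
// as a single vectorizable group.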
/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}

/// \returns analysis of the Instructions in \p VL described in
/// InstructionsState: the opcode with which we suppose the whole list
/// could be vectorized, even if its structure is diverse.
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex = 0) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
  // TODO - generalize to support all operators (types, calls etc.).
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
    if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
      if (InstOpcode == Opcode || InstOpcode == AltOpcode)
        continue;
      if (Opcode == AltOpcode) {
        AltOpcode = InstOpcode;
        AltIndex = Cnt;
        continue;
      }
    } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
      Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
      Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        if (InstOpcode == Opcode || InstOpcode == AltOpcode)
          continue;
        if (Opcode == AltOpcode) {
          AltOpcode = InstOpcode;
          AltIndex = Cnt;
          continue;
        }
      }
    } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
      continue;
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }

  return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
                           cast<Instruction>(VL[AltIndex]));
}

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static Optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return None;
    return CI->getZExtValue();
  }
  ExtractValueInst *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return None;
  return *EI->idx_begin();
}

/// \returns True if the in-tree use also needs an extract. This refers to a
/// possible scalar operand in a vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
      if (hasVectorInstrinsicScalarOpd(ID, i))
        return (CI->getArgOperand(i) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

namespace llvm {

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC),
        DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    //       data type rather than just register size. For example, x86 AVX has
    //       256-bit registers, but it does not support integer operations
    //       at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize = TTI->getRegisterBitWidth(true);

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();
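
  /// A rough sketch of how a driver typically uses this class (simplified and
  /// illustrative; real callers perform additional legality and size checks):
  /// \code
  ///   R.buildTree(VL);
  ///   if (R.isTreeTinyAndNotFullyVectorizable())
  ///     return false;
  ///   R.computeMinimumValueSizes();
  ///   int Cost = R.getTreeCost();
  ///   if (Cost < -SLPCostThreshold)
  ///     R.vectorizeTree();
  /// \endcode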
  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractvalue instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  int getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst, taking
  /// into account (and updating, if required) the list of externally used
  /// values stored in \p ExternallyUsedValues.
  void buildTree(ArrayRef<Value *> Roots,
                 ExtraValueToDebugLocsMap &ExternallyUsedValues,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumOpsWantToKeepOrder.clear();
    NumOpsWantToKeepOriginalOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// \returns The best order of instructions for vectorization.
  Optional<ArrayRef<unsigned>> bestOrder() const {
    auto I = std::max_element(
        NumOpsWantToKeepOrder.begin(), NumOpsWantToKeepOrder.end(),
        [](const decltype(NumOpsWantToKeepOrder)::value_type &D1,
           const decltype(NumOpsWantToKeepOrder)::value_type &D2) {
          return D1.second < D2.second;
        });
    if (I == NumOpsWantToKeepOrder.end() ||
        I->getSecond() <= NumOpsWantToKeepOriginalOrder)
      return None;

    return makeArrayRef(I->getFirst());
  }

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V) const;

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  /// Check if ArrayType or StructType is isomorphic to some VectorType.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;
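
  /// For example (illustrative): a homogeneous aggregate such as
  /// \code
  ///   %struct.S = type { float, float, float, float }
  /// \endcode
  /// is isomorphic to <4 x float>, so canMapToVector would return 4, whereas
  /// an aggregate that mixes element types has no isomorphic vector type and
  /// yields 0.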
  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }

  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;

    /// The index of the user TreeEntry in VectorizableTree.
    int Idx = -1;

    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;

#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }

    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << Idx << " EdgeIdx:" << EdgeIdx << "}";
    }

    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
  };

  /// A helper data structure to hold the operands of a vector of instructions.
  /// This supports a fixed vector length for all operand vectors.
  class VLOperands {
    /// For each operand we need (i) the value, and (ii) the opcode that it
    /// would be attached to if the expression was in a left-linearized form.
    /// This is required to avoid illegal operand reordering.
    /// For example:
    /// \verbatim
    ///                         0 Op1
    ///                         |/
    /// Op1 Op2   Linearized    + Op2
    ///   \ /     ---------->   |/
    ///    -                    -
    ///
    /// Op1 - Op2            (0 + Op1) - Op2
    /// \endverbatim
    ///
    /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
    ///
    /// Another way to think of this is to track all the operations across the
    /// path from the operand all the way to the root of the tree and to
    /// calculate the operation that corresponds to this path. For example, the
    /// path from Op2 to the root crosses the RHS of the '-', therefore the
    /// corresponding operation is a '-' (which matches the one in the
    /// linearized tree, as shown above).
    ///
    /// For lack of a better term, we refer to this operation as Accumulated
    /// Path Operation (APO).
    struct OperandData {
      OperandData() = default;
      OperandData(Value *V, bool APO, bool IsUsed)
          : V(V), APO(APO), IsUsed(IsUsed) {}

      /// The operand value.
      Value *V = nullptr;

      /// TreeEntries only allow a single opcode, or an alternate sequence of
      /// them (e.g., +, -). Therefore, we can safely use a boolean value for
      /// the APO. It is set to 'true' if 'V' is attached to an inverse
      /// operation in the left-linearized form (e.g., Sub/Div), and 'false'
      /// otherwise (e.g., Add/Mul).
      bool APO = false;

      /// Helper data for the reordering function.
      bool IsUsed = false;
    };

    /// During operand reordering, we are trying to select the operand at each
    /// lane that best matches the operand at the neighboring lane. Our
    /// selection is based on the type of value we are looking for. For example,
    /// if the neighboring lane has a load, we need to look for a load that is
    /// accessing a consecutive address. These strategies are summarized in the
    /// 'ReorderingMode' enumerator.
    enum class ReorderingMode {
      Load,     ///< Matching loads to consecutive memory addresses
      Opcode,   ///< Matching instructions based on opcode (same or alternate)
      Constant, ///< Matching constants
      Splat,    ///< Matching the same instruction multiple times (broadcast)
      Failed,   ///< We failed to create a vectorizable group
    };

    using OperandDataVec = SmallVector<OperandData, 2>;

    /// A vector of operand vectors.
    SmallVector<OperandDataVec, 4> OpsVec;

    const DataLayout &DL;
    ScalarEvolution &SE;

    /// \returns the operand data at \p OpIdx and \p Lane.
    OperandData &getData(unsigned OpIdx, unsigned Lane) {
      return OpsVec[OpIdx][Lane];
    }

    /// \returns the operand data at \p OpIdx and \p Lane. Const version.
    const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
      return OpsVec[OpIdx][Lane];
    }

    /// Clears the used flag for all entries.
    void clearUsed() {
      for (unsigned OpIdx = 0, NumOperands = getNumOperands();
           OpIdx != NumOperands; ++OpIdx)
        for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
             ++Lane)
          OpsVec[OpIdx][Lane].IsUsed = false;
    }

    /// Swap the operand at \p OpIdx1 with the one at \p OpIdx2 in lane \p Lane.
    void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
      std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
    }

    // Search all operands in Ops[*][Lane] for the one that matches best
    // Ops[OpIdx][LastLane] and return its operand index.
    // If no good match can be found, return None.
    Optional<unsigned>
    getBestOperand(unsigned OpIdx, int Lane, int LastLane,
                   ArrayRef<ReorderingMode> ReorderingModes) {
      unsigned NumOperands = getNumOperands();

      // The operand of the previous lane at OpIdx.
      Value *OpLastLane = getData(OpIdx, LastLane).V;

      // Our strategy mode for OpIdx.
      ReorderingMode RMode = ReorderingModes[OpIdx];

      // The linearized opcode of the operand at OpIdx, Lane.
      bool OpIdxAPO = getData(OpIdx, Lane).APO;

      const unsigned BestScore = 2;
      const unsigned GoodScore = 1;

      // The best operand index and its score.
      // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
      // are using the score to differentiate between the two.
      struct BestOpData {
        Optional<unsigned> Idx = None;
        unsigned Score = 0;
      } BestOp;

      // Iterate through all unused operands and look for the best.
      for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
        // Get the operand at Idx and Lane.
        OperandData &OpData = getData(Idx, Lane);
        Value *Op = OpData.V;
        bool OpAPO = OpData.APO;

        // Skip already selected operands.
        if (OpData.IsUsed)
          continue;

        // Skip if we are trying to move the operand to a position with a
        // different opcode in the linearized tree form. This would break the
        // semantics.
        if (OpAPO != OpIdxAPO)
          continue;

        // Look for an operand that matches the current mode.
        switch (RMode) {
        case ReorderingMode::Load:
          if (isa<LoadInst>(Op)) {
            // Figure out which is left and right, so that we can check for
            // consecutive loads.
            bool LeftToRight = Lane > LastLane;
            Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
            Value *OpRight = (LeftToRight) ? Op : OpLastLane;
            if (isConsecutiveAccess(cast<LoadInst>(OpLeft),
                                    cast<LoadInst>(OpRight), DL, SE))
              BestOp.Idx = Idx;
          }
          break;
        case ReorderingMode::Opcode:
          // We accept both Instructions and Undefs, but with different scores.
          if ((isa<Instruction>(Op) && isa<Instruction>(OpLastLane) &&
               cast<Instruction>(Op)->getOpcode() ==
                   cast<Instruction>(OpLastLane)->getOpcode()) ||
              (isa<UndefValue>(OpLastLane) && isa<Instruction>(Op)) ||
              isa<UndefValue>(Op)) {
            // An instruction has a higher score than an undef.
            unsigned Score = (isa<UndefValue>(Op)) ? GoodScore : BestScore;
            if (Score > BestOp.Score) {
              BestOp.Idx = Idx;
              BestOp.Score = Score;
            }
          }
          break;
        case ReorderingMode::Constant:
          if (isa<Constant>(Op)) {
            unsigned Score = (isa<UndefValue>(Op)) ? GoodScore : BestScore;
            if (Score > BestOp.Score) {
              BestOp.Idx = Idx;
              BestOp.Score = Score;
            }
          }
          break;
        case ReorderingMode::Splat:
          if (Op == OpLastLane)
            BestOp.Idx = Idx;
          break;
        case ReorderingMode::Failed:
          return None;
        }
      }

      if (BestOp.Idx) {
        getData(BestOp.Idx.getValue(), Lane).IsUsed = true;
        return BestOp.Idx;
      }
      // If we could not find a good match return None.
      return None;
    }

    /// Helper for reorderOperandVecs. \Returns the lane that we should start
    /// reordering from. This is the one which has the least number of operands
    /// that can freely move about.
    unsigned getBestLaneToStartReordering() const {
      unsigned BestLane = 0;
      unsigned Min = UINT_MAX;
      for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
           ++Lane) {
        unsigned NumFreeOps = getMaxNumOperandsThatCanBeReordered(Lane);
        if (NumFreeOps < Min) {
          Min = NumFreeOps;
          BestLane = Lane;
        }
      }
      return BestLane;
    }

    /// \Returns the maximum number of operands that are allowed to be reordered
    /// for \p Lane. This is used as a heuristic for selecting the first lane to
    /// start operand reordering.
    unsigned getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
      unsigned CntTrue = 0;
      unsigned NumOperands = getNumOperands();
      // Operands with the same APO can be reordered. We therefore need to count
      // how many of them we have for each APO, like this: Cnt[APO] = x.
      // Since we only have two APOs, namely true and false, we can avoid using
      // a map. Instead we can simply count the number of operands that
      // correspond to one of them (in this case the 'true' APO), and calculate
      // the other by subtracting it from the total number of operands.
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx)
        if (getData(OpIdx, Lane).APO)
          ++CntTrue;
      unsigned CntFalse = NumOperands - CntTrue;
      return std::max(CntTrue, CntFalse);
    }
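
    /// For example (illustrative):
    /// \code
    ///   Lane: A[i] - B[i]   // APOs: {false, true}  -> max(1, 1) == 1
    ///   Lane: A[i] + B[i]   // APOs: {false, false} -> max(0, 2) == 2
    /// \endcode
    /// Both operands of the addition may swap sides, but neither operand of
    /// the subtraction may, so getBestLaneToStartReordering() above prefers
    /// the subtraction lane, whose operands are the most constrained.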
    /// Go through the instructions in VL and append their operands.
    void appendOperandsOfVL(ArrayRef<Value *> VL) {
      assert(!VL.empty() && "Bad VL");
      assert((empty() || VL.size() == getNumLanes()) &&
             "Expected same number of lanes");
      assert(isa<Instruction>(VL[0]) && "Expected instruction");
      unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
      OpsVec.resize(NumOperands);
      unsigned NumLanes = VL.size();
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
        OpsVec[OpIdx].resize(NumLanes);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
          // Our tree has just 3 nodes: the root and two operands.
          // It is therefore trivial to get the APO. We only need to check the
          // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
          // RHS operand. The LHS operand of both add and sub is never attached
          // to an inverse operation in the linearized form, therefore its APO
          // is false. The RHS is true only if VL[Lane] is an inverse operation.

          // Since operand reordering is performed on groups of commutative
          // operations or alternating sequences (e.g., +, -), we can safely
          // tell the inverse operations by checking commutativity.
          bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
          bool APO = (OpIdx == 0) ? false : IsInverseOperation;
          OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
                                 APO, false};
        }
      }
    }

    /// \returns the number of operands.
    unsigned getNumOperands() const { return OpsVec.size(); }

    /// \returns the number of lanes.
    unsigned getNumLanes() const { return OpsVec[0].size(); }

    /// \returns the operand value at \p OpIdx and \p Lane.
    Value *getValue(unsigned OpIdx, unsigned Lane) const {
      return getData(OpIdx, Lane).V;
    }

    /// \returns true if the data structure is empty.
    bool empty() const { return OpsVec.empty(); }

    /// Clears the data.
    void clear() { OpsVec.clear(); }

  public:
    /// Initialize with all the operands of the instruction vector \p RootVL.
    VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
               ScalarEvolution &SE)
        : DL(DL), SE(SE) {
      // Append all the operands of RootVL.
      appendOperandsOfVL(RootVL);
    }

    /// \Returns a value vector with the operands across all lanes for the
    /// operand at \p OpIdx.
    ValueList getVL(unsigned OpIdx) const {
      ValueList OpVL(OpsVec[OpIdx].size());
      assert(OpsVec[OpIdx].size() == getNumLanes() &&
             "Expected same num of lanes across all operands");
      for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
        OpVL[Lane] = OpsVec[OpIdx][Lane].V;
      return OpVL;
    }

    // Performs operand reordering for 2 or more operands.
    // The original operands are in OrigOps[OpIdx][Lane].
    // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'.
    void reorder() {
      unsigned NumOperands = getNumOperands();
      unsigned NumLanes = getNumLanes();
      // Each operand has its own mode. We are using this mode to help us select
      // the instructions for each lane, so that they match best with the ones
      // we have selected so far.
      SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);

      // This is a greedy single-pass algorithm. We are going over each lane
      // once and deciding on the best order right away with no back-tracking.
      // However, in order to increase its effectiveness, we start with the lane
      // that has operands that can move the least. For example, given the
      // following lanes:
      //  Lane 0 : A[0] = B[0] + C[0]   // Visited 3rd
      //  Lane 1 : A[1] = C[1] - B[1]   // Visited 1st
      //  Lane 2 : A[2] = B[2] + C[2]   // Visited 2nd
      //  Lane 3 : A[3] = C[3] - B[3]   // Visited 4th
      // we will start at Lane 1, since the operands of the subtraction cannot
      // be reordered. Then we will visit the rest of the lanes in a circular
      // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3.

      // Find the first lane that we will start our search from.
      unsigned FirstLane = getBestLaneToStartReordering();

      // Initialize the modes.
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
        Value *OpLane0 = getValue(OpIdx, FirstLane);
        // Keep track if we have instructions with all the same opcode on one
        // side.
        if (isa<LoadInst>(OpLane0))
          ReorderingModes[OpIdx] = ReorderingMode::Load;
        else if (isa<Instruction>(OpLane0))
          ReorderingModes[OpIdx] = ReorderingMode::Opcode;
        else if (isa<Constant>(OpLane0))
          ReorderingModes[OpIdx] = ReorderingMode::Constant;
        else if (isa<Argument>(OpLane0))
          // Our best hope is a Splat. It may save some cost in some cases.
          ReorderingModes[OpIdx] = ReorderingMode::Splat;
        else
          // NOTE: This should be unreachable.
          ReorderingModes[OpIdx] = ReorderingMode::Failed;
      }

      // If the initial strategy fails for any of the operand indexes, then we
      // perform reordering again in a second pass. This helps avoid assigning
      // high priority to the failed strategy, and should improve reordering for
      // the non-failed operand indexes.
      for (int Pass = 0; Pass != 2; ++Pass) {
        // Skip the second pass if the first pass did not fail.
        bool StrategyFailed = false;
        // Mark the operand data as free to use for all but the first pass.
        if (Pass > 0)
          clearUsed();
        // We keep the original operand order for the FirstLane, so reorder the
        // rest of the lanes. We are visiting the nodes in a circular fashion,
        // using FirstLane as the center point and increasing the radius
        // distance.
        for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
          // Visit the lane on the right and then the lane on the left.
          for (int Direction : {+1, -1}) {
            int Lane = FirstLane + Direction * Distance;
            if (Lane < 0 || Lane >= (int)NumLanes)
              continue;
            int LastLane = Lane - Direction;
            assert(LastLane >= 0 && LastLane < (int)NumLanes &&
                   "Out of bounds");
            // Look for a good match for each operand.
            for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
              // Search for the operand that matches SortedOps[OpIdx][Lane-1].
              Optional<unsigned> BestIdx =
                  getBestOperand(OpIdx, Lane, LastLane, ReorderingModes);
              // By not selecting a value, we allow the operands that follow to
              // select a better matching value. We will get a non-null value in
              // the next run of getBestOperand().
              if (BestIdx) {
                // Swap the current operand with the one returned by
                // getBestOperand().
                swap(OpIdx, BestIdx.getValue(), Lane);
              } else {
                // We failed to find a best operand, set mode to 'Failed'.
                ReorderingModes[OpIdx] = ReorderingMode::Failed;
                // Enable the second pass.
                StrategyFailed = true;
              }
            }
          }
        }
        // Skip second pass if the strategy did not fail.
        if (!StrategyFailed)
          break;
      }
    }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
      switch (RMode) {
      case ReorderingMode::Load:
        return "Load";
      case ReorderingMode::Opcode:
        return "Opcode";
      case ReorderingMode::Constant:
        return "Constant";
      case ReorderingMode::Splat:
        return "Splat";
      case ReorderingMode::Failed:
        return "Failed";
      }
      llvm_unreachable("Unimplemented Reordering Type");
    }

    LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
                                                   raw_ostream &OS) {
      return OS << getModeStr(RMode);
    }

    /// Debug print.
    LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
      printMode(RMode, dbgs());
    }

    friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
      return printMode(RMode, OS);
    }

    LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
      const unsigned Indent = 2;
      unsigned Cnt = 0;
      for (const OperandDataVec &OpDataVec : OpsVec) {
        OS << "Operand " << Cnt++ << "\n";
        for (const OperandData &OpData : OpDataVec) {
          OS.indent(Indent) << "{";
          if (Value *V = OpData.V)
            OS << *V;
          else
            OS << "null";
          OS << ", APO:" << OpData.APO << "}\n";
        }
        OS << "\n";
      }
      return OS;
    }

    /// Debug print.
    LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
#endif
  };

private:
  struct TreeEntry;

  /// Checks if all users of \p I are part of the vectorization tree.
  bool areAllUsersVectorized(Instruction *I) const;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, EdgeInfo EI);

  /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
  /// be vectorized to use the original vector (or aggregate "bitcast" to a
  /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
  /// returns false, setting \p CurrentOrder to either an empty vector or a
  /// non-identity permutation that allows us to reuse extract instructions.
  bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
                       SmallVectorImpl<unsigned> &CurrentOrder) const;

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty, const DenseSet<unsigned> &ShuffledIndices) const;

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL) const;

  /// Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL,
                                 const InstructionsState &S);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even if the tree height is tiny.
  bool isFullyVectorizableTinyTree() const;

  /// Reorder commutative or alt operands to get better probability of
  /// generating vectorized code.
  static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                             SmallVectorImpl<Value *> &Left,
                                             SmallVectorImpl<Value *> &Right,
                                             const DataLayout &DL,
                                             ScalarEvolution &SE);

  struct TreeEntry {
    TreeEntry(std::vector<TreeEntry> &Container) : Container(Container) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      if (VL.size() == Scalars.size())
        return std::equal(VL.begin(), VL.end(), Scalars.begin());
      return VL.size() == ReuseShuffleIndices.size() &&
             std::equal(
                 VL.begin(), VL.end(), ReuseShuffleIndices.begin(),
                 [this](Value *V, unsigned Idx) { return V == Scalars[Idx]; });
    }
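
    /// For example (illustrative): if Scalars == {%a, %b} and
    /// ReuseShuffleIndices == {0, 0, 1, 1}, then isSame({%a, %a, %b, %b})
    /// returns true, because the requested scalars are a shuffled reuse of
    /// this entry's scalars.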
    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue = nullptr;

    /// Do we need to gather this sequence?
    bool NeedToGather = false;

    /// Does this sequence require some shuffling?
    SmallVector<unsigned, 4> ReuseShuffleIndices;

    /// Does this entry require reordering?
    ArrayRef<unsigned> ReorderIndices;

    /// Points back to the VectorizableTree.
    ///
    /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has
    /// to be a pointer and needs to be able to initialize the child iterator.
    /// Thus we need a reference back to the container to translate the indices
    /// to entries.
    std::vector<TreeEntry> &Container;

    /// The TreeEntry index containing the user of this entry. We can actually
    /// have multiple users so the data structure is not truly a tree.
    SmallVector<EdgeInfo, 1> UserTreeIndices;

  private:
    /// The operands of each instruction in each lane Operands[op_index][lane].
    /// Note: This helps avoid the replication of the code that performs the
    /// reordering of operands during buildTree_rec() and vectorizeTree().
    SmallVector<ValueList, 2> Operands;

  public:
    /// Set this bundle's \p OpIdx'th operand to \p OpVL.
    void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL,
                    ArrayRef<unsigned> ReuseShuffleIndices) {
      if (Operands.size() < OpIdx + 1)
        Operands.resize(OpIdx + 1);
      assert(Operands[OpIdx].size() == 0 && "Already resized?");
      Operands[OpIdx].resize(Scalars.size());
      for (unsigned Lane = 0, E = Scalars.size(); Lane != E; ++Lane)
        Operands[OpIdx][Lane] = (!ReuseShuffleIndices.empty())
                                    ? OpVL[ReuseShuffleIndices[Lane]]
                                    : OpVL[Lane];
    }

    /// If there is a user TreeEntry, then set its operand.
    void trySetUserTEOperand(const EdgeInfo &UserTreeIdx,
                             ArrayRef<Value *> OpVL,
                             ArrayRef<unsigned> ReuseShuffleIndices) {
      if (UserTreeIdx.Idx >= 0) {
        auto &VectorizableTree = Container;
        VectorizableTree[UserTreeIdx.Idx].setOperand(UserTreeIdx.EdgeIdx, OpVL,
                                                     ReuseShuffleIndices);
      }
    }

    /// \returns the \p OpIdx operand of this TreeEntry.
    ValueList &getOperand(unsigned OpIdx) {
      assert(OpIdx < Operands.size() && "Off bounds");
      return Operands[OpIdx];
    }

    /// \return the single \p OpIdx operand.
    Value *getSingleOperand(unsigned OpIdx) const {
      assert(OpIdx < Operands.size() && "Off bounds");
      assert(!Operands[OpIdx].empty() && "No operand available");
      return Operands[OpIdx][0];
    }

#ifndef NDEBUG
    /// Debug printer.
    LLVM_DUMP_METHOD void dump() const {
      for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) {
        dbgs() << "Operand " << OpI << ":\n";
        for (const Value *V : Operands[OpI])
          dbgs().indent(2) << *V << "\n";
      }
      dbgs() << "Scalars: \n";
      for (Value *V : Scalars)
        dbgs().indent(2) << *V << "\n";
      dbgs() << "NeedToGather: " << NeedToGather << "\n";
      dbgs() << "VectorizedValue: ";
      if (VectorizedValue)
        dbgs() << *VectorizedValue;
      else
        dbgs() << "NULL";
      dbgs() << "\n";
      dbgs() << "ReuseShuffleIndices: ";
      if (ReuseShuffleIndices.empty())
        dbgs() << "Empty";
      else
        for (unsigned Idx : ReuseShuffleIndices)
          dbgs() << Idx << ", ";
      dbgs() << "\n";
      dbgs() << "ReorderIndices: ";
      for (unsigned Idx : ReorderIndices)
        dbgs() << Idx << ", ";
      dbgs() << "\n";
      dbgs() << "UserTreeIndices: ";
      for (const auto &EInfo : UserTreeIndices)
        dbgs() << EInfo << ", ";
      dbgs() << "\n";
    }
#endif
  };

  /// Create a new VectorizableTree entry.
  void newTreeEntry(ArrayRef<Value *> VL, bool Vectorized,
                    EdgeInfo &UserTreeIdx,
                    ArrayRef<unsigned> ReuseShuffleIndices = None,
                    ArrayRef<unsigned> ReorderIndices = None) {
    VectorizableTree.emplace_back(VectorizableTree);
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(),
                                     ReuseShuffleIndices.end());
    Last->ReorderIndices = ReorderIndices;
    if (Vectorized) {
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!getTreeEntry(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      MustGather.insert(VL.begin(), VL.end());
    }

    if (UserTreeIdx.Idx >= 0)
      Last->UserTreeIndices.push_back(UserTreeIdx);

    Last->trySetUserTEOperand(UserTreeIdx, VL, ReuseShuffleIndices);

    UserTreeIdx.Idx = idx;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

#ifndef NDEBUG
  /// Debug printer.
  LLVM_DUMP_METHOD void dumpVectorizableTree() const {
    for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) {
      dbgs() << Id << ".\n";
      VectorizableTree[Id].dump();
      dbgs() << "\n";
    }
  }
#endif

  TreeEntry *getTreeEntry(Value *V) {
    auto I = ScalarToTreeEntry.find(V);
    if (I != ScalarToTreeEntry.end())
      return &VectorizableTree[I->second];
    return nullptr;
  }

  const TreeEntry *getTreeEntry(Value *V) const {
    auto I = ScalarToTreeEntry.find(V);
    if (I != ScalarToTreeEntry.end())
      return &VectorizableTree[I->second];
    return nullptr;
  }

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value *, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L)
        : Scalar(S), User(U), Lane(L) {}

    // Which scalar in our function.
    Value *Scalar;

    // Which user that uses the scalar.
    llvm::User *User;

    // Which lane does the scalar belong to.
    int Lane;
  };
  using UserList = SmallVector<ExternalUser, 16>;

  /// Checks if two instructions may access the same memory.
  ///
  /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
  /// is invariant in the calling loop.
  bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
                 Instruction *Inst2) {
    // First check if the result is already in the cache.
    AliasCacheKey key = std::make_pair(Inst1, Inst2);
    Optional<bool> &result = AliasCache[key];
    if (result.hasValue()) {
      return result.getValue();
    }
    MemoryLocation Loc2 = getLocation(Inst2, AA);
    bool aliased = true;
    if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
      // Do the alias check.
      aliased = AA->alias(Loc1, Loc2);
    }
    // Store the result in the cache.
    result = aliased;
    return aliased;
  }

  using AliasCacheKey = std::pair<Instruction *, Instruction *>;

  /// Cache for alias results.
  /// TODO: consider moving this to the AliasAnalysis itself.
  DenseMap<AliasCacheKey, Optional<bool>> AliasCache;

  /// Removes an instruction from its block and eventually deletes it.
  /// It's like Instruction::eraseFromParent() except that the actual deletion
  /// is delayed until BoUpSLP is destructed.
  /// This is required to ensure that there are no incorrect collisions in the
  /// AliasCache, which can happen if a new instruction is allocated at the
  /// same address as a previously deleted instruction.
  void eraseInstruction(Instruction *I) {
    I->removeFromParent();
    I->dropAllReferences();
    DeletedInstructions.emplace_back(I);
  }

  /// Temporary store for deleted instructions. Instructions will be deleted
  /// eventually when the BoUpSLP is destructed.
  SmallVector<unique_value, 8> DeletedInstructions;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User). External User
  /// can be nullptr, which means that this Internal Scalar will be used later,
  /// after vectorization.
  UserList ExternalUses;

  /// Values used only by @llvm.assume calls.
  SmallPtrSet<const Value *, 32> EphValues;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;

  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;

  /// Contains all scheduling relevant data for an instruction.
  /// A ScheduleData either represents a single instruction or a member of an
  /// instruction bundle (= a group of instructions which is combined into a
  /// vector instruction).
  struct ScheduleData {
    // The initial value for the dependency counters. It means that the
    // dependencies are not calculated yet.
    enum { InvalidDeps = -1 };

    ScheduleData() = default;

    void init(int BlockSchedulingRegionID, Value *OpVal) {
      FirstInBundle = this;
      NextInBundle = nullptr;
      NextLoadStore = nullptr;
      IsScheduled = false;
      SchedulingRegionID = BlockSchedulingRegionID;
      UnscheduledDepsInBundle = UnscheduledDeps;
      clearDependencies();
      OpValue = OpVal;
    }

    /// Returns true if the dependency information has been calculated.
    bool hasValidDependencies() const { return Dependencies != InvalidDeps; }

    /// Returns true for single instructions and for bundle representatives
    /// (= the head of a bundle).
    bool isSchedulingEntity() const { return FirstInBundle == this; }

    /// Returns true if it represents an instruction bundle and not only a
    /// single instruction.
    bool isPartOfBundle() const {
      return NextInBundle != nullptr || FirstInBundle != this;
    }

    /// Returns true if it is ready for scheduling, i.e. it has no more
    /// unscheduled depending instructions/bundles.
    bool isReady() const {
      assert(isSchedulingEntity() &&
             "can't consider non-scheduling entity for ready list");
      return UnscheduledDepsInBundle == 0 && !IsScheduled;
    }

    /// Modifies the number of unscheduled dependencies, also updating it for
    /// the whole bundle.
    int incrementUnscheduledDeps(int Incr) {
      UnscheduledDeps += Incr;
      return FirstInBundle->UnscheduledDepsInBundle += Incr;
    }

    /// Sets the number of unscheduled dependencies to the number of
    /// dependencies.
    void resetUnscheduledDeps() {
      incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
    }

    /// Clears all dependency information.
    void clearDependencies() {
      Dependencies = InvalidDeps;
      resetUnscheduledDeps();
      MemoryDependencies.clear();
    }

    void dump(raw_ostream &os) const {
      if (!isSchedulingEntity()) {
        os << "/ " << *Inst;
      } else if (NextInBundle) {
        os << '[' << *Inst;
        ScheduleData *SD = NextInBundle;
        while (SD) {
          os << ';' << *SD->Inst;
          SD = SD->NextInBundle;
        }
        os << ']';
      } else {
        os << *Inst;
      }
    }

    Instruction *Inst = nullptr;

    /// Points to the head in an instruction bundle (and always to this for
    /// single instructions).
    ScheduleData *FirstInBundle = nullptr;

    /// Single linked list of all instructions in a bundle. Null if it is a
    /// single instruction.
    ScheduleData *NextInBundle = nullptr;

    /// Single linked list of all memory instructions (e.g. load, store, call)
    /// in the block - until the end of the scheduling region.
    ScheduleData *NextLoadStore = nullptr;

    /// The dependent memory instructions.
    /// This list is derived on demand in calculateDependencies().
    SmallVector<ScheduleData *, 4> MemoryDependencies;

    /// This ScheduleData is in the current scheduling region if this matches
    /// the current SchedulingRegionID of BlockScheduling.
    int SchedulingRegionID = 0;

    /// Used for getting a "good" final ordering of instructions.
    int SchedulingPriority = 0;

    /// The number of dependencies. Consists of the number of users of the
    /// instruction plus the number of dependent memory instructions (if any).
    /// This value is calculated on demand.
    /// If InvalidDeps, the number of dependencies is not calculated yet.
1505 int Dependencies = InvalidDeps;
1506
1507 /// The number of dependencies minus the number of dependencies of scheduled
1508 /// instructions. As soon as this is zero, the instruction/bundle gets ready
1509 /// for scheduling.
1510 /// Note that this is negative as long as Dependencies is not calculated.
1511 int UnscheduledDeps = InvalidDeps;
1512
1513 /// The sum of UnscheduledDeps in a bundle. Equals UnscheduledDeps for
1514 /// single instructions.
1515 int UnscheduledDepsInBundle = InvalidDeps;
1516
1517 /// True if this instruction is scheduled (or considered as scheduled in the
1518 /// dry-run).
1519 bool IsScheduled = false;
1520
1521 /// Opcode of the current instruction in the schedule data.
1522 Value *OpValue = nullptr;
1523 };
1524
1525 #ifndef NDEBUG
1526 friend inline raw_ostream &operator<<(raw_ostream &os,
1527 const BoUpSLP::ScheduleData &SD) {
1528 SD.dump(os);
1529 return os;
1530 }
1531 #endif
1532
1533 friend struct GraphTraits<BoUpSLP *>;
1534 friend struct DOTGraphTraits<BoUpSLP *>;
1535
1536 /// Contains all scheduling data for a basic block.
1537 struct BlockScheduling {
1538 BlockScheduling(BasicBlock *BB)
1539 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}
1540
1541 void clear() {
1542 ReadyInsts.clear();
1543 ScheduleStart = nullptr;
1544 ScheduleEnd = nullptr;
1545 FirstLoadStoreInRegion = nullptr;
1546 LastLoadStoreInRegion = nullptr;
1547
1548 // Reduce the maximum schedule region size by the size of the
1549 // previous scheduling run.
1550 ScheduleRegionSizeLimit -= ScheduleRegionSize;
1551 if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
1552 ScheduleRegionSizeLimit = MinScheduleRegionSize;
1553 ScheduleRegionSize = 0;
1554
1555 // Make a new scheduling region, i.e. all existing ScheduleData is not
1556 // in the new region yet.
1557 ++SchedulingRegionID;
1558 }
1559
1560 ScheduleData *getScheduleData(Value *V) {
1561 ScheduleData *SD = ScheduleDataMap[V];
1562 if (SD && SD->SchedulingRegionID == SchedulingRegionID)
1563 return SD;
1564 return nullptr;
1565 }
1566
1567 ScheduleData *getScheduleData(Value *V, Value *Key) {
1568 if (V == Key)
1569 return getScheduleData(V);
1570 auto I = ExtraScheduleDataMap.find(V);
1571 if (I != ExtraScheduleDataMap.end()) {
1572 ScheduleData *SD = I->second[Key];
1573 if (SD && SD->SchedulingRegionID == SchedulingRegionID)
1574 return SD;
1575 }
1576 return nullptr;
1577 }
1578
1579 bool isInSchedulingRegion(ScheduleData *SD) {
1580 return SD->SchedulingRegionID == SchedulingRegionID;
1581 }
1582
1583 /// Marks an instruction as scheduled and puts all dependent ready
1584 /// instructions into the ready-list.
1585 template <typename ReadyListType>
1586 void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
1587 SD->IsScheduled = true;
1588 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");
1589
1590 ScheduleData *BundleMember = SD;
1591 while (BundleMember) {
1592 if (BundleMember->Inst != BundleMember->OpValue) {
1593 BundleMember = BundleMember->NextInBundle;
1594 continue;
1595 }
1596 // Handle the def-use chain dependencies.
1597 for (Use &U : BundleMember->Inst->operands()) {
1598 auto *I = dyn_cast<Instruction>(U.get());
1599 if (!I)
1600 continue;
1601 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
1602 if (OpDef && OpDef->hasValidDependencies() &&
1603 OpDef->incrementUnscheduledDeps(-1) == 0) {
1604 // There are no more unscheduled dependencies after
1605 // decrementing, so we can put the dependent instruction
1606 // into the ready list.
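// Note that it is the bundle head (FirstInBundle) that goes into
// the ready list, not the member itself; only scheduling entities
// are ever scheduled.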
1607 ScheduleData *DepBundle = OpDef->FirstInBundle;
1608 assert(!DepBundle->IsScheduled &&
1609 "already scheduled bundle gets ready");
1610 ReadyList.insert(DepBundle);
1611 LLVM_DEBUG(dbgs()
1612 << "SLP: gets ready (def): " << *DepBundle << "\n");
1613 }
1614 });
1615 }
1616 // Handle the memory dependencies.
1617 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
1618 if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
1619 // There are no more unscheduled dependencies after decrementing,
1620 // so we can put the dependent instruction into the ready list.
1621 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
1622 assert(!DepBundle->IsScheduled &&
1623 "already scheduled bundle gets ready");
1624 ReadyList.insert(DepBundle);
1625 LLVM_DEBUG(dbgs()
1626 << "SLP: gets ready (mem): " << *DepBundle << "\n");
1627 }
1628 }
1629 BundleMember = BundleMember->NextInBundle;
1630 }
1631 }
1632
1633 void doForAllOpcodes(Value *V,
1634 function_ref<void(ScheduleData *SD)> Action) {
1635 if (ScheduleData *SD = getScheduleData(V))
1636 Action(SD);
1637 auto I = ExtraScheduleDataMap.find(V);
1638 if (I != ExtraScheduleDataMap.end())
1639 for (auto &P : I->second)
1640 if (P.second->SchedulingRegionID == SchedulingRegionID)
1641 Action(P.second);
1642 }
1643
1644 /// Put all instructions into the ReadyList which are ready for scheduling.
1645 template <typename ReadyListType>
1646 void initialFillReadyList(ReadyListType &ReadyList) {
1647 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
1648 doForAllOpcodes(I, [&](ScheduleData *SD) {
1649 if (SD->isSchedulingEntity() && SD->isReady()) {
1650 ReadyList.insert(SD);
1651 LLVM_DEBUG(dbgs()
1652 << "SLP: initially in ready list: " << *I << "\n");
1653 }
1654 });
1655 }
1656 }
1657
1658 /// Checks if a bundle of instructions can be scheduled, i.e. has no
1659 /// cyclic dependencies. This is only a dry-run; no instructions are
1660 /// actually moved at this stage.
1661 bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
1662 const InstructionsState &S);
1663
1664 /// Un-bundles a group of instructions.
1665 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);
1666
1667 /// Allocates schedule data chunk.
1668 ScheduleData *allocateScheduleDataChunks();
1669
1670 /// Extends the scheduling region so that V is inside the region.
1671 /// \returns true if the region size is within the limit.
1672 bool extendSchedulingRegion(Value *V, const InstructionsState &S);
1673
1674 /// Initialize the ScheduleData structures for new instructions in the
1675 /// scheduling region.
1676 void initScheduleData(Instruction *FromI, Instruction *ToI,
1677 ScheduleData *PrevLoadStore,
1678 ScheduleData *NextLoadStore);
1679
1680 /// Updates the dependency information of a bundle and of all instructions/
1681 /// bundles which depend on the original bundle.
1682 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
1683 BoUpSLP *SLP);
1684
1685 /// Sets all instructions in the scheduling region to un-scheduled.
1686 void resetSchedule();
1687
1688 BasicBlock *BB;
1689
1690 /// Simple memory allocation for ScheduleData.
1691 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
1692
1693 /// The size of a ScheduleData array in ScheduleDataChunks.
1694 int ChunkSize;
1695
1696 /// The allocator position in the current chunk, which is the last entry
1697 /// of ScheduleDataChunks.
1698 int ChunkPos;
1699
1700 /// Attaches ScheduleData to Instruction.
1701 /// Note that the mapping survives during all vectorization iterations, i.e. 1702 /// ScheduleData structures are recycled. 1703 DenseMap<Value *, ScheduleData *> ScheduleDataMap; 1704 1705 /// Attaches ScheduleData to Instruction with the leading key. 1706 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>> 1707 ExtraScheduleDataMap; 1708 1709 struct ReadyList : SmallVector<ScheduleData *, 8> { 1710 void insert(ScheduleData *SD) { push_back(SD); } 1711 }; 1712 1713 /// The ready-list for scheduling (only used for the dry-run). 1714 ReadyList ReadyInsts; 1715 1716 /// The first instruction of the scheduling region. 1717 Instruction *ScheduleStart = nullptr; 1718 1719 /// The first instruction _after_ the scheduling region. 1720 Instruction *ScheduleEnd = nullptr; 1721 1722 /// The first memory accessing instruction in the scheduling region 1723 /// (can be null). 1724 ScheduleData *FirstLoadStoreInRegion = nullptr; 1725 1726 /// The last memory accessing instruction in the scheduling region 1727 /// (can be null). 1728 ScheduleData *LastLoadStoreInRegion = nullptr; 1729 1730 /// The current size of the scheduling region. 1731 int ScheduleRegionSize = 0; 1732 1733 /// The maximum size allowed for the scheduling region. 1734 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget; 1735 1736 /// The ID of the scheduling region. For a new vectorization iteration this 1737 /// is incremented which "removes" all ScheduleData from the region. 1738 // Make sure that the initial SchedulingRegionID is greater than the 1739 // initial SchedulingRegionID in ScheduleData (which is 0). 1740 int SchedulingRegionID = 1; 1741 }; 1742 1743 /// Attaches the BlockScheduling structures to basic blocks. 1744 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules; 1745 1746 /// Performs the "real" scheduling. Done before vectorization is actually 1747 /// performed in a basic block. 1748 void scheduleBlock(BlockScheduling *BS); 1749 1750 /// List of users to ignore during scheduling and that don't need extracting. 1751 ArrayRef<Value *> UserIgnoreList; 1752 1753 using OrdersType = SmallVector<unsigned, 4>; 1754 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of 1755 /// sorted SmallVectors of unsigned. 1756 struct OrdersTypeDenseMapInfo { 1757 static OrdersType getEmptyKey() { 1758 OrdersType V; 1759 V.push_back(~1U); 1760 return V; 1761 } 1762 1763 static OrdersType getTombstoneKey() { 1764 OrdersType V; 1765 V.push_back(~2U); 1766 return V; 1767 } 1768 1769 static unsigned getHashValue(const OrdersType &V) { 1770 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); 1771 } 1772 1773 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) { 1774 return LHS == RHS; 1775 } 1776 }; 1777 1778 /// Contains orders of operations along with the number of bundles that have 1779 /// operations in this order. It stores only those orders that require 1780 /// reordering, if reordering is not required it is counted using \a 1781 /// NumOpsWantToKeepOriginalOrder. 1782 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo> NumOpsWantToKeepOrder; 1783 /// Number of bundles that do not require reordering. 1784 unsigned NumOpsWantToKeepOriginalOrder = 0; 1785 1786 // Analysis and block reference. 
1787 Function *F; 1788 ScalarEvolution *SE; 1789 TargetTransformInfo *TTI; 1790 TargetLibraryInfo *TLI; 1791 AliasAnalysis *AA; 1792 LoopInfo *LI; 1793 DominatorTree *DT; 1794 AssumptionCache *AC; 1795 DemandedBits *DB; 1796 const DataLayout *DL; 1797 OptimizationRemarkEmitter *ORE; 1798 1799 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. 1800 unsigned MinVecRegSize; // Set by cl::opt (default: 128). 1801 1802 /// Instruction builder to construct the vectorized tree. 1803 IRBuilder<> Builder; 1804 1805 /// A map of scalar integer values to the smallest bit width with which they 1806 /// can legally be represented. The values map to (width, signed) pairs, 1807 /// where "width" indicates the minimum bit width and "signed" is True if the 1808 /// value must be signed-extended, rather than zero-extended, back to its 1809 /// original width. 1810 MapVector<Value *, std::pair<uint64_t, bool>> MinBWs; 1811 }; 1812 1813 } // end namespace slpvectorizer 1814 1815 template <> struct GraphTraits<BoUpSLP *> { 1816 using TreeEntry = BoUpSLP::TreeEntry; 1817 1818 /// NodeRef has to be a pointer per the GraphWriter. 1819 using NodeRef = TreeEntry *; 1820 1821 /// Add the VectorizableTree to the index iterator to be able to return 1822 /// TreeEntry pointers. 1823 struct ChildIteratorType 1824 : public iterator_adaptor_base< 1825 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> { 1826 std::vector<TreeEntry> &VectorizableTree; 1827 1828 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W, 1829 std::vector<TreeEntry> &VT) 1830 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {} 1831 1832 NodeRef operator*() { return &VectorizableTree[I->Idx]; } 1833 }; 1834 1835 static NodeRef getEntryNode(BoUpSLP &R) { return &R.VectorizableTree[0]; } 1836 1837 static ChildIteratorType child_begin(NodeRef N) { 1838 return {N->UserTreeIndices.begin(), N->Container}; 1839 } 1840 1841 static ChildIteratorType child_end(NodeRef N) { 1842 return {N->UserTreeIndices.end(), N->Container}; 1843 } 1844 1845 /// For the node iterator we just need to turn the TreeEntry iterator into a 1846 /// TreeEntry* iterator so that it dereferences to NodeRef. 
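/// (pointer_iterator adapts an iterator with value type T so that
/// dereferencing yields T* rather than T&.)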
1847 using nodes_iterator = pointer_iterator<std::vector<TreeEntry>::iterator>; 1848 1849 static nodes_iterator nodes_begin(BoUpSLP *R) { 1850 return nodes_iterator(R->VectorizableTree.begin()); 1851 } 1852 1853 static nodes_iterator nodes_end(BoUpSLP *R) { 1854 return nodes_iterator(R->VectorizableTree.end()); 1855 } 1856 1857 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); } 1858 }; 1859 1860 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits { 1861 using TreeEntry = BoUpSLP::TreeEntry; 1862 1863 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {} 1864 1865 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) { 1866 std::string Str; 1867 raw_string_ostream OS(Str); 1868 if (isSplat(Entry->Scalars)) { 1869 OS << "<splat> " << *Entry->Scalars[0]; 1870 return Str; 1871 } 1872 for (auto V : Entry->Scalars) { 1873 OS << *V; 1874 if (std::any_of( 1875 R->ExternalUses.begin(), R->ExternalUses.end(), 1876 [&](const BoUpSLP::ExternalUser &EU) { return EU.Scalar == V; })) 1877 OS << " <extract>"; 1878 OS << "\n"; 1879 } 1880 return Str; 1881 } 1882 1883 static std::string getNodeAttributes(const TreeEntry *Entry, 1884 const BoUpSLP *) { 1885 if (Entry->NeedToGather) 1886 return "color=red"; 1887 return ""; 1888 } 1889 }; 1890 1891 } // end namespace llvm 1892 1893 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 1894 ArrayRef<Value *> UserIgnoreLst) { 1895 ExtraValueToDebugLocsMap ExternallyUsedValues; 1896 buildTree(Roots, ExternallyUsedValues, UserIgnoreLst); 1897 } 1898 1899 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 1900 ExtraValueToDebugLocsMap &ExternallyUsedValues, 1901 ArrayRef<Value *> UserIgnoreLst) { 1902 deleteTree(); 1903 UserIgnoreList = UserIgnoreLst; 1904 if (!allSameType(Roots)) 1905 return; 1906 buildTree_rec(Roots, 0, EdgeInfo()); 1907 1908 // Collect the values that we need to extract from the tree. 1909 for (TreeEntry &EIdx : VectorizableTree) { 1910 TreeEntry *Entry = &EIdx; 1911 1912 // No need to handle users of gathered values. 1913 if (Entry->NeedToGather) 1914 continue; 1915 1916 // For each lane: 1917 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 1918 Value *Scalar = Entry->Scalars[Lane]; 1919 int FoundLane = Lane; 1920 if (!Entry->ReuseShuffleIndices.empty()) { 1921 FoundLane = 1922 std::distance(Entry->ReuseShuffleIndices.begin(), 1923 llvm::find(Entry->ReuseShuffleIndices, FoundLane)); 1924 } 1925 1926 // Check if the scalar is externally used as an extra arg. 1927 auto ExtI = ExternallyUsedValues.find(Scalar); 1928 if (ExtI != ExternallyUsedValues.end()) { 1929 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " 1930 << Lane << " from " << *Scalar << ".\n"); 1931 ExternalUses.emplace_back(Scalar, nullptr, FoundLane); 1932 } 1933 for (User *U : Scalar->users()) { 1934 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); 1935 1936 Instruction *UserInst = dyn_cast<Instruction>(U); 1937 if (!UserInst) 1938 continue; 1939 1940 // Skip in-tree scalars that become vectors 1941 if (TreeEntry *UseEntry = getTreeEntry(U)) { 1942 Value *UseScalar = UseEntry->Scalars[0]; 1943 // Some in-tree scalars will remain as scalar in vectorized 1944 // instructions. If that is the case, the one in Lane 0 will 1945 // be used. 
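// (A typical example is the address computation feeding a vectorized
// load bundle: the pointer operands stay scalar, and only the lane-0
// copy is actually used by the vector load.)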
1946 if (UseScalar != U || 1947 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) { 1948 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U 1949 << ".\n"); 1950 assert(!UseEntry->NeedToGather && "Bad state"); 1951 continue; 1952 } 1953 } 1954 1955 // Ignore users in the user ignore list. 1956 if (is_contained(UserIgnoreList, UserInst)) 1957 continue; 1958 1959 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " 1960 << Lane << " from " << *Scalar << ".\n"); 1961 ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane)); 1962 } 1963 } 1964 } 1965 } 1966 1967 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, 1968 EdgeInfo UserTreeIdx) { 1969 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!"); 1970 1971 InstructionsState S = getSameOpcode(VL); 1972 if (Depth == RecursionMaxDepth) { 1973 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); 1974 newTreeEntry(VL, false, UserTreeIdx); 1975 return; 1976 } 1977 1978 // Don't handle vectors. 1979 if (S.OpValue->getType()->isVectorTy()) { 1980 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); 1981 newTreeEntry(VL, false, UserTreeIdx); 1982 return; 1983 } 1984 1985 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 1986 if (SI->getValueOperand()->getType()->isVectorTy()) { 1987 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); 1988 newTreeEntry(VL, false, UserTreeIdx); 1989 return; 1990 } 1991 1992 // If all of the operands are identical or constant we have a simple solution. 1993 if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode()) { 1994 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n"); 1995 newTreeEntry(VL, false, UserTreeIdx); 1996 return; 1997 } 1998 1999 // We now know that this is a vector of instructions of the same type from 2000 // the same block. 2001 2002 // Don't vectorize ephemeral values. 2003 for (unsigned i = 0, e = VL.size(); i != e; ++i) { 2004 if (EphValues.count(VL[i])) { 2005 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] 2006 << ") is ephemeral.\n"); 2007 newTreeEntry(VL, false, UserTreeIdx); 2008 return; 2009 } 2010 } 2011 2012 // Check if this is a duplicate of another entry. 2013 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 2014 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); 2015 if (!E->isSame(VL)) { 2016 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); 2017 newTreeEntry(VL, false, UserTreeIdx); 2018 return; 2019 } 2020 // Record the reuse of the tree node. FIXME, currently this is only used to 2021 // properly draw the graph rather than for the actual vectorization. 2022 E->UserTreeIndices.push_back(UserTreeIdx); 2023 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue 2024 << ".\n"); 2025 E->trySetUserTEOperand(UserTreeIdx, VL, None); 2026 return; 2027 } 2028 2029 // Check that none of the instructions in the bundle are already in the tree. 2030 for (unsigned i = 0, e = VL.size(); i != e; ++i) { 2031 auto *I = dyn_cast<Instruction>(VL[i]); 2032 if (!I) 2033 continue; 2034 if (getTreeEntry(I)) { 2035 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] 2036 << ") is already in tree.\n"); 2037 newTreeEntry(VL, false, UserTreeIdx); 2038 return; 2039 } 2040 } 2041 2042 // If any of the scalars is marked as a value that needs to stay scalar, then 2043 // we need to gather the scalars. 2044 // The reduction nodes (stored in UserIgnoreList) also should stay scalar. 
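// (For a horizontal reduction such as a sum, the intermediate adds are
// consumed by the reduction logic itself and must not be vectorized a
// second time here.)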
2045 for (unsigned i = 0, e = VL.size(); i != e; ++i) { 2046 if (MustGather.count(VL[i]) || is_contained(UserIgnoreList, VL[i])) { 2047 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); 2048 newTreeEntry(VL, false, UserTreeIdx); 2049 return; 2050 } 2051 } 2052 2053 // Check that all of the users of the scalars that we want to vectorize are 2054 // schedulable. 2055 auto *VL0 = cast<Instruction>(S.OpValue); 2056 BasicBlock *BB = VL0->getParent(); 2057 2058 if (!DT->isReachableFromEntry(BB)) { 2059 // Don't go into unreachable blocks. They may contain instructions with 2060 // dependency cycles which confuse the final scheduling. 2061 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); 2062 newTreeEntry(VL, false, UserTreeIdx); 2063 return; 2064 } 2065 2066 // Check that every instruction appears once in this bundle. 2067 SmallVector<unsigned, 4> ReuseShuffleIndicies; 2068 SmallVector<Value *, 4> UniqueValues; 2069 DenseMap<Value *, unsigned> UniquePositions; 2070 for (Value *V : VL) { 2071 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 2072 ReuseShuffleIndicies.emplace_back(Res.first->second); 2073 if (Res.second) 2074 UniqueValues.emplace_back(V); 2075 } 2076 if (UniqueValues.size() == VL.size()) { 2077 ReuseShuffleIndicies.clear(); 2078 } else { 2079 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); 2080 if (UniqueValues.size() <= 1 || !llvm::isPowerOf2_32(UniqueValues.size())) { 2081 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 2082 newTreeEntry(VL, false, UserTreeIdx); 2083 return; 2084 } 2085 VL = UniqueValues; 2086 } 2087 2088 auto &BSRef = BlocksSchedules[BB]; 2089 if (!BSRef) 2090 BSRef = llvm::make_unique<BlockScheduling>(BB); 2091 2092 BlockScheduling &BS = *BSRef.get(); 2093 2094 if (!BS.tryScheduleBundle(VL, this, S)) { 2095 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); 2096 assert((!BS.getScheduleData(VL0) || 2097 !BS.getScheduleData(VL0)->isPartOfBundle()) && 2098 "tryScheduleBundle should cancelScheduling on failure"); 2099 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2100 return; 2101 } 2102 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 2103 2104 unsigned ShuffleOrOp = S.isAltShuffle() ? 2105 (unsigned) Instruction::ShuffleVector : S.getOpcode(); 2106 switch (ShuffleOrOp) { 2107 case Instruction::PHI: { 2108 PHINode *PH = dyn_cast<PHINode>(VL0); 2109 2110 // Check for terminator values (e.g. invoke). 2111 for (unsigned j = 0; j < VL.size(); ++j) 2112 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 2113 Instruction *Term = dyn_cast<Instruction>( 2114 cast<PHINode>(VL[j])->getIncomingValueForBlock( 2115 PH->getIncomingBlock(i))); 2116 if (Term && Term->isTerminator()) { 2117 LLVM_DEBUG(dbgs() 2118 << "SLP: Need to swizzle PHINodes (terminator use).\n"); 2119 BS.cancelScheduling(VL, VL0); 2120 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2121 return; 2122 } 2123 } 2124 2125 newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); 2126 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 2127 2128 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 2129 ValueList Operands; 2130 // Prepare the operand vector. 
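// For a bundle of PHIs {%p0, %p1}, the operand vector for incoming
// block i holds the values %p0 and %p1 receive from that block, so
// each predecessor contributes one recursive bundle.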
2131 for (Value *j : VL) 2132 Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock( 2133 PH->getIncomingBlock(i))); 2134 2135 UserTreeIdx.EdgeIdx = i; 2136 buildTree_rec(Operands, Depth + 1, UserTreeIdx); 2137 } 2138 return; 2139 } 2140 case Instruction::ExtractValue: 2141 case Instruction::ExtractElement: { 2142 OrdersType CurrentOrder; 2143 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder); 2144 if (Reuse) { 2145 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n"); 2146 ++NumOpsWantToKeepOriginalOrder; 2147 newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx, 2148 ReuseShuffleIndicies); 2149 // This is a special case, as it does not gather, but at the same time 2150 // we are not extending buildTree_rec() towards the operands. 2151 ValueList Op0; 2152 Op0.assign(VL.size(), VL0->getOperand(0)); 2153 VectorizableTree.back().setOperand(0, Op0, ReuseShuffleIndicies); 2154 return; 2155 } 2156 if (!CurrentOrder.empty()) { 2157 LLVM_DEBUG({ 2158 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence " 2159 "with order"; 2160 for (unsigned Idx : CurrentOrder) 2161 dbgs() << " " << Idx; 2162 dbgs() << "\n"; 2163 }); 2164 // Insert new order with initial value 0, if it does not exist, 2165 // otherwise return the iterator to the existing one. 2166 auto StoredCurrentOrderAndNum = 2167 NumOpsWantToKeepOrder.try_emplace(CurrentOrder).first; 2168 ++StoredCurrentOrderAndNum->getSecond(); 2169 newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx, ReuseShuffleIndicies, 2170 StoredCurrentOrderAndNum->getFirst()); 2171 // This is a special case, as it does not gather, but at the same time 2172 // we are not extending buildTree_rec() towards the operands. 2173 ValueList Op0; 2174 Op0.assign(VL.size(), VL0->getOperand(0)); 2175 VectorizableTree.back().setOperand(0, Op0, ReuseShuffleIndicies); 2176 return; 2177 } 2178 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n"); 2179 newTreeEntry(VL, /*Vectorized=*/false, UserTreeIdx, ReuseShuffleIndicies); 2180 BS.cancelScheduling(VL, VL0); 2181 return; 2182 } 2183 case Instruction::Load: { 2184 // Check that a vectorized load would load the same memory as a scalar 2185 // load. For example, we don't want to vectorize loads that are smaller 2186 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 2187 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 2188 // from such a struct, we read/write packed bits disagreeing with the 2189 // unvectorized version. 2190 Type *ScalarTy = VL0->getType(); 2191 2192 if (DL->getTypeSizeInBits(ScalarTy) != 2193 DL->getTypeAllocSizeInBits(ScalarTy)) { 2194 BS.cancelScheduling(VL, VL0); 2195 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2196 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); 2197 return; 2198 } 2199 2200 // Make sure all loads in the bundle are simple - we can't vectorize 2201 // atomic or volatile loads. 2202 SmallVector<Value *, 4> PointerOps(VL.size()); 2203 auto POIter = PointerOps.begin(); 2204 for (Value *V : VL) { 2205 auto *L = cast<LoadInst>(V); 2206 if (!L->isSimple()) { 2207 BS.cancelScheduling(VL, VL0); 2208 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2209 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); 2210 return; 2211 } 2212 *POIter = L->getPointerOperand(); 2213 ++POIter; 2214 } 2215 2216 OrdersType CurrentOrder; 2217 // Check the order of pointer operands. 
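// The loads need not be in source order: e.g. loads from p, p+1, p+3,
// p+2 are accepted with CurrentOrder = {0, 1, 3, 2}, as long as the
// distance from the first to the last sorted pointer equals
// (VL.size() - 1) * sizeof(element).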
2218 if (llvm::sortPtrAccesses(PointerOps, *DL, *SE, CurrentOrder)) {
2219 Value *Ptr0;
2220 Value *PtrN;
2221 if (CurrentOrder.empty()) {
2222 Ptr0 = PointerOps.front();
2223 PtrN = PointerOps.back();
2224 } else {
2225 Ptr0 = PointerOps[CurrentOrder.front()];
2226 PtrN = PointerOps[CurrentOrder.back()];
2227 }
2228 const SCEV *Scev0 = SE->getSCEV(Ptr0);
2229 const SCEV *ScevN = SE->getSCEV(PtrN);
2230 const auto *Diff =
2231 dyn_cast<SCEVConstant>(SE->getMinusSCEV(ScevN, Scev0));
2232 uint64_t Size = DL->getTypeAllocSize(ScalarTy);
2233 // Check that the sorted loads are consecutive.
2234 if (Diff && Diff->getAPInt().getZExtValue() == (VL.size() - 1) * Size) {
2235 if (CurrentOrder.empty()) {
2236 // The original loads are consecutive and do not require reordering.
2237 ++NumOpsWantToKeepOriginalOrder;
2238 newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx,
2239 ReuseShuffleIndicies);
2240 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
2241 } else {
2242 // Need to reorder.
2243 auto I = NumOpsWantToKeepOrder.try_emplace(CurrentOrder).first;
2244 ++I->getSecond();
2245 newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx,
2246 ReuseShuffleIndicies, I->getFirst());
2247 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
2248 }
2249 return;
2250 }
2251 }
2252
2253 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
2254 BS.cancelScheduling(VL, VL0);
2255 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
2256 return;
2257 }
2258 case Instruction::ZExt:
2259 case Instruction::SExt:
2260 case Instruction::FPToUI:
2261 case Instruction::FPToSI:
2262 case Instruction::FPExt:
2263 case Instruction::PtrToInt:
2264 case Instruction::IntToPtr:
2265 case Instruction::SIToFP:
2266 case Instruction::UIToFP:
2267 case Instruction::Trunc:
2268 case Instruction::FPTrunc:
2269 case Instruction::BitCast: {
2270 Type *SrcTy = VL0->getOperand(0)->getType();
2271 for (unsigned i = 0; i < VL.size(); ++i) {
2272 Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
2273 if (Ty != SrcTy || !isValidElementType(Ty)) {
2274 BS.cancelScheduling(VL, VL0);
2275 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
2276 LLVM_DEBUG(dbgs()
2277 << "SLP: Gathering casts with different src types.\n");
2278 return;
2279 }
2280 }
2281 newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
2282 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");
2283
2284 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
2285 ValueList Operands;
2286 // Prepare the operand vector.
2287 for (Value *j : VL)
2288 Operands.push_back(cast<Instruction>(j)->getOperand(i));
2289
2290 UserTreeIdx.EdgeIdx = i;
2291 buildTree_rec(Operands, Depth + 1, UserTreeIdx);
2292 }
2293 return;
2294 }
2295 case Instruction::ICmp:
2296 case Instruction::FCmp: {
2297 // Check that all of the compares have the same predicate.
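// A swapped predicate is also accepted, since the operands can be
// commuted below: e.g. 'icmp sgt %a, %b' can be bundled with
// 'icmp slt %b, %a'.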
2298 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 2299 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0); 2300 Type *ComparedTy = VL0->getOperand(0)->getType(); 2301 for (unsigned i = 1, e = VL.size(); i < e; ++i) { 2302 CmpInst *Cmp = cast<CmpInst>(VL[i]); 2303 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) || 2304 Cmp->getOperand(0)->getType() != ComparedTy) { 2305 BS.cancelScheduling(VL, VL0); 2306 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2307 LLVM_DEBUG(dbgs() 2308 << "SLP: Gathering cmp with different predicate.\n"); 2309 return; 2310 } 2311 } 2312 2313 newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); 2314 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 2315 2316 ValueList Left, Right; 2317 if (cast<CmpInst>(VL0)->isCommutative()) { 2318 // Commutative predicate - collect + sort operands of the instructions 2319 // so that each side is more likely to have the same opcode. 2320 assert(P0 == SwapP0 && "Commutative Predicate mismatch"); 2321 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE); 2322 } else { 2323 // Collect operands - commute if it uses the swapped predicate. 2324 for (Value *V : VL) { 2325 auto *Cmp = cast<CmpInst>(V); 2326 Value *LHS = Cmp->getOperand(0); 2327 Value *RHS = Cmp->getOperand(1); 2328 if (Cmp->getPredicate() != P0) 2329 std::swap(LHS, RHS); 2330 Left.push_back(LHS); 2331 Right.push_back(RHS); 2332 } 2333 } 2334 2335 UserTreeIdx.EdgeIdx = 0; 2336 buildTree_rec(Left, Depth + 1, UserTreeIdx); 2337 UserTreeIdx.EdgeIdx = 1; 2338 buildTree_rec(Right, Depth + 1, UserTreeIdx); 2339 return; 2340 } 2341 case Instruction::Select: 2342 case Instruction::Add: 2343 case Instruction::FAdd: 2344 case Instruction::Sub: 2345 case Instruction::FSub: 2346 case Instruction::Mul: 2347 case Instruction::FMul: 2348 case Instruction::UDiv: 2349 case Instruction::SDiv: 2350 case Instruction::FDiv: 2351 case Instruction::URem: 2352 case Instruction::SRem: 2353 case Instruction::FRem: 2354 case Instruction::Shl: 2355 case Instruction::LShr: 2356 case Instruction::AShr: 2357 case Instruction::And: 2358 case Instruction::Or: 2359 case Instruction::Xor: 2360 newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies); 2361 LLVM_DEBUG(dbgs() << "SLP: added a vector of bin op.\n"); 2362 2363 // Sort operands of the instructions so that each side is more likely to 2364 // have the same opcode. 2365 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 2366 ValueList Left, Right; 2367 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE); 2368 UserTreeIdx.EdgeIdx = 0; 2369 buildTree_rec(Left, Depth + 1, UserTreeIdx); 2370 UserTreeIdx.EdgeIdx = 1; 2371 buildTree_rec(Right, Depth + 1, UserTreeIdx); 2372 return; 2373 } 2374 2375 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 2376 ValueList Operands; 2377 // Prepare the operand vector. 2378 for (Value *j : VL) 2379 Operands.push_back(cast<Instruction>(j)->getOperand(i)); 2380 2381 UserTreeIdx.EdgeIdx = i; 2382 buildTree_rec(Operands, Depth + 1, UserTreeIdx); 2383 } 2384 return; 2385 2386 case Instruction::GetElementPtr: { 2387 // We don't combine GEPs with complicated (nested) indexing. 
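// Only simple two-operand GEPs (base pointer plus one index) are
// accepted below; e.g. 'getelementptr i32, i32* %p, i64 7' qualifies,
// while a struct access with two index operands does not.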
2388 for (unsigned j = 0; j < VL.size(); ++j) {
2389 if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
2390 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
2391 BS.cancelScheduling(VL, VL0);
2392 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
2393 return;
2394 }
2395 }
2396
2397 // We can't combine several GEPs into one vector if they operate on
2398 // different types.
2399 Type *Ty0 = VL0->getOperand(0)->getType();
2400 for (unsigned j = 0; j < VL.size(); ++j) {
2401 Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
2402 if (Ty0 != CurTy) {
2403 LLVM_DEBUG(dbgs()
2404 << "SLP: not-vectorizable GEP (different types).\n");
2405 BS.cancelScheduling(VL, VL0);
2406 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
2407 return;
2408 }
2409 }
2410
2411 // We don't combine GEPs with non-constant indexes.
2412 for (unsigned j = 0; j < VL.size(); ++j) {
2413 auto Op = cast<Instruction>(VL[j])->getOperand(1);
2414 if (!isa<ConstantInt>(Op)) {
2415 LLVM_DEBUG(dbgs()
2416 << "SLP: not-vectorizable GEP (non-constant indexes).\n");
2417 BS.cancelScheduling(VL, VL0);
2418 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
2419 return;
2420 }
2421 }
2422
2423 newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
2424 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
2425 for (unsigned i = 0, e = 2; i < e; ++i) {
2426 ValueList Operands;
2427 // Prepare the operand vector.
2428 for (Value *j : VL)
2429 Operands.push_back(cast<Instruction>(j)->getOperand(i));
2430
2431 UserTreeIdx.EdgeIdx = i;
2432 buildTree_rec(Operands, Depth + 1, UserTreeIdx);
2433 }
2434 return;
2435 }
2436 case Instruction::Store: {
2437 // Check if the stores are consecutive or if we need to swizzle them.
2438 for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
2439 if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
2440 BS.cancelScheduling(VL, VL0);
2441 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
2442 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
2443 return;
2444 }
2445
2446 newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
2447 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
2448
2449 ValueList Operands;
2450 for (Value *j : VL)
2451 Operands.push_back(cast<Instruction>(j)->getOperand(0));
2452
2453 UserTreeIdx.EdgeIdx = 0;
2454 buildTree_rec(Operands, Depth + 1, UserTreeIdx);
2455 return;
2456 }
2457 case Instruction::Call: {
2458 // Check if the calls are all to the same vectorizable intrinsic.
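// For example, four calls to @llvm.fabs.f32 can become a single
// @llvm.fabs.v4f32 call, provided the operand and bundle checks below
// all pass.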
2459 CallInst *CI = cast<CallInst>(VL0);
2460 // Check if this is an Intrinsic call or something that can be
2461 // represented by an intrinsic call.
2462 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
2463 if (!isTriviallyVectorizable(ID)) {
2464 BS.cancelScheduling(VL, VL0);
2465 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
2466 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
2467 return;
2468 }
2469 Function *Int = CI->getCalledFunction();
2470 unsigned NumArgs = CI->getNumArgOperands();
2471 SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr);
2472 for (unsigned j = 0; j != NumArgs; ++j)
2473 if (hasVectorInstrinsicScalarOpd(ID, j))
2474 ScalarArgs[j] = CI->getArgOperand(j);
2475 for (unsigned i = 1, e = VL.size(); i != e; ++i) {
2476 CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
2477 if (!CI2 || CI2->getCalledFunction() != Int ||
2478 getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
2479 !CI->hasIdenticalOperandBundleSchema(*CI2)) {
2480 BS.cancelScheduling(VL, VL0);
2481 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
2482 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
2483 << "\n");
2484 return;
2485 }
2486 // Some intrinsics have scalar arguments, and those arguments must be
2487 // the same across all calls for them to be vectorized.
2488 for (unsigned j = 0; j != NumArgs; ++j) {
2489 if (hasVectorInstrinsicScalarOpd(ID, j)) {
2490 Value *A1J = CI2->getArgOperand(j);
2491 if (ScalarArgs[j] != A1J) {
2492 BS.cancelScheduling(VL, VL0);
2493 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
2494 LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
2495 << " argument " << ScalarArgs[j] << "!=" << A1J
2496 << "\n");
2497 return;
2498 }
2499 }
2500 }
2501 // Verify that the bundle operands are identical between the two calls.
2502 if (CI->hasOperandBundles() &&
2503 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
2504 CI->op_begin() + CI->getBundleOperandsEndIndex(),
2505 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
2506 BS.cancelScheduling(VL, VL0);
2507 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
2508 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
2509 << *CI << "!=" << *VL[i] << '\n');
2510 return;
2511 }
2512 }
2513
2514 newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
2515 for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
2516 ValueList Operands;
2517 // Prepare the operand vector.
2518 for (Value *j : VL) {
2519 CallInst *CI2 = dyn_cast<CallInst>(j);
2520 Operands.push_back(CI2->getArgOperand(i));
2521 }
2522 UserTreeIdx.EdgeIdx = i;
2523 buildTree_rec(Operands, Depth + 1, UserTreeIdx);
2524 }
2525 return;
2526 }
2527 case Instruction::ShuffleVector:
2528 // If this is not an alternating sequence of opcodes (like add/sub),
2529 // then do not vectorize this instruction.
2530 if (!S.isAltShuffle()) {
2531 BS.cancelScheduling(VL, VL0);
2532 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
2533 LLVM_DEBUG(dbgs() << "SLP: ShuffleVectors are not vectorized.\n");
2534 return;
2535 }
2536 newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
2537 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
2538
2539 // Reorder operands if reordering would enable vectorization.
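// For illustration, with an add/sub alternation {a0+b0, a1-b1, a2+b2,
// a3-b3}, grouping the a's into Left and the b's into Right makes each
// operand bundle more likely to vectorize on its own.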
2540 if (isa<BinaryOperator>(VL0)) { 2541 ValueList Left, Right; 2542 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE); 2543 UserTreeIdx.EdgeIdx = 0; 2544 buildTree_rec(Left, Depth + 1, UserTreeIdx); 2545 UserTreeIdx.EdgeIdx = 1; 2546 buildTree_rec(Right, Depth + 1, UserTreeIdx); 2547 return; 2548 } 2549 2550 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 2551 ValueList Operands; 2552 // Prepare the operand vector. 2553 for (Value *j : VL) 2554 Operands.push_back(cast<Instruction>(j)->getOperand(i)); 2555 2556 UserTreeIdx.EdgeIdx = i; 2557 buildTree_rec(Operands, Depth + 1, UserTreeIdx); 2558 } 2559 return; 2560 2561 default: 2562 BS.cancelScheduling(VL, VL0); 2563 newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies); 2564 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); 2565 return; 2566 } 2567 } 2568 2569 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const { 2570 unsigned N; 2571 Type *EltTy; 2572 auto *ST = dyn_cast<StructType>(T); 2573 if (ST) { 2574 N = ST->getNumElements(); 2575 EltTy = *ST->element_begin(); 2576 } else { 2577 N = cast<ArrayType>(T)->getNumElements(); 2578 EltTy = cast<ArrayType>(T)->getElementType(); 2579 } 2580 if (!isValidElementType(EltTy)) 2581 return 0; 2582 uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N)); 2583 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T)) 2584 return 0; 2585 if (ST) { 2586 // Check that struct is homogeneous. 2587 for (const auto *Ty : ST->elements()) 2588 if (Ty != EltTy) 2589 return 0; 2590 } 2591 return N; 2592 } 2593 2594 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 2595 SmallVectorImpl<unsigned> &CurrentOrder) const { 2596 Instruction *E0 = cast<Instruction>(OpValue); 2597 assert(E0->getOpcode() == Instruction::ExtractElement || 2598 E0->getOpcode() == Instruction::ExtractValue); 2599 assert(E0->getOpcode() == getSameOpcode(VL).getOpcode() && "Invalid opcode"); 2600 // Check if all of the extracts come from the same vector and from the 2601 // correct offset. 2602 Value *Vec = E0->getOperand(0); 2603 2604 CurrentOrder.clear(); 2605 2606 // We have to extract from a vector/aggregate with the same number of elements. 2607 unsigned NElts; 2608 if (E0->getOpcode() == Instruction::ExtractValue) { 2609 const DataLayout &DL = E0->getModule()->getDataLayout(); 2610 NElts = canMapToVector(Vec->getType(), DL); 2611 if (!NElts) 2612 return false; 2613 // Check if load can be rewritten as load of vector. 2614 LoadInst *LI = dyn_cast<LoadInst>(Vec); 2615 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 2616 return false; 2617 } else { 2618 NElts = Vec->getType()->getVectorNumElements(); 2619 } 2620 2621 if (NElts != VL.size()) 2622 return false; 2623 2624 // Check that all of the indices extract from the correct offset. 2625 bool ShouldKeepOrder = true; 2626 unsigned E = VL.size(); 2627 // Assign to all items the initial value E + 1 so we can check if the extract 2628 // instruction index was used already. 2629 // Also, later we can check that all the indices are used and we have a 2630 // consecutive access in the extract instructions, by checking that no 2631 // element of CurrentOrder still has value E + 1. 
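// For example, extract indices 0, 2, 1, 3 with E == 4 leave
// CurrentOrder == {0, 2, 1, 3} and make the function return false: the
// extracts are reusable but need reordering.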
2632 CurrentOrder.assign(E, E + 1); 2633 unsigned I = 0; 2634 for (; I < E; ++I) { 2635 auto *Inst = cast<Instruction>(VL[I]); 2636 if (Inst->getOperand(0) != Vec) 2637 break; 2638 Optional<unsigned> Idx = getExtractIndex(Inst); 2639 if (!Idx) 2640 break; 2641 const unsigned ExtIdx = *Idx; 2642 if (ExtIdx != I) { 2643 if (ExtIdx >= E || CurrentOrder[ExtIdx] != E + 1) 2644 break; 2645 ShouldKeepOrder = false; 2646 CurrentOrder[ExtIdx] = I; 2647 } else { 2648 if (CurrentOrder[I] != E + 1) 2649 break; 2650 CurrentOrder[I] = I; 2651 } 2652 } 2653 if (I < E) { 2654 CurrentOrder.clear(); 2655 return false; 2656 } 2657 2658 return ShouldKeepOrder; 2659 } 2660 2661 bool BoUpSLP::areAllUsersVectorized(Instruction *I) const { 2662 return I->hasOneUse() || 2663 std::all_of(I->user_begin(), I->user_end(), [this](User *U) { 2664 return ScalarToTreeEntry.count(U) > 0; 2665 }); 2666 } 2667 2668 int BoUpSLP::getEntryCost(TreeEntry *E) { 2669 ArrayRef<Value*> VL = E->Scalars; 2670 2671 Type *ScalarTy = VL[0]->getType(); 2672 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 2673 ScalarTy = SI->getValueOperand()->getType(); 2674 else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0])) 2675 ScalarTy = CI->getOperand(0)->getType(); 2676 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 2677 2678 // If we have computed a smaller type for the expression, update VecTy so 2679 // that the costs will be accurate. 2680 if (MinBWs.count(VL[0])) 2681 VecTy = VectorType::get( 2682 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size()); 2683 2684 unsigned ReuseShuffleNumbers = E->ReuseShuffleIndices.size(); 2685 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 2686 int ReuseShuffleCost = 0; 2687 if (NeedToShuffleReuses) { 2688 ReuseShuffleCost = 2689 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy); 2690 } 2691 if (E->NeedToGather) { 2692 if (allConstant(VL)) 2693 return 0; 2694 if (isSplat(VL)) { 2695 return ReuseShuffleCost + 2696 TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0); 2697 } 2698 if (getSameOpcode(VL).getOpcode() == Instruction::ExtractElement && 2699 allSameType(VL) && allSameBlock(VL)) { 2700 Optional<TargetTransformInfo::ShuffleKind> ShuffleKind = isShuffle(VL); 2701 if (ShuffleKind.hasValue()) { 2702 int Cost = TTI->getShuffleCost(ShuffleKind.getValue(), VecTy); 2703 for (auto *V : VL) { 2704 // If all users of instruction are going to be vectorized and this 2705 // instruction itself is not going to be vectorized, consider this 2706 // instruction as dead and remove its cost from the final cost of the 2707 // vectorized tree. 2708 if (areAllUsersVectorized(cast<Instruction>(V)) && 2709 !ScalarToTreeEntry.count(V)) { 2710 auto *IO = cast<ConstantInt>( 2711 cast<ExtractElementInst>(V)->getIndexOperand()); 2712 Cost -= TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, 2713 IO->getZExtValue()); 2714 } 2715 } 2716 return ReuseShuffleCost + Cost; 2717 } 2718 } 2719 return ReuseShuffleCost + getGatherCost(VL); 2720 } 2721 InstructionsState S = getSameOpcode(VL); 2722 assert(S.getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL"); 2723 Instruction *VL0 = cast<Instruction>(S.OpValue); 2724 unsigned ShuffleOrOp = S.isAltShuffle() ? 
2725 (unsigned) Instruction::ShuffleVector : S.getOpcode();
2726 switch (ShuffleOrOp) {
2727 case Instruction::PHI:
2728 return 0;
2729
2730 case Instruction::ExtractValue:
2731 case Instruction::ExtractElement:
2732 if (NeedToShuffleReuses) {
2733 unsigned Idx = 0;
2734 for (unsigned I : E->ReuseShuffleIndices) {
2735 if (ShuffleOrOp == Instruction::ExtractElement) {
2736 auto *IO = cast<ConstantInt>(
2737 cast<ExtractElementInst>(VL[I])->getIndexOperand());
2738 Idx = IO->getZExtValue();
2739 ReuseShuffleCost -= TTI->getVectorInstrCost(
2740 Instruction::ExtractElement, VecTy, Idx);
2741 } else {
2742 ReuseShuffleCost -= TTI->getVectorInstrCost(
2743 Instruction::ExtractElement, VecTy, Idx);
2744 ++Idx;
2745 }
2746 }
2747 Idx = ReuseShuffleNumbers;
2748 for (Value *V : VL) {
2749 if (ShuffleOrOp == Instruction::ExtractElement) {
2750 auto *IO = cast<ConstantInt>(
2751 cast<ExtractElementInst>(V)->getIndexOperand());
2752 Idx = IO->getZExtValue();
2753 } else {
2754 --Idx;
2755 }
2756 ReuseShuffleCost +=
2757 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, Idx);
2758 }
2759 }
2760 if (!E->NeedToGather) {
2761 int DeadCost = ReuseShuffleCost;
2762 if (!E->ReorderIndices.empty()) {
2763 // TODO: Merge this shuffle with the ReuseShuffleCost.
2764 DeadCost += TTI->getShuffleCost(
2765 TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
2766 }
2767 for (unsigned i = 0, e = VL.size(); i < e; ++i) {
2768 Instruction *E = cast<Instruction>(VL[i]);
2769 // If all users are going to be vectorized, the instruction can be
2770 // considered dead. The same holds if it has only one user, since
2771 // that user will be vectorized for sure.
2772 if (areAllUsersVectorized(E)) {
2773 // Take credit for instruction that will become dead.
2774 if (E->hasOneUse()) {
2775 Instruction *Ext = E->user_back();
2776 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
2777 all_of(Ext->users(),
2778 [](User *U) { return isa<GetElementPtrInst>(U); })) {
2779 // Use getExtractWithExtendCost() to calculate the cost of
2780 // extractelement/ext pair.
2781 DeadCost -= TTI->getExtractWithExtendCost(
2782 Ext->getOpcode(), Ext->getType(), VecTy, i);
2783 // Add back the cost of s|zext which is subtracted separately.
2784 DeadCost += TTI->getCastInstrCost(
2785 Ext->getOpcode(), Ext->getType(), E->getType(), Ext);
2786 continue;
2787 }
2788 }
2789 DeadCost -=
2790 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
2791 }
2792 }
2793 return DeadCost;
2794 }
2795 return ReuseShuffleCost + getGatherCost(VL);
2796
2797 case Instruction::ZExt:
2798 case Instruction::SExt:
2799 case Instruction::FPToUI:
2800 case Instruction::FPToSI:
2801 case Instruction::FPExt:
2802 case Instruction::PtrToInt:
2803 case Instruction::IntToPtr:
2804 case Instruction::SIToFP:
2805 case Instruction::UIToFP:
2806 case Instruction::Trunc:
2807 case Instruction::FPTrunc:
2808 case Instruction::BitCast: {
2809 Type *SrcTy = VL0->getOperand(0)->getType();
2810 int ScalarEltCost =
2811 TTI->getCastInstrCost(S.getOpcode(), ScalarTy, SrcTy, VL0);
2812 if (NeedToShuffleReuses) {
2813 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
2814 }
2815
2816 // Calculate the cost of this instruction.
2817 int ScalarCost = VL.size() * ScalarEltCost;
2818
2819 VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
2820 int VecCost = 0;
2821 // Check if the values are candidates to demote.
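// If the expression was demoted to a narrower type and the source
// vector type already matches the demoted type, the cast becomes a
// no-op and its vector cost stays zero.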
2822 if (!MinBWs.count(VL0) || VecTy != SrcVecTy) { 2823 VecCost = ReuseShuffleCost + 2824 TTI->getCastInstrCost(S.getOpcode(), VecTy, SrcVecTy, VL0); 2825 } 2826 return VecCost - ScalarCost; 2827 } 2828 case Instruction::FCmp: 2829 case Instruction::ICmp: 2830 case Instruction::Select: { 2831 // Calculate the cost of this instruction. 2832 int ScalarEltCost = TTI->getCmpSelInstrCost(S.getOpcode(), ScalarTy, 2833 Builder.getInt1Ty(), VL0); 2834 if (NeedToShuffleReuses) { 2835 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 2836 } 2837 VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size()); 2838 int ScalarCost = VecTy->getNumElements() * ScalarEltCost; 2839 int VecCost = TTI->getCmpSelInstrCost(S.getOpcode(), VecTy, MaskTy, VL0); 2840 return ReuseShuffleCost + VecCost - ScalarCost; 2841 } 2842 case Instruction::Add: 2843 case Instruction::FAdd: 2844 case Instruction::Sub: 2845 case Instruction::FSub: 2846 case Instruction::Mul: 2847 case Instruction::FMul: 2848 case Instruction::UDiv: 2849 case Instruction::SDiv: 2850 case Instruction::FDiv: 2851 case Instruction::URem: 2852 case Instruction::SRem: 2853 case Instruction::FRem: 2854 case Instruction::Shl: 2855 case Instruction::LShr: 2856 case Instruction::AShr: 2857 case Instruction::And: 2858 case Instruction::Or: 2859 case Instruction::Xor: { 2860 // Certain instructions can be cheaper to vectorize if they have a 2861 // constant second vector operand. 2862 TargetTransformInfo::OperandValueKind Op1VK = 2863 TargetTransformInfo::OK_AnyValue; 2864 TargetTransformInfo::OperandValueKind Op2VK = 2865 TargetTransformInfo::OK_UniformConstantValue; 2866 TargetTransformInfo::OperandValueProperties Op1VP = 2867 TargetTransformInfo::OP_None; 2868 TargetTransformInfo::OperandValueProperties Op2VP = 2869 TargetTransformInfo::OP_PowerOf2; 2870 2871 // If all operands are exactly the same ConstantInt then set the 2872 // operand kind to OK_UniformConstantValue. 2873 // If instead not all operands are constants, then set the operand kind 2874 // to OK_AnyValue. If all operands are constants but not the same, 2875 // then set the operand kind to OK_NonUniformConstantValue. 
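// E.g. {shl %x, 4; shl %y, 4} keeps OK_UniformConstantValue with
// OP_PowerOf2, {shl %x, 4; shl %y, 2} becomes
// OK_NonUniformConstantValue, and a non-constant shift amount falls
// back to OK_AnyValue.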
2876 ConstantInt *CInt0 = nullptr; 2877 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 2878 const Instruction *I = cast<Instruction>(VL[i]); 2879 ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(1)); 2880 if (!CInt) { 2881 Op2VK = TargetTransformInfo::OK_AnyValue; 2882 Op2VP = TargetTransformInfo::OP_None; 2883 break; 2884 } 2885 if (Op2VP == TargetTransformInfo::OP_PowerOf2 && 2886 !CInt->getValue().isPowerOf2()) 2887 Op2VP = TargetTransformInfo::OP_None; 2888 if (i == 0) { 2889 CInt0 = CInt; 2890 continue; 2891 } 2892 if (CInt0 != CInt) 2893 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 2894 } 2895 2896 SmallVector<const Value *, 4> Operands(VL0->operand_values()); 2897 int ScalarEltCost = TTI->getArithmeticInstrCost( 2898 S.getOpcode(), ScalarTy, Op1VK, Op2VK, Op1VP, Op2VP, Operands); 2899 if (NeedToShuffleReuses) { 2900 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 2901 } 2902 int ScalarCost = VecTy->getNumElements() * ScalarEltCost; 2903 int VecCost = TTI->getArithmeticInstrCost(S.getOpcode(), VecTy, Op1VK, 2904 Op2VK, Op1VP, Op2VP, Operands); 2905 return ReuseShuffleCost + VecCost - ScalarCost; 2906 } 2907 case Instruction::GetElementPtr: { 2908 TargetTransformInfo::OperandValueKind Op1VK = 2909 TargetTransformInfo::OK_AnyValue; 2910 TargetTransformInfo::OperandValueKind Op2VK = 2911 TargetTransformInfo::OK_UniformConstantValue; 2912 2913 int ScalarEltCost = 2914 TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK); 2915 if (NeedToShuffleReuses) { 2916 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 2917 } 2918 int ScalarCost = VecTy->getNumElements() * ScalarEltCost; 2919 int VecCost = 2920 TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK); 2921 return ReuseShuffleCost + VecCost - ScalarCost; 2922 } 2923 case Instruction::Load: { 2924 // Cost of wide load - cost of scalar loads. 2925 unsigned alignment = cast<LoadInst>(VL0)->getAlignment(); 2926 int ScalarEltCost = 2927 TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0, VL0); 2928 if (NeedToShuffleReuses) { 2929 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 2930 } 2931 int ScalarLdCost = VecTy->getNumElements() * ScalarEltCost; 2932 int VecLdCost = 2933 TTI->getMemoryOpCost(Instruction::Load, VecTy, alignment, 0, VL0); 2934 if (!E->ReorderIndices.empty()) { 2935 // TODO: Merge this shuffle with the ReuseShuffleCost. 2936 VecLdCost += TTI->getShuffleCost( 2937 TargetTransformInfo::SK_PermuteSingleSrc, VecTy); 2938 } 2939 return ReuseShuffleCost + VecLdCost - ScalarLdCost; 2940 } 2941 case Instruction::Store: { 2942 // We know that we can merge the stores. Calculate the cost. 2943 unsigned alignment = cast<StoreInst>(VL0)->getAlignment(); 2944 int ScalarEltCost = 2945 TTI->getMemoryOpCost(Instruction::Store, ScalarTy, alignment, 0, VL0); 2946 if (NeedToShuffleReuses) { 2947 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 2948 } 2949 int ScalarStCost = VecTy->getNumElements() * ScalarEltCost; 2950 int VecStCost = 2951 TTI->getMemoryOpCost(Instruction::Store, VecTy, alignment, 0, VL0); 2952 return ReuseShuffleCost + VecStCost - ScalarStCost; 2953 } 2954 case Instruction::Call: { 2955 CallInst *CI = cast<CallInst>(VL0); 2956 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 2957 2958 // Calculate the cost of the scalar and vector calls. 
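// For illustration, for four @llvm.sqrt.f32 calls this weighs four
// times the scalar intrinsic cost against one @llvm.sqrt.v4f32 call.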
2959 SmallVector<Type *, 4> ScalarTys; 2960 for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op) 2961 ScalarTys.push_back(CI->getArgOperand(op)->getType()); 2962 2963 FastMathFlags FMF; 2964 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 2965 FMF = FPMO->getFastMathFlags(); 2966 2967 int ScalarEltCost = 2968 TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF); 2969 if (NeedToShuffleReuses) { 2970 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 2971 } 2972 int ScalarCallCost = VecTy->getNumElements() * ScalarEltCost; 2973 2974 SmallVector<Value *, 4> Args(CI->arg_operands()); 2975 int VecCallCost = TTI->getIntrinsicInstrCost(ID, CI->getType(), Args, FMF, 2976 VecTy->getNumElements()); 2977 2978 LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost 2979 << " (" << VecCallCost << "-" << ScalarCallCost << ")" 2980 << " for " << *CI << "\n"); 2981 2982 return ReuseShuffleCost + VecCallCost - ScalarCallCost; 2983 } 2984 case Instruction::ShuffleVector: { 2985 assert(S.isAltShuffle() && 2986 ((Instruction::isBinaryOp(S.getOpcode()) && 2987 Instruction::isBinaryOp(S.getAltOpcode())) || 2988 (Instruction::isCast(S.getOpcode()) && 2989 Instruction::isCast(S.getAltOpcode()))) && 2990 "Invalid Shuffle Vector Operand"); 2991 int ScalarCost = 0; 2992 if (NeedToShuffleReuses) { 2993 for (unsigned Idx : E->ReuseShuffleIndices) { 2994 Instruction *I = cast<Instruction>(VL[Idx]); 2995 ReuseShuffleCost -= TTI->getInstructionCost( 2996 I, TargetTransformInfo::TCK_RecipThroughput); 2997 } 2998 for (Value *V : VL) { 2999 Instruction *I = cast<Instruction>(V); 3000 ReuseShuffleCost += TTI->getInstructionCost( 3001 I, TargetTransformInfo::TCK_RecipThroughput); 3002 } 3003 } 3004 for (Value *i : VL) { 3005 Instruction *I = cast<Instruction>(i); 3006 assert(S.isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 3007 ScalarCost += TTI->getInstructionCost( 3008 I, TargetTransformInfo::TCK_RecipThroughput); 3009 } 3010 // VecCost is equal to sum of the cost of creating 2 vectors 3011 // and the cost of creating shuffle. 3012 int VecCost = 0; 3013 if (Instruction::isBinaryOp(S.getOpcode())) { 3014 VecCost = TTI->getArithmeticInstrCost(S.getOpcode(), VecTy); 3015 VecCost += TTI->getArithmeticInstrCost(S.getAltOpcode(), VecTy); 3016 } else { 3017 Type *Src0SclTy = S.MainOp->getOperand(0)->getType(); 3018 Type *Src1SclTy = S.AltOp->getOperand(0)->getType(); 3019 VectorType *Src0Ty = VectorType::get(Src0SclTy, VL.size()); 3020 VectorType *Src1Ty = VectorType::get(Src1SclTy, VL.size()); 3021 VecCost = TTI->getCastInstrCost(S.getOpcode(), VecTy, Src0Ty); 3022 VecCost += TTI->getCastInstrCost(S.getAltOpcode(), VecTy, Src1Ty); 3023 } 3024 VecCost += TTI->getShuffleCost(TargetTransformInfo::SK_Select, VecTy, 0); 3025 return ReuseShuffleCost + VecCost - ScalarCost; 3026 } 3027 default: 3028 llvm_unreachable("Unknown instruction"); 3029 } 3030 } 3031 3032 bool BoUpSLP::isFullyVectorizableTinyTree() const { 3033 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height " 3034 << VectorizableTree.size() << " is fully vectorizable .\n"); 3035 3036 // We only handle trees of heights 1 and 2. 3037 if (VectorizableTree.size() == 1 && !VectorizableTree[0].NeedToGather) 3038 return true; 3039 3040 if (VectorizableTree.size() != 2) 3041 return false; 3042 3043 // Handle splat and all-constants stores. 
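// For illustration: four consecutive stores of the constant 7 produce
// a two-entry tree whose second entry gathers the splat <7, 7, 7, 7>;
// such a tree still counts as fully vectorizable.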
3044 if (!VectorizableTree[0].NeedToGather &&
3045 (allConstant(VectorizableTree[1].Scalars) ||
3046 isSplat(VectorizableTree[1].Scalars)))
3047 return true;
3048
3049 // Gathering cost would be too much for tiny trees.
3050 if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
3051 return false;
3052
3053 return true;
3054 }
3055
3056 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() const {
3057 // We can vectorize the tree if its size is greater than or equal to the
3058 // minimum size specified by the MinTreeSize command line option.
3059 if (VectorizableTree.size() >= MinTreeSize)
3060 return false;
3061
3062 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
3063 // can vectorize it if we can prove it fully vectorizable.
3064 if (isFullyVectorizableTinyTree())
3065 return false;
3066
3067 assert((VectorizableTree.empty()
3068 ? ExternalUses.empty()
3069 : true) && "We shouldn't have any external users");
3070
3071 // Otherwise, we can't vectorize the tree. It is both tiny and not fully
3072 // vectorizable.
3073 return true;
3074 }
3075
3076 int BoUpSLP::getSpillCost() const {
3077 // Walk from the bottom of the tree to the top, tracking which values are
3078 // live. When we see a call instruction that is not part of our tree,
3079 // query TTI to see if there is a cost to keeping values live over it
3080 // (for example, if spills and fills are required).
3081 unsigned BundleWidth = VectorizableTree.front().Scalars.size();
3082 int Cost = 0;
3083
3084 SmallPtrSet<Instruction*, 4> LiveValues;
3085 Instruction *PrevInst = nullptr;
3086
3087 for (const auto &N : VectorizableTree) {
3088 Instruction *Inst = dyn_cast<Instruction>(N.Scalars[0]);
3089 if (!Inst)
3090 continue;
3091
3092 if (!PrevInst) {
3093 PrevInst = Inst;
3094 continue;
3095 }
3096
3097 // Update LiveValues.
3098 LiveValues.erase(PrevInst);
3099 for (auto &J : PrevInst->operands()) {
3100 if (isa<Instruction>(&*J) && getTreeEntry(&*J))
3101 LiveValues.insert(cast<Instruction>(&*J));
3102 }
3103
3104 LLVM_DEBUG({
3105 dbgs() << "SLP: #LV: " << LiveValues.size();
3106 for (auto *X : LiveValues)
3107 dbgs() << " " << X->getName();
3108 dbgs() << ", Looking at ";
3109 Inst->dump();
3110 });
3111
3112 // Now find the sequence of instructions between PrevInst and Inst.
3113 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
3114 PrevInstIt =
3115 PrevInst->getIterator().getReverse();
3116 while (InstIt != PrevInstIt) {
3117 if (PrevInstIt == PrevInst->getParent()->rend()) {
3118 PrevInstIt = Inst->getParent()->rbegin();
3119 continue;
3120 }
3121
3122 // Debug information doesn't impact spill cost.
3123 if ((isa<CallInst>(&*PrevInstIt) &&
3124 !isa<DbgInfoIntrinsic>(&*PrevInstIt)) &&
3125 &*PrevInstIt != PrevInst) {
3126 SmallVector<Type*, 4> V;
3127 for (auto *II : LiveValues)
3128 V.push_back(VectorType::get(II->getType(), BundleWidth));
3129 Cost += TTI->getCostOfKeepingLiveOverCall(V);
3130 }
3131
3132 ++PrevInstIt;
3133 }
3134
3135 PrevInst = Inst;
3136 }
3137
3138 return Cost;
3139 }
3140
3141 int BoUpSLP::getTreeCost() {
3142 int Cost = 0;
3143 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
3144 << VectorizableTree.size() << ".\n");
3145
3146 unsigned BundleWidth = VectorizableTree[0].Scalars.size();
3147
3148 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
3149 TreeEntry &TE = VectorizableTree[I];
3150
3151 // We create duplicate tree entries for gather sequences that have multiple
3152 // uses.
However, we should not compute the cost of duplicate sequences. 3153 // For example, if we have a build vector (i.e., insertelement sequence) 3154 // that is used by more than one vector instruction, we only need to 3155 // compute the cost of the insertelement instructions once. The redundant 3156 // instructions will be eliminated by CSE. 3157 // 3158 // We should consider not creating duplicate tree entries for gather 3159 // sequences, and instead add additional edges to the tree representing 3160 // their uses. Since such an approach results in fewer total entries, 3161 // existing heuristics based on tree size may yield different results. 3162 // 3163 if (TE.NeedToGather && 3164 std::any_of(std::next(VectorizableTree.begin(), I + 1), 3165 VectorizableTree.end(), [&TE](TreeEntry &Entry) { 3166 return Entry.NeedToGather && Entry.isSame(TE.Scalars); 3167 })) 3168 continue; 3169 3170 int C = getEntryCost(&TE); 3171 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 3172 << " for bundle that starts with " << *TE.Scalars[0] 3173 << ".\n"); 3174 Cost += C; 3175 } 3176 3177 SmallPtrSet<Value *, 16> ExtractCostCalculated; 3178 int ExtractCost = 0; 3179 for (ExternalUser &EU : ExternalUses) { 3180 // We only add extract cost once for the same scalar. 3181 if (!ExtractCostCalculated.insert(EU.Scalar).second) 3182 continue; 3183 3184 // Uses by ephemeral values are free (because the ephemeral value will be 3185 // removed prior to code generation, and so the extraction will be 3186 // removed as well). 3187 if (EphValues.count(EU.User)) 3188 continue; 3189 3190 // If we plan to rewrite the tree in a smaller type, we will need to 3191 // extend the extracted value back to the original type. Here, we account 3192 // for the extract and the added cost of the extension (sign or zero) if needed. 3193 auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth); 3194 auto *ScalarRoot = VectorizableTree[0].Scalars[0]; 3195 if (MinBWs.count(ScalarRoot)) { 3196 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 3197 auto Extend = 3198 MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt; 3199 VecTy = VectorType::get(MinTy, BundleWidth); 3200 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), 3201 VecTy, EU.Lane); 3202 } else { 3203 ExtractCost += 3204 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane); 3205 } 3206 } 3207 3208 int SpillCost = getSpillCost(); 3209 Cost += SpillCost + ExtractCost; 3210 3211 std::string Str; 3212 { 3213 raw_string_ostream OS(Str); 3214 OS << "SLP: Spill Cost = " << SpillCost << ".\n" 3215 << "SLP: Extract Cost = " << ExtractCost << ".\n" 3216 << "SLP: Total Cost = " << Cost << ".\n"; 3217 } 3218 LLVM_DEBUG(dbgs() << Str); 3219 3220 if (ViewSLPTree) 3221 ViewGraph(this, "SLP" + F->getName(), false, Str); 3222 3223 return Cost; 3224 } 3225 3226 int BoUpSLP::getGatherCost(Type *Ty, 3227 const DenseSet<unsigned> &ShuffledIndices) const { 3228 int Cost = 0; 3229 for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i) 3230 if (!ShuffledIndices.count(i)) 3231 Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i); 3232 if (!ShuffledIndices.empty()) 3233 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty); 3234 return Cost; 3235 } 3236 3237 int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const { 3238 // Find the type of the operands in VL.
3239 Type *ScalarTy = VL[0]->getType(); 3240 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 3241 ScalarTy = SI->getValueOperand()->getType(); 3242 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 3243 // Find the cost of inserting/extracting values from the vector. 3244 // Check if the same elements are inserted several times and count them as 3245 // shuffle candidates. 3246 DenseSet<unsigned> ShuffledElements; 3247 DenseSet<Value *> UniqueElements; 3248 // Iterate in reverse order so that each repeated value keeps the 3249 // insertelement cost for its highest lane and its lower lanes are counted 3250 // as shuffle candidates. for (unsigned I = VL.size(); I > 0; --I) { 3250 unsigned Idx = I - 1; 3251 if (!UniqueElements.insert(VL[Idx]).second) 3252 ShuffledElements.insert(Idx); 3253 } 3254 return getGatherCost(VecTy, ShuffledElements); 3255 } 3256 3257 // Perform operand reordering on the instructions in VL and return the reordered 3258 // operands in Left and Right. 3259 void BoUpSLP::reorderInputsAccordingToOpcode( 3260 ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left, 3261 SmallVectorImpl<Value *> &Right, const DataLayout &DL, 3262 ScalarEvolution &SE) { 3263 if (VL.empty()) 3264 return; 3265 VLOperands Ops(VL, DL, SE); 3266 // Reorder the operands in place. 3267 Ops.reorder(); 3268 Left = Ops.getVL(0); 3269 Right = Ops.getVL(1); 3270 } 3271 3272 void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL, 3273 const InstructionsState &S) { 3274 // Get the basic block this bundle is in. All instructions in the bundle 3275 // should be in this block. 3276 auto *Front = cast<Instruction>(S.OpValue); 3277 auto *BB = Front->getParent(); 3278 assert(llvm::all_of(VL, [=](Value *V) -> bool { 3279 auto *I = cast<Instruction>(V); 3280 return !S.isOpcodeOrAlt(I) || I->getParent() == BB; 3281 })); 3282 3283 // The last instruction in the bundle in program order. 3284 Instruction *LastInst = nullptr; 3285 3286 // Find the last instruction. The common case should be that BB has been 3287 // scheduled, and the last instruction is VL.back(). So we start with 3288 // VL.back() and iterate over schedule data until we reach the end of the 3289 // bundle. The end of the bundle is marked by null ScheduleData. 3290 if (BlocksSchedules.count(BB)) { 3291 auto *Bundle = 3292 BlocksSchedules[BB]->getScheduleData(isOneOf(S, VL.back())); 3293 if (Bundle && Bundle->isPartOfBundle()) 3294 for (; Bundle; Bundle = Bundle->NextInBundle) 3295 if (Bundle->OpValue == Bundle->Inst) 3296 LastInst = Bundle->Inst; 3297 } 3298 3299 // LastInst can still be null at this point if there's either not an entry 3300 // for BB in BlocksSchedules or there's no ScheduleData available for 3301 // VL.back(). This can be the case if buildTree_rec aborts for various 3302 // reasons (e.g., the maximum recursion depth is reached, the maximum region 3303 // size is reached, etc.). ScheduleData is initialized in the scheduling 3304 // "dry-run". 3305 // 3306 // If this happens, we can still find the last instruction by brute force. We 3307 // iterate forwards from Front (inclusive) until we either see all 3308 // instructions in the bundle or reach the end of the block. If Front is the 3309 // last instruction in program order, LastInst will be set to Front, and we 3310 // will visit all the remaining instructions in the block. 3311 // 3312 // One of the reasons we exit early from buildTree_rec is to place an upper 3313 // bound on compile-time. Thus, taking an additional compile-time hit here is 3314 // not ideal.
However, this should be exceedingly rare since it requires that 3315 // we both exit early from buildTree_rec and that the bundle be out-of-order 3316 // (causing us to iterate all the way to the end of the block). 3317 if (!LastInst) { 3318 SmallPtrSet<Value *, 16> Bundle(VL.begin(), VL.end()); 3319 for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) { 3320 if (Bundle.erase(&I) && S.isOpcodeOrAlt(&I)) 3321 LastInst = &I; 3322 if (Bundle.empty()) 3323 break; 3324 } 3325 } 3326 3327 // Set the insertion point after the last instruction in the bundle. Set the 3328 // debug location to Front. 3329 Builder.SetInsertPoint(BB, ++LastInst->getIterator()); 3330 Builder.SetCurrentDebugLocation(Front->getDebugLoc()); 3331 } 3332 3333 Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) { 3334 Value *Vec = UndefValue::get(Ty); 3335 // Generate the 'InsertElement' instruction. 3336 for (unsigned i = 0; i < Ty->getNumElements(); ++i) { 3337 Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i)); 3338 if (Instruction *Insert = dyn_cast<Instruction>(Vec)) { 3339 GatherSeq.insert(Insert); 3340 CSEBlocks.insert(Insert->getParent()); 3341 3342 // Add to our 'need-to-extract' list. 3343 if (TreeEntry *E = getTreeEntry(VL[i])) { 3344 // Find which lane we need to extract. 3345 int FoundLane = -1; 3346 for (unsigned Lane = 0, LE = E->Scalars.size(); Lane != LE; ++Lane) { 3347 // Is this the lane of the scalar that we are looking for? 3348 if (E->Scalars[Lane] == VL[i]) { 3349 FoundLane = Lane; 3350 break; 3351 } 3352 } 3353 assert(FoundLane >= 0 && "Could not find the correct lane"); 3354 if (!E->ReuseShuffleIndices.empty()) { 3355 FoundLane = 3356 std::distance(E->ReuseShuffleIndices.begin(), 3357 llvm::find(E->ReuseShuffleIndices, FoundLane)); 3358 } 3359 ExternalUses.push_back(ExternalUser(VL[i], Insert, FoundLane)); 3360 } 3361 } 3362 } 3363 3364 return Vec; 3365 } 3366 3367 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) { 3368 InstructionsState S = getSameOpcode(VL); 3369 if (S.getOpcode()) { 3370 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 3371 if (E->isSame(VL)) { 3372 Value *V = vectorizeTree(E); 3373 if (VL.size() == E->Scalars.size() && !E->ReuseShuffleIndices.empty()) { 3374 // We need the vectorized value, but without the reuse shuffle. 3375 if (auto *SV = dyn_cast<ShuffleVectorInst>(V)) { 3376 V = SV->getOperand(0); 3377 } else { 3378 // Reshuffle to get only unique values. 3379 SmallVector<unsigned, 4> UniqueIdxs; 3380 SmallSet<unsigned, 4> UsedIdxs; 3381 for (unsigned Idx : E->ReuseShuffleIndices) 3382 if (UsedIdxs.insert(Idx).second) 3383 UniqueIdxs.emplace_back(Idx); 3384 V = Builder.CreateShuffleVector(V, UndefValue::get(V->getType()), 3385 UniqueIdxs); 3386 } 3387 } 3388 return V; 3389 } 3390 } 3391 } 3392 3393 Type *ScalarTy = S.OpValue->getType(); 3394 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 3395 ScalarTy = SI->getValueOperand()->getType(); 3396 3397 // Check that every instruction appears once in this bundle. 3398 SmallVector<unsigned, 4> ReuseShuffleIndices; 3399 SmallVector<Value *, 4> UniqueValues; 3400 if (VL.size() > 2) { 3401 DenseMap<Value *, unsigned> UniquePositions; 3402 for (Value *V : VL) { 3403 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 3404 ReuseShuffleIndices.emplace_back(Res.first->second); 3405 if (Res.second || isa<Constant>(V)) 3406 UniqueValues.emplace_back(V); 3407 } 3408 // Do not shuffle a single element, or if the number of unique values is 3409 // not a power of 2.
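// Illustrative example (hypothetical bundle): for VL = <%a, %b, %a, %b> we // gather only <%a, %b> and keep ReuseShuffleIndices = <0, 1, 0, 1>, so one // shufflevector rebuilds all four lanes; with three unique values the // indices are cleared below, since 3 is not a power of 2.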
3410 if (UniqueValues.size() == VL.size() || UniqueValues.size() <= 1 || 3411 !llvm::isPowerOf2_32(UniqueValues.size())) 3412 ReuseShuffleIndices.clear(); 3413 else 3414 VL = UniqueValues; 3415 } 3416 VectorType *VecTy = VectorType::get(ScalarTy, VL.size()); 3417 3418 Value *V = Gather(VL, VecTy); 3419 if (!ReuseShuffleIndices.empty()) { 3420 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3421 ReuseShuffleIndices, "shuffle"); 3422 if (auto *I = dyn_cast<Instruction>(V)) { 3423 GatherSeq.insert(I); 3424 CSEBlocks.insert(I->getParent()); 3425 } 3426 } 3427 return V; 3428 } 3429 3430 static void inversePermutation(ArrayRef<unsigned> Indices, 3431 SmallVectorImpl<unsigned> &Mask) { 3432 Mask.clear(); 3433 const unsigned E = Indices.size(); 3434 Mask.resize(E); 3435 for (unsigned I = 0; I < E; ++I) 3436 Mask[Indices[I]] = I; 3437 } 3438 3439 Value *BoUpSLP::vectorizeTree(TreeEntry *E) { 3440 IRBuilder<>::InsertPointGuard Guard(Builder); 3441 3442 if (E->VectorizedValue) { 3443 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); 3444 return E->VectorizedValue; 3445 } 3446 3447 InstructionsState S = getSameOpcode(E->Scalars); 3448 Instruction *VL0 = cast<Instruction>(S.OpValue); 3449 Type *ScalarTy = VL0->getType(); 3450 if (StoreInst *SI = dyn_cast<StoreInst>(VL0)) 3451 ScalarTy = SI->getValueOperand()->getType(); 3452 VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size()); 3453 3454 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 3455 3456 if (E->NeedToGather) { 3457 setInsertPointAfterBundle(E->Scalars, S); 3458 auto *V = Gather(E->Scalars, VecTy); 3459 if (NeedToShuffleReuses) { 3460 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3461 E->ReuseShuffleIndices, "shuffle"); 3462 if (auto *I = dyn_cast<Instruction>(V)) { 3463 GatherSeq.insert(I); 3464 CSEBlocks.insert(I->getParent()); 3465 } 3466 } 3467 E->VectorizedValue = V; 3468 return V; 3469 } 3470 3471 unsigned ShuffleOrOp = S.isAltShuffle() ? 3472 (unsigned) Instruction::ShuffleVector : S.getOpcode(); 3473 switch (ShuffleOrOp) { 3474 case Instruction::PHI: { 3475 PHINode *PH = cast<PHINode>(VL0); 3476 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI()); 3477 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 3478 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 3479 Value *V = NewPhi; 3480 if (NeedToShuffleReuses) { 3481 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3482 E->ReuseShuffleIndices, "shuffle"); 3483 } 3484 E->VectorizedValue = V; 3485 3486 // PHINodes may have multiple entries from the same block. We want to 3487 // visit every block once.
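// For instance (hypothetical CFG, for illustration): a switch with two // cases branching to the same successor yields // %p = phi i32 [ %x, %bb ], [ %x, %bb ], [ %y, %other ] // and both %bb entries must receive the same incoming vector, so below we // reuse the value recorded on the first visit of %bb.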
3488 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 3489 3490 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 3491 ValueList Operands; 3492 BasicBlock *IBB = PH->getIncomingBlock(i); 3493 3494 if (!VisitedBBs.insert(IBB).second) { 3495 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 3496 continue; 3497 } 3498 3499 Builder.SetInsertPoint(IBB->getTerminator()); 3500 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 3501 Value *Vec = vectorizeTree(E->getOperand(i)); 3502 NewPhi->addIncoming(Vec, IBB); 3503 } 3504 3505 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 3506 "Invalid number of incoming values"); 3507 return V; 3508 } 3509 3510 case Instruction::ExtractElement: { 3511 if (!E->NeedToGather) { 3512 Value *V = E->getSingleOperand(0); 3513 if (!E->ReorderIndices.empty()) { 3514 OrdersType Mask; 3515 inversePermutation(E->ReorderIndices, Mask); 3516 Builder.SetInsertPoint(VL0); 3517 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), Mask, 3518 "reorder_shuffle"); 3519 } 3520 if (NeedToShuffleReuses) { 3521 // TODO: Merge this shuffle with the ReorderShuffleMask. 3522 if (E->ReorderIndices.empty()) 3523 Builder.SetInsertPoint(VL0); 3524 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3525 E->ReuseShuffleIndices, "shuffle"); 3526 } 3527 E->VectorizedValue = V; 3528 return V; 3529 } 3530 setInsertPointAfterBundle(E->Scalars, S); 3531 auto *V = Gather(E->Scalars, VecTy); 3532 if (NeedToShuffleReuses) { 3533 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3534 E->ReuseShuffleIndices, "shuffle"); 3535 if (auto *I = dyn_cast<Instruction>(V)) { 3536 GatherSeq.insert(I); 3537 CSEBlocks.insert(I->getParent()); 3538 } 3539 } 3540 E->VectorizedValue = V; 3541 return V; 3542 } 3543 case Instruction::ExtractValue: { 3544 if (!E->NeedToGather) { 3545 LoadInst *LI = cast<LoadInst>(E->getSingleOperand(0)); 3546 Builder.SetInsertPoint(LI); 3547 PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace()); 3548 Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy); 3549 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlignment()); 3550 Value *NewV = propagateMetadata(V, E->Scalars); 3551 if (!E->ReorderIndices.empty()) { 3552 OrdersType Mask; 3553 inversePermutation(E->ReorderIndices, Mask); 3554 NewV = Builder.CreateShuffleVector(NewV, UndefValue::get(VecTy), Mask, 3555 "reorder_shuffle"); 3556 } 3557 if (NeedToShuffleReuses) { 3558 // TODO: Merge this shuffle with the ReorderShuffleMask. 
3559 NewV = Builder.CreateShuffleVector( 3560 NewV, UndefValue::get(VecTy), E->ReuseShuffleIndices, "shuffle"); 3561 } 3562 E->VectorizedValue = NewV; 3563 return NewV; 3564 } 3565 setInsertPointAfterBundle(E->Scalars, S); 3566 auto *V = Gather(E->Scalars, VecTy); 3567 if (NeedToShuffleReuses) { 3568 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3569 E->ReuseShuffleIndices, "shuffle"); 3570 if (auto *I = dyn_cast<Instruction>(V)) { 3571 GatherSeq.insert(I); 3572 CSEBlocks.insert(I->getParent()); 3573 } 3574 } 3575 E->VectorizedValue = V; 3576 return V; 3577 } 3578 case Instruction::ZExt: 3579 case Instruction::SExt: 3580 case Instruction::FPToUI: 3581 case Instruction::FPToSI: 3582 case Instruction::FPExt: 3583 case Instruction::PtrToInt: 3584 case Instruction::IntToPtr: 3585 case Instruction::SIToFP: 3586 case Instruction::UIToFP: 3587 case Instruction::Trunc: 3588 case Instruction::FPTrunc: 3589 case Instruction::BitCast: { 3590 setInsertPointAfterBundle(E->Scalars, S); 3591 3592 Value *InVec = vectorizeTree(E->getOperand(0)); 3593 3594 if (E->VectorizedValue) { 3595 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 3596 return E->VectorizedValue; 3597 } 3598 3599 CastInst *CI = cast<CastInst>(VL0); 3600 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy); 3601 if (NeedToShuffleReuses) { 3602 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3603 E->ReuseShuffleIndices, "shuffle"); 3604 } 3605 E->VectorizedValue = V; 3606 ++NumVectorInstructions; 3607 return V; 3608 } 3609 case Instruction::FCmp: 3610 case Instruction::ICmp: { 3611 setInsertPointAfterBundle(E->Scalars, S); 3612 3613 Value *L = vectorizeTree(E->getOperand(0)); 3614 Value *R = vectorizeTree(E->getOperand(1)); 3615 3616 if (E->VectorizedValue) { 3617 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 3618 return E->VectorizedValue; 3619 } 3620 3621 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 3622 Value *V; 3623 if (S.getOpcode() == Instruction::FCmp) 3624 V = Builder.CreateFCmp(P0, L, R); 3625 else 3626 V = Builder.CreateICmp(P0, L, R); 3627 3628 propagateIRFlags(V, E->Scalars, VL0); 3629 if (NeedToShuffleReuses) { 3630 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3631 E->ReuseShuffleIndices, "shuffle"); 3632 } 3633 E->VectorizedValue = V; 3634 ++NumVectorInstructions; 3635 return V; 3636 } 3637 case Instruction::Select: { 3638 setInsertPointAfterBundle(E->Scalars, S); 3639 3640 Value *Cond = vectorizeTree(E->getOperand(0)); 3641 Value *True = vectorizeTree(E->getOperand(1)); 3642 Value *False = vectorizeTree(E->getOperand(2)); 3643 3644 if (E->VectorizedValue) { 3645 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 3646 return E->VectorizedValue; 3647 } 3648 3649 Value *V = Builder.CreateSelect(Cond, True, False); 3650 if (NeedToShuffleReuses) { 3651 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3652 E->ReuseShuffleIndices, "shuffle"); 3653 } 3654 E->VectorizedValue = V; 3655 ++NumVectorInstructions; 3656 return V; 3657 } 3658 case Instruction::Add: 3659 case Instruction::FAdd: 3660 case Instruction::Sub: 3661 case Instruction::FSub: 3662 case Instruction::Mul: 3663 case Instruction::FMul: 3664 case Instruction::UDiv: 3665 case Instruction::SDiv: 3666 case Instruction::FDiv: 3667 case Instruction::URem: 3668 case Instruction::SRem: 3669 case Instruction::FRem: 3670 case Instruction::Shl: 3671 case Instruction::LShr: 3672 case Instruction::AShr: 3673 case Instruction::And: 3674 case
Instruction::Or: 3675 case Instruction::Xor: { 3676 setInsertPointAfterBundle(E->Scalars, S); 3677 3678 Value *LHS = vectorizeTree(E->getOperand(0)); 3679 Value *RHS = vectorizeTree(E->getOperand(1)); 3680 3681 if (E->VectorizedValue) { 3682 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 3683 return E->VectorizedValue; 3684 } 3685 3686 Value *V = Builder.CreateBinOp( 3687 static_cast<Instruction::BinaryOps>(S.getOpcode()), LHS, RHS); 3688 propagateIRFlags(V, E->Scalars, VL0); 3689 if (auto *I = dyn_cast<Instruction>(V)) 3690 V = propagateMetadata(I, E->Scalars); 3691 3692 if (NeedToShuffleReuses) { 3693 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3694 E->ReuseShuffleIndices, "shuffle"); 3695 } 3696 E->VectorizedValue = V; 3697 ++NumVectorInstructions; 3698 3699 return V; 3700 } 3701 case Instruction::Load: { 3702 // Loads are inserted at the head of the tree because we don't want to 3703 // sink them all the way down past store instructions. 3704 bool IsReorder = !E->ReorderIndices.empty(); 3705 if (IsReorder) { 3706 S = getSameOpcode(E->Scalars, E->ReorderIndices.front()); 3707 VL0 = cast<Instruction>(S.OpValue); 3708 } 3709 setInsertPointAfterBundle(E->Scalars, S); 3710 3711 LoadInst *LI = cast<LoadInst>(VL0); 3712 Type *ScalarLoadTy = LI->getType(); 3713 unsigned AS = LI->getPointerAddressSpace(); 3714 3715 Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(), 3716 VecTy->getPointerTo(AS)); 3717 3718 // The pointer operand uses an in-tree scalar so we add the new BitCast to 3719 // ExternalUses list to make sure that an extract will be generated in the 3720 // future. 3721 Value *PO = LI->getPointerOperand(); 3722 if (getTreeEntry(PO)) 3723 ExternalUses.push_back(ExternalUser(PO, cast<User>(VecPtr), 0)); 3724 3725 unsigned Alignment = LI->getAlignment(); 3726 LI = Builder.CreateLoad(VecTy, VecPtr); 3727 if (!Alignment) { 3728 Alignment = DL->getABITypeAlignment(ScalarLoadTy); 3729 } 3730 LI->setAlignment(Alignment); 3731 Value *V = propagateMetadata(LI, E->Scalars); 3732 if (IsReorder) { 3733 OrdersType Mask; 3734 inversePermutation(E->ReorderIndices, Mask); 3735 V = Builder.CreateShuffleVector(V, UndefValue::get(V->getType()), 3736 Mask, "reorder_shuffle"); 3737 } 3738 if (NeedToShuffleReuses) { 3739 // TODO: Merge this shuffle with the ReorderShuffleMask. 3740 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3741 E->ReuseShuffleIndices, "shuffle"); 3742 } 3743 E->VectorizedValue = V; 3744 ++NumVectorInstructions; 3745 return V; 3746 } 3747 case Instruction::Store: { 3748 StoreInst *SI = cast<StoreInst>(VL0); 3749 unsigned Alignment = SI->getAlignment(); 3750 unsigned AS = SI->getPointerAddressSpace(); 3751 3752 setInsertPointAfterBundle(E->Scalars, S); 3753 3754 Value *VecValue = vectorizeTree(E->getOperand(0)); 3755 Value *ScalarPtr = SI->getPointerOperand(); 3756 Value *VecPtr = Builder.CreateBitCast(ScalarPtr, VecTy->getPointerTo(AS)); 3757 StoreInst *ST = Builder.CreateStore(VecValue, VecPtr); 3758 3759 // The pointer operand uses an in-tree scalar, so add the new BitCast to 3760 // ExternalUses to make sure that an extract will be generated in the 3761 // future. 
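// Sketch of the situation (hypothetical IR): if ScalarPtr is itself a // vectorized scalar of this tree, its scalar users are erased after // vectorization; registering the bitcast below as an external user forces // an extract of ScalarPtr's lane so the address remains available.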
3762 if (getTreeEntry(ScalarPtr)) 3763 ExternalUses.push_back(ExternalUser(ScalarPtr, cast<User>(VecPtr), 0)); 3764 3765 if (!Alignment) 3766 Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType()); 3767 3768 ST->setAlignment(Alignment); 3769 Value *V = propagateMetadata(ST, E->Scalars); 3770 if (NeedToShuffleReuses) { 3771 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3772 E->ReuseShuffleIndices, "shuffle"); 3773 } 3774 E->VectorizedValue = V; 3775 ++NumVectorInstructions; 3776 return V; 3777 } 3778 case Instruction::GetElementPtr: { 3779 setInsertPointAfterBundle(E->Scalars, S); 3780 3781 Value *Op0 = vectorizeTree(E->getOperand(0)); 3782 3783 std::vector<Value *> OpVecs; 3784 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e; 3785 ++j) { 3786 Value *OpVec = vectorizeTree(E->getOperand(j)); 3787 OpVecs.push_back(OpVec); 3788 } 3789 3790 Value *V = Builder.CreateGEP( 3791 cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs); 3792 if (Instruction *I = dyn_cast<Instruction>(V)) 3793 V = propagateMetadata(I, E->Scalars); 3794 3795 if (NeedToShuffleReuses) { 3796 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3797 E->ReuseShuffleIndices, "shuffle"); 3798 } 3799 E->VectorizedValue = V; 3800 ++NumVectorInstructions; 3801 3802 return V; 3803 } 3804 case Instruction::Call: { 3805 CallInst *CI = cast<CallInst>(VL0); 3806 setInsertPointAfterBundle(E->Scalars, S); 3807 Function *FI; 3808 Intrinsic::ID IID = Intrinsic::not_intrinsic; 3809 Value *ScalarArg = nullptr; 3810 if ((FI = CI->getCalledFunction())) { 3811 IID = FI->getIntrinsicID(); 3812 } 3813 std::vector<Value *> OpVecs; 3814 for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) { 3815 ValueList OpVL; 3816 // Some intrinsics have scalar arguments; such arguments should not be 3817 // vectorized. 3818 if (hasVectorInstrinsicScalarOpd(IID, j)) { 3819 CallInst *CEI = cast<CallInst>(VL0); 3820 ScalarArg = CEI->getArgOperand(j); 3821 OpVecs.push_back(CEI->getArgOperand(j)); 3822 continue; 3823 } 3824 3825 Value *OpVec = vectorizeTree(E->getOperand(j)); 3826 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); 3827 OpVecs.push_back(OpVec); 3828 } 3829 3830 Module *M = F->getParent(); 3831 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3832 Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) }; 3833 Function *CF = Intrinsic::getDeclaration(M, ID, Tys); 3834 SmallVector<OperandBundleDef, 1> OpBundles; 3835 CI->getOperandBundlesAsDefs(OpBundles); 3836 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); 3837 3838 // The scalar argument uses an in-tree scalar so we add the new vectorized 3839 // call to ExternalUses list to make sure that an extract will be 3840 // generated in the future.
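// Sketch (hypothetical intrinsic, for illustration): for llvm.powi the i32 // exponent operand stays scalar; if that exponent is also a vectorized tree // value, the new call becomes its external user so an extract of its lane // is emitted instead of the scalar being erased out from under the call.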
3841 if (ScalarArg && getTreeEntry(ScalarArg)) 3842 ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0)); 3843 3844 propagateIRFlags(V, E->Scalars, VL0); 3845 if (NeedToShuffleReuses) { 3846 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3847 E->ReuseShuffleIndices, "shuffle"); 3848 } 3849 E->VectorizedValue = V; 3850 ++NumVectorInstructions; 3851 return V; 3852 } 3853 case Instruction::ShuffleVector: { 3854 assert(S.isAltShuffle() && 3855 ((Instruction::isBinaryOp(S.getOpcode()) && 3856 Instruction::isBinaryOp(S.getAltOpcode())) || 3857 (Instruction::isCast(S.getOpcode()) && 3858 Instruction::isCast(S.getAltOpcode()))) && 3859 "Invalid Shuffle Vector Operand"); 3860 3861 Value *LHS, *RHS; 3862 if (Instruction::isBinaryOp(S.getOpcode())) { 3863 setInsertPointAfterBundle(E->Scalars, S); 3864 LHS = vectorizeTree(E->getOperand(0)); 3865 RHS = vectorizeTree(E->getOperand(1)); 3866 } else { 3867 setInsertPointAfterBundle(E->Scalars, S); 3868 LHS = vectorizeTree(E->getOperand(0)); 3869 } 3870 3871 if (E->VectorizedValue) { 3872 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 3873 return E->VectorizedValue; 3874 } 3875 3876 Value *V0, *V1; 3877 if (Instruction::isBinaryOp(S.getOpcode())) { 3878 V0 = Builder.CreateBinOp( 3879 static_cast<Instruction::BinaryOps>(S.getOpcode()), LHS, RHS); 3880 V1 = Builder.CreateBinOp( 3881 static_cast<Instruction::BinaryOps>(S.getAltOpcode()), LHS, RHS); 3882 } else { 3883 V0 = Builder.CreateCast( 3884 static_cast<Instruction::CastOps>(S.getOpcode()), LHS, VecTy); 3885 V1 = Builder.CreateCast( 3886 static_cast<Instruction::CastOps>(S.getAltOpcode()), LHS, VecTy); 3887 } 3888 3889 // Create shuffle to take alternate operations from the vector. 3890 // Also, gather up main and alt scalar ops to propagate IR flags to 3891 // each vector operation. 3892 ValueList OpScalars, AltScalars; 3893 unsigned e = E->Scalars.size(); 3894 SmallVector<Constant *, 8> Mask(e); 3895 for (unsigned i = 0; i < e; ++i) { 3896 auto *OpInst = cast<Instruction>(E->Scalars[i]); 3897 assert(S.isOpcodeOrAlt(OpInst) && "Unexpected main/alternate opcode"); 3898 if (OpInst->getOpcode() == S.getAltOpcode()) { 3899 Mask[i] = Builder.getInt32(e + i); 3900 AltScalars.push_back(E->Scalars[i]); 3901 } else { 3902 Mask[i] = Builder.getInt32(i); 3903 OpScalars.push_back(E->Scalars[i]); 3904 } 3905 } 3906 3907 Value *ShuffleMask = ConstantVector::get(Mask); 3908 propagateIRFlags(V0, OpScalars); 3909 propagateIRFlags(V1, AltScalars); 3910 3911 Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask); 3912 if (Instruction *I = dyn_cast<Instruction>(V)) 3913 V = propagateMetadata(I, E->Scalars); 3914 if (NeedToShuffleReuses) { 3915 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 3916 E->ReuseShuffleIndices, "shuffle"); 3917 } 3918 E->VectorizedValue = V; 3919 ++NumVectorInstructions; 3920 3921 return V; 3922 } 3923 default: 3924 llvm_unreachable("unknown inst"); 3925 } 3926 return nullptr; 3927 } 3928 3929 Value *BoUpSLP::vectorizeTree() { 3930 ExtraValueToDebugLocsMap ExternallyUsedValues; 3931 return vectorizeTree(ExternallyUsedValues); 3932 } 3933 3934 Value * 3935 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { 3936 // All blocks must be scheduled before any instructions are inserted. 
3937 for (auto &BSIter : BlocksSchedules) { 3938 scheduleBlock(BSIter.second.get()); 3939 } 3940 3941 Builder.SetInsertPoint(&F->getEntryBlock().front()); 3942 auto *VectorRoot = vectorizeTree(&VectorizableTree[0]); 3943 3944 // If the vectorized tree can be rewritten in a smaller type, we truncate the 3945 // vectorized root. InstCombine will then rewrite the entire expression. We 3946 // sign- or zero-extend the extracted values below. 3947 auto *ScalarRoot = VectorizableTree[0].Scalars[0]; 3948 if (MinBWs.count(ScalarRoot)) { 3949 if (auto *I = dyn_cast<Instruction>(VectorRoot)) 3950 Builder.SetInsertPoint(&*++BasicBlock::iterator(I)); 3951 auto BundleWidth = VectorizableTree[0].Scalars.size(); 3952 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 3953 auto *VecTy = VectorType::get(MinTy, BundleWidth); 3954 auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy); 3955 VectorizableTree[0].VectorizedValue = Trunc; 3956 } 3957 3958 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() 3959 << " values.\n"); 3960 3961 // If necessary, sign-extend or zero-extend the extracted value Ex back to 3962 // the larger type specified by ScalarType, based on ScalarRoot's 3963 // minimum-bit-width info. auto extend = [&](Value *ScalarRoot, Value *Ex, Type *ScalarType) { 3964 if (!MinBWs.count(ScalarRoot)) 3965 return Ex; 3966 if (MinBWs[ScalarRoot].second) 3967 return Builder.CreateSExt(Ex, ScalarType); 3968 return Builder.CreateZExt(Ex, ScalarType); 3969 }; 3970 3971 // Extract all of the elements with the external uses. 3972 for (const auto &ExternalUse : ExternalUses) { 3973 Value *Scalar = ExternalUse.Scalar; 3974 llvm::User *User = ExternalUse.User; 3975 3976 // Skip users that we have already RAUWed. This happens when one instruction 3977 // has multiple uses of the same value. 3978 if (User && !is_contained(Scalar->users(), User)) 3979 continue; 3980 TreeEntry *E = getTreeEntry(Scalar); 3981 assert(E && "Invalid scalar"); 3982 assert(!E->NeedToGather && "Extracting from a gather list"); 3983 3984 Value *Vec = E->VectorizedValue; 3985 assert(Vec && "Can't find vectorizable value"); 3986 3987 Value *Lane = Builder.getInt32(ExternalUse.Lane); 3988 // If User == nullptr, the Scalar is used as an extra argument. Generate 3989 // an ExtractElement instruction and update the record for this scalar in 3990 // ExternallyUsedValues. 3991 if (!User) { 3992 assert(ExternallyUsedValues.count(Scalar) && 3993 "Scalar with nullptr as an external user must be registered in " 3994 "ExternallyUsedValues map"); 3995 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 3996 Builder.SetInsertPoint(VecI->getParent(), 3997 std::next(VecI->getIterator())); 3998 } else { 3999 Builder.SetInsertPoint(&F->getEntryBlock().front()); 4000 } 4001 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 4002 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 4003 CSEBlocks.insert(cast<Instruction>(Scalar)->getParent()); 4004 auto &Locs = ExternallyUsedValues[Scalar]; 4005 ExternallyUsedValues.insert({Ex, Locs}); 4006 ExternallyUsedValues.erase(Scalar); 4007 // Required to update internally referenced instructions. 4008 Scalar->replaceAllUsesWith(Ex); 4009 continue; 4010 } 4011 4012 // Generate extracts for out-of-tree users. 4013 // Find the insertion point for the extractelement lane.
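// For example (hypothetical IR): a scalar %s vectorized into lane 2 of // %vec but still used by an out-of-tree instruction is rewritten as // %ex = extractelement <4 x i32> %vec, i32 2 // with the user's operand replaced by %ex; phi users instead get the // extract in the matching incoming block, as handled below.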
4014 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 4015 if (PHINode *PH = dyn_cast<PHINode>(User)) { 4016 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { 4017 if (PH->getIncomingValue(i) == Scalar) { 4018 Instruction *IncomingTerminator = 4019 PH->getIncomingBlock(i)->getTerminator(); 4020 if (isa<CatchSwitchInst>(IncomingTerminator)) { 4021 Builder.SetInsertPoint(VecI->getParent(), 4022 std::next(VecI->getIterator())); 4023 } else { 4024 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); 4025 } 4026 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 4027 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 4028 CSEBlocks.insert(PH->getIncomingBlock(i)); 4029 PH->setOperand(i, Ex); 4030 } 4031 } 4032 } else { 4033 Builder.SetInsertPoint(cast<Instruction>(User)); 4034 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 4035 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 4036 CSEBlocks.insert(cast<Instruction>(User)->getParent()); 4037 User->replaceUsesOfWith(Scalar, Ex); 4038 } 4039 } else { 4040 Builder.SetInsertPoint(&F->getEntryBlock().front()); 4041 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 4042 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 4043 CSEBlocks.insert(&F->getEntryBlock()); 4044 User->replaceUsesOfWith(Scalar, Ex); 4045 } 4046 4047 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 4048 } 4049 4050 // For each vectorized value: 4051 for (TreeEntry &EIdx : VectorizableTree) { 4052 TreeEntry *Entry = &EIdx; 4053 4054 // No need to handle users of gathered values. 4055 if (Entry->NeedToGather) 4056 continue; 4057 4058 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 4059 4060 // For each lane: 4061 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 4062 Value *Scalar = Entry->Scalars[Lane]; 4063 4064 Type *Ty = Scalar->getType(); 4065 if (!Ty->isVoidTy()) { 4066 #ifndef NDEBUG 4067 for (User *U : Scalar->users()) { 4068 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 4069 4070 // It is legal to replace users in the ignorelist with undef. 4071 assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) && 4072 "Replacing out-of-tree value with undef"); 4073 } 4074 #endif 4075 Value *Undef = UndefValue::get(Ty); 4076 Scalar->replaceAllUsesWith(Undef); 4077 } 4078 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 4079 eraseInstruction(cast<Instruction>(Scalar)); 4080 } 4081 } 4082 4083 Builder.ClearInsertionPoint(); 4084 4085 return VectorizableTree[0].VectorizedValue; 4086 } 4087 4088 void BoUpSLP::optimizeGatherSequence() { 4089 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size() 4090 << " gather sequence instructions.\n"); 4091 // LICM InsertElementInst sequences. 4092 for (Instruction *I : GatherSeq) { 4093 if (!isa<InsertElementInst>(I) && !isa<ShuffleVectorInst>(I)) 4094 continue; 4095 4096 // Check if this block is inside a loop. 4097 Loop *L = LI->getLoopFor(I->getParent()); 4098 if (!L) 4099 continue; 4100 4101 // Check if it has a preheader. 4102 BasicBlock *PreHeader = L->getLoopPreheader(); 4103 if (!PreHeader) 4104 continue; 4105 4106 // If the vector or the element that we insert into it are 4107 // instructions that are defined in this basic block then we can't 4108 // hoist this instruction.
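// E.g. (hypothetical loop, for illustration): an insertelement chain built // from loop-invariant values inside the loop body can be moved to the // preheader, so the vector is materialized once instead of on every // iteration.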
4109 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); 4110 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); 4111 if (Op0 && L->contains(Op0)) 4112 continue; 4113 if (Op1 && L->contains(Op1)) 4114 continue; 4115 4116 // We can hoist this instruction. Move it to the pre-header. 4117 I->moveBefore(PreHeader->getTerminator()); 4118 } 4119 4120 // Make a list of all reachable blocks in our CSE queue. 4121 SmallVector<const DomTreeNode *, 8> CSEWorkList; 4122 CSEWorkList.reserve(CSEBlocks.size()); 4123 for (BasicBlock *BB : CSEBlocks) 4124 if (DomTreeNode *N = DT->getNode(BB)) { 4125 assert(DT->isReachableFromEntry(N)); 4126 CSEWorkList.push_back(N); 4127 } 4128 4129 // Sort blocks by domination. This ensures we visit a block after all blocks 4130 // dominating it are visited. 4131 llvm::stable_sort(CSEWorkList, 4132 [this](const DomTreeNode *A, const DomTreeNode *B) { 4133 return DT->properlyDominates(A, B); 4134 }); 4135 4136 // Perform O(N^2) search over the gather sequences and merge identical 4137 // instructions. TODO: We can further optimize this scan if we split the 4138 // instructions into different buckets based on the insert lane. 4139 SmallVector<Instruction *, 16> Visited; 4140 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 4141 assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 4142 "Worklist not sorted properly!"); 4143 BasicBlock *BB = (*I)->getBlock(); 4144 // For all instructions in blocks containing gather sequences: 4145 for (BasicBlock::iterator It = BB->begin(), E = BB->end(); It != E;) { 4146 Instruction *In = &*It++; 4147 if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In)) 4148 continue; 4149 4150 // Check if we can replace this instruction with any of the 4151 // visited instructions. 4152 for (Instruction *V : Visited) { 4153 if (In->isIdenticalTo(V) && 4154 DT->dominates(V->getParent(), In->getParent())) { 4155 In->replaceAllUsesWith(V); 4156 eraseInstruction(In); 4157 In = nullptr; 4158 break; 4159 } 4160 } 4161 if (In) { 4162 assert(!is_contained(Visited, In)); 4163 Visited.push_back(In); 4164 } 4165 } 4166 } 4167 CSEBlocks.clear(); 4168 GatherSeq.clear(); 4169 } 4170 4171 // Groups the instructions into a bundle (which is then a single scheduling 4172 // entity) and schedules instructions until the bundle becomes ready. 4173 bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, 4174 BoUpSLP *SLP, 4175 const InstructionsState &S) { 4176 if (isa<PHINode>(S.OpValue)) 4177 return true; 4178 4179 // Initialize the instruction bundle. 4180 Instruction *OldScheduleEnd = ScheduleEnd; 4181 ScheduleData *PrevInBundle = nullptr; 4182 ScheduleData *Bundle = nullptr; 4183 bool ReSchedule = false; 4184 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n"); 4185 4186 // Make sure that the scheduling region contains all 4187 // instructions of the bundle. 4188 for (Value *V : VL) { 4189 if (!extendSchedulingRegion(V, S)) 4190 return false; 4191 } 4192 4193 for (Value *V : VL) { 4194 ScheduleData *BundleMember = getScheduleData(V); 4195 assert(BundleMember && 4196 "no ScheduleData for bundle member (maybe not in same basic block)"); 4197 if (BundleMember->IsScheduled) { 4198 // A bundle member was scheduled as a single instruction before and now 4199 // needs to be scheduled as part of the bundle. We just get rid of the 4200 // existing schedule.
4201 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember 4202 << " was already scheduled\n"); 4203 ReSchedule = true; 4204 } 4205 assert(BundleMember->isSchedulingEntity() && 4206 "bundle member already part of other bundle"); 4207 if (PrevInBundle) { 4208 PrevInBundle->NextInBundle = BundleMember; 4209 } else { 4210 Bundle = BundleMember; 4211 } 4212 BundleMember->UnscheduledDepsInBundle = 0; 4213 Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps; 4214 4215 // Group the instructions into a bundle. 4216 BundleMember->FirstInBundle = Bundle; 4217 PrevInBundle = BundleMember; 4218 } 4219 if (ScheduleEnd != OldScheduleEnd) { 4220 // The scheduling region got new instructions at the lower end (or it is a 4221 // new region for the first bundle). This makes it necessary to 4222 // recalculate all dependencies. 4223 // It is seldom that this needs to be done a second time after adding the 4224 // initial bundle to the region. 4225 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 4226 doForAllOpcodes(I, [](ScheduleData *SD) { 4227 SD->clearDependencies(); 4228 }); 4229 } 4230 ReSchedule = true; 4231 } 4232 if (ReSchedule) { 4233 resetSchedule(); 4234 initialFillReadyList(ReadyInsts); 4235 } 4236 4237 LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block " 4238 << BB->getName() << "\n"); 4239 4240 calculateDependencies(Bundle, true, SLP); 4241 4242 // Now try to schedule the new bundle. As soon as the bundle is "ready" it 4243 // means that there are no cyclic dependencies and we can schedule it. 4244 // Note that it's important that we don't "schedule" the bundle yet (see 4245 // cancelScheduling). 4246 while (!Bundle->isReady() && !ReadyInsts.empty()) { 4247 4248 ScheduleData *PickedSD = ReadyInsts.back(); 4249 ReadyInsts.pop_back(); 4250 4251 if (PickedSD->isSchedulingEntity() && PickedSD->isReady()) { 4252 schedule(PickedSD, ReadyInsts); 4253 } 4254 } 4255 if (!Bundle->isReady()) { 4256 cancelScheduling(VL, S.OpValue); 4257 return false; 4258 } 4259 return true; 4260 } 4261 4262 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL, 4263 Value *OpValue) { 4264 if (isa<PHINode>(OpValue)) 4265 return; 4266 4267 ScheduleData *Bundle = getScheduleData(OpValue); 4268 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); 4269 assert(!Bundle->IsScheduled && 4270 "Can't cancel bundle which is already scheduled"); 4271 assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() && 4272 "tried to unbundle something which is not a bundle"); 4273 4274 // Un-bundle: make single instructions out of the bundle. 4275 ScheduleData *BundleMember = Bundle; 4276 while (BundleMember) { 4277 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); 4278 BundleMember->FirstInBundle = BundleMember; 4279 ScheduleData *Next = BundleMember->NextInBundle; 4280 BundleMember->NextInBundle = nullptr; 4281 BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps; 4282 if (BundleMember->UnscheduledDepsInBundle == 0) { 4283 ReadyInsts.insert(BundleMember); 4284 } 4285 BundleMember = Next; 4286 } 4287 } 4288 4289 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() { 4290 // Allocate a new ScheduleData for the instruction.
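// Sketch of the scheme: ScheduleData objects are handed out from // ChunkSize-sized arrays, so pointers into previously allocated chunks stay // stable while new chunks are appended.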
4291 if (ChunkPos >= ChunkSize) { 4292 ScheduleDataChunks.push_back(llvm::make_unique<ScheduleData[]>(ChunkSize)); 4293 ChunkPos = 0; 4294 } 4295 return &(ScheduleDataChunks.back()[ChunkPos++]); 4296 } 4297 4298 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, 4299 const InstructionsState &S) { 4300 if (getScheduleData(V, isOneOf(S, V))) 4301 return true; 4302 Instruction *I = dyn_cast<Instruction>(V); 4303 assert(I && "bundle member must be an instruction"); 4304 assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled"); 4305 auto &&CheckScheduleForI = [this, &S](Instruction *I) -> bool { 4306 ScheduleData *ISD = getScheduleData(I); 4307 if (!ISD) 4308 return false; 4309 assert(isInSchedulingRegion(ISD) && 4310 "ScheduleData not in scheduling region"); 4311 ScheduleData *SD = allocateScheduleDataChunks(); 4312 SD->Inst = I; 4313 SD->init(SchedulingRegionID, S.OpValue); 4314 ExtraScheduleDataMap[I][S.OpValue] = SD; 4315 return true; 4316 }; 4317 if (CheckScheduleForI(I)) 4318 return true; 4319 if (!ScheduleStart) { 4320 // It's the first instruction in the new region. 4321 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 4322 ScheduleStart = I; 4323 ScheduleEnd = I->getNextNode(); 4324 if (isOneOf(S, I) != I) 4325 CheckScheduleForI(I); 4326 assert(ScheduleEnd && "tried to vectorize a terminator?"); 4327 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 4328 return true; 4329 } 4330 // Search up and down at the same time, because we don't know if the new 4331 // instruction is above or below the existing scheduling region. 4332 BasicBlock::reverse_iterator UpIter = 4333 ++ScheduleStart->getIterator().getReverse(); 4334 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 4335 BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); 4336 BasicBlock::iterator LowerEnd = BB->end(); 4337 while (true) { 4338 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 4339 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 4340 return false; 4341 } 4342 4343 if (UpIter != UpperEnd) { 4344 if (&*UpIter == I) { 4345 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 4346 ScheduleStart = I; 4347 if (isOneOf(S, I) != I) 4348 CheckScheduleForI(I); 4349 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I 4350 << "\n"); 4351 return true; 4352 } 4353 ++UpIter; 4354 } 4355 if (DownIter != LowerEnd) { 4356 if (&*DownIter == I) { 4357 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 4358 nullptr); 4359 ScheduleEnd = I->getNextNode(); 4360 if (isOneOf(S, I) != I) 4361 CheckScheduleForI(I); 4362 assert(ScheduleEnd && "tried to vectorize a terminator?"); 4363 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I 4364 << "\n"); 4365 return true; 4366 } 4367 ++DownIter; 4368 } 4369 assert((UpIter != UpperEnd || DownIter != LowerEnd) && 4370 "instruction not found in block"); 4371 } 4372 return true; 4373 } 4374 4375 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 4376 Instruction *ToI, 4377 ScheduleData *PrevLoadStore, 4378 ScheduleData *NextLoadStore) { 4379 ScheduleData *CurrentLoadStore = PrevLoadStore; 4380 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 4381 ScheduleData *SD = ScheduleDataMap[I]; 4382 if (!SD) { 4383 SD = allocateScheduleDataChunks(); 4384 ScheduleDataMap[I] = SD; 4385 SD->Inst = I; 4386 } 4387 assert(!isInSchedulingRegion(SD) && 4388 "new ScheduleData already in scheduling region"); 4389 SD->init(SchedulingRegionID, I); 4390
4391 if (I->mayReadOrWriteMemory() && 4392 (!isa<IntrinsicInst>(I) || 4393 cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect)) { 4394 // Update the linked list of memory accessing instructions. 4395 if (CurrentLoadStore) { 4396 CurrentLoadStore->NextLoadStore = SD; 4397 } else { 4398 FirstLoadStoreInRegion = SD; 4399 } 4400 CurrentLoadStore = SD; 4401 } 4402 } 4403 if (NextLoadStore) { 4404 if (CurrentLoadStore) 4405 CurrentLoadStore->NextLoadStore = NextLoadStore; 4406 } else { 4407 LastLoadStoreInRegion = CurrentLoadStore; 4408 } 4409 } 4410 4411 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, 4412 bool InsertInReadyList, 4413 BoUpSLP *SLP) { 4414 assert(SD->isSchedulingEntity()); 4415 4416 SmallVector<ScheduleData *, 10> WorkList; 4417 WorkList.push_back(SD); 4418 4419 while (!WorkList.empty()) { 4420 ScheduleData *SD = WorkList.back(); 4421 WorkList.pop_back(); 4422 4423 ScheduleData *BundleMember = SD; 4424 while (BundleMember) { 4425 assert(isInSchedulingRegion(BundleMember)); 4426 if (!BundleMember->hasValidDependencies()) { 4427 4428 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember 4429 << "\n"); 4430 BundleMember->Dependencies = 0; 4431 BundleMember->resetUnscheduledDeps(); 4432 4433 // Handle def-use chain dependencies. 4434 if (BundleMember->OpValue != BundleMember->Inst) { 4435 ScheduleData *UseSD = getScheduleData(BundleMember->Inst); 4436 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 4437 BundleMember->Dependencies++; 4438 ScheduleData *DestBundle = UseSD->FirstInBundle; 4439 if (!DestBundle->IsScheduled) 4440 BundleMember->incrementUnscheduledDeps(1); 4441 if (!DestBundle->hasValidDependencies()) 4442 WorkList.push_back(DestBundle); 4443 } 4444 } else { 4445 for (User *U : BundleMember->Inst->users()) { 4446 if (isa<Instruction>(U)) { 4447 ScheduleData *UseSD = getScheduleData(U); 4448 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 4449 BundleMember->Dependencies++; 4450 ScheduleData *DestBundle = UseSD->FirstInBundle; 4451 if (!DestBundle->IsScheduled) 4452 BundleMember->incrementUnscheduledDeps(1); 4453 if (!DestBundle->hasValidDependencies()) 4454 WorkList.push_back(DestBundle); 4455 } 4456 } else { 4457 // It is unclear whether this can ever happen, but we need to be 4458 // safe. Adding a dependency that is never resolved keeps the 4459 // instruction/bundle from ever being scheduled, which eventually 4460 // disables vectorization. BundleMember->Dependencies++; 4461 BundleMember->incrementUnscheduledDeps(1); 4462 } 4463 } 4464 } 4465 4466 // Handle the memory dependencies. 4467 ScheduleData *DepDest = BundleMember->NextLoadStore; 4468 if (DepDest) { 4469 Instruction *SrcInst = BundleMember->Inst; 4470 MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA); 4471 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory(); 4472 unsigned NumAliased = 0; 4473 unsigned DistToSrc = 1; 4474 4475 while (DepDest) { 4476 assert(isInSchedulingRegion(DepDest)); 4477 4478 // We have two limits to reduce the complexity: 4479 // 1) AliasedCheckLimit: It's a small limit to reduce calls to 4480 // SLP->isAliased (which is the expensive part in this loop). 4481 // 2) MaxMemDepDistance: It's for very large blocks and it aborts 4482 // the whole loop (even if the loop is fast, it's quadratic). 4483 // It's important for the loop break condition (see below) to 4484 // check this limit even between two read-only instructions.
4485 if (DistToSrc >= MaxMemDepDistance || 4486 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) && 4487 (NumAliased >= AliasedCheckLimit || 4488 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) { 4489 4490 // We increment the counter only if the locations are aliased 4491 // (instead of counting all alias checks). This gives a better 4492 // balance between reduced runtime and accurate dependencies. 4493 NumAliased++; 4494 4495 DepDest->MemoryDependencies.push_back(BundleMember); 4496 BundleMember->Dependencies++; 4497 ScheduleData *DestBundle = DepDest->FirstInBundle; 4498 if (!DestBundle->IsScheduled) { 4499 BundleMember->incrementUnscheduledDeps(1); 4500 } 4501 if (!DestBundle->hasValidDependencies()) { 4502 WorkList.push_back(DestBundle); 4503 } 4504 } 4505 DepDest = DepDest->NextLoadStore; 4506 4507 // An example explaining the loop break condition: Let's assume our 4508 // starting instruction is i0 and MaxMemDepDistance = 3. 4509 // 4510 // +--------v--v--v 4511 // i0,i1,i2,i3,i4,i5,i6,i7,i8 4512 // +--------^--^--^ 4513 // 4514 // MaxMemDepDistance lets us stop alias-checking at i3 and we add 4515 // dependencies from i0 to i3,i4,.. (even if they are not aliased). 4516 // Previously we already added dependencies from i3 to i6,i7,i8 4517 // (because of MaxMemDepDistance). As we added a dependency from 4518 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 4519 // and we can abort this loop at i6. 4520 if (DistToSrc >= 2 * MaxMemDepDistance) 4521 break; 4522 DistToSrc++; 4523 } 4524 } 4525 } 4526 BundleMember = BundleMember->NextInBundle; 4527 } 4528 if (InsertInReadyList && SD->isReady()) { 4529 ReadyInsts.push_back(SD); 4530 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst 4531 << "\n"); 4532 } 4533 } 4534 } 4535 4536 void BoUpSLP::BlockScheduling::resetSchedule() { 4537 assert(ScheduleStart && 4538 "tried to reset schedule on block which has not been scheduled"); 4539 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 4540 doForAllOpcodes(I, [&](ScheduleData *SD) { 4541 assert(isInSchedulingRegion(SD) && 4542 "ScheduleData not in scheduling region"); 4543 SD->IsScheduled = false; 4544 SD->resetUnscheduledDeps(); 4545 }); 4546 } 4547 ReadyInsts.clear(); 4548 } 4549 4550 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 4551 if (!BS->ScheduleStart) 4552 return; 4553 4554 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 4555 4556 BS->resetSchedule(); 4557 4558 // For the real scheduling we use a more sophisticated ready-list: it is 4559 // sorted by the original instruction location. This lets the final schedule 4560 // be as close as possible to the original instruction order. 4561 struct ScheduleDataCompare { 4562 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const { 4563 return SD2->SchedulingPriority < SD1->SchedulingPriority; 4564 } 4565 }; 4566 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 4567 4568 // Ensure that all dependency data is updated and fill the ready-list with 4569 // initial instructions.
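// Sketch of the priority scheme: SchedulingPriority records each bundle // head's original position in the block, and ScheduleDataCompare above // orders the ready set by it, so the final schedule stays as close as // possible to the source order.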
4570 int Idx = 0; 4571 int NumToSchedule = 0; 4572 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 4573 I = I->getNextNode()) { 4574 BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) { 4575 assert(SD->isPartOfBundle() == 4576 (getTreeEntry(SD->Inst) != nullptr) && 4577 "scheduler and vectorizer bundle mismatch"); 4578 SD->FirstInBundle->SchedulingPriority = Idx++; 4579 if (SD->isSchedulingEntity()) { 4580 BS->calculateDependencies(SD, false, this); 4581 NumToSchedule++; 4582 } 4583 }); 4584 } 4585 BS->initialFillReadyList(ReadyInsts); 4586 4587 Instruction *LastScheduledInst = BS->ScheduleEnd; 4588 4589 // Do the "real" scheduling. 4590 while (!ReadyInsts.empty()) { 4591 ScheduleData *Picked = *ReadyInsts.begin(); 4592 ReadyInsts.erase(ReadyInsts.begin()); 4593 4594 // Move the scheduled instruction(s) to their dedicated places, if not 4595 // there yet. 4596 ScheduleData *BundleMember = Picked; 4597 while (BundleMember) { 4598 Instruction *PickedInst = BundleMember->Inst; 4599 if (LastScheduledInst->getNextNode() != PickedInst) { 4600 BS->BB->getInstList().remove(PickedInst); 4601 BS->BB->getInstList().insert(LastScheduledInst->getIterator(), 4602 PickedInst); 4603 } 4604 LastScheduledInst = PickedInst; 4605 BundleMember = BundleMember->NextInBundle; 4606 } 4607 4608 BS->schedule(Picked, ReadyInsts); 4609 NumToSchedule--; 4610 } 4611 assert(NumToSchedule == 0 && "could not schedule all instructions"); 4612 4613 // Avoid duplicate scheduling of the block. 4614 BS->ScheduleStart = nullptr; 4615 } 4616 4617 unsigned BoUpSLP::getVectorElementSize(Value *V) const { 4618 // If V is a store, just return the width of the stored value without 4619 // traversing the expression tree. This is the common case. 4620 if (auto *Store = dyn_cast<StoreInst>(V)) 4621 return DL->getTypeSizeInBits(Store->getValueOperand()->getType()); 4622 4623 // If V is not a store, we can traverse the expression tree to find loads 4624 // that feed it. The type of the loaded value may indicate a more suitable 4625 // width than V's type. We want to base the vector element size on the width 4626 // of memory operations where possible. 4627 SmallVector<Instruction *, 16> Worklist; 4628 SmallPtrSet<Instruction *, 16> Visited; 4629 if (auto *I = dyn_cast<Instruction>(V)) 4630 Worklist.push_back(I); 4631 4632 // Traverse the expression tree in bottom-up order looking for loads. If we 4633 // encounter an instruction we don't yet handle, we give up. 4634 auto MaxWidth = 0u; 4635 auto FoundUnknownInst = false; 4636 while (!Worklist.empty() && !FoundUnknownInst) { 4637 auto *I = Worklist.pop_back_val(); 4638 Visited.insert(I); 4639 4640 // We should only be looking at scalar instructions here. If the current 4641 // instruction has a vector type, give up. 4642 auto *Ty = I->getType(); 4643 if (isa<VectorType>(Ty)) 4644 FoundUnknownInst = true; 4645 4646 // If the current instruction is a load, update MaxWidth to reflect the 4647 // width of the loaded value. 4648 else if (isa<LoadInst>(I)) 4649 MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty)); 4650 4651 // Otherwise, we need to visit the operands of the instruction. We only 4652 // handle the interesting cases from buildTree here. If an operand is an 4653 // instruction we haven't yet visited, we add it to the worklist.
4654 else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 4655 isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) { 4656 for (Use &U : I->operands()) 4657 if (auto *J = dyn_cast<Instruction>(U.get())) 4658 if (!Visited.count(J)) 4659 Worklist.push_back(J); 4660 } 4661 4662 // If we don't yet handle the instruction, give up. 4663 else 4664 FoundUnknownInst = true; 4665 } 4666 4667 // If we didn't encounter a memory access in the expression tree, or if we 4668 // gave up for some reason, just return the width of V. 4669 if (!MaxWidth || FoundUnknownInst) 4670 return DL->getTypeSizeInBits(V->getType()); 4671 4672 // Otherwise, return the maximum width we found. 4673 return MaxWidth; 4674 } 4675 4676 // Determine if a value V in a vectorizable expression Expr can be demoted to a 4677 // smaller type with a truncation. We collect the values that will be demoted 4678 // in ToDemote and additional roots that require investigating in Roots. 4679 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr, 4680 SmallVectorImpl<Value *> &ToDemote, 4681 SmallVectorImpl<Value *> &Roots) { 4682 // We can always demote constants. 4683 if (isa<Constant>(V)) { 4684 ToDemote.push_back(V); 4685 return true; 4686 } 4687 4688 // If the value is not an instruction in the expression with only one use, it 4689 // cannot be demoted. 4690 auto *I = dyn_cast<Instruction>(V); 4691 if (!I || !I->hasOneUse() || !Expr.count(I)) 4692 return false; 4693 4694 switch (I->getOpcode()) { 4695 4696 // We can always demote truncations and extensions. Since truncations can 4697 // seed additional demotion, we save the truncated value. 4698 case Instruction::Trunc: 4699 Roots.push_back(I->getOperand(0)); 4700 break; 4701 case Instruction::ZExt: 4702 case Instruction::SExt: 4703 break; 4704 4705 // We can demote certain binary operations if we can demote both of their 4706 // operands. 4707 case Instruction::Add: 4708 case Instruction::Sub: 4709 case Instruction::Mul: 4710 case Instruction::And: 4711 case Instruction::Or: 4712 case Instruction::Xor: 4713 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) || 4714 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots)) 4715 return false; 4716 break; 4717 4718 // We can demote selects if we can demote their true and false values. 4719 case Instruction::Select: { 4720 SelectInst *SI = cast<SelectInst>(I); 4721 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) || 4722 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots)) 4723 return false; 4724 break; 4725 } 4726 4727 // We can demote phis if we can demote all their incoming operands. Note that 4728 // we don't need to worry about cycles since we ensure single use above. 4729 case Instruction::PHI: { 4730 PHINode *PN = cast<PHINode>(I); 4731 for (Value *IncValue : PN->incoming_values()) 4732 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots)) 4733 return false; 4734 break; 4735 } 4736 4737 // Otherwise, conservatively give up. 4738 default: 4739 return false; 4740 } 4741 4742 // Record the value that we can demote. 4743 ToDemote.push_back(V); 4744 return true; 4745 } 4746 4747 void BoUpSLP::computeMinimumValueSizes() { 4748 // If there are no external uses, the expression tree must be rooted by a 4749 // store. We can't demote in-memory values, so there is nothing to do here. 4750 if (ExternalUses.empty()) 4751 return; 4752 4753 // We only attempt to truncate integer expressions. 
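// Illustrative example (hypothetical types): a tree rooted at i32 adds // whose external users only demand the low 8 bits can be rewritten to // operate on <N x i8>, with a sign- or zero-extend re-materializing the i32 // scalars at the extraction points.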
4754 auto &TreeRoot = VectorizableTree[0].Scalars; 4755 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 4756 if (!TreeRootIT) 4757 return; 4758 4759 // If the expression is not rooted by a store, these roots should have 4760 // external uses. We will rely on InstCombine to rewrite the expression in 4761 // the narrower type. However, InstCombine only rewrites single-use values. 4762 // This means that if a tree entry other than a root is used externally, it 4763 // must have multiple uses and InstCombine will not rewrite it. The code 4764 // below ensures that only the roots are used externally. 4765 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end()); 4766 for (auto &EU : ExternalUses) 4767 if (!Expr.erase(EU.Scalar)) 4768 return; 4769 if (!Expr.empty()) 4770 return; 4771 4772 // Collect the scalar values of the vectorizable expression. We will use this 4773 // context to determine which values can be demoted. If we see a truncation, 4774 // we mark it as seeding another demotion. 4775 for (auto &Entry : VectorizableTree) 4776 Expr.insert(Entry.Scalars.begin(), Entry.Scalars.end()); 4777 4778 // Ensure the roots of the vectorizable tree don't form a cycle. They must 4779 // have a single external user that is not in the vectorizable tree. 4780 for (auto *Root : TreeRoot) 4781 if (!Root->hasOneUse() || Expr.count(*Root->user_begin())) 4782 return; 4783 4784 // Conservatively determine if we can actually truncate the roots of the 4785 // expression. Collect the values that can be demoted in ToDemote and 4786 // additional roots that require investigating in Roots. 4787 SmallVector<Value *, 32> ToDemote; 4788 SmallVector<Value *, 4> Roots; 4789 for (auto *Root : TreeRoot) 4790 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots)) 4791 return; 4792 4793 // The maximum bit width required to represent all the values that can be 4794 // demoted without loss of precision. It would be safe to truncate the roots 4795 // of the expression to this width. 4796 auto MaxBitWidth = 8u; 4797 4798 // We first check if all the bits of the roots are demanded. If they're not, 4799 // we can truncate the roots to this narrower type. 4800 for (auto *Root : TreeRoot) { 4801 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 4802 MaxBitWidth = std::max<unsigned>( 4803 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth); 4804 } 4805 4806 // True if the roots can be zero-extended back to their original type, rather 4807 // than sign-extended. We know that if the leading bits are not demanded, we 4808 // can safely zero-extend. So we initialize IsKnownPositive to True. 4809 bool IsKnownPositive = true; 4810 4811 // If all the bits of the roots are demanded, we can try a little harder to 4812 // compute a narrower type. This can happen, for example, if the roots are 4813 // getelementptr indices. InstCombine promotes these indices to the pointer 4814 // width. Thus, all their bits are technically demanded even though the 4815 // address computation might be vectorized in a smaller type. 4816 // 4817 // We start by looking at each entry that can be demoted. We compute the 4818 // maximum bit width required to store the scalar by using ValueTracking to 4819 // compute the number of high-order bits we can truncate. 
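// A hedged worked example with hypothetical numbers: for an i32 scalar where
// ComputeNumSignBits reports 25 sign bits, the scalar needs only
// 32 - 25 = 7 bits; one extra bit is added below when the sign is unknown,
// giving 8, which is already a power of two.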
4820   if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
4821       llvm::all_of(TreeRoot, [](Value *R) {
4822         assert(R->hasOneUse() && "Root should have only one use!");
4823         return isa<GetElementPtrInst>(R->user_back());
4824       })) {
4825     MaxBitWidth = 8u;
4826 
4827     // Determine if the sign bit of all the roots is known to be zero. If not,
4828     // IsKnownPositive is set to False.
4829     IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
4830       KnownBits Known = computeKnownBits(R, *DL);
4831       return Known.isNonNegative();
4832     });
4833 
4834     // Determine the maximum number of bits required to store the scalar
4835     // values.
4836     for (auto *Scalar : ToDemote) {
4837       auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
4838       auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
4839       MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
4840     }
4841 
4842     // If we can't prove that the sign bit is zero, we must add one to the
4843     // maximum bit width to account for the unknown sign bit. This preserves
4844     // the existing sign bit so we can safely sign-extend the root back to the
4845     // original type. Otherwise, if we know the sign bit is zero, we will
4846     // zero-extend the root instead.
4847     //
4848     // FIXME: This is somewhat suboptimal, as there will be cases where adding
4849     //        one to the maximum bit width will yield a larger-than-necessary
4850     //        type. In general, we need to add an extra bit only if we can't
4851     //        prove that the upper bit of the original type is equal to the
4852     //        upper bit of the proposed smaller type. If these two bits are the
4853     //        same (either zero or one) we know that sign-extending from the
4854     //        smaller type will result in the same value. Here, since we can't
4855     //        yet prove this, we are just making the proposed smaller type
4856     //        larger to ensure correctness.
4857     if (!IsKnownPositive)
4858       ++MaxBitWidth;
4859   }
4860 
4861   // Round MaxBitWidth up to the next power-of-two.
4862   if (!isPowerOf2_64(MaxBitWidth))
4863     MaxBitWidth = NextPowerOf2(MaxBitWidth);
4864 
4865   // If the maximum bit width we compute is less than the width of the roots'
4866   // type, we can proceed with the narrowing. Otherwise, do nothing.
4867   if (MaxBitWidth >= TreeRootIT->getBitWidth())
4868     return;
4869 
4870   // If we can truncate the root, we must collect additional values that might
4871   // be demoted as a result. That is, those seeded by truncations we will
4872   // modify.
4873   while (!Roots.empty())
4874     collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
4875 
4876   // Finally, map the values we can demote to the maximum bit width we computed.
4877   for (auto *Scalar : ToDemote)
4878     MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
4879 }
4880 
4881 namespace {
4882 
4883 /// The SLPVectorizer Pass.
4884 struct SLPVectorizer : public FunctionPass {
4885   SLPVectorizerPass Impl;
4886 
4887   /// Pass identification, replacement for typeid
4888   static char ID;
4889 
4890   explicit SLPVectorizer() : FunctionPass(ID) {
4891     initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
4892   }
4893 
4894   bool doInitialization(Module &M) override {
4895     return false;
4896   }
4897 
4898   bool runOnFunction(Function &F) override {
4899     if (skipFunction(F))
4900       return false;
4901 
4902     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
4903     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
4904     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
4905     auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
4906     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
4907     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
4908     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
4909     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
4910     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
4911     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
4912 
4913     return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
4914   }
4915 
4916   void getAnalysisUsage(AnalysisUsage &AU) const override {
4917     FunctionPass::getAnalysisUsage(AU);
4918     AU.addRequired<AssumptionCacheTracker>();
4919     AU.addRequired<ScalarEvolutionWrapperPass>();
4920     AU.addRequired<AAResultsWrapperPass>();
4921     AU.addRequired<TargetTransformInfoWrapperPass>();
4922     AU.addRequired<LoopInfoWrapperPass>();
4923     AU.addRequired<DominatorTreeWrapperPass>();
4924     AU.addRequired<DemandedBitsWrapperPass>();
4925     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
4926     AU.addPreserved<LoopInfoWrapperPass>();
4927     AU.addPreserved<DominatorTreeWrapperPass>();
4928     AU.addPreserved<AAResultsWrapperPass>();
4929     AU.addPreserved<GlobalsAAWrapperPass>();
4930     AU.setPreservesCFG();
4931   }
4932 };
4933 
4934 } // end anonymous namespace
4935 
4936 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
4937   auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
4938   auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
4939   auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
4940   auto *AA = &AM.getResult<AAManager>(F);
4941   auto *LI = &AM.getResult<LoopAnalysis>(F);
4942   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
4943   auto *AC = &AM.getResult<AssumptionAnalysis>(F);
4944   auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
4945   auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
4946 
4947   bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
4948   if (!Changed)
4949     return PreservedAnalyses::all();
4950 
4951   PreservedAnalyses PA;
4952   PA.preserveSet<CFGAnalyses>();
4953   PA.preserve<AAManager>();
4954   PA.preserve<GlobalsAA>();
4955   return PA;
4956 }
4957 
4958 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
4959                                 TargetTransformInfo *TTI_,
4960                                 TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
4961                                 LoopInfo *LI_, DominatorTree *DT_,
4962                                 AssumptionCache *AC_, DemandedBits *DB_,
4963                                 OptimizationRemarkEmitter *ORE_) {
4964   SE = SE_;
4965   TTI = TTI_;
4966   TLI = TLI_;
4967   AA = AA_;
4968   LI = LI_;
4969   DT = DT_;
4970   AC = AC_;
4971   DB = DB_;
4972   DL = &F.getParent()->getDataLayout();
4973 
4974   Stores.clear();
4975   GEPs.clear();
4976   bool Changed = false;
4977 
4978   // If the target claims to have no vector registers, don't attempt
4979   // vectorization.
4980   if (!TTI->getNumberOfRegisters(true))
4981     return false;
4982 
4983   // Don't vectorize when the attribute NoImplicitFloat is used.
4984   if (F.hasFnAttribute(Attribute::NoImplicitFloat))
4985     return false;
4986 
4987   LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
4988 
4989   // Use the bottom up slp vectorizer to construct chains that start with
4990   // store instructions.
4991   BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);
4992 
4993   // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
4994   // delete instructions.
4995 
4996   // Scan the blocks in the function in post order.
4997   for (auto BB : post_order(&F.getEntryBlock())) {
4998     collectSeedInstructions(BB);
4999 
5000     // Vectorize trees that end at stores.
5001     if (!Stores.empty()) {
5002       LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
5003                         << " underlying objects.\n");
5004       Changed |= vectorizeStoreChains(R);
5005     }
5006 
5007     // Vectorize trees that end at reductions.
5008     Changed |= vectorizeChainsInBlock(BB, R);
5009 
5010     // Vectorize the index computations of getelementptr instructions. This
5011     // is primarily intended to catch gather-like idioms ending at
5012     // non-consecutive loads.
5013     if (!GEPs.empty()) {
5014       LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
5015                         << " underlying objects.\n");
5016       Changed |= vectorizeGEPIndices(BB, R);
5017     }
5018   }
5019 
5020   if (Changed) {
5021     R.optimizeGatherSequence();
5022     LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
5023     LLVM_DEBUG(verifyFunction(F));
5024   }
5025   return Changed;
5026 }
5027 
5028 /// Check that the values in the given slice of the VL array still exist in
5029 /// the corresponding WeakTrackingVH array.
5030 /// Vectorizing part of the VL array may cause later values in the VL array
5031 /// to become invalid. We track when this has happened in the WeakTrackingVH
5032 /// array.
5033 static bool hasValueBeenRAUWed(ArrayRef<Value *> VL,
5034                                ArrayRef<WeakTrackingVH> VH, unsigned SliceBegin,
5035                                unsigned SliceSize) {
5036   VL = VL.slice(SliceBegin, SliceSize);
5037   VH = VH.slice(SliceBegin, SliceSize);
5038   return !std::equal(VL.begin(), VL.end(), VH.begin());
5039 }
5040 
5041 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
5042                                             unsigned VecRegSize) {
5043   const unsigned ChainLen = Chain.size();
5044   LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
5045                     << "\n");
5046   const unsigned Sz = R.getVectorElementSize(Chain[0]);
5047   const unsigned VF = VecRegSize / Sz;
5048 
5049   if (!isPowerOf2_32(Sz) || VF < 2)
5050     return false;
5051 
5052   // Keep track of values that were deleted by vectorizing in the loop below.
5053   const SmallVector<WeakTrackingVH, 8> TrackValues(Chain.begin(), Chain.end());
5054 
5055   bool Changed = false;
5056   // Look for profitable vectorizable trees at all offsets, starting at zero.
5057   for (unsigned i = 0, e = ChainLen; i + VF <= e; ++i) {
5058 
5059     // Check that a previous iteration of this loop did not delete the Value.
5060     if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
5061       continue;
5062 
5063     LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
5064                       << "\n");
5065     ArrayRef<Value *> Operands = Chain.slice(i, VF);
5066 
5067     R.buildTree(Operands);
5068     if (R.isTreeTinyAndNotFullyVectorizable())
5069       continue;
5070 
5071     R.computeMinimumValueSizes();
5072 
5073     int Cost = R.getTreeCost();
5074 
5075     LLVM_DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF
5076                       << "\n");
5077     if (Cost < -SLPCostThreshold) {
5078       LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
5079 
5080       using namespace ore;
5081 
5082       R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
5083                                           cast<StoreInst>(Chain[i]))
5084                        << "Stores SLP vectorized with cost " << NV("Cost", Cost)
5085                        << " and with tree size "
5086                        << NV("TreeSize", R.getTreeSize()));
5087 
5088       R.vectorizeTree();
5089 
5090       // Move to the next bundle.
5091       i += VF - 1;
5092       Changed = true;
5093     }
5094   }
5095 
5096   return Changed;
5097 }
5098 
5099 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
5100                                         BoUpSLP &R) {
5101   SetVector<StoreInst *> Heads;
5102   SmallDenseSet<StoreInst *> Tails;
5103   SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
5104 
5105   // We may run into multiple chains that merge into a single chain. We mark
5106   // the stores that we vectorized so that we don't visit the same store twice.
5107   BoUpSLP::ValueSet VectorizedStores;
5108   bool Changed = false;
5109 
5110   auto &&FindConsecutiveAccess =
5111       [this, &Stores, &Heads, &Tails, &ConsecutiveChain](int K, int Idx) {
5112         if (!isConsecutiveAccess(Stores[K], Stores[Idx], *DL, *SE))
5113           return false;
5114 
5115         Tails.insert(Stores[Idx]);
5116         Heads.insert(Stores[K]);
5117         ConsecutiveChain[Stores[K]] = Stores[Idx];
5118         return true;
5119       };
5120 
5121   // Do a quadratic search on all of the given stores in reverse order and find
5122   // all of the pairs of stores that follow each other.
5123   int E = Stores.size();
5124   for (int Idx = E - 1; Idx >= 0; --Idx) {
5125     // If a store has multiple consecutive store candidates, search according
5126     // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ...
5127     // This is because pairing with an immediately succeeding or preceding
5128     // candidate usually creates the best chance for SLP vectorization.
5129     for (int Offset = 1, F = std::max(E - Idx, Idx + 1); Offset < F; ++Offset)
5130       if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) ||
5131           (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx)))
5132         break;
5133   }
5134 
5135   // For stores that start but don't end a link in the chain:
5136   for (auto *SI : llvm::reverse(Heads)) {
5137     if (Tails.count(SI))
5138       continue;
5139 
5140     // We found a store instr that starts a chain. Now follow the chain and try
5141     // to vectorize it.
5142     BoUpSLP::ValueList Operands;
5143     StoreInst *I = SI;
5144     // Collect the chain into a list.
5145     while ((Tails.count(I) || Heads.count(I)) && !VectorizedStores.count(I)) {
5146       Operands.push_back(I);
5147       // Move to the next value in the chain.
5148       I = ConsecutiveChain[I];
5149     }
5150 
5151     // FIXME: Is division-by-2 the correct step? Should we assert that the
5152     // register size is a power-of-2?
5153     for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize();
5154          Size /= 2) {
5155       if (vectorizeStoreChain(Operands, R, Size)) {
5156         // Mark the vectorized stores so that we don't vectorize them again.
5157         VectorizedStores.insert(Operands.begin(), Operands.end());
5158         Changed = true;
5159         break;
5160       }
5161     }
5162   }
5163 
5164   return Changed;
5165 }
5166 
5167 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
5168   // Initialize the collections. We will make a single pass over the block.
5169   Stores.clear();
5170   GEPs.clear();
5171 
5172   // Visit the store and getelementptr instructions in BB and organize them in
5173   // Stores and GEPs according to the underlying objects of their pointer
5174   // operands.
5175   for (Instruction &I : *BB) {
5176     // Ignore store instructions that are volatile or have a pointer operand
5177     // that doesn't point to a scalar type.
5178     if (auto *SI = dyn_cast<StoreInst>(&I)) {
5179       if (!SI->isSimple())
5180         continue;
5181       if (!isValidElementType(SI->getValueOperand()->getType()))
5182         continue;
5183       Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
5184     }
5185 
5186     // Ignore getelementptr instructions that have more than one index, a
5187     // constant index, or a pointer operand that doesn't point to a scalar
5188     // type.
5189     else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
5190       auto Idx = GEP->idx_begin()->get();
5191       if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
5192         continue;
5193       if (!isValidElementType(Idx->getType()))
5194         continue;
5195       if (GEP->getType()->isVectorTy())
5196         continue;
5197       GEPs[GEP->getPointerOperand()].push_back(GEP);
5198     }
5199   }
5200 }
5201 
5202 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
5203   if (!A || !B)
5204     return false;
5205   Value *VL[] = { A, B };
5206   return tryToVectorizeList(VL, R, /*UserCost=*/0, true);
5207 }
5208 
5209 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
5210                                            int UserCost, bool AllowReorder) {
5211   if (VL.size() < 2)
5212     return false;
5213 
5214   LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = "
5215                     << VL.size() << ".\n");
5216 
5217   // Check that all of the parts are scalar instructions of the same type;
5218   // we permit an alternate opcode via InstructionsState.
5219   InstructionsState S = getSameOpcode(VL);
5220   if (!S.getOpcode())
5221     return false;
5222 
5223   Instruction *I0 = cast<Instruction>(S.OpValue);
5224   unsigned Sz = R.getVectorElementSize(I0);
5225   unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz);
5226   unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
5227   if (MaxVF < 2) {
5228     R.getORE()->emit([&]() {
5229       return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0)
5230              << "Cannot SLP vectorize list: vectorization factor "
5231              << "less than 2 is not supported";
5232     });
5233     return false;
5234   }
5235 
5236   for (Value *V : VL) {
5237     Type *Ty = V->getType();
5238     if (!isValidElementType(Ty)) {
5239       // NOTE: the following will give the user an internal LLVM type name,
5240       // which may not be useful.
5241       R.getORE()->emit([&]() {
5242         std::string type_str;
5243         llvm::raw_string_ostream rso(type_str);
5244         Ty->print(rso);
5245         return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0)
5246                << "Cannot SLP vectorize list: type "
5247                << rso.str() + " is unsupported by vectorizer";
5248       });
5249       return false;
5250     }
5251   }
5252 
5253   bool Changed = false;
5254   bool CandidateFound = false;
5255   int MinCost = SLPCostThreshold;
5256 
5257   // Keep track of values that were deleted by vectorizing in the loop below.
5258   SmallVector<WeakTrackingVH, 8> TrackValues(VL.begin(), VL.end());
5259 
5260   unsigned NextInst = 0, MaxInst = VL.size();
5261   for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF;
5262        VF /= 2) {
5263     // No actual vectorization should happen if the number of parts is the
5264     // same as the provided vectorization factor (i.e. the scalar type is used
5265     // for vector code during codegen).
5266     auto *VecTy = VectorType::get(VL[0]->getType(), VF);
5267     if (TTI->getNumberOfParts(VecTy) == VF)
5268       continue;
5269     for (unsigned I = NextInst; I < MaxInst; ++I) {
5270       unsigned OpsWidth = 0;
5271 
5272       if (I + VF > MaxInst)
5273         OpsWidth = MaxInst - I;
5274       else
5275         OpsWidth = VF;
5276 
5277       if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
5278         break;
5279 
5280       // Check that a previous iteration of this loop did not delete the Value.
5281       if (hasValueBeenRAUWed(VL, TrackValues, I, OpsWidth))
5282         continue;
5283 
5284       LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
5285                         << "\n");
5286       ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
5287 
5288       R.buildTree(Ops);
5289       Optional<ArrayRef<unsigned>> Order = R.bestOrder();
5290       // TODO: check if we can allow reordering for more cases.
5291       if (AllowReorder && Order) {
5292         // TODO: reorder tree nodes without tree rebuilding.
5293         // Conceptually, there is nothing actually preventing us from trying to
5294         // reorder a larger list. In fact, we do exactly this when vectorizing
5295         // reductions. However, at this point, we only expect to get here when
5296         // there are exactly two operations.
5297         assert(Ops.size() == 2 && "Expected exactly two operands to reorder");
5298         Value *ReorderedOps[] = {Ops[1], Ops[0]};
5299         R.buildTree(ReorderedOps, None);
5300       }
5301       if (R.isTreeTinyAndNotFullyVectorizable())
5302         continue;
5303 
5304       R.computeMinimumValueSizes();
5305       int Cost = R.getTreeCost() - UserCost;
5306       CandidateFound = true;
5307       MinCost = std::min(MinCost, Cost);
5308 
5309       if (Cost < -SLPCostThreshold) {
5310         LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
5311         R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
5312                                             cast<Instruction>(Ops[0]))
5313                          << "SLP vectorized with cost " << ore::NV("Cost", Cost)
5314                          << " and with tree size "
5315                          << ore::NV("TreeSize", R.getTreeSize()));
5316 
5317         R.vectorizeTree();
5318         // Move to the next bundle.
5319         I += VF - 1;
5320         NextInst = I + 1;
5321         Changed = true;
5322       }
5323     }
5324   }
5325 
5326   if (!Changed && CandidateFound) {
5327     R.getORE()->emit([&]() {
5328       return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0)
5329              << "List vectorization was possible but not beneficial with cost "
5330              << ore::NV("Cost", MinCost) << " >= "
5331              << ore::NV("Threshold", -SLPCostThreshold);
5332     });
5333   } else if (!Changed) {
5334     R.getORE()->emit([&]() {
5335       return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0)
5336              << "Cannot SLP vectorize list: vectorization was impossible"
5337              << " with available vectorization factors";
5338     });
5339   }
5340   return Changed;
5341 }
5342 
5343 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
5344   if (!I)
5345     return false;
5346 
5347   if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I))
5348     return false;
5349 
5350   Value *P = I->getParent();
5351 
5352   // Vectorize in current basic block only.
5353   auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
5354   auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
5355   if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
5356     return false;
5357 
5358   // Try to vectorize the pair (Op0, Op1).
5359   if (tryToVectorizePair(Op0, Op1, R))
5360     return true;
5361 
5362   auto *A = dyn_cast<BinaryOperator>(Op0);
5363   auto *B = dyn_cast<BinaryOperator>(Op1);
5364   // Try to skip B.
5365   if (B && B->hasOneUse()) {
5366     auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
5367     auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
5368     if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R))
5369       return true;
5370     if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R))
5371       return true;
5372   }
5373 
5374   // Try to skip A.
5375   if (A && A->hasOneUse()) {
5376     auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
5377     auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
5378     if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R))
5379       return true;
5380     if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R))
5381       return true;
5382   }
5383   return false;
5384 }
5385 
5386 /// Generate a shuffle mask to be used in a reduction tree.
5387 ///
5388 /// \param VecLen The length of the vector to be reduced.
5389 /// \param NumEltsToRdx The number of elements that should be reduced in the
5390 ///        vector.
5391 /// \param IsPairwise Whether the reduction is a pairwise or splitting
5392 ///        reduction. A pairwise reduction will generate a mask of
5393 ///        <0,2,...> or <1,3,..> while a splitting reduction will generate
5394 ///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
5395 /// \param IsLeft True will generate a mask of even elements, odd otherwise.
5396 static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
5397                                    bool IsPairwise, bool IsLeft,
5398                                    IRBuilder<> &Builder) {
5399   assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");
5400 
5401   SmallVector<Constant *, 32> ShuffleMask(
5402       VecLen, UndefValue::get(Builder.getInt32Ty()));
5403 
5404   if (IsPairwise)
5405     // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
5406     for (unsigned i = 0; i != NumEltsToRdx; ++i)
5407       ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
5408   else
5409     // Move the upper half of the vector to the lower half.
5410     for (unsigned i = 0; i != NumEltsToRdx; ++i)
5411       ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);
5412 
5413   return ConstantVector::get(ShuffleMask);
5414 }
5415 
5416 namespace {
5417 
5418 /// Model horizontal reductions.
5419 ///
5420 /// A horizontal reduction is a tree of reduction operations (such as add and
5421 /// fadd, or min/max patterns) whose leaf operations can be put into a vector.
5422 /// For example, this tree:
5423 ///
5424 ///  mul mul mul mul
5425 ///    \  /    \  /
5426 ///     +       +
5427 ///      \     /
5428 ///         +
5429 /// This tree has "mul" as its reduced values and "+" as its reduction
5430 /// operations. A reduction might be feeding into a store or a binary operation
5431 /// feeding a phi.
5432 ///    ...
5433 ///    \  /
5434 ///     +
5435 ///     |
5436 ///  phi +=
5437 ///
5438 ///  Or:
5439 ///    ...
5440 ///    \  /
5441 ///     +
5442 ///     |
5443 ///   *p =
5444 ///
5445 class HorizontalReduction {
5446   using ReductionOpsType = SmallVector<Value *, 16>;
5447   using ReductionOpsListType = SmallVector<ReductionOpsType, 2>;
5448   ReductionOpsListType ReductionOps;
5449   SmallVector<Value *, 32> ReducedVals;
5450   // Use map vector to make stable output.
5451   MapVector<Instruction *, Value *> ExtraArgs;
5452 
5453   /// Kind of the reduction data.
5454   enum ReductionKind {
5455     RK_None,       ///< Not a reduction.
5456     RK_Arithmetic, ///< Binary reduction data.
5457     RK_Min,        ///< Minimum reduction data.
5458     RK_UMin,       ///< Unsigned minimum reduction data.
5459     RK_Max,        ///< Maximum reduction data.
5460     RK_UMax,       ///< Unsigned maximum reduction data.
5461   };
5462 
5463   /// Contains info about an operation, such as its opcode and left and right operands.
5464   class OperationData {
5465     /// Opcode of the instruction.
5466     unsigned Opcode = 0;
5467 
5468     /// Left operand of the reduction operation.
5469     Value *LHS = nullptr;
5470 
5471     /// Right operand of the reduction operation.
5472     Value *RHS = nullptr;
5473 
5474     /// Kind of the reduction operation.
5475     ReductionKind Kind = RK_None;
5476 
5477     /// True if a floating-point min/max reduction has no NaNs.
5478     bool NoNaN = false;
5479 
5480     /// Checks if the reduction operation can be vectorized.
5481     bool isVectorizable() const {
5482       return LHS && RHS &&
5483              // We currently only support add/mul/logical and min/max reductions.
5484              ((Kind == RK_Arithmetic &&
5485                (Opcode == Instruction::Add || Opcode == Instruction::FAdd ||
5486                 Opcode == Instruction::Mul || Opcode == Instruction::FMul ||
5487                 Opcode == Instruction::And || Opcode == Instruction::Or ||
5488                 Opcode == Instruction::Xor)) ||
5489               ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
5490                (Kind == RK_Min || Kind == RK_Max)) ||
5491               (Opcode == Instruction::ICmp &&
5492                (Kind == RK_UMin || Kind == RK_UMax)));
5493     }
5494 
5495     /// Creates reduction operation with the current opcode.
5496     Value *createOp(IRBuilder<> &Builder, const Twine &Name) const {
5497       assert(isVectorizable() &&
5498              "Expected add|fadd or min/max reduction operation.");
5499       Value *Cmp;
5500       switch (Kind) {
5501       case RK_Arithmetic:
5502         return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, LHS, RHS,
5503                                    Name);
5504       case RK_Min:
5505         Cmp = Opcode == Instruction::ICmp ? Builder.CreateICmpSLT(LHS, RHS)
5506                                           : Builder.CreateFCmpOLT(LHS, RHS);
5507         break;
5508       case RK_Max:
5509         Cmp = Opcode == Instruction::ICmp ? Builder.CreateICmpSGT(LHS, RHS)
5510                                           : Builder.CreateFCmpOGT(LHS, RHS);
5511         break;
5512       case RK_UMin:
5513         assert(Opcode == Instruction::ICmp && "Expected integer types.");
5514         Cmp = Builder.CreateICmpULT(LHS, RHS);
5515         break;
5516       case RK_UMax:
5517         assert(Opcode == Instruction::ICmp && "Expected integer types.");
5518         Cmp = Builder.CreateICmpUGT(LHS, RHS);
5519         break;
5520       case RK_None:
5521         llvm_unreachable("Unknown reduction operation.");
5522       }
5523       return Builder.CreateSelect(Cmp, LHS, RHS, Name);
5524     }
5525 
5526   public:
5527     explicit OperationData() = default;
5528 
5529     /// Constructor for reduced values. They are identified by opcode only and
5530     /// don't have associated LHS/RHS values.
5531     explicit OperationData(Value *V) {
5532       if (auto *I = dyn_cast<Instruction>(V))
5533         Opcode = I->getOpcode();
5534     }
5535 
5536     /// Constructor for reduction operations with opcode and its left and
5537     /// right operands.
5538     OperationData(unsigned Opcode, Value *LHS, Value *RHS, ReductionKind Kind,
5539                   bool NoNaN = false)
5540         : Opcode(Opcode), LHS(LHS), RHS(RHS), Kind(Kind), NoNaN(NoNaN) {
5541       assert(Kind != RK_None && "One of the reduction operations is expected.");
5542     }
5543 
5544     explicit operator bool() const { return Opcode; }
5545 
5546     /// Get the index of the first operand.
5547     unsigned getFirstOperandIndex() const {
5548       assert(!!*this && "The opcode is not set.");
5549       switch (Kind) {
5550       case RK_Min:
5551       case RK_UMin:
5552       case RK_Max:
5553       case RK_UMax:
5554         return 1;
5555       case RK_Arithmetic:
5556       case RK_None:
5557         break;
5558       }
5559       return 0;
5560     }
5561 
5562     /// Total number of operands in the reduction operation.
5563 unsigned getNumberOfOperands() const { 5564 assert(Kind != RK_None && !!*this && LHS && RHS && 5565 "Expected reduction operation."); 5566 switch (Kind) { 5567 case RK_Arithmetic: 5568 return 2; 5569 case RK_Min: 5570 case RK_UMin: 5571 case RK_Max: 5572 case RK_UMax: 5573 return 3; 5574 case RK_None: 5575 break; 5576 } 5577 llvm_unreachable("Reduction kind is not set"); 5578 } 5579 5580 /// Checks if the operation has the same parent as \p P. 5581 bool hasSameParent(Instruction *I, Value *P, bool IsRedOp) const { 5582 assert(Kind != RK_None && !!*this && LHS && RHS && 5583 "Expected reduction operation."); 5584 if (!IsRedOp) 5585 return I->getParent() == P; 5586 switch (Kind) { 5587 case RK_Arithmetic: 5588 // Arithmetic reduction operation must be used once only. 5589 return I->getParent() == P; 5590 case RK_Min: 5591 case RK_UMin: 5592 case RK_Max: 5593 case RK_UMax: { 5594 // SelectInst must be used twice while the condition op must have single 5595 // use only. 5596 auto *Cmp = cast<Instruction>(cast<SelectInst>(I)->getCondition()); 5597 return I->getParent() == P && Cmp && Cmp->getParent() == P; 5598 } 5599 case RK_None: 5600 break; 5601 } 5602 llvm_unreachable("Reduction kind is not set"); 5603 } 5604 /// Expected number of uses for reduction operations/reduced values. 5605 bool hasRequiredNumberOfUses(Instruction *I, bool IsReductionOp) const { 5606 assert(Kind != RK_None && !!*this && LHS && RHS && 5607 "Expected reduction operation."); 5608 switch (Kind) { 5609 case RK_Arithmetic: 5610 return I->hasOneUse(); 5611 case RK_Min: 5612 case RK_UMin: 5613 case RK_Max: 5614 case RK_UMax: 5615 return I->hasNUses(2) && 5616 (!IsReductionOp || 5617 cast<SelectInst>(I)->getCondition()->hasOneUse()); 5618 case RK_None: 5619 break; 5620 } 5621 llvm_unreachable("Reduction kind is not set"); 5622 } 5623 5624 /// Initializes the list of reduction operations. 5625 void initReductionOps(ReductionOpsListType &ReductionOps) { 5626 assert(Kind != RK_None && !!*this && LHS && RHS && 5627 "Expected reduction operation."); 5628 switch (Kind) { 5629 case RK_Arithmetic: 5630 ReductionOps.assign(1, ReductionOpsType()); 5631 break; 5632 case RK_Min: 5633 case RK_UMin: 5634 case RK_Max: 5635 case RK_UMax: 5636 ReductionOps.assign(2, ReductionOpsType()); 5637 break; 5638 case RK_None: 5639 llvm_unreachable("Reduction kind is not set"); 5640 } 5641 } 5642 /// Add all reduction operations for the reduction instruction \p I. 5643 void addReductionOps(Instruction *I, ReductionOpsListType &ReductionOps) { 5644 assert(Kind != RK_None && !!*this && LHS && RHS && 5645 "Expected reduction operation."); 5646 switch (Kind) { 5647 case RK_Arithmetic: 5648 ReductionOps[0].emplace_back(I); 5649 break; 5650 case RK_Min: 5651 case RK_UMin: 5652 case RK_Max: 5653 case RK_UMax: 5654 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition()); 5655 ReductionOps[1].emplace_back(I); 5656 break; 5657 case RK_None: 5658 llvm_unreachable("Reduction kind is not set"); 5659 } 5660 } 5661 5662 /// Checks if instruction is associative and can be vectorized. 
5663 bool isAssociative(Instruction *I) const { 5664 assert(Kind != RK_None && *this && LHS && RHS && 5665 "Expected reduction operation."); 5666 switch (Kind) { 5667 case RK_Arithmetic: 5668 return I->isAssociative(); 5669 case RK_Min: 5670 case RK_Max: 5671 return Opcode == Instruction::ICmp || 5672 cast<Instruction>(I->getOperand(0))->isFast(); 5673 case RK_UMin: 5674 case RK_UMax: 5675 assert(Opcode == Instruction::ICmp && 5676 "Only integer compare operation is expected."); 5677 return true; 5678 case RK_None: 5679 break; 5680 } 5681 llvm_unreachable("Reduction kind is not set"); 5682 } 5683 5684 /// Checks if the reduction operation can be vectorized. 5685 bool isVectorizable(Instruction *I) const { 5686 return isVectorizable() && isAssociative(I); 5687 } 5688 5689 /// Checks if two operation data are both a reduction op or both a reduced 5690 /// value. 5691 bool operator==(const OperationData &OD) { 5692 assert(((Kind != OD.Kind) || ((!LHS == !OD.LHS) && (!RHS == !OD.RHS))) && 5693 "One of the comparing operations is incorrect."); 5694 return this == &OD || (Kind == OD.Kind && Opcode == OD.Opcode); 5695 } 5696 bool operator!=(const OperationData &OD) { return !(*this == OD); } 5697 void clear() { 5698 Opcode = 0; 5699 LHS = nullptr; 5700 RHS = nullptr; 5701 Kind = RK_None; 5702 NoNaN = false; 5703 } 5704 5705 /// Get the opcode of the reduction operation. 5706 unsigned getOpcode() const { 5707 assert(isVectorizable() && "Expected vectorizable operation."); 5708 return Opcode; 5709 } 5710 5711 /// Get kind of reduction data. 5712 ReductionKind getKind() const { return Kind; } 5713 Value *getLHS() const { return LHS; } 5714 Value *getRHS() const { return RHS; } 5715 Type *getConditionType() const { 5716 switch (Kind) { 5717 case RK_Arithmetic: 5718 return nullptr; 5719 case RK_Min: 5720 case RK_Max: 5721 case RK_UMin: 5722 case RK_UMax: 5723 return CmpInst::makeCmpResultType(LHS->getType()); 5724 case RK_None: 5725 break; 5726 } 5727 llvm_unreachable("Reduction kind is not set"); 5728 } 5729 5730 /// Creates reduction operation with the current opcode with the IR flags 5731 /// from \p ReductionOps. 5732 Value *createOp(IRBuilder<> &Builder, const Twine &Name, 5733 const ReductionOpsListType &ReductionOps) const { 5734 assert(isVectorizable() && 5735 "Expected add|fadd or min/max reduction operation."); 5736 auto *Op = createOp(Builder, Name); 5737 switch (Kind) { 5738 case RK_Arithmetic: 5739 propagateIRFlags(Op, ReductionOps[0]); 5740 return Op; 5741 case RK_Min: 5742 case RK_Max: 5743 case RK_UMin: 5744 case RK_UMax: 5745 if (auto *SI = dyn_cast<SelectInst>(Op)) 5746 propagateIRFlags(SI->getCondition(), ReductionOps[0]); 5747 propagateIRFlags(Op, ReductionOps[1]); 5748 return Op; 5749 case RK_None: 5750 break; 5751 } 5752 llvm_unreachable("Unknown reduction operation."); 5753 } 5754 /// Creates reduction operation with the current opcode with the IR flags 5755 /// from \p I. 
5756 Value *createOp(IRBuilder<> &Builder, const Twine &Name, 5757 Instruction *I) const { 5758 assert(isVectorizable() && 5759 "Expected add|fadd or min/max reduction operation."); 5760 auto *Op = createOp(Builder, Name); 5761 switch (Kind) { 5762 case RK_Arithmetic: 5763 propagateIRFlags(Op, I); 5764 return Op; 5765 case RK_Min: 5766 case RK_Max: 5767 case RK_UMin: 5768 case RK_UMax: 5769 if (auto *SI = dyn_cast<SelectInst>(Op)) { 5770 propagateIRFlags(SI->getCondition(), 5771 cast<SelectInst>(I)->getCondition()); 5772 } 5773 propagateIRFlags(Op, I); 5774 return Op; 5775 case RK_None: 5776 break; 5777 } 5778 llvm_unreachable("Unknown reduction operation."); 5779 } 5780 5781 TargetTransformInfo::ReductionFlags getFlags() const { 5782 TargetTransformInfo::ReductionFlags Flags; 5783 Flags.NoNaN = NoNaN; 5784 switch (Kind) { 5785 case RK_Arithmetic: 5786 break; 5787 case RK_Min: 5788 Flags.IsSigned = Opcode == Instruction::ICmp; 5789 Flags.IsMaxOp = false; 5790 break; 5791 case RK_Max: 5792 Flags.IsSigned = Opcode == Instruction::ICmp; 5793 Flags.IsMaxOp = true; 5794 break; 5795 case RK_UMin: 5796 Flags.IsSigned = false; 5797 Flags.IsMaxOp = false; 5798 break; 5799 case RK_UMax: 5800 Flags.IsSigned = false; 5801 Flags.IsMaxOp = true; 5802 break; 5803 case RK_None: 5804 llvm_unreachable("Reduction kind is not set"); 5805 } 5806 return Flags; 5807 } 5808 }; 5809 5810 WeakTrackingVH ReductionRoot; 5811 5812 /// The operation data of the reduction operation. 5813 OperationData ReductionData; 5814 5815 /// The operation data of the values we perform a reduction on. 5816 OperationData ReducedValueData; 5817 5818 /// Should we model this reduction as a pairwise reduction tree or a tree that 5819 /// splits the vector in halves and adds those halves. 5820 bool IsPairwiseReduction = false; 5821 5822 /// Checks if the ParentStackElem.first should be marked as a reduction 5823 /// operation with an extra argument or as extra argument itself. 5824 void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem, 5825 Value *ExtraArg) { 5826 if (ExtraArgs.count(ParentStackElem.first)) { 5827 ExtraArgs[ParentStackElem.first] = nullptr; 5828 // We ran into something like: 5829 // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg. 5830 // The whole ParentStackElem.first should be considered as an extra value 5831 // in this case. 5832 // Do not perform analysis of remaining operands of ParentStackElem.first 5833 // instruction, this whole instruction is an extra argument. 5834 ParentStackElem.second = ParentStackElem.first->getNumOperands(); 5835 } else { 5836 // We ran into something like: 5837 // ParentStackElem.first += ... + ExtraArg + ... 5838 ExtraArgs[ParentStackElem.first] = ExtraArg; 5839 } 5840 } 5841 5842 static OperationData getOperationData(Value *V) { 5843 if (!V) 5844 return OperationData(); 5845 5846 Value *LHS; 5847 Value *RHS; 5848 if (m_BinOp(m_Value(LHS), m_Value(RHS)).match(V)) { 5849 return OperationData(cast<BinaryOperator>(V)->getOpcode(), LHS, RHS, 5850 RK_Arithmetic); 5851 } 5852 if (auto *Select = dyn_cast<SelectInst>(V)) { 5853 // Look for a min/max pattern. 
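// For illustration (a sketch, not from the original source), the canonical
// shape recognized here is a compare feeding a select over the same two
// operands, e.g. an unsigned minimum:
//
//   %cmp = icmp ult i32 %a, %b
//   %min = select i1 %cmp, i32 %a, i32 %b
//
// The m_UMin/m_SMax/... matchers below classify such compare+select idioms
// and extract the LHS/RHS operands.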
5854 if (m_UMin(m_Value(LHS), m_Value(RHS)).match(Select)) { 5855 return OperationData(Instruction::ICmp, LHS, RHS, RK_UMin); 5856 } else if (m_SMin(m_Value(LHS), m_Value(RHS)).match(Select)) { 5857 return OperationData(Instruction::ICmp, LHS, RHS, RK_Min); 5858 } else if (m_OrdFMin(m_Value(LHS), m_Value(RHS)).match(Select) || 5859 m_UnordFMin(m_Value(LHS), m_Value(RHS)).match(Select)) { 5860 return OperationData( 5861 Instruction::FCmp, LHS, RHS, RK_Min, 5862 cast<Instruction>(Select->getCondition())->hasNoNaNs()); 5863 } else if (m_UMax(m_Value(LHS), m_Value(RHS)).match(Select)) { 5864 return OperationData(Instruction::ICmp, LHS, RHS, RK_UMax); 5865 } else if (m_SMax(m_Value(LHS), m_Value(RHS)).match(Select)) { 5866 return OperationData(Instruction::ICmp, LHS, RHS, RK_Max); 5867 } else if (m_OrdFMax(m_Value(LHS), m_Value(RHS)).match(Select) || 5868 m_UnordFMax(m_Value(LHS), m_Value(RHS)).match(Select)) { 5869 return OperationData( 5870 Instruction::FCmp, LHS, RHS, RK_Max, 5871 cast<Instruction>(Select->getCondition())->hasNoNaNs()); 5872 } else { 5873 // Try harder: look for min/max pattern based on instructions producing 5874 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2). 5875 // During the intermediate stages of SLP, it's very common to have 5876 // pattern like this (since optimizeGatherSequence is run only once 5877 // at the end): 5878 // %1 = extractelement <2 x i32> %a, i32 0 5879 // %2 = extractelement <2 x i32> %a, i32 1 5880 // %cond = icmp sgt i32 %1, %2 5881 // %3 = extractelement <2 x i32> %a, i32 0 5882 // %4 = extractelement <2 x i32> %a, i32 1 5883 // %select = select i1 %cond, i32 %3, i32 %4 5884 CmpInst::Predicate Pred; 5885 Instruction *L1; 5886 Instruction *L2; 5887 5888 LHS = Select->getTrueValue(); 5889 RHS = Select->getFalseValue(); 5890 Value *Cond = Select->getCondition(); 5891 5892 // TODO: Support inverse predicates. 
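// (Descriptive note:) the three cases below cover how the select operands may
// relate to the compare operands when the extractelements were duplicated:
// the condition compares LHS itself against an instruction identical to RHS,
// an instruction identical to LHS against RHS itself, or two separate
// instructions identical to LHS and RHS, respectively.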
5893         if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) {
5894           if (!isa<ExtractElementInst>(RHS) ||
5895               !L2->isIdenticalTo(cast<Instruction>(RHS)))
5896             return OperationData(V);
5897         } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) {
5898           if (!isa<ExtractElementInst>(LHS) ||
5899               !L1->isIdenticalTo(cast<Instruction>(LHS)))
5900             return OperationData(V);
5901         } else {
5902           if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS))
5903             return OperationData(V);
5904           if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) ||
5905               !L1->isIdenticalTo(cast<Instruction>(LHS)) ||
5906               !L2->isIdenticalTo(cast<Instruction>(RHS)))
5907             return OperationData(V);
5908         }
5909         switch (Pred) {
5910         default:
5911           return OperationData(V);
5912 
5913         case CmpInst::ICMP_ULT:
5914         case CmpInst::ICMP_ULE:
5915           return OperationData(Instruction::ICmp, LHS, RHS, RK_UMin);
5916 
5917         case CmpInst::ICMP_SLT:
5918         case CmpInst::ICMP_SLE:
5919           return OperationData(Instruction::ICmp, LHS, RHS, RK_Min);
5920 
5921         case CmpInst::FCMP_OLT:
5922         case CmpInst::FCMP_OLE:
5923         case CmpInst::FCMP_ULT:
5924         case CmpInst::FCMP_ULE:
5925           return OperationData(Instruction::FCmp, LHS, RHS, RK_Min,
5926                                cast<Instruction>(Cond)->hasNoNaNs());
5927 
5928         case CmpInst::ICMP_UGT:
5929         case CmpInst::ICMP_UGE:
5930           return OperationData(Instruction::ICmp, LHS, RHS, RK_UMax);
5931 
5932         case CmpInst::ICMP_SGT:
5933         case CmpInst::ICMP_SGE:
5934           return OperationData(Instruction::ICmp, LHS, RHS, RK_Max);
5935 
5936         case CmpInst::FCMP_OGT:
5937         case CmpInst::FCMP_OGE:
5938         case CmpInst::FCMP_UGT:
5939         case CmpInst::FCMP_UGE:
5940           return OperationData(Instruction::FCmp, LHS, RHS, RK_Max,
5941                                cast<Instruction>(Cond)->hasNoNaNs());
5942         }
5943       }
5944     }
5945     return OperationData(V);
5946   }
5947 
5948 public:
5949   HorizontalReduction() = default;
5950 
5951   /// Try to find a reduction tree.
5952   bool matchAssociativeReduction(PHINode *Phi, Instruction *B) {
5953     assert((!Phi || is_contained(Phi->operands(), B)) &&
5954            "The phi needs to use the binary operator");
5955 
5956     ReductionData = getOperationData(B);
5957 
5958     // We could have an initial reduction that is not an add.
5959     //  r *= v1 + v2 + v3 + v4
5960     // In such a case start looking for a tree rooted in the first '+'.
5961     if (Phi) {
5962       if (ReductionData.getLHS() == Phi) {
5963         Phi = nullptr;
5964         B = dyn_cast<Instruction>(ReductionData.getRHS());
5965         ReductionData = getOperationData(B);
5966       } else if (ReductionData.getRHS() == Phi) {
5967         Phi = nullptr;
5968         B = dyn_cast<Instruction>(ReductionData.getLHS());
5969         ReductionData = getOperationData(B);
5970       }
5971     }
5972 
5973     if (!ReductionData.isVectorizable(B))
5974       return false;
5975 
5976     Type *Ty = B->getType();
5977     if (!isValidElementType(Ty))
5978       return false;
5979     if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy())
5980       return false;
5981 
5982     ReducedValueData.clear();
5983     ReductionRoot = B;
5984 
5985     // Post-order traverse the reduction tree starting at B. We only handle
5986     // true trees containing only binary operators.
5987     SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
5988     Stack.push_back(std::make_pair(B, ReductionData.getFirstOperandIndex()));
5989     ReductionData.initReductionOps(ReductionOps);
5990     while (!Stack.empty()) {
5991       Instruction *TreeN = Stack.back().first;
5992       unsigned EdgeToVisit = Stack.back().second++;
5993       OperationData OpData = getOperationData(TreeN);
5994       bool IsReducedValue = OpData != ReductionData;
5995 
5996       // Post-order visit.
5997       if (IsReducedValue || EdgeToVisit == OpData.getNumberOfOperands()) {
5998         if (IsReducedValue)
5999           ReducedVals.push_back(TreeN);
6000         else {
6001           auto I = ExtraArgs.find(TreeN);
6002           if (I != ExtraArgs.end() && !I->second) {
6003             // Check if TreeN is an extra argument of its parent operation.
6004             if (Stack.size() <= 1) {
6005               // TreeN can't be an extra argument as it is a root reduction
6006               // operation.
6007               return false;
6008             }
6009             // Yes, TreeN is an extra argument, do not add it to a list of
6010             // reduction operations.
6011             // Stack[Stack.size() - 2] always points to the parent operation.
6012             markExtraArg(Stack[Stack.size() - 2], TreeN);
6013             ExtraArgs.erase(TreeN);
6014           } else
6015             ReductionData.addReductionOps(TreeN, ReductionOps);
6016         }
6017         // Retract.
6018         Stack.pop_back();
6019         continue;
6020       }
6021 
6022       // Visit left or right.
6023       Value *NextV = TreeN->getOperand(EdgeToVisit);
6024       if (NextV != Phi) {
6025         auto *I = dyn_cast<Instruction>(NextV);
6026         OpData = getOperationData(I);
6027         // Continue the analysis if the next operand is a reduction operation
6028         // or (possibly) a reduced value. If the reduced value opcode is not
6029         // set yet, the first operation encountered that differs from the
6030         // reduction operation is taken as the reduced value class.
6031         if (I && (!ReducedValueData || OpData == ReducedValueData ||
6032                   OpData == ReductionData)) {
6033           const bool IsReductionOperation = OpData == ReductionData;
6034           // Only handle trees in the current basic block.
6035           if (!ReductionData.hasSameParent(I, B->getParent(),
6036                                            IsReductionOperation)) {
6037             // I is an extra argument for TreeN (its parent operation).
6038             markExtraArg(Stack.back(), I);
6039             continue;
6040           }
6041 
6042           // Each tree node needs to have the minimal number of users except
6043           // for the ultimate reduction.
6044           if (!ReductionData.hasRequiredNumberOfUses(I,
6045                                                      OpData == ReductionData) &&
6046               I != B) {
6047             // I is an extra argument for TreeN (its parent operation).
6048             markExtraArg(Stack.back(), I);
6049             continue;
6050           }
6051 
6052           if (IsReductionOperation) {
6053             // We need to be able to reassociate the reduction operations.
6054             if (!OpData.isAssociative(I)) {
6055               // I is an extra argument for TreeN (its parent operation).
6056               markExtraArg(Stack.back(), I);
6057               continue;
6058             }
6059           } else if (ReducedValueData &&
6060                      ReducedValueData != OpData) {
6061             // Make sure that the opcodes of the operations that we are going to
6062             // reduce match.
6063             // I is an extra argument for TreeN (its parent operation).
6064             markExtraArg(Stack.back(), I);
6065             continue;
6066           } else if (!ReducedValueData)
6067             ReducedValueData = OpData;
6068 
6069           Stack.push_back(std::make_pair(I, OpData.getFirstOperandIndex()));
6070           continue;
6071         }
6072       }
6073       // NextV is an extra argument for TreeN (its parent operation).
6074       markExtraArg(Stack.back(), NextV);
6075     }
6076     return true;
6077   }
6078 
6079   /// Attempt to vectorize the tree found by
6080   /// matchAssociativeReduction.
6081   bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
6082     if (ReducedVals.empty())
6083       return false;
6084 
6085     // If there is a sufficient number of reduction values, reduce
6086     // to a nearby power-of-2. We can safely generate oversized
6087     // vectors and rely on the backend to split them to legal sizes.
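// A hedged worked example: with 7 reduced values, ReduxWidth starts at
// PowerOf2Floor(7) == 4, so one 4-wide reduction is emitted; the width then
// drops to PowerOf2Floor(3) == 2, the main loop stops (it requires
// ReduxWidth > 2), and the remaining three values are folded into the result
// one at a time by the scalar finish-up loop below.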
6088     unsigned NumReducedVals = ReducedVals.size();
6089     if (NumReducedVals < 4)
6090       return false;
6091 
6092     unsigned ReduxWidth = PowerOf2Floor(NumReducedVals);
6093 
6094     Value *VectorizedTree = nullptr;
6095     IRBuilder<> Builder(cast<Instruction>(ReductionRoot));
6096     FastMathFlags Unsafe;
6097     Unsafe.setFast();
6098     Builder.setFastMathFlags(Unsafe);
6099     unsigned i = 0;
6100 
6101     BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
6102     // The same extra argument may be used several times, so log each attempt
6103     // to use it.
6104     for (auto &Pair : ExtraArgs) {
6105       assert(Pair.first && "DebugLoc must be set.");
6106       ExternallyUsedValues[Pair.second].push_back(Pair.first);
6107     }
6108     // The reduction root is used as the insertion point for new instructions,
6109     // so set it as externally used to prevent it from being deleted.
6110     ExternallyUsedValues[ReductionRoot];
6111     SmallVector<Value *, 16> IgnoreList;
6112     for (auto &V : ReductionOps)
6113       IgnoreList.append(V.begin(), V.end());
6114     while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) {
6115       auto VL = makeArrayRef(&ReducedVals[i], ReduxWidth);
6116       V.buildTree(VL, ExternallyUsedValues, IgnoreList);
6117       Optional<ArrayRef<unsigned>> Order = V.bestOrder();
6118       // TODO: Handle orders of size less than number of elements in the vector.
6119       if (Order && Order->size() == VL.size()) {
6120         // TODO: reorder tree nodes without tree rebuilding.
6121         SmallVector<Value *, 4> ReorderedOps(VL.size());
6122         llvm::transform(*Order, ReorderedOps.begin(),
6123                         [VL](const unsigned Idx) { return VL[Idx]; });
6124         V.buildTree(ReorderedOps, ExternallyUsedValues, IgnoreList);
6125       }
6126       if (V.isTreeTinyAndNotFullyVectorizable())
6127         break;
6128 
6129       V.computeMinimumValueSizes();
6130 
6131       // Estimate cost.
6132       int TreeCost = V.getTreeCost();
6133       int ReductionCost = getReductionCost(TTI, ReducedVals[i], ReduxWidth);
6134       int Cost = TreeCost + ReductionCost;
6135       if (Cost >= -SLPCostThreshold) {
6136         V.getORE()->emit([&]() {
6137           return OptimizationRemarkMissed(
6138                      SV_NAME, "HorSLPNotBeneficial", cast<Instruction>(VL[0]))
6139                  << "Vectorizing horizontal reduction is possible "
6140                  << "but not beneficial with cost "
6141                  << ore::NV("Cost", Cost) << " and threshold "
6142                  << ore::NV("Threshold", -SLPCostThreshold);
6143         });
6144         break;
6145       }
6146 
6147       LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
6148                         << Cost << ". (HorRdx)\n");
6149       V.getORE()->emit([&]() {
6150         return OptimizationRemark(
6151                    SV_NAME, "VectorizedHorizontalReduction", cast<Instruction>(VL[0]))
6152                << "Vectorized horizontal reduction with cost "
6153                << ore::NV("Cost", Cost) << " and with tree size "
6154                << ore::NV("TreeSize", V.getTreeSize());
6155       });
6156 
6157       // Vectorize a tree.
6158       DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
6159       Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);
6160 
6161       // Emit a reduction.
6162 Builder.SetInsertPoint(cast<Instruction>(ReductionRoot)); 6163 Value *ReducedSubTree = 6164 emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI); 6165 if (VectorizedTree) { 6166 Builder.SetCurrentDebugLocation(Loc); 6167 OperationData VectReductionData(ReductionData.getOpcode(), 6168 VectorizedTree, ReducedSubTree, 6169 ReductionData.getKind()); 6170 VectorizedTree = 6171 VectReductionData.createOp(Builder, "op.rdx", ReductionOps); 6172 } else 6173 VectorizedTree = ReducedSubTree; 6174 i += ReduxWidth; 6175 ReduxWidth = PowerOf2Floor(NumReducedVals - i); 6176 } 6177 6178 if (VectorizedTree) { 6179 // Finish the reduction. 6180 for (; i < NumReducedVals; ++i) { 6181 auto *I = cast<Instruction>(ReducedVals[i]); 6182 Builder.SetCurrentDebugLocation(I->getDebugLoc()); 6183 OperationData VectReductionData(ReductionData.getOpcode(), 6184 VectorizedTree, I, 6185 ReductionData.getKind()); 6186 VectorizedTree = VectReductionData.createOp(Builder, "", ReductionOps); 6187 } 6188 for (auto &Pair : ExternallyUsedValues) { 6189 // Add each externally used value to the final reduction. 6190 for (auto *I : Pair.second) { 6191 Builder.SetCurrentDebugLocation(I->getDebugLoc()); 6192 OperationData VectReductionData(ReductionData.getOpcode(), 6193 VectorizedTree, Pair.first, 6194 ReductionData.getKind()); 6195 VectorizedTree = VectReductionData.createOp(Builder, "op.extra", I); 6196 } 6197 } 6198 // Update users. 6199 ReductionRoot->replaceAllUsesWith(VectorizedTree); 6200 } 6201 return VectorizedTree != nullptr; 6202 } 6203 6204 unsigned numReductionValues() const { 6205 return ReducedVals.size(); 6206 } 6207 6208 private: 6209 /// Calculate the cost of a reduction. 6210 int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal, 6211 unsigned ReduxWidth) { 6212 Type *ScalarTy = FirstReducedVal->getType(); 6213 Type *VecTy = VectorType::get(ScalarTy, ReduxWidth); 6214 6215 int PairwiseRdxCost; 6216 int SplittingRdxCost; 6217 switch (ReductionData.getKind()) { 6218 case RK_Arithmetic: 6219 PairwiseRdxCost = 6220 TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy, 6221 /*IsPairwiseForm=*/true); 6222 SplittingRdxCost = 6223 TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy, 6224 /*IsPairwiseForm=*/false); 6225 break; 6226 case RK_Min: 6227 case RK_Max: 6228 case RK_UMin: 6229 case RK_UMax: { 6230 Type *VecCondTy = CmpInst::makeCmpResultType(VecTy); 6231 bool IsUnsigned = ReductionData.getKind() == RK_UMin || 6232 ReductionData.getKind() == RK_UMax; 6233 PairwiseRdxCost = 6234 TTI->getMinMaxReductionCost(VecTy, VecCondTy, 6235 /*IsPairwiseForm=*/true, IsUnsigned); 6236 SplittingRdxCost = 6237 TTI->getMinMaxReductionCost(VecTy, VecCondTy, 6238 /*IsPairwiseForm=*/false, IsUnsigned); 6239 break; 6240 } 6241 case RK_None: 6242 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 6243 } 6244 6245 IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost; 6246 int VecReduxCost = IsPairwiseReduction ? 
PairwiseRdxCost : SplittingRdxCost; 6247 6248 int ScalarReduxCost; 6249 switch (ReductionData.getKind()) { 6250 case RK_Arithmetic: 6251 ScalarReduxCost = 6252 TTI->getArithmeticInstrCost(ReductionData.getOpcode(), ScalarTy); 6253 break; 6254 case RK_Min: 6255 case RK_Max: 6256 case RK_UMin: 6257 case RK_UMax: 6258 ScalarReduxCost = 6259 TTI->getCmpSelInstrCost(ReductionData.getOpcode(), ScalarTy) + 6260 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 6261 CmpInst::makeCmpResultType(ScalarTy)); 6262 break; 6263 case RK_None: 6264 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 6265 } 6266 ScalarReduxCost *= (ReduxWidth - 1); 6267 6268 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost 6269 << " for reduction that starts with " << *FirstReducedVal 6270 << " (It is a " 6271 << (IsPairwiseReduction ? "pairwise" : "splitting") 6272 << " reduction)\n"); 6273 6274 return VecReduxCost - ScalarReduxCost; 6275 } 6276 6277 /// Emit a horizontal reduction of the vectorized value. 6278 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 6279 unsigned ReduxWidth, const TargetTransformInfo *TTI) { 6280 assert(VectorizedValue && "Need to have a vectorized tree node"); 6281 assert(isPowerOf2_32(ReduxWidth) && 6282 "We only handle power-of-two reductions for now"); 6283 6284 if (!IsPairwiseReduction) 6285 return createSimpleTargetReduction( 6286 Builder, TTI, ReductionData.getOpcode(), VectorizedValue, 6287 ReductionData.getFlags(), FastMathFlags::getFast(), 6288 ReductionOps.back()); 6289 6290 Value *TmpVec = VectorizedValue; 6291 for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) { 6292 Value *LeftMask = 6293 createRdxShuffleMask(ReduxWidth, i, true, true, Builder); 6294 Value *RightMask = 6295 createRdxShuffleMask(ReduxWidth, i, true, false, Builder); 6296 6297 Value *LeftShuf = Builder.CreateShuffleVector( 6298 TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l"); 6299 Value *RightShuf = Builder.CreateShuffleVector( 6300 TmpVec, UndefValue::get(TmpVec->getType()), (RightMask), 6301 "rdx.shuf.r"); 6302 OperationData VectReductionData(ReductionData.getOpcode(), LeftShuf, 6303 RightShuf, ReductionData.getKind()); 6304 TmpVec = VectReductionData.createOp(Builder, "op.rdx", ReductionOps); 6305 } 6306 6307 // The result is in the first element of the vector. 6308 return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 6309 } 6310 }; 6311 6312 } // end anonymous namespace 6313 6314 /// Recognize construction of vectors like 6315 /// %ra = insertelement <4 x float> undef, float %s0, i32 0 6316 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 6317 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 6318 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 6319 /// starting from the last insertelement instruction. 
    Value *TmpVec = VectorizedValue;
    for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
      Value *LeftMask =
          createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
      Value *RightMask =
          createRdxShuffleMask(ReduxWidth, i, true, false, Builder);

      Value *LeftShuf = Builder.CreateShuffleVector(
          TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l");
      Value *RightShuf = Builder.CreateShuffleVector(
          TmpVec, UndefValue::get(TmpVec->getType()), RightMask,
          "rdx.shuf.r");
      OperationData VectReductionData(ReductionData.getOpcode(), LeftShuf,
                                      RightShuf, ReductionData.getKind());
      TmpVec = VectReductionData.createOp(Builder, "op.rdx", ReductionOps);
    }

    // The result is in the first element of the vector.
    return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
  }
};

} // end anonymous namespace

/// Recognize construction of vectors like
/// %ra = insertelement <4 x float> undef, float %s0, i32 0
/// %rb = insertelement <4 x float> %ra, float %s1, i32 1
/// %rc = insertelement <4 x float> %rb, float %s2, i32 2
/// %rd = insertelement <4 x float> %rc, float %s3, i32 3
/// starting from the last insertelement instruction.
///
/// \return true if it matches.
static bool findBuildVector(InsertElementInst *LastInsertElem,
                            TargetTransformInfo *TTI,
                            SmallVectorImpl<Value *> &BuildVectorOpds,
                            int &UserCost) {
  UserCost = 0;
  Value *V = nullptr;
  do {
    if (auto *CI = dyn_cast<ConstantInt>(LastInsertElem->getOperand(2))) {
      UserCost += TTI->getVectorInstrCost(Instruction::InsertElement,
                                          LastInsertElem->getType(),
                                          CI->getZExtValue());
    }
    BuildVectorOpds.push_back(LastInsertElem->getOperand(1));
    V = LastInsertElem->getOperand(0);
    if (isa<UndefValue>(V))
      break;
    LastInsertElem = dyn_cast<InsertElementInst>(V);
    if (!LastInsertElem || !LastInsertElem->hasOneUse())
      return false;
  } while (true);
  std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end());
  return true;
}

/// Like findBuildVector, but looks for construction of an aggregate.
///
/// \return true if it matches.
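///
/// For example (an illustrative chain, mirroring the insertelement case):
///   %ra = insertvalue [4 x float] undef, float %s0, 0
///   %rb = insertvalue [4 x float] %ra, float %s1, 1
///   %rc = insertvalue [4 x float] %rb, float %s2, 2
///   %rd = insertvalue [4 x float] %rc, float %s3, 3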
static bool findBuildAggregate(InsertValueInst *IV,
                               SmallVectorImpl<Value *> &BuildVectorOpds) {
  Value *V;
  do {
    BuildVectorOpds.push_back(IV->getInsertedValueOperand());
    V = IV->getAggregateOperand();
    if (isa<UndefValue>(V))
      break;
    IV = dyn_cast<InsertValueInst>(V);
    if (!IV || !IV->hasOneUse())
      return false;
  } while (true);
  std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end());
  return true;
}

static bool PhiTypeSorterFunc(Value *V, Value *V2) {
  return V->getType() < V2->getType();
}

/// Try to get a reduction value from a phi node.
///
/// Given a phi node \p P in a block \p ParentBB, consider possible reductions
/// if they come from either \p ParentBB or a containing loop latch.
///
/// \returns A candidate reduction value if possible, or \code nullptr \endcode
/// if not possible.
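///
/// For example (illustrative IR), with \p P being %sum in
///   loop:
///     %sum = phi i32 [ 0, %entry ], [ %sum.next, %loop ]
///     ...
///     %sum.next = add i32 %sum, %x
/// the candidate returned is %sum.next.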
static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
                                BasicBlock *ParentBB, LoopInfo *LI) {
  // There are situations where the reduction value is not dominated by the
  // reduction phi. Vectorizing such cases has been reported to cause
  // miscompiles. See PR25787.
  auto DominatedReduxValue = [&](Value *R) {
    return isa<Instruction>(R) &&
           DT->dominates(P->getParent(), cast<Instruction>(R)->getParent());
  };

  Value *Rdx = nullptr;

  // Return the incoming value if it comes from the same BB as the phi node.
  if (P->getIncomingBlock(0) == ParentBB) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == ParentBB) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  // Otherwise, check whether we have a loop latch to look at.
  Loop *BBL = LI->getLoopFor(ParentBB);
  if (!BBL)
    return nullptr;
  BasicBlock *BBLatch = BBL->getLoopLatch();
  if (!BBLatch)
    return nullptr;

  // There is a loop latch, return the incoming value if it comes from
  // that. This reduction pattern occasionally turns up.
  if (P->getIncomingBlock(0) == BBLatch) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == BBLatch) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  return nullptr;
}

/// Attempt to reduce a horizontal reduction.
/// If it is legal to match a horizontal reduction feeding the phi node \a P
/// with reduction operators \a Root (or one of its operands) in a basic block
/// \a BB, then check if it can be done. If a horizontal reduction is not found
/// and the root instruction is a binary operation, vectorization of its
/// operands is attempted.
/// \returns true if a horizontal reduction was matched and reduced, or if
/// operands of one of the binary instructions were vectorized.
/// \returns false if a horizontal reduction was not matched (or not possible),
/// and no vectorization of any binary operation feeding \a Root was performed.
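///
/// For example (illustrative): if \a Root is the add in
///   %s = add i32 %mul1, %mul2
/// and no reduction is matched, vectorization of the add itself is attempted
/// first, and then %mul1 and %mul2 are each considered in turn as a possible
/// root of a horizontal reduction.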
static bool tryToVectorizeHorReductionOrInstOperands(
    PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
    TargetTransformInfo *TTI,
    const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
  if (!ShouldVectorizeHor)
    return false;

  if (!Root)
    return false;

  if (Root->getParent() != BB || isa<PHINode>(Root))
    return false;
  // Start analysis starting from Root instruction. If horizontal reduction is
  // found, try to vectorize it. If it is not a horizontal reduction or
  // vectorization is not possible or not effective, and the currently analyzed
  // instruction is a binary operation, try to vectorize the operands, using
  // pre-order DFS traversal order. If the operands were not vectorized, repeat
  // the same procedure considering each operand as a possible root of the
  // horizontal reduction.
  // Interrupt the process if the Root instruction itself was vectorized or all
  // sub-trees no deeper than RecursionMaxDepth were analyzed/vectorized.
  SmallVector<std::pair<WeakTrackingVH, unsigned>, 8> Stack(1, {Root, 0});
  SmallPtrSet<Value *, 8> VisitedInstrs;
  bool Res = false;
  while (!Stack.empty()) {
    Value *V;
    unsigned Level;
    std::tie(V, Level) = Stack.pop_back_val();
    if (!V)
      continue;
    auto *Inst = dyn_cast<Instruction>(V);
    if (!Inst)
      continue;
    auto *BI = dyn_cast<BinaryOperator>(Inst);
    auto *SI = dyn_cast<SelectInst>(Inst);
    if (BI || SI) {
      HorizontalReduction HorRdx;
      if (HorRdx.matchAssociativeReduction(P, Inst)) {
        if (HorRdx.tryToReduce(R, TTI)) {
          Res = true;
          // Set P to nullptr to avoid re-analysis of phi node in
          // matchAssociativeReduction function unless this is the root node.
          P = nullptr;
          continue;
        }
      }
      if (P && BI) {
        Inst = dyn_cast<Instruction>(BI->getOperand(0));
        if (Inst == P)
          Inst = dyn_cast<Instruction>(BI->getOperand(1));
        if (!Inst) {
          // Set P to nullptr to avoid re-analysis of phi node in
          // matchAssociativeReduction function unless this is the root node.
          P = nullptr;
          continue;
        }
      }
    }
    // Set P to nullptr to avoid re-analysis of phi node in
    // matchAssociativeReduction function unless this is the root node.
    P = nullptr;
    if (Vectorize(Inst, R)) {
      Res = true;
      continue;
    }

    // Try to vectorize operands.
    // Continue analysis for the instruction from the same basic block only to
    // save compile time.
    if (++Level < RecursionMaxDepth)
      for (auto *Op : Inst->operand_values())
        if (VisitedInstrs.insert(Op).second)
          if (auto *I = dyn_cast<Instruction>(Op))
            if (!isa<PHINode>(I) && I->getParent() == BB)
              Stack.emplace_back(Op, Level);
  }
  return Res;
}

bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
                                                 BasicBlock *BB, BoUpSLP &R,
                                                 TargetTransformInfo *TTI) {
  if (!V)
    return false;
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  if (!isa<BinaryOperator>(I))
    P = nullptr;
  // Try to match and vectorize a horizontal reduction.
  auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool {
    return tryToVectorize(I, R);
  };
  return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI,
                                                  ExtraVectorization);
}

bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
                                                 BasicBlock *BB, BoUpSLP &R) {
  const DataLayout &DL = BB->getModule()->getDataLayout();
  if (!R.canMapToVector(IVI->getType(), DL))
    return false;

  SmallVector<Value *, 16> BuildVectorOpds;
  if (!findBuildAggregate(IVI, BuildVectorOpds))
    return false;

  LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
  // An aggregate value is unlikely to be processed in a vector register; its
  // scalars have to be extracted into scalar registers anyway.
  return tryToVectorizeList(BuildVectorOpds, R);
}

bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
                                                   BasicBlock *BB, BoUpSLP &R) {
  int UserCost;
  SmallVector<Value *, 16> BuildVectorOpds;
  if (!findBuildVector(IEI, TTI, BuildVectorOpds, UserCost) ||
      (llvm::all_of(BuildVectorOpds,
                    [](Value *V) { return isa<ExtractElementInst>(V); }) &&
       isShuffle(BuildVectorOpds)))
    return false;

  // Vectorize starting with the build vector operands, ignoring the
  // BuildVector instructions for the purpose of scheduling and user
  // extraction.
  return tryToVectorizeList(BuildVectorOpds, R, UserCost);
}

bool SLPVectorizerPass::vectorizeCmpInst(CmpInst *CI, BasicBlock *BB,
                                         BoUpSLP &R) {
  if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R))
    return true;

  bool OpsChanged = false;
  for (int Idx = 0; Idx < 2; ++Idx) {
    OpsChanged |=
        vectorizeRootInstruction(nullptr, CI->getOperand(Idx), BB, R, TTI);
  }
  return OpsChanged;
}

bool SLPVectorizerPass::vectorizeSimpleInstructions(
    SmallVectorImpl<WeakVH> &Instructions, BasicBlock *BB, BoUpSLP &R) {
  bool OpsChanged = false;
  for (auto &VH : reverse(Instructions)) {
    auto *I = dyn_cast_or_null<Instruction>(VH);
    if (!I)
      continue;
    if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I))
      OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
    else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I))
      OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
    else if (auto *CI = dyn_cast<CmpInst>(I))
      OpsChanged |= vectorizeCmpInst(CI, BB, R);
  }
  Instructions.clear();
  return OpsChanged;
}

bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallPtrSet<Value *, 16> VisitedInstrs;

  bool HaveVectorizedPhiNodes = true;
  while (HaveVectorizedPhiNodes) {
    HaveVectorizedPhiNodes = false;

    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (Instruction &I : *BB) {
      PHINode *P = dyn_cast<PHINode>(&I);
      if (!P)
        break;

      if (!VisitedInstrs.count(P))
        Incoming.push_back(P);
    }

    // Sort by type.
    llvm::stable_sort(Incoming, PhiTypeSorterFunc);
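
    // After the stable sort, phis of the same type are adjacent; e.g.
    // (illustrative) {float %a, i32 %x, float %b} may become
    // {float %a, float %b, i32 %x}, so the loop below can try the two float
    // phis as one bundle and skip the lone i32 phi.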

    // Try to vectorize elements based on their type.
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
                                           E = Incoming.end();
         IncIt != E;) {

      // Look for the next elements with the same type.
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs ("
                        << NumElts << ")\n");
      // The order in which the phi nodes appear in the program does not
      // matter, so allow tryToVectorizeList to reorder them if it is
      // beneficial. This is done when there are exactly two elements, since
      // tryToVectorizeList asserts that there are only two values when
      // AllowReorder is true.
      bool AllowReorder = NumElts == 2;
      if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R,
                                            /*UserCost=*/0, AllowReorder)) {
        // Success: start over, because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  SmallVector<WeakVH, 8> PostProcessInstructions;
  SmallDenseSet<Instruction *, 4> KeyNodes;
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // We may go through BB multiple times, so skip instructions we have
    // already checked.
    if (!VisitedInstrs.insert(&*it).second) {
      if (it->use_empty() && KeyNodes.count(&*it) > 0 &&
          vectorizeSimpleInstructions(PostProcessInstructions, BB, R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;

      // Try to match and vectorize a horizontal reduction.
      if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
                                   TTI)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
      continue;
    }

    // We ran into an instruction without users, such as a terminator, a
    // store, or a function call whose return value is ignored. Whether it is
    // unused is decided based on the instruction type; CallInst and
    // InvokeInst may have a non-void type and still be unused.
    if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
                            isa<InvokeInst>(it))) {
      KeyNodes.insert(&*it);
      bool OpsChanged = false;
      if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
        for (auto *V : it->operand_values()) {
          // Try to match and vectorize a horizontal reduction.
          OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
        }
      }
      // Start vectorization of post-process list of instructions from the
      // top-tree instructions to try to vectorize as many instructions as
      // possible.
      OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R);
      if (OpsChanged) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
    }

    if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
        isa<InsertValueInst>(it))
      PostProcessInstructions.push_back(&*it);
  }

  return Changed;
}

bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
  auto Changed = false;
  for (auto &Entry : GEPs) {
    // If the getelementptr list has fewer than two elements, there's nothing
    // to do.
    if (Entry.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
                      << Entry.second.size() << ".\n");

    // We process the getelementptr list in chunks of 16 (like we do for
    // stores) to minimize compile-time.
    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += 16) {
      auto Len = std::min<unsigned>(BE - BI, 16);
      auto GEPList = makeArrayRef(&Entry.second[BI], Len);

      // Initialize a set of candidate getelementptrs. Note that we use a
      // SetVector here to preserve program order. If the index computations
      // are vectorizable and begin with loads, we want to minimize the chance
      // of having to reorder them later.
      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

      // Some of the candidates may have already been vectorized after we
      // initially collected them. If so, the WeakTrackingVHs will have
      // nullified the values, so remove them from the set of candidates.
      Candidates.remove(nullptr);

      // Remove from the set of candidates all pairs of getelementptrs with
      // constant differences. Such getelementptrs are likely not good
      // candidates for vectorization in a bottom-up phase since one can be
      // computed from the other. We also ensure all candidate getelementptr
      // indices are unique.
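      // For example (illustrative): for %gep1 = getelementptr i32, i32* %p,
      // i64 %i and %gep2 = getelementptr i32, i32* %p, i64 %j with
      // %j == %i + 1, the SCEV difference of the pair is the constant 1, so
      // keeping both adds no parallelism and the pair is dropped.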
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
        auto *GEPI = cast<GetElementPtrInst>(GEPList[I]);
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = cast<GetElementPtrInst>(GEPList[J]);
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPList[I]);
            Candidates.remove(GEPList[J]);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPList[J]);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;

      // Add the single, non-constant index of each candidate to the bundle. We
      // ensured the indices met these constraints when we originally collected
      // the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try to vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      //   ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e;
       ++it) {
    if (it->second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                      << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
    // TODO: The limit of 16 inhibits greater vectorization factors.
    //       For example, AVX2 supports v32i8. Increasing this limit, however,
    //       may cause a significant compile-time increase.
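    // For example, a chain of 40 collected stores is examined as three slices
    // of 16, 16, and 8 candidate stores.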
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len), R);
    }
  }
  return Changed;
}

char SLPVectorizer::ID = 0;

static const char lv_name[] = "SLP Vectorizer";

INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); }
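
// The INITIALIZE_PASS boilerplate above registers the legacy pass, so it can
// be requested as, e.g., "opt -slp-vectorizer"; the new pass manager reaches
// SLPVectorizerPass directly via "opt -passes=slp-vectorizer".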