1 //===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass implements the Bottom Up SLP vectorizer. It detects consecutive 10 // stores that can be put together into vector-stores. Next, it attempts to 11 // construct vectorizable tree using the use-def chains. If a profitable tree 12 // was found, the SLP vectorizer performs vectorization on the tree. 13 // 14 // The pass is inspired by the work described in the paper: 15 // "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks. 16 // 17 //===----------------------------------------------------------------------===// 18 19 #include "llvm/Transforms/Vectorize/SLPVectorizer.h" 20 #include "llvm/ADT/DenseMap.h" 21 #include "llvm/ADT/DenseSet.h" 22 #include "llvm/ADT/Optional.h" 23 #include "llvm/ADT/PostOrderIterator.h" 24 #include "llvm/ADT/STLExtras.h" 25 #include "llvm/ADT/SetVector.h" 26 #include "llvm/ADT/SmallBitVector.h" 27 #include "llvm/ADT/SmallPtrSet.h" 28 #include "llvm/ADT/SmallSet.h" 29 #include "llvm/ADT/SmallString.h" 30 #include "llvm/ADT/Statistic.h" 31 #include "llvm/ADT/iterator.h" 32 #include "llvm/ADT/iterator_range.h" 33 #include "llvm/Analysis/AliasAnalysis.h" 34 #include "llvm/Analysis/CodeMetrics.h" 35 #include "llvm/Analysis/DemandedBits.h" 36 #include "llvm/Analysis/GlobalsModRef.h" 37 #include "llvm/Analysis/LoopAccessAnalysis.h" 38 #include "llvm/Analysis/LoopInfo.h" 39 #include "llvm/Analysis/MemoryLocation.h" 40 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 41 #include "llvm/Analysis/ScalarEvolution.h" 42 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 43 #include "llvm/Analysis/TargetLibraryInfo.h" 44 #include "llvm/Analysis/TargetTransformInfo.h" 45 #include "llvm/Analysis/ValueTracking.h" 46 #include "llvm/Analysis/VectorUtils.h" 47 #include "llvm/Analysis/AssumptionCache.h" 48 #include "llvm/IR/Attributes.h" 49 #include "llvm/IR/BasicBlock.h" 50 #include "llvm/IR/Constant.h" 51 #include "llvm/IR/Constants.h" 52 #include "llvm/IR/DataLayout.h" 53 #include "llvm/IR/DebugLoc.h" 54 #include "llvm/IR/DerivedTypes.h" 55 #include "llvm/IR/Dominators.h" 56 #include "llvm/IR/Function.h" 57 #include "llvm/IR/IRBuilder.h" 58 #include "llvm/IR/InstrTypes.h" 59 #include "llvm/IR/Instruction.h" 60 #include "llvm/IR/Instructions.h" 61 #include "llvm/IR/IntrinsicInst.h" 62 #include "llvm/IR/Intrinsics.h" 63 #include "llvm/IR/Module.h" 64 #include "llvm/IR/NoFolder.h" 65 #include "llvm/IR/Operator.h" 66 #include "llvm/IR/PatternMatch.h" 67 #include "llvm/IR/Type.h" 68 #include "llvm/IR/Use.h" 69 #include "llvm/IR/User.h" 70 #include "llvm/IR/Value.h" 71 #include "llvm/IR/ValueHandle.h" 72 #include "llvm/IR/Verifier.h" 73 #include "llvm/InitializePasses.h" 74 #include "llvm/Pass.h" 75 #include "llvm/Support/Casting.h" 76 #include "llvm/Support/CommandLine.h" 77 #include "llvm/Support/Compiler.h" 78 #include "llvm/Support/DOTGraphTraits.h" 79 #include "llvm/Support/Debug.h" 80 #include "llvm/Support/ErrorHandling.h" 81 #include "llvm/Support/GraphWriter.h" 82 #include "llvm/Support/KnownBits.h" 83 #include "llvm/Support/MathExtras.h" 84 #include "llvm/Support/raw_ostream.h" 85 #include "llvm/Transforms/Utils/InjectTLIMappings.h" 86 #include 
"llvm/Transforms/Utils/LoopUtils.h" 87 #include "llvm/Transforms/Vectorize.h" 88 #include <algorithm> 89 #include <cassert> 90 #include <cstdint> 91 #include <iterator> 92 #include <memory> 93 #include <set> 94 #include <string> 95 #include <tuple> 96 #include <utility> 97 #include <vector> 98 99 using namespace llvm; 100 using namespace llvm::PatternMatch; 101 using namespace slpvectorizer; 102 103 #define SV_NAME "slp-vectorizer" 104 #define DEBUG_TYPE "SLP" 105 106 STATISTIC(NumVectorInstructions, "Number of vector instructions generated"); 107 108 cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden, 109 cl::desc("Run the SLP vectorization passes")); 110 111 static cl::opt<int> 112 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden, 113 cl::desc("Only vectorize if you gain more than this " 114 "number ")); 115 116 static cl::opt<bool> 117 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden, 118 cl::desc("Attempt to vectorize horizontal reductions")); 119 120 static cl::opt<bool> ShouldStartVectorizeHorAtStore( 121 "slp-vectorize-hor-store", cl::init(false), cl::Hidden, 122 cl::desc( 123 "Attempt to vectorize horizontal reductions feeding into a store")); 124 125 static cl::opt<int> 126 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden, 127 cl::desc("Attempt to vectorize for this register size in bits")); 128 129 static cl::opt<int> 130 MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden, 131 cl::desc("Maximum depth of the lookup for consecutive stores.")); 132 133 /// Limits the size of scheduling regions in a block. 134 /// It avoid long compile times for _very_ large blocks where vector 135 /// instructions are spread over a wide range. 136 /// This limit is way higher than needed by real-world functions. 137 static cl::opt<int> 138 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden, 139 cl::desc("Limit the size of the SLP scheduling region per block")); 140 141 static cl::opt<int> MinVectorRegSizeOption( 142 "slp-min-reg-size", cl::init(128), cl::Hidden, 143 cl::desc("Attempt to vectorize for this register size in bits")); 144 145 static cl::opt<unsigned> RecursionMaxDepth( 146 "slp-recursion-max-depth", cl::init(12), cl::Hidden, 147 cl::desc("Limit the recursion depth when building a vectorizable tree")); 148 149 static cl::opt<unsigned> MinTreeSize( 150 "slp-min-tree-size", cl::init(3), cl::Hidden, 151 cl::desc("Only vectorize small trees if they are fully vectorizable")); 152 153 // The maximum depth that the look-ahead score heuristic will explore. 154 // The higher this value, the higher the compilation time overhead. 155 static cl::opt<int> LookAheadMaxDepth( 156 "slp-max-look-ahead-depth", cl::init(2), cl::Hidden, 157 cl::desc("The maximum look-ahead depth for operand reordering scores")); 158 159 // The Look-ahead heuristic goes through the users of the bundle to calculate 160 // the users cost in getExternalUsesCost(). To avoid compilation time increase 161 // we limit the number of users visited to this value. 162 static cl::opt<unsigned> LookAheadUsersBudget( 163 "slp-look-ahead-users-budget", cl::init(2), cl::Hidden, 164 cl::desc("The maximum number of users to visit while visiting the " 165 "predecessors. This prevents compilation time increase.")); 166 167 static cl::opt<bool> 168 ViewSLPTree("view-slp-tree", cl::Hidden, 169 cl::desc("Display the SLP trees with Graphviz")); 170 171 // Limit the number of alias checks. 
The limit is chosen so that 172 // it has no negative effect on the llvm benchmarks. 173 static const unsigned AliasedCheckLimit = 10; 174 175 // Another limit for the alias checks: The maximum distance between load/store 176 // instructions where alias checks are done. 177 // This limit is useful for very large basic blocks. 178 static const unsigned MaxMemDepDistance = 160; 179 180 /// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling 181 /// regions to be handled. 182 static const int MinScheduleRegionSize = 16; 183 184 /// Predicate for the element types that the SLP vectorizer supports. 185 /// 186 /// The most important thing to filter here are types which are invalid in LLVM 187 /// vectors. We also filter target specific types which have absolutely no 188 /// meaningful vectorization path such as x86_fp80 and ppc_f128. This just 189 /// avoids spending time checking the cost model and realizing that they will 190 /// be inevitably scalarized. 191 static bool isValidElementType(Type *Ty) { 192 return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() && 193 !Ty->isPPC_FP128Ty(); 194 } 195 196 /// \returns true if all of the instructions in \p VL are in the same block or 197 /// false otherwise. 198 static bool allSameBlock(ArrayRef<Value *> VL) { 199 Instruction *I0 = dyn_cast<Instruction>(VL[0]); 200 if (!I0) 201 return false; 202 BasicBlock *BB = I0->getParent(); 203 for (int i = 1, e = VL.size(); i < e; i++) { 204 Instruction *I = dyn_cast<Instruction>(VL[i]); 205 if (!I) 206 return false; 207 208 if (BB != I->getParent()) 209 return false; 210 } 211 return true; 212 } 213 214 /// \returns True if all of the values in \p VL are constants (but not 215 /// globals/constant expressions). 216 static bool allConstant(ArrayRef<Value *> VL) { 217 // Constant expressions and globals can't be vectorized like normal integer/FP 218 // constants. 219 for (Value *i : VL) 220 if (!isa<Constant>(i) || isa<ConstantExpr>(i) || isa<GlobalValue>(i)) 221 return false; 222 return true; 223 } 224 225 /// \returns True if all of the values in \p VL are identical. 226 static bool isSplat(ArrayRef<Value *> VL) { 227 for (unsigned i = 1, e = VL.size(); i < e; ++i) 228 if (VL[i] != VL[0]) 229 return false; 230 return true; 231 } 232 233 /// \returns True if \p I is commutative, handles CmpInst and BinaryOperator. 234 static bool isCommutative(Instruction *I) { 235 if (auto *Cmp = dyn_cast<CmpInst>(I)) 236 return Cmp->isCommutative(); 237 if (auto *BO = dyn_cast<BinaryOperator>(I)) 238 return BO->isCommutative(); 239 // TODO: This should check for generic Instruction::isCommutative(), but 240 // we need to confirm that the caller code correctly handles Intrinsics 241 // for example (does not have 2 operands). 
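  // Conservatively treat everything else (e.g., calls and intrinsics) as
  // non-commutative until the TODO above is addressed.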
242 return false; 243 } 244 245 /// Checks if the vector of instructions can be represented as a shuffle, like: 246 /// %x0 = extractelement <4 x i8> %x, i32 0 247 /// %x3 = extractelement <4 x i8> %x, i32 3 248 /// %y1 = extractelement <4 x i8> %y, i32 1 249 /// %y2 = extractelement <4 x i8> %y, i32 2 250 /// %x0x0 = mul i8 %x0, %x0 251 /// %x3x3 = mul i8 %x3, %x3 252 /// %y1y1 = mul i8 %y1, %y1 253 /// %y2y2 = mul i8 %y2, %y2 254 /// %ins1 = insertelement <4 x i8> undef, i8 %x0x0, i32 0 255 /// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1 256 /// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2 257 /// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3 258 /// ret <4 x i8> %ins4 259 /// can be transformed into: 260 /// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5, 261 /// i32 6> 262 /// %2 = mul <4 x i8> %1, %1 263 /// ret <4 x i8> %2 264 /// We convert this initially to something like: 265 /// %x0 = extractelement <4 x i8> %x, i32 0 266 /// %x3 = extractelement <4 x i8> %x, i32 3 267 /// %y1 = extractelement <4 x i8> %y, i32 1 268 /// %y2 = extractelement <4 x i8> %y, i32 2 269 /// %1 = insertelement <4 x i8> undef, i8 %x0, i32 0 270 /// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1 271 /// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2 272 /// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3 273 /// %5 = mul <4 x i8> %4, %4 274 /// %6 = extractelement <4 x i8> %5, i32 0 275 /// %ins1 = insertelement <4 x i8> undef, i8 %6, i32 0 276 /// %7 = extractelement <4 x i8> %5, i32 1 277 /// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1 278 /// %8 = extractelement <4 x i8> %5, i32 2 279 /// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2 280 /// %9 = extractelement <4 x i8> %5, i32 3 281 /// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3 282 /// ret <4 x i8> %ins4 283 /// InstCombiner transforms this into a shuffle and vector mul 284 /// TODO: Can we split off and reuse the shuffle mask detection from 285 /// TargetTransformInfo::getInstructionThroughput? 286 static Optional<TargetTransformInfo::ShuffleKind> 287 isShuffle(ArrayRef<Value *> VL) { 288 auto *EI0 = cast<ExtractElementInst>(VL[0]); 289 unsigned Size = 290 cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements(); 291 Value *Vec1 = nullptr; 292 Value *Vec2 = nullptr; 293 enum ShuffleMode { Unknown, Select, Permute }; 294 ShuffleMode CommonShuffleMode = Unknown; 295 for (unsigned I = 0, E = VL.size(); I < E; ++I) { 296 auto *EI = cast<ExtractElementInst>(VL[I]); 297 auto *Vec = EI->getVectorOperand(); 298 // All vector operands must have the same number of vector elements. 299 if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size) 300 return None; 301 auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand()); 302 if (!Idx) 303 return None; 304 // Undefined behavior if Idx is negative or >= Size. 305 if (Idx->getValue().uge(Size)) 306 continue; 307 unsigned IntIdx = Idx->getValue().getZExtValue(); 308 // We can extractelement from undef vector. 309 if (isa<UndefValue>(Vec)) 310 continue; 311 // For correct shuffling we have to have at most 2 different vector operands 312 // in all extractelement instructions. 313 if (!Vec1 || Vec1 == Vec) 314 Vec1 = Vec; 315 else if (!Vec2 || Vec2 == Vec) 316 Vec2 = Vec; 317 else 318 return None; 319 if (CommonShuffleMode == Permute) 320 continue; 321 // If the extract index is not the same as the operation number, it is a 322 // permutation. 
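    // For example, extracting element 2 into result lane 0 cannot be expressed
    // as a select-style blend of the two sources and requires a lane-crossing
    // permute.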
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector,
  // otherwise we have a permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
  bool isAltShuffle() const { return getOpcode() != getAltOpcode(); }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace

/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}

/// \returns true if \p Opcode is allowed as part of the main/alternate
/// instruction for SLP vectorization.
///
/// An example of an unsupported opcode is SDIV, which can potentially cause UB
/// if the "shuffled out" lane would result in division by zero.
static bool isValidForAlternation(unsigned Opcode) {
  if (Instruction::isIntDivRem(Opcode))
    return false;

  return true;
}

/// \returns analysis of the Instructions in \p VL described in
/// InstructionsState: the Opcode with which we suppose the whole list
/// could be vectorized, even if its structure is diverse.
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex = 0) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
  // TODO - generalize to support all operators (types, calls etc.).
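  // For example, VL = {add, sub, add, sub} yields MainOp = add and AltOp = sub,
  // while a list mixing in integer division (e.g., {add, sdiv}) is rejected
  // because isValidForAlternation() forbids it, and so is any list that would
  // need more than two distinct opcodes (e.g., {add, sub, mul}).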
412 for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) { 413 unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode(); 414 if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) { 415 if (InstOpcode == Opcode || InstOpcode == AltOpcode) 416 continue; 417 if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) && 418 isValidForAlternation(Opcode)) { 419 AltOpcode = InstOpcode; 420 AltIndex = Cnt; 421 continue; 422 } 423 } else if (IsCastOp && isa<CastInst>(VL[Cnt])) { 424 Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType(); 425 Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType(); 426 if (Ty0 == Ty1) { 427 if (InstOpcode == Opcode || InstOpcode == AltOpcode) 428 continue; 429 if (Opcode == AltOpcode) { 430 assert(isValidForAlternation(Opcode) && 431 isValidForAlternation(InstOpcode) && 432 "Cast isn't safe for alternation, logic needs to be updated!"); 433 AltOpcode = InstOpcode; 434 AltIndex = Cnt; 435 continue; 436 } 437 } 438 } else if (InstOpcode == Opcode || InstOpcode == AltOpcode) 439 continue; 440 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 441 } 442 443 return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]), 444 cast<Instruction>(VL[AltIndex])); 445 } 446 447 /// \returns true if all of the values in \p VL have the same type or false 448 /// otherwise. 449 static bool allSameType(ArrayRef<Value *> VL) { 450 Type *Ty = VL[0]->getType(); 451 for (int i = 1, e = VL.size(); i < e; i++) 452 if (VL[i]->getType() != Ty) 453 return false; 454 455 return true; 456 } 457 458 /// \returns True if Extract{Value,Element} instruction extracts element Idx. 459 static Optional<unsigned> getExtractIndex(Instruction *E) { 460 unsigned Opcode = E->getOpcode(); 461 assert((Opcode == Instruction::ExtractElement || 462 Opcode == Instruction::ExtractValue) && 463 "Expected extractelement or extractvalue instruction."); 464 if (Opcode == Instruction::ExtractElement) { 465 auto *CI = dyn_cast<ConstantInt>(E->getOperand(1)); 466 if (!CI) 467 return None; 468 return CI->getZExtValue(); 469 } 470 ExtractValueInst *EI = cast<ExtractValueInst>(E); 471 if (EI->getNumIndices() != 1) 472 return None; 473 return *EI->idx_begin(); 474 } 475 476 /// \returns True if in-tree use also needs extract. This refers to 477 /// possible scalar operand in vectorized instruction. 478 static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst, 479 TargetLibraryInfo *TLI) { 480 unsigned Opcode = UserInst->getOpcode(); 481 switch (Opcode) { 482 case Instruction::Load: { 483 LoadInst *LI = cast<LoadInst>(UserInst); 484 return (LI->getPointerOperand() == Scalar); 485 } 486 case Instruction::Store: { 487 StoreInst *SI = cast<StoreInst>(UserInst); 488 return (SI->getPointerOperand() == Scalar); 489 } 490 case Instruction::Call: { 491 CallInst *CI = cast<CallInst>(UserInst); 492 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 493 for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) { 494 if (hasVectorInstrinsicScalarOpd(ID, i)) 495 return (CI->getArgOperand(i) == Scalar); 496 } 497 LLVM_FALLTHROUGH; 498 } 499 default: 500 return false; 501 } 502 } 503 504 /// \returns the AA location that is being access by the instruction. 
505 static MemoryLocation getLocation(Instruction *I, AAResults *AA) { 506 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 507 return MemoryLocation::get(SI); 508 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 509 return MemoryLocation::get(LI); 510 return MemoryLocation(); 511 } 512 513 /// \returns True if the instruction is not a volatile or atomic load/store. 514 static bool isSimple(Instruction *I) { 515 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 516 return LI->isSimple(); 517 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 518 return SI->isSimple(); 519 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) 520 return !MI->isVolatile(); 521 return true; 522 } 523 524 namespace llvm { 525 526 static void inversePermutation(ArrayRef<unsigned> Indices, 527 SmallVectorImpl<int> &Mask) { 528 Mask.clear(); 529 const unsigned E = Indices.size(); 530 Mask.resize(E, E + 1); 531 for (unsigned I = 0; I < E; ++I) 532 Mask[Indices[I]] = I; 533 } 534 535 namespace slpvectorizer { 536 537 /// Bottom Up SLP Vectorizer. 538 class BoUpSLP { 539 struct TreeEntry; 540 struct ScheduleData; 541 542 public: 543 using ValueList = SmallVector<Value *, 8>; 544 using InstrList = SmallVector<Instruction *, 16>; 545 using ValueSet = SmallPtrSet<Value *, 16>; 546 using StoreList = SmallVector<StoreInst *, 8>; 547 using ExtraValueToDebugLocsMap = 548 MapVector<Value *, SmallVector<Instruction *, 2>>; 549 using OrdersType = SmallVector<unsigned, 4>; 550 551 BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti, 552 TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li, 553 DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB, 554 const DataLayout *DL, OptimizationRemarkEmitter *ORE) 555 : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC), 556 DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) { 557 CodeMetrics::collectEphemeralValues(F, AC, EphValues); 558 // Use the vector register size specified by the target unless overridden 559 // by a command-line option. 560 // TODO: It would be better to limit the vectorization factor based on 561 // data type rather than just register size. For example, x86 AVX has 562 // 256-bit registers, but it does not support integer operations 563 // at that width (that requires AVX2). 564 if (MaxVectorRegSizeOption.getNumOccurrences()) 565 MaxVecRegSize = MaxVectorRegSizeOption; 566 else 567 MaxVecRegSize = TTI->getRegisterBitWidth(true); 568 569 if (MinVectorRegSizeOption.getNumOccurrences()) 570 MinVecRegSize = MinVectorRegSizeOption; 571 else 572 MinVecRegSize = TTI->getMinVectorRegisterBitWidth(); 573 } 574 575 /// Vectorize the tree that starts with the elements in \p VL. 576 /// Returns the vectorized root. 577 Value *vectorizeTree(); 578 579 /// Vectorize the tree but with the list of externally used values \p 580 /// ExternallyUsedValues. Values in this MapVector can be replaced but the 581 /// generated extractvalue instructions. 582 Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues); 583 584 /// \returns the cost incurred by unwanted spills and fills, caused by 585 /// holding live values over call sites. 586 int getSpillCost() const; 587 588 /// \returns the vectorization cost of the subtree that starts at \p VL. 589 /// A negative number means that this is profitable. 590 int getTreeCost(); 591 592 /// Construct a vectorizable tree that starts at \p Roots, ignoring users for 593 /// the purpose of scheduling and extraction in the \p UserIgnoreLst. 
594 void buildTree(ArrayRef<Value *> Roots, 595 ArrayRef<Value *> UserIgnoreLst = None); 596 597 /// Construct a vectorizable tree that starts at \p Roots, ignoring users for 598 /// the purpose of scheduling and extraction in the \p UserIgnoreLst taking 599 /// into account (and updating it, if required) list of externally used 600 /// values stored in \p ExternallyUsedValues. 601 void buildTree(ArrayRef<Value *> Roots, 602 ExtraValueToDebugLocsMap &ExternallyUsedValues, 603 ArrayRef<Value *> UserIgnoreLst = None); 604 605 /// Clear the internal data structures that are created by 'buildTree'. 606 void deleteTree() { 607 VectorizableTree.clear(); 608 ScalarToTreeEntry.clear(); 609 MustGather.clear(); 610 ExternalUses.clear(); 611 NumOpsWantToKeepOrder.clear(); 612 NumOpsWantToKeepOriginalOrder = 0; 613 for (auto &Iter : BlocksSchedules) { 614 BlockScheduling *BS = Iter.second.get(); 615 BS->clear(); 616 } 617 MinBWs.clear(); 618 } 619 620 unsigned getTreeSize() const { return VectorizableTree.size(); } 621 622 /// Perform LICM and CSE on the newly generated gather sequences. 623 void optimizeGatherSequence(); 624 625 /// \returns The best order of instructions for vectorization. 626 Optional<ArrayRef<unsigned>> bestOrder() const { 627 assert(llvm::all_of( 628 NumOpsWantToKeepOrder, 629 [this](const decltype(NumOpsWantToKeepOrder)::value_type &D) { 630 return D.getFirst().size() == 631 VectorizableTree[0]->Scalars.size(); 632 }) && 633 "All orders must have the same size as number of instructions in " 634 "tree node."); 635 auto I = std::max_element( 636 NumOpsWantToKeepOrder.begin(), NumOpsWantToKeepOrder.end(), 637 [](const decltype(NumOpsWantToKeepOrder)::value_type &D1, 638 const decltype(NumOpsWantToKeepOrder)::value_type &D2) { 639 return D1.second < D2.second; 640 }); 641 if (I == NumOpsWantToKeepOrder.end() || 642 I->getSecond() <= NumOpsWantToKeepOriginalOrder) 643 return None; 644 645 return makeArrayRef(I->getFirst()); 646 } 647 648 /// Builds the correct order for root instructions. 649 /// If some leaves have the same instructions to be vectorized, we may 650 /// incorrectly evaluate the best order for the root node (it is built for the 651 /// vector of instructions without repeated instructions and, thus, has less 652 /// elements than the root node). This function builds the correct order for 653 /// the root node. 654 /// For example, if the root node is \<a+b, a+c, a+d, f+e\>, then the leaves 655 /// are \<a, a, a, f\> and \<b, c, d, e\>. When we try to vectorize the first 656 /// leaf, it will be shrink to \<a, b\>. If instructions in this leaf should 657 /// be reordered, the best order will be \<1, 0\>. We need to extend this 658 /// order for the root node. For the root node this order should look like 659 /// \<3, 0, 1, 2\>. This function extends the order for the reused 660 /// instructions. 661 void findRootOrder(OrdersType &Order) { 662 // If the leaf has the same number of instructions to vectorize as the root 663 // - order must be set already. 664 unsigned RootSize = VectorizableTree[0]->Scalars.size(); 665 if (Order.size() == RootSize) 666 return; 667 SmallVector<unsigned, 4> RealOrder(Order.size()); 668 std::swap(Order, RealOrder); 669 SmallVector<int, 4> Mask; 670 inversePermutation(RealOrder, Mask); 671 for (int I = 0, E = Mask.size(); I < E; ++I) 672 Order[I] = Mask[I]; 673 // The leaf has less number of instructions - need to find the true order of 674 // the root. 675 // Scan the nodes starting from the leaf back to the root. 
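    // Continuing the example from the comment above: assuming the shrunk leaf
    // has ReuseShuffleIndices <0, 0, 0, 1> and its best order is <1, 0>, the
    // loop below extends that order to <3, 0, 1, 2> for the 4-wide root node.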
676 const TreeEntry *PNode = VectorizableTree.back().get(); 677 while (PNode) { 678 const TreeEntry &Node = *PNode; 679 PNode = Node.UserTreeIndices.back().UserTE; 680 if (Node.ReuseShuffleIndices.empty()) 681 continue; 682 // Build the order for the parent node. 683 OrdersType NewOrder(Node.ReuseShuffleIndices.size(), RootSize); 684 SmallVector<unsigned, 4> OrderCounter(Order.size(), 0); 685 // The algorithm of the order extension is: 686 // 1. Calculate the number of the same instructions for the order. 687 // 2. Calculate the index of the new order: total number of instructions 688 // with order less than the order of the current instruction + reuse 689 // number of the current instruction. 690 // 3. The new order is just the index of the instruction in the original 691 // vector of the instructions. 692 for (unsigned I : Node.ReuseShuffleIndices) 693 ++OrderCounter[Order[I]]; 694 SmallVector<unsigned, 4> CurrentCounter(Order.size(), 0); 695 for (unsigned I = 0, E = Node.ReuseShuffleIndices.size(); I < E; ++I) { 696 unsigned ReusedIdx = Node.ReuseShuffleIndices[I]; 697 unsigned OrderIdx = Order[ReusedIdx]; 698 unsigned NewIdx = 0; 699 for (unsigned J = 0; J < OrderIdx; ++J) 700 NewIdx += OrderCounter[J]; 701 NewIdx += CurrentCounter[OrderIdx]; 702 ++CurrentCounter[OrderIdx]; 703 assert(NewOrder[NewIdx] == RootSize && 704 "The order index should not be written already."); 705 NewOrder[NewIdx] = I; 706 } 707 std::swap(Order, NewOrder); 708 // If the size of the order is the same as number of instructions in the 709 // root node, no need to extend it more. 710 if (Order.size() == RootSize) 711 break; 712 } 713 assert((!PNode || Order.size() == RootSize) && 714 "Root node is expected or the size of the order must be the same as " 715 "the number of elements in the root node."); 716 assert(llvm::all_of(Order, 717 [RootSize](unsigned Val) { return Val != RootSize; }) && 718 "All indices must be initialized"); 719 } 720 721 /// \return The vector element size in bits to use when vectorizing the 722 /// expression tree ending at \p V. If V is a store, the size is the width of 723 /// the stored value. Otherwise, the size is the width of the largest loaded 724 /// value reaching V. This method is used by the vectorizer to calculate 725 /// vectorization factors. 726 unsigned getVectorElementSize(Value *V); 727 728 /// Compute the minimum type sizes required to represent the entries in a 729 /// vectorizable tree. 730 void computeMinimumValueSizes(); 731 732 // \returns maximum vector register size as set by TTI or overridden by cl::opt. 733 unsigned getMaxVecRegSize() const { 734 return MaxVecRegSize; 735 } 736 737 // \returns minimum vector register size as set by cl::opt. 738 unsigned getMinVecRegSize() const { 739 return MinVecRegSize; 740 } 741 742 /// Check if homogeneous aggregate is isomorphic to some VectorType. 743 /// Accepts homogeneous multidimensional aggregate of scalars/vectors like 744 /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> }, 745 /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on. 746 /// 747 /// \returns number of elements in vector if isomorphism exists, 0 otherwise. 748 unsigned canMapToVector(Type *T, const DataLayout &DL) const; 749 750 /// \returns True if the VectorizableTree is both tiny and not fully 751 /// vectorizable. We do not vectorize such trees. 752 bool isTreeTinyAndNotFullyVectorizable() const; 753 754 /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values 755 /// can be load combined in the backend. 
Load combining may not be allowed in 756 /// the IR optimizer, so we do not want to alter the pattern. For example, 757 /// partially transforming a scalar bswap() pattern into vector code is 758 /// effectively impossible for the backend to undo. 759 /// TODO: If load combining is allowed in the IR optimizer, this analysis 760 /// may not be necessary. 761 bool isLoadCombineReductionCandidate(unsigned ReductionOpcode) const; 762 763 /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values 764 /// can be load combined in the backend. Load combining may not be allowed in 765 /// the IR optimizer, so we do not want to alter the pattern. For example, 766 /// partially transforming a scalar bswap() pattern into vector code is 767 /// effectively impossible for the backend to undo. 768 /// TODO: If load combining is allowed in the IR optimizer, this analysis 769 /// may not be necessary. 770 bool isLoadCombineCandidate() const; 771 772 OptimizationRemarkEmitter *getORE() { return ORE; } 773 774 /// This structure holds any data we need about the edges being traversed 775 /// during buildTree_rec(). We keep track of: 776 /// (i) the user TreeEntry index, and 777 /// (ii) the index of the edge. 778 struct EdgeInfo { 779 EdgeInfo() = default; 780 EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx) 781 : UserTE(UserTE), EdgeIdx(EdgeIdx) {} 782 /// The user TreeEntry. 783 TreeEntry *UserTE = nullptr; 784 /// The operand index of the use. 785 unsigned EdgeIdx = UINT_MAX; 786 #ifndef NDEBUG 787 friend inline raw_ostream &operator<<(raw_ostream &OS, 788 const BoUpSLP::EdgeInfo &EI) { 789 EI.dump(OS); 790 return OS; 791 } 792 /// Debug print. 793 void dump(raw_ostream &OS) const { 794 OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null") 795 << " EdgeIdx:" << EdgeIdx << "}"; 796 } 797 LLVM_DUMP_METHOD void dump() const { dump(dbgs()); } 798 #endif 799 }; 800 801 /// A helper data structure to hold the operands of a vector of instructions. 802 /// This supports a fixed vector length for all operand vectors. 803 class VLOperands { 804 /// For each operand we need (i) the value, and (ii) the opcode that it 805 /// would be attached to if the expression was in a left-linearized form. 806 /// This is required to avoid illegal operand reordering. 807 /// For example: 808 /// \verbatim 809 /// 0 Op1 810 /// |/ 811 /// Op1 Op2 Linearized + Op2 812 /// \ / ----------> |/ 813 /// - - 814 /// 815 /// Op1 - Op2 (0 + Op1) - Op2 816 /// \endverbatim 817 /// 818 /// Value Op1 is attached to a '+' operation, and Op2 to a '-'. 819 /// 820 /// Another way to think of this is to track all the operations across the 821 /// path from the operand all the way to the root of the tree and to 822 /// calculate the operation that corresponds to this path. For example, the 823 /// path from Op2 to the root crosses the RHS of the '-', therefore the 824 /// corresponding operation is a '-' (which matches the one in the 825 /// linearized tree, as shown above). 826 /// 827 /// For lack of a better term, we refer to this operation as Accumulated 828 /// Path Operation (APO). 829 struct OperandData { 830 OperandData() = default; 831 OperandData(Value *V, bool APO, bool IsUsed) 832 : V(V), APO(APO), IsUsed(IsUsed) {} 833 /// The operand value. 834 Value *V = nullptr; 835 /// TreeEntries only allow a single opcode, or an alternate sequence of 836 /// them (e.g, +, -). Therefore, we can safely use a boolean value for the 837 /// APO. 
      /// It is set to 'true' if 'V' is attached to an inverse operation
      /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise
      /// (e.g., Add/Mul).
      bool APO = false;
      /// Helper data for the reordering function.
      bool IsUsed = false;
    };

    /// During operand reordering, we are trying to select the operand at lane
    /// that matches best with the operand at the neighboring lane. Our
    /// selection is based on the type of value we are looking for. For example,
    /// if the neighboring lane has a load, we need to look for a load that is
    /// accessing a consecutive address. These strategies are summarized in the
    /// 'ReorderingMode' enumerator.
    enum class ReorderingMode {
      Load,     ///< Matching loads to consecutive memory addresses
      Opcode,   ///< Matching instructions based on opcode (same or alternate)
      Constant, ///< Matching constants
      Splat,    ///< Matching the same instruction multiple times (broadcast)
      Failed,   ///< We failed to create a vectorizable group
    };

    using OperandDataVec = SmallVector<OperandData, 2>;

    /// A vector of operand vectors.
    SmallVector<OperandDataVec, 4> OpsVec;

    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;

    /// \returns the operand data at \p OpIdx and \p Lane.
    OperandData &getData(unsigned OpIdx, unsigned Lane) {
      return OpsVec[OpIdx][Lane];
    }

    /// \returns the operand data at \p OpIdx and \p Lane. Const version.
    const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
      return OpsVec[OpIdx][Lane];
    }

    /// Clears the used flag for all entries.
    void clearUsed() {
      for (unsigned OpIdx = 0, NumOperands = getNumOperands();
           OpIdx != NumOperands; ++OpIdx)
        for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
             ++Lane)
          OpsVec[OpIdx][Lane].IsUsed = false;
    }

    /// Swap the operand at \p OpIdx1 with the one at \p OpIdx2.
    void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
      std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
    }

    // The hard-coded scores listed here are not very important. When computing
    // the scores of matching one sub-tree with another, we are basically
    // counting the number of values that are matching. So even if all scores
    // are set to 1, we would still get a decent matching result.
    // However, sometimes we have to break ties. For example, we may have to
    // choose between matching loads vs matching opcodes. This is what these
    // scores are helping us with: they provide the order of preference.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 3;
    /// ExtractElementInst from same vector and consecutive indexes.
    static const int ScoreConsecutiveExtracts = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g., add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// User external to the vectorized code.
    static const int ExternalUseCost = 1;
    /// The user is internal but in a different lane.
919 static const int UserInDiffLaneCost = ExternalUseCost; 920 921 /// \returns the score of placing \p V1 and \p V2 in consecutive lanes. 922 static int getShallowScore(Value *V1, Value *V2, const DataLayout &DL, 923 ScalarEvolution &SE) { 924 auto *LI1 = dyn_cast<LoadInst>(V1); 925 auto *LI2 = dyn_cast<LoadInst>(V2); 926 if (LI1 && LI2) 927 return isConsecutiveAccess(LI1, LI2, DL, SE) 928 ? VLOperands::ScoreConsecutiveLoads 929 : VLOperands::ScoreFail; 930 931 auto *C1 = dyn_cast<Constant>(V1); 932 auto *C2 = dyn_cast<Constant>(V2); 933 if (C1 && C2) 934 return VLOperands::ScoreConstants; 935 936 // Extracts from consecutive indexes of the same vector better score as 937 // the extracts could be optimized away. 938 Value *EV; 939 ConstantInt *Ex1Idx, *Ex2Idx; 940 if (match(V1, m_ExtractElt(m_Value(EV), m_ConstantInt(Ex1Idx))) && 941 match(V2, m_ExtractElt(m_Deferred(EV), m_ConstantInt(Ex2Idx))) && 942 Ex1Idx->getZExtValue() + 1 == Ex2Idx->getZExtValue()) 943 return VLOperands::ScoreConsecutiveExtracts; 944 945 auto *I1 = dyn_cast<Instruction>(V1); 946 auto *I2 = dyn_cast<Instruction>(V2); 947 if (I1 && I2) { 948 if (I1 == I2) 949 return VLOperands::ScoreSplat; 950 InstructionsState S = getSameOpcode({I1, I2}); 951 // Note: Only consider instructions with <= 2 operands to avoid 952 // complexity explosion. 953 if (S.getOpcode() && S.MainOp->getNumOperands() <= 2) 954 return S.isAltShuffle() ? VLOperands::ScoreAltOpcodes 955 : VLOperands::ScoreSameOpcode; 956 } 957 958 if (isa<UndefValue>(V2)) 959 return VLOperands::ScoreUndef; 960 961 return VLOperands::ScoreFail; 962 } 963 964 /// Holds the values and their lane that are taking part in the look-ahead 965 /// score calculation. This is used in the external uses cost calculation. 966 SmallDenseMap<Value *, int> InLookAheadValues; 967 968 /// \Returns the additinal cost due to uses of \p LHS and \p RHS that are 969 /// either external to the vectorized code, or require shuffling. 970 int getExternalUsesCost(const std::pair<Value *, int> &LHS, 971 const std::pair<Value *, int> &RHS) { 972 int Cost = 0; 973 std::array<std::pair<Value *, int>, 2> Values = {{LHS, RHS}}; 974 for (int Idx = 0, IdxE = Values.size(); Idx != IdxE; ++Idx) { 975 Value *V = Values[Idx].first; 976 // Calculate the absolute lane, using the minimum relative lane of LHS 977 // and RHS as base and Idx as the offset. 978 int Ln = std::min(LHS.second, RHS.second) + Idx; 979 assert(Ln >= 0 && "Bad lane calculation"); 980 unsigned UsersBudget = LookAheadUsersBudget; 981 for (User *U : V->users()) { 982 if (const TreeEntry *UserTE = R.getTreeEntry(U)) { 983 // The user is in the VectorizableTree. Check if we need to insert. 984 auto It = llvm::find(UserTE->Scalars, U); 985 assert(It != UserTE->Scalars.end() && "U is in UserTE"); 986 int UserLn = std::distance(UserTE->Scalars.begin(), It); 987 assert(UserLn >= 0 && "Bad lane"); 988 if (UserLn != Ln) 989 Cost += UserInDiffLaneCost; 990 } else { 991 // Check if the user is in the look-ahead code. 992 auto It2 = InLookAheadValues.find(U); 993 if (It2 != InLookAheadValues.end()) { 994 // The user is in the look-ahead code. Check the lane. 995 if (It2->second != Ln) 996 Cost += UserInDiffLaneCost; 997 } else { 998 // The user is neither in SLP tree nor in the look-ahead code. 999 Cost += ExternalUseCost; 1000 } 1001 } 1002 // Limit the number of visited uses to cap compilation time. 
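          // The budget is controlled by the -slp-look-ahead-users-budget
          // option (2 by default).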
1003 if (--UsersBudget == 0) 1004 break; 1005 } 1006 } 1007 return Cost; 1008 } 1009 1010 /// Go through the operands of \p LHS and \p RHS recursively until \p 1011 /// MaxLevel, and return the cummulative score. For example: 1012 /// \verbatim 1013 /// A[0] B[0] A[1] B[1] C[0] D[0] B[1] A[1] 1014 /// \ / \ / \ / \ / 1015 /// + + + + 1016 /// G1 G2 G3 G4 1017 /// \endverbatim 1018 /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at 1019 /// each level recursively, accumulating the score. It starts from matching 1020 /// the additions at level 0, then moves on to the loads (level 1). The 1021 /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and 1022 /// {B[0],B[1]} match with VLOperands::ScoreConsecutiveLoads, while 1023 /// {A[0],C[0]} has a score of VLOperands::ScoreFail. 1024 /// Please note that the order of the operands does not matter, as we 1025 /// evaluate the score of all profitable combinations of operands. In 1026 /// other words the score of G1 and G4 is the same as G1 and G2. This 1027 /// heuristic is based on ideas described in: 1028 /// Look-ahead SLP: Auto-vectorization in the presence of commutative 1029 /// operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha, 1030 /// Luís F. W. Góes 1031 int getScoreAtLevelRec(const std::pair<Value *, int> &LHS, 1032 const std::pair<Value *, int> &RHS, int CurrLevel, 1033 int MaxLevel) { 1034 1035 Value *V1 = LHS.first; 1036 Value *V2 = RHS.first; 1037 // Get the shallow score of V1 and V2. 1038 int ShallowScoreAtThisLevel = 1039 std::max((int)ScoreFail, getShallowScore(V1, V2, DL, SE) - 1040 getExternalUsesCost(LHS, RHS)); 1041 int Lane1 = LHS.second; 1042 int Lane2 = RHS.second; 1043 1044 // If reached MaxLevel, 1045 // or if V1 and V2 are not instructions, 1046 // or if they are SPLAT, 1047 // or if they are not consecutive, early return the current cost. 1048 auto *I1 = dyn_cast<Instruction>(V1); 1049 auto *I2 = dyn_cast<Instruction>(V2); 1050 if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 || 1051 ShallowScoreAtThisLevel == VLOperands::ScoreFail || 1052 (isa<LoadInst>(I1) && isa<LoadInst>(I2) && ShallowScoreAtThisLevel)) 1053 return ShallowScoreAtThisLevel; 1054 assert(I1 && I2 && "Should have early exited."); 1055 1056 // Keep track of in-tree values for determining the external-use cost. 1057 InLookAheadValues[V1] = Lane1; 1058 InLookAheadValues[V2] = Lane2; 1059 1060 // Contains the I2 operand indexes that got matched with I1 operands. 1061 SmallSet<unsigned, 4> Op2Used; 1062 1063 // Recursion towards the operands of I1 and I2. We are trying all possbile 1064 // operand pairs, and keeping track of the best score. 1065 for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands(); 1066 OpIdx1 != NumOperands1; ++OpIdx1) { 1067 // Try to pair op1I with the best operand of I2. 1068 int MaxTmpScore = 0; 1069 unsigned MaxOpIdx2 = 0; 1070 bool FoundBest = false; 1071 // If I2 is commutative try all combinations. 1072 unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1; 1073 unsigned ToIdx = isCommutative(I2) 1074 ? I2->getNumOperands() 1075 : std::min(I2->getNumOperands(), OpIdx1 + 1); 1076 assert(FromIdx <= ToIdx && "Bad index"); 1077 for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) { 1078 // Skip operands already paired with OpIdx1. 
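          // This keeps the matching greedy and one-to-one: once an operand of
          // I2 has been claimed as the best match for some operand of I1, it
          // is never offered to a later operand of I1.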
1079 if (Op2Used.count(OpIdx2)) 1080 continue; 1081 // Recursively calculate the cost at each level 1082 int TmpScore = getScoreAtLevelRec({I1->getOperand(OpIdx1), Lane1}, 1083 {I2->getOperand(OpIdx2), Lane2}, 1084 CurrLevel + 1, MaxLevel); 1085 // Look for the best score. 1086 if (TmpScore > VLOperands::ScoreFail && TmpScore > MaxTmpScore) { 1087 MaxTmpScore = TmpScore; 1088 MaxOpIdx2 = OpIdx2; 1089 FoundBest = true; 1090 } 1091 } 1092 if (FoundBest) { 1093 // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it. 1094 Op2Used.insert(MaxOpIdx2); 1095 ShallowScoreAtThisLevel += MaxTmpScore; 1096 } 1097 } 1098 return ShallowScoreAtThisLevel; 1099 } 1100 1101 /// \Returns the look-ahead score, which tells us how much the sub-trees 1102 /// rooted at \p LHS and \p RHS match, the more they match the higher the 1103 /// score. This helps break ties in an informed way when we cannot decide on 1104 /// the order of the operands by just considering the immediate 1105 /// predecessors. 1106 int getLookAheadScore(const std::pair<Value *, int> &LHS, 1107 const std::pair<Value *, int> &RHS) { 1108 InLookAheadValues.clear(); 1109 return getScoreAtLevelRec(LHS, RHS, 1, LookAheadMaxDepth); 1110 } 1111 1112 // Search all operands in Ops[*][Lane] for the one that matches best 1113 // Ops[OpIdx][LastLane] and return its opreand index. 1114 // If no good match can be found, return None. 1115 Optional<unsigned> 1116 getBestOperand(unsigned OpIdx, int Lane, int LastLane, 1117 ArrayRef<ReorderingMode> ReorderingModes) { 1118 unsigned NumOperands = getNumOperands(); 1119 1120 // The operand of the previous lane at OpIdx. 1121 Value *OpLastLane = getData(OpIdx, LastLane).V; 1122 1123 // Our strategy mode for OpIdx. 1124 ReorderingMode RMode = ReorderingModes[OpIdx]; 1125 1126 // The linearized opcode of the operand at OpIdx, Lane. 1127 bool OpIdxAPO = getData(OpIdx, Lane).APO; 1128 1129 // The best operand index and its score. 1130 // Sometimes we have more than one option (e.g., Opcode and Undefs), so we 1131 // are using the score to differentiate between the two. 1132 struct BestOpData { 1133 Optional<unsigned> Idx = None; 1134 unsigned Score = 0; 1135 } BestOp; 1136 1137 // Iterate through all unused operands and look for the best. 1138 for (unsigned Idx = 0; Idx != NumOperands; ++Idx) { 1139 // Get the operand at Idx and Lane. 1140 OperandData &OpData = getData(Idx, Lane); 1141 Value *Op = OpData.V; 1142 bool OpAPO = OpData.APO; 1143 1144 // Skip already selected operands. 1145 if (OpData.IsUsed) 1146 continue; 1147 1148 // Skip if we are trying to move the operand to a position with a 1149 // different opcode in the linearized tree form. This would break the 1150 // semantics. 1151 if (OpAPO != OpIdxAPO) 1152 continue; 1153 1154 // Look for an operand that matches the current mode. 1155 switch (RMode) { 1156 case ReorderingMode::Load: 1157 case ReorderingMode::Constant: 1158 case ReorderingMode::Opcode: { 1159 bool LeftToRight = Lane > LastLane; 1160 Value *OpLeft = (LeftToRight) ? OpLastLane : Op; 1161 Value *OpRight = (LeftToRight) ? 
Op : OpLastLane; 1162 unsigned Score = 1163 getLookAheadScore({OpLeft, LastLane}, {OpRight, Lane}); 1164 if (Score > BestOp.Score) { 1165 BestOp.Idx = Idx; 1166 BestOp.Score = Score; 1167 } 1168 break; 1169 } 1170 case ReorderingMode::Splat: 1171 if (Op == OpLastLane) 1172 BestOp.Idx = Idx; 1173 break; 1174 case ReorderingMode::Failed: 1175 return None; 1176 } 1177 } 1178 1179 if (BestOp.Idx) { 1180 getData(BestOp.Idx.getValue(), Lane).IsUsed = true; 1181 return BestOp.Idx; 1182 } 1183 // If we could not find a good match return None. 1184 return None; 1185 } 1186 1187 /// Helper for reorderOperandVecs. \Returns the lane that we should start 1188 /// reordering from. This is the one which has the least number of operands 1189 /// that can freely move about. 1190 unsigned getBestLaneToStartReordering() const { 1191 unsigned BestLane = 0; 1192 unsigned Min = UINT_MAX; 1193 for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes; 1194 ++Lane) { 1195 unsigned NumFreeOps = getMaxNumOperandsThatCanBeReordered(Lane); 1196 if (NumFreeOps < Min) { 1197 Min = NumFreeOps; 1198 BestLane = Lane; 1199 } 1200 } 1201 return BestLane; 1202 } 1203 1204 /// \Returns the maximum number of operands that are allowed to be reordered 1205 /// for \p Lane. This is used as a heuristic for selecting the first lane to 1206 /// start operand reordering. 1207 unsigned getMaxNumOperandsThatCanBeReordered(unsigned Lane) const { 1208 unsigned CntTrue = 0; 1209 unsigned NumOperands = getNumOperands(); 1210 // Operands with the same APO can be reordered. We therefore need to count 1211 // how many of them we have for each APO, like this: Cnt[APO] = x. 1212 // Since we only have two APOs, namely true and false, we can avoid using 1213 // a map. Instead we can simply count the number of operands that 1214 // correspond to one of them (in this case the 'true' APO), and calculate 1215 // the other by subtracting it from the total number of operands. 1216 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) 1217 if (getData(OpIdx, Lane).APO) 1218 ++CntTrue; 1219 unsigned CntFalse = NumOperands - CntTrue; 1220 return std::max(CntTrue, CntFalse); 1221 } 1222 1223 /// Go through the instructions in VL and append their operands. 1224 void appendOperandsOfVL(ArrayRef<Value *> VL) { 1225 assert(!VL.empty() && "Bad VL"); 1226 assert((empty() || VL.size() == getNumLanes()) && 1227 "Expected same number of lanes"); 1228 assert(isa<Instruction>(VL[0]) && "Expected instruction"); 1229 unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands(); 1230 OpsVec.resize(NumOperands); 1231 unsigned NumLanes = VL.size(); 1232 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1233 OpsVec[OpIdx].resize(NumLanes); 1234 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { 1235 assert(isa<Instruction>(VL[Lane]) && "Expected instruction"); 1236 // Our tree has just 3 nodes: the root and two operands. 1237 // It is therefore trivial to get the APO. We only need to check the 1238 // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or 1239 // RHS operand. The LHS operand of both add and sub is never attached 1240 // to an inversese operation in the linearized form, therefore its APO 1241 // is false. The RHS is true only if VL[Lane] is an inverse operation. 1242 1243 // Since operand reordering is performed on groups of commutative 1244 // operations or alternating sequences (e.g., +, -), we can safely 1245 // tell the inverse operations by checking commutativity. 
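          // For example, for VL = {a + b, c - d}: operand 0 ({a, c}) gets
          // APO = false in both lanes, while operand 1 ({b, d}) gets
          // APO = {false, true}, since only the RHS of the subtraction is
          // attached to an inverse operation in the linearized form.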
1246 bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane])); 1247 bool APO = (OpIdx == 0) ? false : IsInverseOperation; 1248 OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx), 1249 APO, false}; 1250 } 1251 } 1252 } 1253 1254 /// \returns the number of operands. 1255 unsigned getNumOperands() const { return OpsVec.size(); } 1256 1257 /// \returns the number of lanes. 1258 unsigned getNumLanes() const { return OpsVec[0].size(); } 1259 1260 /// \returns the operand value at \p OpIdx and \p Lane. 1261 Value *getValue(unsigned OpIdx, unsigned Lane) const { 1262 return getData(OpIdx, Lane).V; 1263 } 1264 1265 /// \returns true if the data structure is empty. 1266 bool empty() const { return OpsVec.empty(); } 1267 1268 /// Clears the data. 1269 void clear() { OpsVec.clear(); } 1270 1271 /// \Returns true if there are enough operands identical to \p Op to fill 1272 /// the whole vector. 1273 /// Note: This modifies the 'IsUsed' flag, so a cleanUsed() must follow. 1274 bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) { 1275 bool OpAPO = getData(OpIdx, Lane).APO; 1276 for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) { 1277 if (Ln == Lane) 1278 continue; 1279 // This is set to true if we found a candidate for broadcast at Lane. 1280 bool FoundCandidate = false; 1281 for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) { 1282 OperandData &Data = getData(OpI, Ln); 1283 if (Data.APO != OpAPO || Data.IsUsed) 1284 continue; 1285 if (Data.V == Op) { 1286 FoundCandidate = true; 1287 Data.IsUsed = true; 1288 break; 1289 } 1290 } 1291 if (!FoundCandidate) 1292 return false; 1293 } 1294 return true; 1295 } 1296 1297 public: 1298 /// Initialize with all the operands of the instruction vector \p RootVL. 1299 VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL, 1300 ScalarEvolution &SE, const BoUpSLP &R) 1301 : DL(DL), SE(SE), R(R) { 1302 // Append all the operands of RootVL. 1303 appendOperandsOfVL(RootVL); 1304 } 1305 1306 /// \Returns a value vector with the operands across all lanes for the 1307 /// opearnd at \p OpIdx. 1308 ValueList getVL(unsigned OpIdx) const { 1309 ValueList OpVL(OpsVec[OpIdx].size()); 1310 assert(OpsVec[OpIdx].size() == getNumLanes() && 1311 "Expected same num of lanes across all operands"); 1312 for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane) 1313 OpVL[Lane] = OpsVec[OpIdx][Lane].V; 1314 return OpVL; 1315 } 1316 1317 // Performs operand reordering for 2 or more operands. 1318 // The original operands are in OrigOps[OpIdx][Lane]. 1319 // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'. 1320 void reorder() { 1321 unsigned NumOperands = getNumOperands(); 1322 unsigned NumLanes = getNumLanes(); 1323 // Each operand has its own mode. We are using this mode to help us select 1324 // the instructions for each lane, so that they match best with the ones 1325 // we have selected so far. 1326 SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands); 1327 1328 // This is a greedy single-pass algorithm. We are going over each lane 1329 // once and deciding on the best order right away with no back-tracking. 1330 // However, in order to increase its effectiveness, we start with the lane 1331 // that has operands that can move the least. 
For example, given the 1332 // following lanes: 1333 // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd 1334 // Lane 1 : A[1] = C[1] - B[1] // Visited 1st 1335 // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd 1336 // Lane 3 : A[3] = C[3] - B[3] // Visited 4th 1337 // we will start at Lane 1, since the operands of the subtraction cannot 1338 // be reordered. Then we will visit the rest of the lanes in a circular 1339 // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3. 1340 1341 // Find the first lane that we will start our search from. 1342 unsigned FirstLane = getBestLaneToStartReordering(); 1343 1344 // Initialize the modes. 1345 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1346 Value *OpLane0 = getValue(OpIdx, FirstLane); 1347 // Keep track if we have instructions with all the same opcode on one 1348 // side. 1349 if (isa<LoadInst>(OpLane0)) 1350 ReorderingModes[OpIdx] = ReorderingMode::Load; 1351 else if (isa<Instruction>(OpLane0)) { 1352 // Check if OpLane0 should be broadcast. 1353 if (shouldBroadcast(OpLane0, OpIdx, FirstLane)) 1354 ReorderingModes[OpIdx] = ReorderingMode::Splat; 1355 else 1356 ReorderingModes[OpIdx] = ReorderingMode::Opcode; 1357 } 1358 else if (isa<Constant>(OpLane0)) 1359 ReorderingModes[OpIdx] = ReorderingMode::Constant; 1360 else if (isa<Argument>(OpLane0)) 1361 // Our best hope is a Splat. It may save some cost in some cases. 1362 ReorderingModes[OpIdx] = ReorderingMode::Splat; 1363 else 1364 // NOTE: This should be unreachable. 1365 ReorderingModes[OpIdx] = ReorderingMode::Failed; 1366 } 1367 1368 // If the initial strategy fails for any of the operand indexes, then we 1369 // perform reordering again in a second pass. This helps avoid assigning 1370 // high priority to the failed strategy, and should improve reordering for 1371 // the non-failed operand indexes. 1372 for (int Pass = 0; Pass != 2; ++Pass) { 1373 // Skip the second pass if the first pass did not fail. 1374 bool StrategyFailed = false; 1375 // Mark all operand data as free to use. 1376 clearUsed(); 1377 // We keep the original operand order for the FirstLane, so reorder the 1378 // rest of the lanes. We are visiting the nodes in a circular fashion, 1379 // using FirstLane as the center point and increasing the radius 1380 // distance. 1381 for (unsigned Distance = 1; Distance != NumLanes; ++Distance) { 1382 // Visit the lane on the right and then the lane on the left. 1383 for (int Direction : {+1, -1}) { 1384 int Lane = FirstLane + Direction * Distance; 1385 if (Lane < 0 || Lane >= (int)NumLanes) 1386 continue; 1387 int LastLane = Lane - Direction; 1388 assert(LastLane >= 0 && LastLane < (int)NumLanes && 1389 "Out of bounds"); 1390 // Look for a good match for each operand. 1391 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1392 // Search for the operand that matches SortedOps[OpIdx][Lane-1]. 1393 Optional<unsigned> BestIdx = 1394 getBestOperand(OpIdx, Lane, LastLane, ReorderingModes); 1395 // By not selecting a value, we allow the operands that follow to 1396 // select a better matching value. We will get a non-null value in 1397 // the next run of getBestOperand(). 1398 if (BestIdx) { 1399 // Swap the current operand with the one returned by 1400 // getBestOperand(). 1401 swap(OpIdx, BestIdx.getValue(), Lane); 1402 } else { 1403 // We failed to find a best operand, set mode to 'Failed'. 1404 ReorderingModes[OpIdx] = ReorderingMode::Failed; 1405 // Enable the second pass. 
1406 StrategyFailed = true; 1407 } 1408 } 1409 } 1410 } 1411 // Skip second pass if the strategy did not fail. 1412 if (!StrategyFailed) 1413 break; 1414 } 1415 } 1416 1417 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1418 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) { 1419 switch (RMode) { 1420 case ReorderingMode::Load: 1421 return "Load"; 1422 case ReorderingMode::Opcode: 1423 return "Opcode"; 1424 case ReorderingMode::Constant: 1425 return "Constant"; 1426 case ReorderingMode::Splat: 1427 return "Splat"; 1428 case ReorderingMode::Failed: 1429 return "Failed"; 1430 } 1431 llvm_unreachable("Unimplemented Reordering Type"); 1432 } 1433 1434 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode, 1435 raw_ostream &OS) { 1436 return OS << getModeStr(RMode); 1437 } 1438 1439 /// Debug print. 1440 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) { 1441 printMode(RMode, dbgs()); 1442 } 1443 1444 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) { 1445 return printMode(RMode, OS); 1446 } 1447 1448 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const { 1449 const unsigned Indent = 2; 1450 unsigned Cnt = 0; 1451 for (const OperandDataVec &OpDataVec : OpsVec) { 1452 OS << "Operand " << Cnt++ << "\n"; 1453 for (const OperandData &OpData : OpDataVec) { 1454 OS.indent(Indent) << "{"; 1455 if (Value *V = OpData.V) 1456 OS << *V; 1457 else 1458 OS << "null"; 1459 OS << ", APO:" << OpData.APO << "}\n"; 1460 } 1461 OS << "\n"; 1462 } 1463 return OS; 1464 } 1465 1466 /// Debug print. 1467 LLVM_DUMP_METHOD void dump() const { print(dbgs()); } 1468 #endif 1469 }; 1470 1471 /// Checks if the instruction is marked for deletion. 1472 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); } 1473 1474 /// Marks values operands for later deletion by replacing them with Undefs. 1475 void eraseInstructions(ArrayRef<Value *> AV); 1476 1477 ~BoUpSLP(); 1478 1479 private: 1480 /// Checks if all users of \p I are the part of the vectorization tree. 1481 bool areAllUsersVectorized(Instruction *I) const; 1482 1483 /// \returns the cost of the vectorizable entry. 1484 int getEntryCost(TreeEntry *E); 1485 1486 /// This is the recursive part of buildTree. 1487 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, 1488 const EdgeInfo &EI); 1489 1490 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can 1491 /// be vectorized to use the original vector (or aggregate "bitcast" to a 1492 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise 1493 /// returns false, setting \p CurrentOrder to either an empty vector or a 1494 /// non-identity permutation that allows to reuse extract instructions. 1495 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 1496 SmallVectorImpl<unsigned> &CurrentOrder) const; 1497 1498 /// Vectorize a single entry in the tree. 1499 Value *vectorizeTree(TreeEntry *E); 1500 1501 /// Vectorize a single entry in the tree, starting in \p VL. 1502 Value *vectorizeTree(ArrayRef<Value *> VL); 1503 1504 /// \returns the scalarization cost for this type. Scalarization in this 1505 /// context means the creation of vectors from a group of scalars. 1506 int getGatherCost(FixedVectorType *Ty, 1507 const DenseSet<unsigned> &ShuffledIndices) const; 1508 1509 /// \returns the scalarization cost for this list of values. Assuming that 1510 /// this subtree gets vectorized, we may need to extract the values from the 1511 /// roots. 
This method calculates the cost of extracting the values. 1512 int getGatherCost(ArrayRef<Value *> VL) const; 1513 1514 /// Set the Builder insert point to one after the last instruction in 1515 /// the bundle 1516 void setInsertPointAfterBundle(TreeEntry *E); 1517 1518 /// \returns a vector from a collection of scalars in \p VL. 1519 Value *Gather(ArrayRef<Value *> VL, FixedVectorType *Ty); 1520 1521 /// \returns whether the VectorizableTree is fully vectorizable and will 1522 /// be beneficial even the tree height is tiny. 1523 bool isFullyVectorizableTinyTree() const; 1524 1525 /// Reorder commutative or alt operands to get better probability of 1526 /// generating vectorized code. 1527 static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, 1528 SmallVectorImpl<Value *> &Left, 1529 SmallVectorImpl<Value *> &Right, 1530 const DataLayout &DL, 1531 ScalarEvolution &SE, 1532 const BoUpSLP &R); 1533 struct TreeEntry { 1534 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>; 1535 TreeEntry(VecTreeTy &Container) : Container(Container) {} 1536 1537 /// \returns true if the scalars in VL are equal to this entry. 1538 bool isSame(ArrayRef<Value *> VL) const { 1539 if (VL.size() == Scalars.size()) 1540 return std::equal(VL.begin(), VL.end(), Scalars.begin()); 1541 return VL.size() == ReuseShuffleIndices.size() && 1542 std::equal( 1543 VL.begin(), VL.end(), ReuseShuffleIndices.begin(), 1544 [this](Value *V, int Idx) { return V == Scalars[Idx]; }); 1545 } 1546 1547 /// A vector of scalars. 1548 ValueList Scalars; 1549 1550 /// The Scalars are vectorized into this value. It is initialized to Null. 1551 Value *VectorizedValue = nullptr; 1552 1553 /// Do we need to gather this sequence ? 1554 enum EntryState { Vectorize, NeedToGather }; 1555 EntryState State; 1556 1557 /// Does this sequence require some shuffling? 1558 SmallVector<int, 4> ReuseShuffleIndices; 1559 1560 /// Does this entry require reordering? 1561 SmallVector<unsigned, 4> ReorderIndices; 1562 1563 /// Points back to the VectorizableTree. 1564 /// 1565 /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has 1566 /// to be a pointer and needs to be able to initialize the child iterator. 1567 /// Thus we need a reference back to the container to translate the indices 1568 /// to entries. 1569 VecTreeTy &Container; 1570 1571 /// The TreeEntry index containing the user of this entry. We can actually 1572 /// have multiple users so the data structure is not truly a tree. 1573 SmallVector<EdgeInfo, 1> UserTreeIndices; 1574 1575 /// The index of this treeEntry in VectorizableTree. 1576 int Idx = -1; 1577 1578 private: 1579 /// The operands of each instruction in each lane Operands[op_index][lane]. 1580 /// Note: This helps avoid the replication of the code that performs the 1581 /// reordering of operands during buildTree_rec() and vectorizeTree(). 1582 SmallVector<ValueList, 2> Operands; 1583 1584 /// The main/alternate instruction. 1585 Instruction *MainOp = nullptr; 1586 Instruction *AltOp = nullptr; 1587 1588 public: 1589 /// Set this bundle's \p OpIdx'th operand to \p OpVL. 1590 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) { 1591 if (Operands.size() < OpIdx + 1) 1592 Operands.resize(OpIdx + 1); 1593 assert(Operands[OpIdx].size() == 0 && "Already resized?"); 1594 Operands[OpIdx].resize(Scalars.size()); 1595 for (unsigned Lane = 0, E = Scalars.size(); Lane != E; ++Lane) 1596 Operands[OpIdx][Lane] = OpVL[Lane]; 1597 } 1598 1599 /// Set the operands of this bundle in their original order. 
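/// For example, for the two-lane bundle {A0 = X0 + Y0, A1 = X1 + Y1} this
/// fills Operands[0] = {X0, X1} and Operands[1] = {Y0, Y1}; each lane keeps
/// the operand order of its own scalar instruction.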
1600 void setOperandsInOrder() { 1601 assert(Operands.empty() && "Already initialized?"); 1602 auto *I0 = cast<Instruction>(Scalars[0]); 1603 Operands.resize(I0->getNumOperands()); 1604 unsigned NumLanes = Scalars.size(); 1605 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands(); 1606 OpIdx != NumOperands; ++OpIdx) { 1607 Operands[OpIdx].resize(NumLanes); 1608 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { 1609 auto *I = cast<Instruction>(Scalars[Lane]); 1610 assert(I->getNumOperands() == NumOperands && 1611 "Expected same number of operands"); 1612 Operands[OpIdx][Lane] = I->getOperand(OpIdx); 1613 } 1614 } 1615 } 1616 1617 /// \returns the \p OpIdx operand of this TreeEntry. 1618 ValueList &getOperand(unsigned OpIdx) { 1619 assert(OpIdx < Operands.size() && "Off bounds"); 1620 return Operands[OpIdx]; 1621 } 1622 1623 /// \returns the number of operands. 1624 unsigned getNumOperands() const { return Operands.size(); } 1625 1626 /// \return the single \p OpIdx operand. 1627 Value *getSingleOperand(unsigned OpIdx) const { 1628 assert(OpIdx < Operands.size() && "Off bounds"); 1629 assert(!Operands[OpIdx].empty() && "No operand available"); 1630 return Operands[OpIdx][0]; 1631 } 1632 1633 /// Some of the instructions in the list have alternate opcodes. 1634 bool isAltShuffle() const { 1635 return getOpcode() != getAltOpcode(); 1636 } 1637 1638 bool isOpcodeOrAlt(Instruction *I) const { 1639 unsigned CheckedOpcode = I->getOpcode(); 1640 return (getOpcode() == CheckedOpcode || 1641 getAltOpcode() == CheckedOpcode); 1642 } 1643 1644 /// Chooses the correct key for scheduling data. If \p Op has the same (or 1645 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is 1646 /// \p OpValue. 1647 Value *isOneOf(Value *Op) const { 1648 auto *I = dyn_cast<Instruction>(Op); 1649 if (I && isOpcodeOrAlt(I)) 1650 return Op; 1651 return MainOp; 1652 } 1653 1654 void setOperations(const InstructionsState &S) { 1655 MainOp = S.MainOp; 1656 AltOp = S.AltOp; 1657 } 1658 1659 Instruction *getMainOp() const { 1660 return MainOp; 1661 } 1662 1663 Instruction *getAltOp() const { 1664 return AltOp; 1665 } 1666 1667 /// The main/alternate opcodes for the list of instructions. 1668 unsigned getOpcode() const { 1669 return MainOp ? MainOp->getOpcode() : 0; 1670 } 1671 1672 unsigned getAltOpcode() const { 1673 return AltOp ? AltOp->getOpcode() : 0; 1674 } 1675 1676 /// Update operations state of this entry if reorder occurred. 1677 bool updateStateIfReorder() { 1678 if (ReorderIndices.empty()) 1679 return false; 1680 InstructionsState S = getSameOpcode(Scalars, ReorderIndices.front()); 1681 setOperations(S); 1682 return true; 1683 } 1684 1685 #ifndef NDEBUG 1686 /// Debug printer. 
1687 LLVM_DUMP_METHOD void dump() const { 1688 dbgs() << Idx << ".\n"; 1689 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) { 1690 dbgs() << "Operand " << OpI << ":\n"; 1691 for (const Value *V : Operands[OpI]) 1692 dbgs().indent(2) << *V << "\n"; 1693 } 1694 dbgs() << "Scalars: \n"; 1695 for (Value *V : Scalars) 1696 dbgs().indent(2) << *V << "\n"; 1697 dbgs() << "State: "; 1698 switch (State) { 1699 case Vectorize: 1700 dbgs() << "Vectorize\n"; 1701 break; 1702 case NeedToGather: 1703 dbgs() << "NeedToGather\n"; 1704 break; 1705 } 1706 dbgs() << "MainOp: "; 1707 if (MainOp) 1708 dbgs() << *MainOp << "\n"; 1709 else 1710 dbgs() << "NULL\n"; 1711 dbgs() << "AltOp: "; 1712 if (AltOp) 1713 dbgs() << *AltOp << "\n"; 1714 else 1715 dbgs() << "NULL\n"; 1716 dbgs() << "VectorizedValue: "; 1717 if (VectorizedValue) 1718 dbgs() << *VectorizedValue << "\n"; 1719 else 1720 dbgs() << "NULL\n"; 1721 dbgs() << "ReuseShuffleIndices: "; 1722 if (ReuseShuffleIndices.empty()) 1723 dbgs() << "Emtpy"; 1724 else 1725 for (unsigned ReuseIdx : ReuseShuffleIndices) 1726 dbgs() << ReuseIdx << ", "; 1727 dbgs() << "\n"; 1728 dbgs() << "ReorderIndices: "; 1729 for (unsigned ReorderIdx : ReorderIndices) 1730 dbgs() << ReorderIdx << ", "; 1731 dbgs() << "\n"; 1732 dbgs() << "UserTreeIndices: "; 1733 for (const auto &EInfo : UserTreeIndices) 1734 dbgs() << EInfo << ", "; 1735 dbgs() << "\n"; 1736 } 1737 #endif 1738 }; 1739 1740 /// Create a new VectorizableTree entry. 1741 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle, 1742 const InstructionsState &S, 1743 const EdgeInfo &UserTreeIdx, 1744 ArrayRef<unsigned> ReuseShuffleIndices = None, 1745 ArrayRef<unsigned> ReorderIndices = None) { 1746 bool Vectorized = (bool)Bundle; 1747 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree)); 1748 TreeEntry *Last = VectorizableTree.back().get(); 1749 Last->Idx = VectorizableTree.size() - 1; 1750 Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end()); 1751 Last->State = Vectorized ? TreeEntry::Vectorize : TreeEntry::NeedToGather; 1752 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(), 1753 ReuseShuffleIndices.end()); 1754 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end()); 1755 Last->setOperations(S); 1756 if (Vectorized) { 1757 for (int i = 0, e = VL.size(); i != e; ++i) { 1758 assert(!getTreeEntry(VL[i]) && "Scalar already in tree!"); 1759 ScalarToTreeEntry[VL[i]] = Last; 1760 } 1761 // Update the scheduler bundle to point to this TreeEntry. 1762 unsigned Lane = 0; 1763 for (ScheduleData *BundleMember = Bundle.getValue(); BundleMember; 1764 BundleMember = BundleMember->NextInBundle) { 1765 BundleMember->TE = Last; 1766 BundleMember->Lane = Lane; 1767 ++Lane; 1768 } 1769 assert((!Bundle.getValue() || Lane == VL.size()) && 1770 "Bundle and VL out of sync"); 1771 } else { 1772 MustGather.insert(VL.begin(), VL.end()); 1773 } 1774 1775 if (UserTreeIdx.UserTE) 1776 Last->UserTreeIndices.push_back(UserTreeIdx); 1777 1778 return Last; 1779 } 1780 1781 /// -- Vectorization State -- 1782 /// Holds all of the tree entries. 1783 TreeEntry::VecTreeTy VectorizableTree; 1784 1785 #ifndef NDEBUG 1786 /// Debug printer. 
1787 LLVM_DUMP_METHOD void dumpVectorizableTree() const { 1788 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) { 1789 VectorizableTree[Id]->dump(); 1790 dbgs() << "\n"; 1791 } 1792 } 1793 #endif 1794 1795 TreeEntry *getTreeEntry(Value *V) { 1796 auto I = ScalarToTreeEntry.find(V); 1797 if (I != ScalarToTreeEntry.end()) 1798 return I->second; 1799 return nullptr; 1800 } 1801 1802 const TreeEntry *getTreeEntry(Value *V) const { 1803 auto I = ScalarToTreeEntry.find(V); 1804 if (I != ScalarToTreeEntry.end()) 1805 return I->second; 1806 return nullptr; 1807 } 1808 1809 /// Maps a specific scalar to its tree entry. 1810 SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry; 1811 1812 /// Maps a value to the proposed vectorizable size. 1813 SmallDenseMap<Value *, unsigned> InstrElementSize; 1814 1815 /// A list of scalars that we found that we need to keep as scalars. 1816 ValueSet MustGather; 1817 1818 /// This POD struct describes one external user in the vectorized tree. 1819 struct ExternalUser { 1820 ExternalUser(Value *S, llvm::User *U, int L) 1821 : Scalar(S), User(U), Lane(L) {} 1822 1823 // Which scalar in our function. 1824 Value *Scalar; 1825 1826 // Which user that uses the scalar. 1827 llvm::User *User; 1828 1829 // Which lane does the scalar belong to. 1830 int Lane; 1831 }; 1832 using UserList = SmallVector<ExternalUser, 16>; 1833 1834 /// Checks if two instructions may access the same memory. 1835 /// 1836 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it 1837 /// is invariant in the calling loop. 1838 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1, 1839 Instruction *Inst2) { 1840 // First check if the result is already in the cache. 1841 AliasCacheKey key = std::make_pair(Inst1, Inst2); 1842 Optional<bool> &result = AliasCache[key]; 1843 if (result.hasValue()) { 1844 return result.getValue(); 1845 } 1846 MemoryLocation Loc2 = getLocation(Inst2, AA); 1847 bool aliased = true; 1848 if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) { 1849 // Do the alias check. 1850 aliased = AA->alias(Loc1, Loc2); 1851 } 1852 // Store the result in the cache. 1853 result = aliased; 1854 return aliased; 1855 } 1856 1857 using AliasCacheKey = std::pair<Instruction *, Instruction *>; 1858 1859 /// Cache for alias results. 1860 /// TODO: consider moving this to the AliasAnalysis itself. 1861 DenseMap<AliasCacheKey, Optional<bool>> AliasCache; 1862 1863 /// Removes an instruction from its block and eventually deletes it. 1864 /// It's like Instruction::eraseFromParent() except that the actual deletion 1865 /// is delayed until BoUpSLP is destructed. 1866 /// This is required to ensure that there are no incorrect collisions in the 1867 /// AliasCache, which can happen if a new instruction is allocated at the 1868 /// same address as a previously deleted instruction. 1869 void eraseInstruction(Instruction *I, bool ReplaceOpsWithUndef = false) { 1870 auto It = DeletedInstructions.try_emplace(I, ReplaceOpsWithUndef).first; 1871 It->getSecond() = It->getSecond() && ReplaceOpsWithUndef; 1872 } 1873 1874 /// Temporary store for deleted instructions. Instructions will be deleted 1875 /// eventually when the BoUpSLP is destructed. 1876 DenseMap<Instruction *, bool> DeletedInstructions; 1877 1878 /// A list of values that need to extracted out of the tree. 1879 /// This list holds pairs of (Internal Scalar : External User). 
External User 1880 /// can be nullptr, it means that this Internal Scalar will be used later, 1881 /// after vectorization. 1882 UserList ExternalUses; 1883 1884 /// Values used only by @llvm.assume calls. 1885 SmallPtrSet<const Value *, 32> EphValues; 1886 1887 /// Holds all of the instructions that we gathered. 1888 SetVector<Instruction *> GatherSeq; 1889 1890 /// A list of blocks that we are going to CSE. 1891 SetVector<BasicBlock *> CSEBlocks; 1892 1893 /// Contains all scheduling relevant data for an instruction. 1894 /// A ScheduleData either represents a single instruction or a member of an 1895 /// instruction bundle (= a group of instructions which is combined into a 1896 /// vector instruction). 1897 struct ScheduleData { 1898 // The initial value for the dependency counters. It means that the 1899 // dependencies are not calculated yet. 1900 enum { InvalidDeps = -1 }; 1901 1902 ScheduleData() = default; 1903 1904 void init(int BlockSchedulingRegionID, Value *OpVal) { 1905 FirstInBundle = this; 1906 NextInBundle = nullptr; 1907 NextLoadStore = nullptr; 1908 IsScheduled = false; 1909 SchedulingRegionID = BlockSchedulingRegionID; 1910 UnscheduledDepsInBundle = UnscheduledDeps; 1911 clearDependencies(); 1912 OpValue = OpVal; 1913 TE = nullptr; 1914 Lane = -1; 1915 } 1916 1917 /// Returns true if the dependency information has been calculated. 1918 bool hasValidDependencies() const { return Dependencies != InvalidDeps; } 1919 1920 /// Returns true for single instructions and for bundle representatives 1921 /// (= the head of a bundle). 1922 bool isSchedulingEntity() const { return FirstInBundle == this; } 1923 1924 /// Returns true if it represents an instruction bundle and not only a 1925 /// single instruction. 1926 bool isPartOfBundle() const { 1927 return NextInBundle != nullptr || FirstInBundle != this; 1928 } 1929 1930 /// Returns true if it is ready for scheduling, i.e. it has no more 1931 /// unscheduled depending instructions/bundles. 1932 bool isReady() const { 1933 assert(isSchedulingEntity() && 1934 "can't consider non-scheduling entity for ready list"); 1935 return UnscheduledDepsInBundle == 0 && !IsScheduled; 1936 } 1937 1938 /// Modifies the number of unscheduled dependencies, also updating it for 1939 /// the whole bundle. 1940 int incrementUnscheduledDeps(int Incr) { 1941 UnscheduledDeps += Incr; 1942 return FirstInBundle->UnscheduledDepsInBundle += Incr; 1943 } 1944 1945 /// Sets the number of unscheduled dependencies to the number of 1946 /// dependencies. 1947 void resetUnscheduledDeps() { 1948 incrementUnscheduledDeps(Dependencies - UnscheduledDeps); 1949 } 1950 1951 /// Clears all dependency information. 1952 void clearDependencies() { 1953 Dependencies = InvalidDeps; 1954 resetUnscheduledDeps(); 1955 MemoryDependencies.clear(); 1956 } 1957 1958 void dump(raw_ostream &os) const { 1959 if (!isSchedulingEntity()) { 1960 os << "/ " << *Inst; 1961 } else if (NextInBundle) { 1962 os << '[' << *Inst; 1963 ScheduleData *SD = NextInBundle; 1964 while (SD) { 1965 os << ';' << *SD->Inst; 1966 SD = SD->NextInBundle; 1967 } 1968 os << ']'; 1969 } else { 1970 os << *Inst; 1971 } 1972 } 1973 1974 Instruction *Inst = nullptr; 1975 1976 /// Points to the head in an instruction bundle (and always to this for 1977 /// single instructions). 1978 ScheduleData *FirstInBundle = nullptr; 1979 1980 /// Single linked list of all instructions in a bundle. Null if it is a 1981 /// single instruction. 
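/// E.g. for a three-member bundle [a, b, c] the chain is a -> b -> c -> null,
/// while FirstInBundle of all three members points back to a.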
1982 ScheduleData *NextInBundle = nullptr; 1983 1984 /// Single linked list of all memory instructions (e.g. load, store, call) 1985 /// in the block - until the end of the scheduling region. 1986 ScheduleData *NextLoadStore = nullptr; 1987 1988 /// The dependent memory instructions. 1989 /// This list is derived on demand in calculateDependencies(). 1990 SmallVector<ScheduleData *, 4> MemoryDependencies; 1991 1992 /// This ScheduleData is in the current scheduling region if this matches 1993 /// the current SchedulingRegionID of BlockScheduling. 1994 int SchedulingRegionID = 0; 1995 1996 /// Used for getting a "good" final ordering of instructions. 1997 int SchedulingPriority = 0; 1998 1999 /// The number of dependencies. Constitutes of the number of users of the 2000 /// instruction plus the number of dependent memory instructions (if any). 2001 /// This value is calculated on demand. 2002 /// If InvalidDeps, the number of dependencies is not calculated yet. 2003 int Dependencies = InvalidDeps; 2004 2005 /// The number of dependencies minus the number of dependencies of scheduled 2006 /// instructions. As soon as this is zero, the instruction/bundle gets ready 2007 /// for scheduling. 2008 /// Note that this is negative as long as Dependencies is not calculated. 2009 int UnscheduledDeps = InvalidDeps; 2010 2011 /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for 2012 /// single instructions. 2013 int UnscheduledDepsInBundle = InvalidDeps; 2014 2015 /// True if this instruction is scheduled (or considered as scheduled in the 2016 /// dry-run). 2017 bool IsScheduled = false; 2018 2019 /// Opcode of the current instruction in the schedule data. 2020 Value *OpValue = nullptr; 2021 2022 /// The TreeEntry that this instruction corresponds to. 2023 TreeEntry *TE = nullptr; 2024 2025 /// The lane of this node in the TreeEntry. 2026 int Lane = -1; 2027 }; 2028 2029 #ifndef NDEBUG 2030 friend inline raw_ostream &operator<<(raw_ostream &os, 2031 const BoUpSLP::ScheduleData &SD) { 2032 SD.dump(os); 2033 return os; 2034 } 2035 #endif 2036 2037 friend struct GraphTraits<BoUpSLP *>; 2038 friend struct DOTGraphTraits<BoUpSLP *>; 2039 2040 /// Contains all scheduling data for a basic block. 2041 struct BlockScheduling { 2042 BlockScheduling(BasicBlock *BB) 2043 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {} 2044 2045 void clear() { 2046 ReadyInsts.clear(); 2047 ScheduleStart = nullptr; 2048 ScheduleEnd = nullptr; 2049 FirstLoadStoreInRegion = nullptr; 2050 LastLoadStoreInRegion = nullptr; 2051 2052 // Reduce the maximum schedule region size by the size of the 2053 // previous scheduling run. 2054 ScheduleRegionSizeLimit -= ScheduleRegionSize; 2055 if (ScheduleRegionSizeLimit < MinScheduleRegionSize) 2056 ScheduleRegionSizeLimit = MinScheduleRegionSize; 2057 ScheduleRegionSize = 0; 2058 2059 // Make a new scheduling region, i.e. all existing ScheduleData is not 2060 // in the new region yet. 
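// ScheduleData whose SchedulingRegionID no longer matches is simply ignored
// by getScheduleData(), so no per-instruction cleanup is needed here.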
2061 ++SchedulingRegionID; 2062 } 2063 2064 ScheduleData *getScheduleData(Value *V) { 2065 ScheduleData *SD = ScheduleDataMap[V]; 2066 if (SD && SD->SchedulingRegionID == SchedulingRegionID) 2067 return SD; 2068 return nullptr; 2069 } 2070 2071 ScheduleData *getScheduleData(Value *V, Value *Key) { 2072 if (V == Key) 2073 return getScheduleData(V); 2074 auto I = ExtraScheduleDataMap.find(V); 2075 if (I != ExtraScheduleDataMap.end()) { 2076 ScheduleData *SD = I->second[Key]; 2077 if (SD && SD->SchedulingRegionID == SchedulingRegionID) 2078 return SD; 2079 } 2080 return nullptr; 2081 } 2082 2083 bool isInSchedulingRegion(ScheduleData *SD) const { 2084 return SD->SchedulingRegionID == SchedulingRegionID; 2085 } 2086 2087 /// Marks an instruction as scheduled and puts all dependent ready 2088 /// instructions into the ready-list. 2089 template <typename ReadyListType> 2090 void schedule(ScheduleData *SD, ReadyListType &ReadyList) { 2091 SD->IsScheduled = true; 2092 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); 2093 2094 ScheduleData *BundleMember = SD; 2095 while (BundleMember) { 2096 if (BundleMember->Inst != BundleMember->OpValue) { 2097 BundleMember = BundleMember->NextInBundle; 2098 continue; 2099 } 2100 // Handle the def-use chain dependencies. 2101 2102 // Decrement the unscheduled counter and insert to ready list if ready. 2103 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) { 2104 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) { 2105 if (OpDef && OpDef->hasValidDependencies() && 2106 OpDef->incrementUnscheduledDeps(-1) == 0) { 2107 // There are no more unscheduled dependencies after 2108 // decrementing, so we can put the dependent instruction 2109 // into the ready list. 2110 ScheduleData *DepBundle = OpDef->FirstInBundle; 2111 assert(!DepBundle->IsScheduled && 2112 "already scheduled bundle gets ready"); 2113 ReadyList.insert(DepBundle); 2114 LLVM_DEBUG(dbgs() 2115 << "SLP: gets ready (def): " << *DepBundle << "\n"); 2116 } 2117 }); 2118 }; 2119 2120 // If BundleMember is a vector bundle, its operands may have been 2121 // reordered duiring buildTree(). We therefore need to get its operands 2122 // through the TreeEntry. 2123 if (TreeEntry *TE = BundleMember->TE) { 2124 int Lane = BundleMember->Lane; 2125 assert(Lane >= 0 && "Lane not set"); 2126 2127 // Since vectorization tree is being built recursively this assertion 2128 // ensures that the tree entry has all operands set before reaching 2129 // this code. Couple of exceptions known at the moment are extracts 2130 // where their second (immediate) operand is not added. Since 2131 // immediates do not affect scheduler behavior this is considered 2132 // okay. 2133 auto *In = TE->getMainOp(); 2134 assert(In && 2135 (isa<ExtractValueInst>(In) || isa<ExtractElementInst>(In) || 2136 In->getNumOperands() == TE->getNumOperands()) && 2137 "Missed TreeEntry operands?"); 2138 (void)In; // fake use to avoid build failure when assertions disabled 2139 2140 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands(); 2141 OpIdx != NumOperands; ++OpIdx) 2142 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane])) 2143 DecrUnsched(I); 2144 } else { 2145 // If BundleMember is a stand-alone instruction, no operand reordering 2146 // has taken place, so we directly access its operands. 2147 for (Use &U : BundleMember->Inst->operands()) 2148 if (auto *I = dyn_cast<Instruction>(U.get())) 2149 DecrUnsched(I); 2150 } 2151 // Handle the memory dependencies. 
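// Memory dependencies use the same counting scheme as the def-use
// dependencies above: scheduling this bundle decrements UnscheduledDeps of
// each dependent bundle, which becomes ready once its counter reaches zero.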
2152 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) { 2153 if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) { 2154 // There are no more unscheduled dependencies after decrementing, 2155 // so we can put the dependent instruction into the ready list. 2156 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle; 2157 assert(!DepBundle->IsScheduled && 2158 "already scheduled bundle gets ready"); 2159 ReadyList.insert(DepBundle); 2160 LLVM_DEBUG(dbgs() 2161 << "SLP: gets ready (mem): " << *DepBundle << "\n"); 2162 } 2163 } 2164 BundleMember = BundleMember->NextInBundle; 2165 } 2166 } 2167 2168 void doForAllOpcodes(Value *V, 2169 function_ref<void(ScheduleData *SD)> Action) { 2170 if (ScheduleData *SD = getScheduleData(V)) 2171 Action(SD); 2172 auto I = ExtraScheduleDataMap.find(V); 2173 if (I != ExtraScheduleDataMap.end()) 2174 for (auto &P : I->second) 2175 if (P.second->SchedulingRegionID == SchedulingRegionID) 2176 Action(P.second); 2177 } 2178 2179 /// Put all instructions into the ReadyList which are ready for scheduling. 2180 template <typename ReadyListType> 2181 void initialFillReadyList(ReadyListType &ReadyList) { 2182 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 2183 doForAllOpcodes(I, [&](ScheduleData *SD) { 2184 if (SD->isSchedulingEntity() && SD->isReady()) { 2185 ReadyList.insert(SD); 2186 LLVM_DEBUG(dbgs() 2187 << "SLP: initially in ready list: " << *I << "\n"); 2188 } 2189 }); 2190 } 2191 } 2192 2193 /// Checks if a bundle of instructions can be scheduled, i.e. has no 2194 /// cyclic dependencies. This is only a dry-run, no instructions are 2195 /// actually moved at this stage. 2196 /// \returns the scheduling bundle. The returned Optional value is non-None 2197 /// if \p VL is allowed to be scheduled. 2198 Optional<ScheduleData *> 2199 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 2200 const InstructionsState &S); 2201 2202 /// Un-bundles a group of instructions. 2203 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue); 2204 2205 /// Allocates schedule data chunk. 2206 ScheduleData *allocateScheduleDataChunks(); 2207 2208 /// Extends the scheduling region so that V is inside the region. 2209 /// \returns true if the region size is within the limit. 2210 bool extendSchedulingRegion(Value *V, const InstructionsState &S); 2211 2212 /// Initialize the ScheduleData structures for new instructions in the 2213 /// scheduling region. 2214 void initScheduleData(Instruction *FromI, Instruction *ToI, 2215 ScheduleData *PrevLoadStore, 2216 ScheduleData *NextLoadStore); 2217 2218 /// Updates the dependency information of a bundle and of all instructions/ 2219 /// bundles which depend on the original bundle. 2220 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList, 2221 BoUpSLP *SLP); 2222 2223 /// Sets all instruction in the scheduling region to un-scheduled. 2224 void resetSchedule(); 2225 2226 BasicBlock *BB; 2227 2228 /// Simple memory allocation for ScheduleData. 2229 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks; 2230 2231 /// The size of a ScheduleData array in ScheduleDataChunks. 2232 int ChunkSize; 2233 2234 /// The allocator position in the current chunk, which is the last entry 2235 /// of ScheduleDataChunks. 2236 int ChunkPos; 2237 2238 /// Attaches ScheduleData to Instruction. 2239 /// Note that the mapping survives during all vectorization iterations, i.e. 2240 /// ScheduleData structures are recycled. 
2241 DenseMap<Value *, ScheduleData *> ScheduleDataMap; 2242 2243 /// Attaches ScheduleData to Instruction with the leading key. 2244 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>> 2245 ExtraScheduleDataMap; 2246 2247 struct ReadyList : SmallVector<ScheduleData *, 8> { 2248 void insert(ScheduleData *SD) { push_back(SD); } 2249 }; 2250 2251 /// The ready-list for scheduling (only used for the dry-run). 2252 ReadyList ReadyInsts; 2253 2254 /// The first instruction of the scheduling region. 2255 Instruction *ScheduleStart = nullptr; 2256 2257 /// The first instruction _after_ the scheduling region. 2258 Instruction *ScheduleEnd = nullptr; 2259 2260 /// The first memory accessing instruction in the scheduling region 2261 /// (can be null). 2262 ScheduleData *FirstLoadStoreInRegion = nullptr; 2263 2264 /// The last memory accessing instruction in the scheduling region 2265 /// (can be null). 2266 ScheduleData *LastLoadStoreInRegion = nullptr; 2267 2268 /// The current size of the scheduling region. 2269 int ScheduleRegionSize = 0; 2270 2271 /// The maximum size allowed for the scheduling region. 2272 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget; 2273 2274 /// The ID of the scheduling region. For a new vectorization iteration this 2275 /// is incremented which "removes" all ScheduleData from the region. 2276 // Make sure that the initial SchedulingRegionID is greater than the 2277 // initial SchedulingRegionID in ScheduleData (which is 0). 2278 int SchedulingRegionID = 1; 2279 }; 2280 2281 /// Attaches the BlockScheduling structures to basic blocks. 2282 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules; 2283 2284 /// Performs the "real" scheduling. Done before vectorization is actually 2285 /// performed in a basic block. 2286 void scheduleBlock(BlockScheduling *BS); 2287 2288 /// List of users to ignore during scheduling and that don't need extracting. 2289 ArrayRef<Value *> UserIgnoreList; 2290 2291 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of 2292 /// sorted SmallVectors of unsigned. 2293 struct OrdersTypeDenseMapInfo { 2294 static OrdersType getEmptyKey() { 2295 OrdersType V; 2296 V.push_back(~1U); 2297 return V; 2298 } 2299 2300 static OrdersType getTombstoneKey() { 2301 OrdersType V; 2302 V.push_back(~2U); 2303 return V; 2304 } 2305 2306 static unsigned getHashValue(const OrdersType &V) { 2307 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); 2308 } 2309 2310 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) { 2311 return LHS == RHS; 2312 } 2313 }; 2314 2315 /// Contains orders of operations along with the number of bundles that have 2316 /// operations in this order. It stores only those orders that require 2317 /// reordering, if reordering is not required it is counted using \a 2318 /// NumOpsWantToKeepOriginalOrder. 2319 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo> NumOpsWantToKeepOrder; 2320 /// Number of bundles that do not require reordering. 2321 unsigned NumOpsWantToKeepOriginalOrder = 0; 2322 2323 // Analysis and block reference. 2324 Function *F; 2325 ScalarEvolution *SE; 2326 TargetTransformInfo *TTI; 2327 TargetLibraryInfo *TLI; 2328 AAResults *AA; 2329 LoopInfo *LI; 2330 DominatorTree *DT; 2331 AssumptionCache *AC; 2332 DemandedBits *DB; 2333 const DataLayout *DL; 2334 OptimizationRemarkEmitter *ORE; 2335 2336 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. 2337 unsigned MinVecRegSize; // Set by cl::opt (default: 128). 
2338 2339 /// Instruction builder to construct the vectorized tree. 2340 IRBuilder<> Builder; 2341 2342 /// A map of scalar integer values to the smallest bit width with which they 2343 /// can legally be represented. The values map to (width, signed) pairs, 2344 /// where "width" indicates the minimum bit width and "signed" is True if the 2345 /// value must be signed-extended, rather than zero-extended, back to its 2346 /// original width. 2347 MapVector<Value *, std::pair<uint64_t, bool>> MinBWs; 2348 }; 2349 2350 } // end namespace slpvectorizer 2351 2352 template <> struct GraphTraits<BoUpSLP *> { 2353 using TreeEntry = BoUpSLP::TreeEntry; 2354 2355 /// NodeRef has to be a pointer per the GraphWriter. 2356 using NodeRef = TreeEntry *; 2357 2358 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy; 2359 2360 /// Add the VectorizableTree to the index iterator to be able to return 2361 /// TreeEntry pointers. 2362 struct ChildIteratorType 2363 : public iterator_adaptor_base< 2364 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> { 2365 ContainerTy &VectorizableTree; 2366 2367 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W, 2368 ContainerTy &VT) 2369 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {} 2370 2371 NodeRef operator*() { return I->UserTE; } 2372 }; 2373 2374 static NodeRef getEntryNode(BoUpSLP &R) { 2375 return R.VectorizableTree[0].get(); 2376 } 2377 2378 static ChildIteratorType child_begin(NodeRef N) { 2379 return {N->UserTreeIndices.begin(), N->Container}; 2380 } 2381 2382 static ChildIteratorType child_end(NodeRef N) { 2383 return {N->UserTreeIndices.end(), N->Container}; 2384 } 2385 2386 /// For the node iterator we just need to turn the TreeEntry iterator into a 2387 /// TreeEntry* iterator so that it dereferences to NodeRef. 
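/// The tree stores its entries as std::unique_ptr, which is why operator*()
/// below returns It->get() instead of the unique_ptr itself.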
2388 class nodes_iterator { 2389 using ItTy = ContainerTy::iterator; 2390 ItTy It; 2391 2392 public: 2393 nodes_iterator(const ItTy &It2) : It(It2) {} 2394 NodeRef operator*() { return It->get(); } 2395 nodes_iterator operator++() { 2396 ++It; 2397 return *this; 2398 } 2399 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; } 2400 }; 2401 2402 static nodes_iterator nodes_begin(BoUpSLP *R) { 2403 return nodes_iterator(R->VectorizableTree.begin()); 2404 } 2405 2406 static nodes_iterator nodes_end(BoUpSLP *R) { 2407 return nodes_iterator(R->VectorizableTree.end()); 2408 } 2409 2410 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); } 2411 }; 2412 2413 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits { 2414 using TreeEntry = BoUpSLP::TreeEntry; 2415 2416 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {} 2417 2418 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) { 2419 std::string Str; 2420 raw_string_ostream OS(Str); 2421 if (isSplat(Entry->Scalars)) { 2422 OS << "<splat> " << *Entry->Scalars[0]; 2423 return Str; 2424 } 2425 for (auto V : Entry->Scalars) { 2426 OS << *V; 2427 if (std::any_of( 2428 R->ExternalUses.begin(), R->ExternalUses.end(), 2429 [&](const BoUpSLP::ExternalUser &EU) { return EU.Scalar == V; })) 2430 OS << " <extract>"; 2431 OS << "\n"; 2432 } 2433 return Str; 2434 } 2435 2436 static std::string getNodeAttributes(const TreeEntry *Entry, 2437 const BoUpSLP *) { 2438 if (Entry->State == TreeEntry::NeedToGather) 2439 return "color=red"; 2440 return ""; 2441 } 2442 }; 2443 2444 } // end namespace llvm 2445 2446 BoUpSLP::~BoUpSLP() { 2447 for (const auto &Pair : DeletedInstructions) { 2448 // Replace operands of ignored instructions with Undefs in case if they were 2449 // marked for deletion. 2450 if (Pair.getSecond()) { 2451 Value *Undef = UndefValue::get(Pair.getFirst()->getType()); 2452 Pair.getFirst()->replaceAllUsesWith(Undef); 2453 } 2454 Pair.getFirst()->dropAllReferences(); 2455 } 2456 for (const auto &Pair : DeletedInstructions) { 2457 assert(Pair.getFirst()->use_empty() && 2458 "trying to erase instruction with users."); 2459 Pair.getFirst()->eraseFromParent(); 2460 } 2461 assert(!verifyFunction(*F, &dbgs())); 2462 } 2463 2464 void BoUpSLP::eraseInstructions(ArrayRef<Value *> AV) { 2465 for (auto *V : AV) { 2466 if (auto *I = dyn_cast<Instruction>(V)) 2467 eraseInstruction(I, /*ReplaceWithUndef=*/true); 2468 }; 2469 } 2470 2471 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 2472 ArrayRef<Value *> UserIgnoreLst) { 2473 ExtraValueToDebugLocsMap ExternallyUsedValues; 2474 buildTree(Roots, ExternallyUsedValues, UserIgnoreLst); 2475 } 2476 2477 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 2478 ExtraValueToDebugLocsMap &ExternallyUsedValues, 2479 ArrayRef<Value *> UserIgnoreLst) { 2480 deleteTree(); 2481 UserIgnoreList = UserIgnoreLst; 2482 if (!allSameType(Roots)) 2483 return; 2484 buildTree_rec(Roots, 0, EdgeInfo()); 2485 2486 // Collect the values that we need to extract from the tree. 2487 for (auto &TEPtr : VectorizableTree) { 2488 TreeEntry *Entry = TEPtr.get(); 2489 2490 // No need to handle users of gathered values. 
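// Gathered scalars are left in place, so any user outside the tree can keep
// using them directly and no extract is required.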
2491 if (Entry->State == TreeEntry::NeedToGather) 2492 continue; 2493 2494 // For each lane: 2495 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 2496 Value *Scalar = Entry->Scalars[Lane]; 2497 int FoundLane = Lane; 2498 if (!Entry->ReuseShuffleIndices.empty()) { 2499 FoundLane = 2500 std::distance(Entry->ReuseShuffleIndices.begin(), 2501 llvm::find(Entry->ReuseShuffleIndices, FoundLane)); 2502 } 2503 2504 // Check if the scalar is externally used as an extra arg. 2505 auto ExtI = ExternallyUsedValues.find(Scalar); 2506 if (ExtI != ExternallyUsedValues.end()) { 2507 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " 2508 << Lane << " from " << *Scalar << ".\n"); 2509 ExternalUses.emplace_back(Scalar, nullptr, FoundLane); 2510 } 2511 for (User *U : Scalar->users()) { 2512 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); 2513 2514 Instruction *UserInst = dyn_cast<Instruction>(U); 2515 if (!UserInst) 2516 continue; 2517 2518 // Skip in-tree scalars that become vectors 2519 if (TreeEntry *UseEntry = getTreeEntry(U)) { 2520 Value *UseScalar = UseEntry->Scalars[0]; 2521 // Some in-tree scalars will remain as scalar in vectorized 2522 // instructions. If that is the case, the one in Lane 0 will 2523 // be used. 2524 if (UseScalar != U || 2525 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) { 2526 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U 2527 << ".\n"); 2528 assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state"); 2529 continue; 2530 } 2531 } 2532 2533 // Ignore users in the user ignore list. 2534 if (is_contained(UserIgnoreList, UserInst)) 2535 continue; 2536 2537 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " 2538 << Lane << " from " << *Scalar << ".\n"); 2539 ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane)); 2540 } 2541 } 2542 } 2543 } 2544 2545 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, 2546 const EdgeInfo &UserTreeIdx) { 2547 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!"); 2548 2549 InstructionsState S = getSameOpcode(VL); 2550 if (Depth == RecursionMaxDepth) { 2551 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); 2552 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2553 return; 2554 } 2555 2556 // Don't handle vectors. 2557 if (S.OpValue->getType()->isVectorTy()) { 2558 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); 2559 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2560 return; 2561 } 2562 2563 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 2564 if (SI->getValueOperand()->getType()->isVectorTy()) { 2565 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); 2566 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2567 return; 2568 } 2569 2570 // If all of the operands are identical or constant we have a simple solution. 2571 if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode()) { 2572 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n"); 2573 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2574 return; 2575 } 2576 2577 // We now know that this is a vector of instructions of the same type from 2578 // the same block. 2579 2580 // Don't vectorize ephemeral values. 
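// Ephemeral values (see EphValues) only feed assume-like uses, so
// vectorizing them would do no useful work.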
2581 for (Value *V : VL) { 2582 if (EphValues.count(V)) { 2583 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 2584 << ") is ephemeral.\n"); 2585 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2586 return; 2587 } 2588 } 2589 2590 // Check if this is a duplicate of another entry. 2591 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 2592 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); 2593 if (!E->isSame(VL)) { 2594 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); 2595 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2596 return; 2597 } 2598 // Record the reuse of the tree node. FIXME, currently this is only used to 2599 // properly draw the graph rather than for the actual vectorization. 2600 E->UserTreeIndices.push_back(UserTreeIdx); 2601 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue 2602 << ".\n"); 2603 return; 2604 } 2605 2606 // Check that none of the instructions in the bundle are already in the tree. 2607 for (Value *V : VL) { 2608 auto *I = dyn_cast<Instruction>(V); 2609 if (!I) 2610 continue; 2611 if (getTreeEntry(I)) { 2612 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 2613 << ") is already in tree.\n"); 2614 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2615 return; 2616 } 2617 } 2618 2619 // If any of the scalars is marked as a value that needs to stay scalar, then 2620 // we need to gather the scalars. 2621 // The reduction nodes (stored in UserIgnoreList) also should stay scalar. 2622 for (Value *V : VL) { 2623 if (MustGather.count(V) || is_contained(UserIgnoreList, V)) { 2624 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); 2625 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2626 return; 2627 } 2628 } 2629 2630 // Check that all of the users of the scalars that we want to vectorize are 2631 // schedulable. 2632 auto *VL0 = cast<Instruction>(S.OpValue); 2633 BasicBlock *BB = VL0->getParent(); 2634 2635 if (!DT->isReachableFromEntry(BB)) { 2636 // Don't go into unreachable blocks. They may contain instructions with 2637 // dependency cycles which confuse the final scheduling. 2638 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); 2639 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2640 return; 2641 } 2642 2643 // Check that every instruction appears once in this bundle. 
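// If some scalars repeat, the node is built from the unique values only and
// ReuseShuffleIndicies records, for every original lane, the unique lane it
// maps to, so a shuffle can later recreate the requested order. E.g. for
// VL = {a, b, a, b} we get UniqueValues = {a, b} and indices {0, 1, 0, 1}.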
2644 SmallVector<unsigned, 4> ReuseShuffleIndicies; 2645 SmallVector<Value *, 4> UniqueValues; 2646 DenseMap<Value *, unsigned> UniquePositions; 2647 for (Value *V : VL) { 2648 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 2649 ReuseShuffleIndicies.emplace_back(Res.first->second); 2650 if (Res.second) 2651 UniqueValues.emplace_back(V); 2652 } 2653 size_t NumUniqueScalarValues = UniqueValues.size(); 2654 if (NumUniqueScalarValues == VL.size()) { 2655 ReuseShuffleIndicies.clear(); 2656 } else { 2657 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); 2658 if (NumUniqueScalarValues <= 1 || 2659 !llvm::isPowerOf2_32(NumUniqueScalarValues)) { 2660 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 2661 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2662 return; 2663 } 2664 VL = UniqueValues; 2665 } 2666 2667 auto &BSRef = BlocksSchedules[BB]; 2668 if (!BSRef) 2669 BSRef = std::make_unique<BlockScheduling>(BB); 2670 2671 BlockScheduling &BS = *BSRef.get(); 2672 2673 Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S); 2674 if (!Bundle) { 2675 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); 2676 assert((!BS.getScheduleData(VL0) || 2677 !BS.getScheduleData(VL0)->isPartOfBundle()) && 2678 "tryScheduleBundle should cancelScheduling on failure"); 2679 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 2680 ReuseShuffleIndicies); 2681 return; 2682 } 2683 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 2684 2685 unsigned ShuffleOrOp = S.isAltShuffle() ? 2686 (unsigned) Instruction::ShuffleVector : S.getOpcode(); 2687 switch (ShuffleOrOp) { 2688 case Instruction::PHI: { 2689 auto *PH = cast<PHINode>(VL0); 2690 2691 // Check for terminator values (e.g. invoke). 2692 for (unsigned j = 0; j < VL.size(); ++j) 2693 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 2694 Instruction *Term = dyn_cast<Instruction>( 2695 cast<PHINode>(VL[j])->getIncomingValueForBlock( 2696 PH->getIncomingBlock(i))); 2697 if (Term && Term->isTerminator()) { 2698 LLVM_DEBUG(dbgs() 2699 << "SLP: Need to swizzle PHINodes (terminator use).\n"); 2700 BS.cancelScheduling(VL, VL0); 2701 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 2702 ReuseShuffleIndicies); 2703 return; 2704 } 2705 } 2706 2707 TreeEntry *TE = 2708 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies); 2709 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 2710 2711 // Keeps the reordered operands to avoid code duplication. 2712 SmallVector<ValueList, 2> OperandsVec; 2713 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 2714 ValueList Operands; 2715 // Prepare the operand vector. 
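// Operand i collects, across all lanes, the value each PHI receives from
// the i-th incoming block, so one operand bundle corresponds to one
// predecessor block.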
2716 for (Value *j : VL) 2717 Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock( 2718 PH->getIncomingBlock(i))); 2719 TE->setOperand(i, Operands); 2720 OperandsVec.push_back(Operands); 2721 } 2722 for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx) 2723 buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx}); 2724 return; 2725 } 2726 case Instruction::ExtractValue: 2727 case Instruction::ExtractElement: { 2728 OrdersType CurrentOrder; 2729 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder); 2730 if (Reuse) { 2731 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n"); 2732 ++NumOpsWantToKeepOriginalOrder; 2733 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 2734 ReuseShuffleIndicies); 2735 // This is a special case, as it does not gather, but at the same time 2736 // we are not extending buildTree_rec() towards the operands. 2737 ValueList Op0; 2738 Op0.assign(VL.size(), VL0->getOperand(0)); 2739 VectorizableTree.back()->setOperand(0, Op0); 2740 return; 2741 } 2742 if (!CurrentOrder.empty()) { 2743 LLVM_DEBUG({ 2744 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence " 2745 "with order"; 2746 for (unsigned Idx : CurrentOrder) 2747 dbgs() << " " << Idx; 2748 dbgs() << "\n"; 2749 }); 2750 // Insert new order with initial value 0, if it does not exist, 2751 // otherwise return the iterator to the existing one. 2752 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 2753 ReuseShuffleIndicies, CurrentOrder); 2754 findRootOrder(CurrentOrder); 2755 ++NumOpsWantToKeepOrder[CurrentOrder]; 2756 // This is a special case, as it does not gather, but at the same time 2757 // we are not extending buildTree_rec() towards the operands. 2758 ValueList Op0; 2759 Op0.assign(VL.size(), VL0->getOperand(0)); 2760 VectorizableTree.back()->setOperand(0, Op0); 2761 return; 2762 } 2763 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n"); 2764 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 2765 ReuseShuffleIndicies); 2766 BS.cancelScheduling(VL, VL0); 2767 return; 2768 } 2769 case Instruction::Load: { 2770 // Check that a vectorized load would load the same memory as a scalar 2771 // load. For example, we don't want to vectorize loads that are smaller 2772 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 2773 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 2774 // from such a struct, we read/write packed bits disagreeing with the 2775 // unvectorized version. 2776 Type *ScalarTy = VL0->getType(); 2777 2778 if (DL->getTypeSizeInBits(ScalarTy) != 2779 DL->getTypeAllocSizeInBits(ScalarTy)) { 2780 BS.cancelScheduling(VL, VL0); 2781 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 2782 ReuseShuffleIndicies); 2783 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); 2784 return; 2785 } 2786 2787 // Make sure all loads in the bundle are simple - we can't vectorize 2788 // atomic or volatile loads. 2789 SmallVector<Value *, 4> PointerOps(VL.size()); 2790 auto POIter = PointerOps.begin(); 2791 for (Value *V : VL) { 2792 auto *L = cast<LoadInst>(V); 2793 if (!L->isSimple()) { 2794 BS.cancelScheduling(VL, VL0); 2795 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 2796 ReuseShuffleIndicies); 2797 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); 2798 return; 2799 } 2800 *POIter = L->getPointerOperand(); 2801 ++POIter; 2802 } 2803 2804 OrdersType CurrentOrder; 2805 // Check the order of pointer operands. 
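// sortPtrAccesses() attempts to sort the pointers by increasing address.
// The loads are treated as consecutive only if the constant SCEV distance
// between the first and last sorted pointer equals
// (VL.size() - 1) * element size, e.g. 3 * 4 bytes for four i32 loads.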
2806 if (llvm::sortPtrAccesses(PointerOps, *DL, *SE, CurrentOrder)) { 2807 Value *Ptr0; 2808 Value *PtrN; 2809 if (CurrentOrder.empty()) { 2810 Ptr0 = PointerOps.front(); 2811 PtrN = PointerOps.back(); 2812 } else { 2813 Ptr0 = PointerOps[CurrentOrder.front()]; 2814 PtrN = PointerOps[CurrentOrder.back()]; 2815 } 2816 const SCEV *Scev0 = SE->getSCEV(Ptr0); 2817 const SCEV *ScevN = SE->getSCEV(PtrN); 2818 const auto *Diff = 2819 dyn_cast<SCEVConstant>(SE->getMinusSCEV(ScevN, Scev0)); 2820 uint64_t Size = DL->getTypeAllocSize(ScalarTy); 2821 // Check that the sorted loads are consecutive. 2822 if (Diff && Diff->getAPInt() == (VL.size() - 1) * Size) { 2823 if (CurrentOrder.empty()) { 2824 // Original loads are consecutive and does not require reordering. 2825 ++NumOpsWantToKeepOriginalOrder; 2826 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, 2827 UserTreeIdx, ReuseShuffleIndicies); 2828 TE->setOperandsInOrder(); 2829 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n"); 2830 } else { 2831 // Need to reorder. 2832 TreeEntry *TE = 2833 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 2834 ReuseShuffleIndicies, CurrentOrder); 2835 TE->setOperandsInOrder(); 2836 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n"); 2837 findRootOrder(CurrentOrder); 2838 ++NumOpsWantToKeepOrder[CurrentOrder]; 2839 } 2840 return; 2841 } 2842 } 2843 2844 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); 2845 BS.cancelScheduling(VL, VL0); 2846 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 2847 ReuseShuffleIndicies); 2848 return; 2849 } 2850 case Instruction::ZExt: 2851 case Instruction::SExt: 2852 case Instruction::FPToUI: 2853 case Instruction::FPToSI: 2854 case Instruction::FPExt: 2855 case Instruction::PtrToInt: 2856 case Instruction::IntToPtr: 2857 case Instruction::SIToFP: 2858 case Instruction::UIToFP: 2859 case Instruction::Trunc: 2860 case Instruction::FPTrunc: 2861 case Instruction::BitCast: { 2862 Type *SrcTy = VL0->getOperand(0)->getType(); 2863 for (Value *V : VL) { 2864 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType(); 2865 if (Ty != SrcTy || !isValidElementType(Ty)) { 2866 BS.cancelScheduling(VL, VL0); 2867 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 2868 ReuseShuffleIndicies); 2869 LLVM_DEBUG(dbgs() 2870 << "SLP: Gathering casts with different src types.\n"); 2871 return; 2872 } 2873 } 2874 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 2875 ReuseShuffleIndicies); 2876 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); 2877 2878 TE->setOperandsInOrder(); 2879 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 2880 ValueList Operands; 2881 // Prepare the operand vector. 2882 for (Value *V : VL) 2883 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 2884 2885 buildTree_rec(Operands, Depth + 1, {TE, i}); 2886 } 2887 return; 2888 } 2889 case Instruction::ICmp: 2890 case Instruction::FCmp: { 2891 // Check that all of the compares have the same predicate. 
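// A swapped predicate is accepted too: e.g. 'icmp sgt %a, %b' may be bundled
// with 'icmp slt %b, %a', because the operands of the latter are commuted
// below when the operand vectors are built.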
2892 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 2893 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0); 2894 Type *ComparedTy = VL0->getOperand(0)->getType(); 2895 for (Value *V : VL) { 2896 CmpInst *Cmp = cast<CmpInst>(V); 2897 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) || 2898 Cmp->getOperand(0)->getType() != ComparedTy) { 2899 BS.cancelScheduling(VL, VL0); 2900 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 2901 ReuseShuffleIndicies); 2902 LLVM_DEBUG(dbgs() 2903 << "SLP: Gathering cmp with different predicate.\n"); 2904 return; 2905 } 2906 } 2907 2908 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 2909 ReuseShuffleIndicies); 2910 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 2911 2912 ValueList Left, Right; 2913 if (cast<CmpInst>(VL0)->isCommutative()) { 2914 // Commutative predicate - collect + sort operands of the instructions 2915 // so that each side is more likely to have the same opcode. 2916 assert(P0 == SwapP0 && "Commutative Predicate mismatch"); 2917 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 2918 } else { 2919 // Collect operands - commute if it uses the swapped predicate. 2920 for (Value *V : VL) { 2921 auto *Cmp = cast<CmpInst>(V); 2922 Value *LHS = Cmp->getOperand(0); 2923 Value *RHS = Cmp->getOperand(1); 2924 if (Cmp->getPredicate() != P0) 2925 std::swap(LHS, RHS); 2926 Left.push_back(LHS); 2927 Right.push_back(RHS); 2928 } 2929 } 2930 TE->setOperand(0, Left); 2931 TE->setOperand(1, Right); 2932 buildTree_rec(Left, Depth + 1, {TE, 0}); 2933 buildTree_rec(Right, Depth + 1, {TE, 1}); 2934 return; 2935 } 2936 case Instruction::Select: 2937 case Instruction::FNeg: 2938 case Instruction::Add: 2939 case Instruction::FAdd: 2940 case Instruction::Sub: 2941 case Instruction::FSub: 2942 case Instruction::Mul: 2943 case Instruction::FMul: 2944 case Instruction::UDiv: 2945 case Instruction::SDiv: 2946 case Instruction::FDiv: 2947 case Instruction::URem: 2948 case Instruction::SRem: 2949 case Instruction::FRem: 2950 case Instruction::Shl: 2951 case Instruction::LShr: 2952 case Instruction::AShr: 2953 case Instruction::And: 2954 case Instruction::Or: 2955 case Instruction::Xor: { 2956 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 2957 ReuseShuffleIndicies); 2958 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n"); 2959 2960 // Sort operands of the instructions so that each side is more likely to 2961 // have the same opcode. 2962 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 2963 ValueList Left, Right; 2964 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 2965 TE->setOperand(0, Left); 2966 TE->setOperand(1, Right); 2967 buildTree_rec(Left, Depth + 1, {TE, 0}); 2968 buildTree_rec(Right, Depth + 1, {TE, 1}); 2969 return; 2970 } 2971 2972 TE->setOperandsInOrder(); 2973 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 2974 ValueList Operands; 2975 // Prepare the operand vector. 2976 for (Value *j : VL) 2977 Operands.push_back(cast<Instruction>(j)->getOperand(i)); 2978 2979 buildTree_rec(Operands, Depth + 1, {TE, i}); 2980 } 2981 return; 2982 } 2983 case Instruction::GetElementPtr: { 2984 // We don't combine GEPs with complicated (nested) indexing. 
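// Only GEPs with a single index, i.e. 'getelementptr Ty, Ty* %base, %idx',
// are handled; a GEP with more operands (nested struct/array indexing) has
// getNumOperands() != 2 and is gathered instead.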
2985 for (Value *V : VL) { 2986 if (cast<Instruction>(V)->getNumOperands() != 2) { 2987 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 2988 BS.cancelScheduling(VL, VL0); 2989 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 2990 ReuseShuffleIndicies); 2991 return; 2992 } 2993 } 2994 2995 // We can't combine several GEPs into one vector if they operate on 2996 // different types. 2997 Type *Ty0 = VL0->getOperand(0)->getType(); 2998 for (Value *V : VL) { 2999 Type *CurTy = cast<Instruction>(V)->getOperand(0)->getType(); 3000 if (Ty0 != CurTy) { 3001 LLVM_DEBUG(dbgs() 3002 << "SLP: not-vectorizable GEP (different types).\n"); 3003 BS.cancelScheduling(VL, VL0); 3004 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3005 ReuseShuffleIndicies); 3006 return; 3007 } 3008 } 3009 3010 // We don't combine GEPs with non-constant indexes. 3011 Type *Ty1 = VL0->getOperand(1)->getType(); 3012 for (Value *V : VL) { 3013 auto Op = cast<Instruction>(V)->getOperand(1); 3014 if (!isa<ConstantInt>(Op) || 3015 (Op->getType() != Ty1 && 3016 Op->getType()->getScalarSizeInBits() > 3017 DL->getIndexSizeInBits( 3018 V->getType()->getPointerAddressSpace()))) { 3019 LLVM_DEBUG(dbgs() 3020 << "SLP: not-vectorizable GEP (non-constant indexes).\n"); 3021 BS.cancelScheduling(VL, VL0); 3022 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3023 ReuseShuffleIndicies); 3024 return; 3025 } 3026 } 3027 3028 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3029 ReuseShuffleIndicies); 3030 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n"); 3031 TE->setOperandsInOrder(); 3032 for (unsigned i = 0, e = 2; i < e; ++i) { 3033 ValueList Operands; 3034 // Prepare the operand vector. 3035 for (Value *V : VL) 3036 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 3037 3038 buildTree_rec(Operands, Depth + 1, {TE, i}); 3039 } 3040 return; 3041 } 3042 case Instruction::Store: { 3043 // Check if the stores are consecutive or if we need to swizzle them. 3044 llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType(); 3045 // Make sure all stores in the bundle are simple - we can't vectorize 3046 // atomic or volatile stores. 3047 SmallVector<Value *, 4> PointerOps(VL.size()); 3048 ValueList Operands(VL.size()); 3049 auto POIter = PointerOps.begin(); 3050 auto OIter = Operands.begin(); 3051 for (Value *V : VL) { 3052 auto *SI = cast<StoreInst>(V); 3053 if (!SI->isSimple()) { 3054 BS.cancelScheduling(VL, VL0); 3055 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3056 ReuseShuffleIndicies); 3057 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n"); 3058 return; 3059 } 3060 *POIter = SI->getPointerOperand(); 3061 *OIter = SI->getValueOperand(); 3062 ++POIter; 3063 ++OIter; 3064 } 3065 3066 OrdersType CurrentOrder; 3067 // Check the order of pointer operands. 3068 if (llvm::sortPtrAccesses(PointerOps, *DL, *SE, CurrentOrder)) { 3069 Value *Ptr0; 3070 Value *PtrN; 3071 if (CurrentOrder.empty()) { 3072 Ptr0 = PointerOps.front(); 3073 PtrN = PointerOps.back(); 3074 } else { 3075 Ptr0 = PointerOps[CurrentOrder.front()]; 3076 PtrN = PointerOps[CurrentOrder.back()]; 3077 } 3078 const SCEV *Scev0 = SE->getSCEV(Ptr0); 3079 const SCEV *ScevN = SE->getSCEV(PtrN); 3080 const auto *Diff = 3081 dyn_cast<SCEVConstant>(SE->getMinusSCEV(ScevN, Scev0)); 3082 uint64_t Size = DL->getTypeAllocSize(ScalarTy); 3083 // Check that the sorted pointer operands are consecutive. 
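// Same distance test as for loads: the constant SCEV difference between the
// last and the first pointer must equal (number of stores - 1) * store size.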
3084 if (Diff && Diff->getAPInt() == (VL.size() - 1) * Size) { 3085 if (CurrentOrder.empty()) { 3086 // Original stores are consecutive and does not require reordering. 3087 ++NumOpsWantToKeepOriginalOrder; 3088 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, 3089 UserTreeIdx, ReuseShuffleIndicies); 3090 TE->setOperandsInOrder(); 3091 buildTree_rec(Operands, Depth + 1, {TE, 0}); 3092 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n"); 3093 } else { 3094 TreeEntry *TE = 3095 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3096 ReuseShuffleIndicies, CurrentOrder); 3097 TE->setOperandsInOrder(); 3098 buildTree_rec(Operands, Depth + 1, {TE, 0}); 3099 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n"); 3100 findRootOrder(CurrentOrder); 3101 ++NumOpsWantToKeepOrder[CurrentOrder]; 3102 } 3103 return; 3104 } 3105 } 3106 3107 BS.cancelScheduling(VL, VL0); 3108 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3109 ReuseShuffleIndicies); 3110 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); 3111 return; 3112 } 3113 case Instruction::Call: { 3114 // Check if the calls are all to the same vectorizable intrinsic or 3115 // library function. 3116 CallInst *CI = cast<CallInst>(VL0); 3117 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3118 3119 VFShape Shape = VFShape::get( 3120 *CI, ElementCount::getFixed(static_cast<unsigned int>(VL.size())), 3121 false /*HasGlobalPred*/); 3122 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3123 3124 if (!VecFunc && !isTriviallyVectorizable(ID)) { 3125 BS.cancelScheduling(VL, VL0); 3126 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3127 ReuseShuffleIndicies); 3128 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n"); 3129 return; 3130 } 3131 Function *F = CI->getCalledFunction(); 3132 unsigned NumArgs = CI->getNumArgOperands(); 3133 SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr); 3134 for (unsigned j = 0; j != NumArgs; ++j) 3135 if (hasVectorInstrinsicScalarOpd(ID, j)) 3136 ScalarArgs[j] = CI->getArgOperand(j); 3137 for (Value *V : VL) { 3138 CallInst *CI2 = dyn_cast<CallInst>(V); 3139 if (!CI2 || CI2->getCalledFunction() != F || 3140 getVectorIntrinsicIDForCall(CI2, TLI) != ID || 3141 (VecFunc && 3142 VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) || 3143 !CI->hasIdenticalOperandBundleSchema(*CI2)) { 3144 BS.cancelScheduling(VL, VL0); 3145 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3146 ReuseShuffleIndicies); 3147 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V 3148 << "\n"); 3149 return; 3150 } 3151 // Some intrinsics have scalar arguments and should be same in order for 3152 // them to be vectorized. 3153 for (unsigned j = 0; j != NumArgs; ++j) { 3154 if (hasVectorInstrinsicScalarOpd(ID, j)) { 3155 Value *A1J = CI2->getArgOperand(j); 3156 if (ScalarArgs[j] != A1J) { 3157 BS.cancelScheduling(VL, VL0); 3158 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3159 ReuseShuffleIndicies); 3160 LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI 3161 << " argument " << ScalarArgs[j] << "!=" << A1J 3162 << "\n"); 3163 return; 3164 } 3165 } 3166 } 3167 // Verify that the bundle operands are identical between the two calls. 
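// E.g. two calls carrying "deopt" operand bundles are only vectorized
// together if the deoptimization state they record is element-wise
// identical; any mismatch forces the whole group to be gathered instead.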
3168 if (CI->hasOperandBundles() && 3169 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(), 3170 CI->op_begin() + CI->getBundleOperandsEndIndex(), 3171 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) { 3172 BS.cancelScheduling(VL, VL0); 3173 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3174 ReuseShuffleIndicies); 3175 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" 3176 << *CI << "!=" << *V << '\n'); 3177 return; 3178 } 3179 } 3180 3181 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3182 ReuseShuffleIndicies); 3183 TE->setOperandsInOrder(); 3184 for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) { 3185 ValueList Operands; 3186 // Prepare the operand vector. 3187 for (Value *V : VL) { 3188 auto *CI2 = cast<CallInst>(V); 3189 Operands.push_back(CI2->getArgOperand(i)); 3190 } 3191 buildTree_rec(Operands, Depth + 1, {TE, i}); 3192 } 3193 return; 3194 } 3195 case Instruction::ShuffleVector: { 3196 // If this is not an alternate sequence of opcode like add-sub 3197 // then do not vectorize this instruction. 3198 if (!S.isAltShuffle()) { 3199 BS.cancelScheduling(VL, VL0); 3200 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3201 ReuseShuffleIndicies); 3202 LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n"); 3203 return; 3204 } 3205 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3206 ReuseShuffleIndicies); 3207 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n"); 3208 3209 // Reorder operands if reordering would enable vectorization. 3210 if (isa<BinaryOperator>(VL0)) { 3211 ValueList Left, Right; 3212 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 3213 TE->setOperand(0, Left); 3214 TE->setOperand(1, Right); 3215 buildTree_rec(Left, Depth + 1, {TE, 0}); 3216 buildTree_rec(Right, Depth + 1, {TE, 1}); 3217 return; 3218 } 3219 3220 TE->setOperandsInOrder(); 3221 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 3222 ValueList Operands; 3223 // Prepare the operand vector. 3224 for (Value *V : VL) 3225 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 3226 3227 buildTree_rec(Operands, Depth + 1, {TE, i}); 3228 } 3229 return; 3230 } 3231 default: 3232 BS.cancelScheduling(VL, VL0); 3233 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3234 ReuseShuffleIndicies); 3235 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); 3236 return; 3237 } 3238 } 3239 3240 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const { 3241 unsigned N = 1; 3242 Type *EltTy = T; 3243 3244 while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) || 3245 isa<VectorType>(EltTy)) { 3246 if (auto *ST = dyn_cast<StructType>(EltTy)) { 3247 // Check that struct is homogeneous. 
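// For example, a struct {i32, i32, i32} is homogeneous and multiplies N by
// 3, while {i32, float} mixes element types and makes the whole aggregate
// unmappable (we return 0).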
3248 for (const auto *Ty : ST->elements()) 3249 if (Ty != *ST->element_begin()) 3250 return 0; 3251 N *= ST->getNumElements(); 3252 EltTy = *ST->element_begin(); 3253 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) { 3254 N *= AT->getNumElements(); 3255 EltTy = AT->getElementType(); 3256 } else { 3257 auto *VT = cast<FixedVectorType>(EltTy); 3258 N *= VT->getNumElements(); 3259 EltTy = VT->getElementType(); 3260 } 3261 } 3262 3263 if (!isValidElementType(EltTy)) 3264 return 0; 3265 uint64_t VTSize = DL.getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N)); 3266 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T)) 3267 return 0; 3268 return N; 3269 } 3270 3271 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 3272 SmallVectorImpl<unsigned> &CurrentOrder) const { 3273 Instruction *E0 = cast<Instruction>(OpValue); 3274 assert(E0->getOpcode() == Instruction::ExtractElement || 3275 E0->getOpcode() == Instruction::ExtractValue); 3276 assert(E0->getOpcode() == getSameOpcode(VL).getOpcode() && "Invalid opcode"); 3277 // Check if all of the extracts come from the same vector and from the 3278 // correct offset. 3279 Value *Vec = E0->getOperand(0); 3280 3281 CurrentOrder.clear(); 3282 3283 // We have to extract from a vector/aggregate with the same number of elements. 3284 unsigned NElts; 3285 if (E0->getOpcode() == Instruction::ExtractValue) { 3286 const DataLayout &DL = E0->getModule()->getDataLayout(); 3287 NElts = canMapToVector(Vec->getType(), DL); 3288 if (!NElts) 3289 return false; 3290 // Check if load can be rewritten as load of vector. 3291 LoadInst *LI = dyn_cast<LoadInst>(Vec); 3292 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 3293 return false; 3294 } else { 3295 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); 3296 } 3297 3298 if (NElts != VL.size()) 3299 return false; 3300 3301 // Check that all of the indices extract from the correct offset. 3302 bool ShouldKeepOrder = true; 3303 unsigned E = VL.size(); 3304 // Assign to all items the initial value E + 1 so we can check if the extract 3305 // instruction index was used already. 3306 // Also, later we can check that all the indices are used and we have a 3307 // consecutive access in the extract instructions, by checking that no 3308 // element of CurrentOrder still has value E + 1. 3309 CurrentOrder.assign(E, E + 1); 3310 unsigned I = 0; 3311 for (; I < E; ++I) { 3312 auto *Inst = cast<Instruction>(VL[I]); 3313 if (Inst->getOperand(0) != Vec) 3314 break; 3315 Optional<unsigned> Idx = getExtractIndex(Inst); 3316 if (!Idx) 3317 break; 3318 const unsigned ExtIdx = *Idx; 3319 if (ExtIdx != I) { 3320 if (ExtIdx >= E || CurrentOrder[ExtIdx] != E + 1) 3321 break; 3322 ShouldKeepOrder = false; 3323 CurrentOrder[ExtIdx] = I; 3324 } else { 3325 if (CurrentOrder[I] != E + 1) 3326 break; 3327 CurrentOrder[I] = I; 3328 } 3329 } 3330 if (I < E) { 3331 CurrentOrder.clear(); 3332 return false; 3333 } 3334 3335 return ShouldKeepOrder; 3336 } 3337 3338 bool BoUpSLP::areAllUsersVectorized(Instruction *I) const { 3339 return I->hasOneUse() || 3340 std::all_of(I->user_begin(), I->user_end(), [this](User *U) { 3341 return ScalarToTreeEntry.count(U) > 0; 3342 }); 3343 } 3344 3345 static std::pair<unsigned, unsigned> 3346 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy, 3347 TargetTransformInfo *TTI, TargetLibraryInfo *TLI) { 3348 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3349 3350 // Calculate the cost of the scalar and vector calls. 
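// The returned pair is {cost of the vector intrinsic, cost of the vector
// library call}. Callers pick the cheaper alternative (see the Call cases
// in getEntryCost and vectorizeTree).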
3351 IntrinsicCostAttributes CostAttrs(ID, *CI, VecTy->getNumElements()); 3352 int IntrinsicCost = 3353 TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput); 3354 3355 auto Shape = VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>( 3356 VecTy->getNumElements())), 3357 false /*HasGlobalPred*/); 3358 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3359 int LibCost = IntrinsicCost; 3360 if (!CI->isNoBuiltin() && VecFunc) { 3361 // Calculate the cost of the vector library call. 3362 SmallVector<Type *, 4> VecTys; 3363 for (Use &Arg : CI->args()) 3364 VecTys.push_back( 3365 FixedVectorType::get(Arg->getType(), VecTy->getNumElements())); 3366 3367 // If the corresponding vector call is cheaper, return its cost. 3368 LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys, 3369 TTI::TCK_RecipThroughput); 3370 } 3371 return {IntrinsicCost, LibCost}; 3372 } 3373 3374 int BoUpSLP::getEntryCost(TreeEntry *E) { 3375 ArrayRef<Value*> VL = E->Scalars; 3376 3377 Type *ScalarTy = VL[0]->getType(); 3378 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 3379 ScalarTy = SI->getValueOperand()->getType(); 3380 else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0])) 3381 ScalarTy = CI->getOperand(0)->getType(); 3382 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 3383 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 3384 3385 // If we have computed a smaller type for the expression, update VecTy so 3386 // that the costs will be accurate. 3387 if (MinBWs.count(VL[0])) 3388 VecTy = FixedVectorType::get( 3389 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size()); 3390 3391 unsigned ReuseShuffleNumbers = E->ReuseShuffleIndices.size(); 3392 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 3393 int ReuseShuffleCost = 0; 3394 if (NeedToShuffleReuses) { 3395 ReuseShuffleCost = 3396 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy); 3397 } 3398 if (E->State == TreeEntry::NeedToGather) { 3399 if (allConstant(VL)) 3400 return 0; 3401 if (isSplat(VL)) { 3402 return ReuseShuffleCost + 3403 TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0); 3404 } 3405 if (E->getOpcode() == Instruction::ExtractElement && 3406 allSameType(VL) && allSameBlock(VL)) { 3407 Optional<TargetTransformInfo::ShuffleKind> ShuffleKind = isShuffle(VL); 3408 if (ShuffleKind.hasValue()) { 3409 int Cost = TTI->getShuffleCost(ShuffleKind.getValue(), VecTy); 3410 for (auto *V : VL) { 3411 // If all users of instruction are going to be vectorized and this 3412 // instruction itself is not going to be vectorized, consider this 3413 // instruction as dead and remove its cost from the final cost of the 3414 // vectorized tree. 3415 if (areAllUsersVectorized(cast<Instruction>(V)) && 3416 !ScalarToTreeEntry.count(V)) { 3417 auto *IO = cast<ConstantInt>( 3418 cast<ExtractElementInst>(V)->getIndexOperand()); 3419 Cost -= TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, 3420 IO->getZExtValue()); 3421 } 3422 } 3423 return ReuseShuffleCost + Cost; 3424 } 3425 } 3426 return ReuseShuffleCost + getGatherCost(VL); 3427 } 3428 assert(E->State == TreeEntry::Vectorize && "Unhandled state"); 3429 assert(E->getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL"); 3430 Instruction *VL0 = E->getMainOp(); 3431 unsigned ShuffleOrOp = 3432 E->isAltShuffle() ? 
(unsigned)Instruction::ShuffleVector : E->getOpcode(); 3433 switch (ShuffleOrOp) { 3434 case Instruction::PHI: 3435 return 0; 3436 3437 case Instruction::ExtractValue: 3438 case Instruction::ExtractElement: { 3439 if (NeedToShuffleReuses) { 3440 unsigned Idx = 0; 3441 for (unsigned I : E->ReuseShuffleIndices) { 3442 if (ShuffleOrOp == Instruction::ExtractElement) { 3443 auto *IO = cast<ConstantInt>( 3444 cast<ExtractElementInst>(VL[I])->getIndexOperand()); 3445 Idx = IO->getZExtValue(); 3446 ReuseShuffleCost -= TTI->getVectorInstrCost( 3447 Instruction::ExtractElement, VecTy, Idx); 3448 } else { 3449 ReuseShuffleCost -= TTI->getVectorInstrCost( 3450 Instruction::ExtractElement, VecTy, Idx); 3451 ++Idx; 3452 } 3453 } 3454 Idx = ReuseShuffleNumbers; 3455 for (Value *V : VL) { 3456 if (ShuffleOrOp == Instruction::ExtractElement) { 3457 auto *IO = cast<ConstantInt>( 3458 cast<ExtractElementInst>(V)->getIndexOperand()); 3459 Idx = IO->getZExtValue(); 3460 } else { 3461 --Idx; 3462 } 3463 ReuseShuffleCost += 3464 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, Idx); 3465 } 3466 } 3467 int DeadCost = ReuseShuffleCost; 3468 if (!E->ReorderIndices.empty()) { 3469 // TODO: Merge this shuffle with the ReuseShuffleCost. 3470 DeadCost += TTI->getShuffleCost( 3471 TargetTransformInfo::SK_PermuteSingleSrc, VecTy); 3472 } 3473 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 3474 Instruction *E = cast<Instruction>(VL[i]); 3475 // If all users are going to be vectorized, instruction can be 3476 // considered as dead. 3477 // The same, if have only one user, it will be vectorized for sure. 3478 if (areAllUsersVectorized(E)) { 3479 // Take credit for instruction that will become dead. 3480 if (E->hasOneUse()) { 3481 Instruction *Ext = E->user_back(); 3482 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 3483 all_of(Ext->users(), 3484 [](User *U) { return isa<GetElementPtrInst>(U); })) { 3485 // Use getExtractWithExtendCost() to calculate the cost of 3486 // extractelement/ext pair. 3487 DeadCost -= TTI->getExtractWithExtendCost( 3488 Ext->getOpcode(), Ext->getType(), VecTy, i); 3489 // Add back the cost of s|zext which is subtracted separately. 3490 DeadCost += TTI->getCastInstrCost( 3491 Ext->getOpcode(), Ext->getType(), E->getType(), 3492 TTI::getCastContextHint(Ext), CostKind, Ext); 3493 continue; 3494 } 3495 } 3496 DeadCost -= 3497 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i); 3498 } 3499 } 3500 return DeadCost; 3501 } 3502 case Instruction::ZExt: 3503 case Instruction::SExt: 3504 case Instruction::FPToUI: 3505 case Instruction::FPToSI: 3506 case Instruction::FPExt: 3507 case Instruction::PtrToInt: 3508 case Instruction::IntToPtr: 3509 case Instruction::SIToFP: 3510 case Instruction::UIToFP: 3511 case Instruction::Trunc: 3512 case Instruction::FPTrunc: 3513 case Instruction::BitCast: { 3514 Type *SrcTy = VL0->getOperand(0)->getType(); 3515 int ScalarEltCost = 3516 TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy, 3517 TTI::getCastContextHint(VL0), CostKind, VL0); 3518 if (NeedToShuffleReuses) { 3519 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 3520 } 3521 3522 // Calculate the cost of this instruction. 3523 int ScalarCost = VL.size() * ScalarEltCost; 3524 3525 auto *SrcVecTy = FixedVectorType::get(SrcTy, VL.size()); 3526 int VecCost = 0; 3527 // Check if the values are candidates to demote. 
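// If VL0 was marked for demotion to a narrower integer type and the demoted
// destination type equals the source type, the cast becomes a no-op and no
// vector cast cost is added. E.g. a "zext i8 to i32" whose whole expression
// is later performed in i8 is free once vectorized.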
3528 if (!MinBWs.count(VL0) || VecTy != SrcVecTy) { 3529 VecCost = 3530 ReuseShuffleCost + 3531 TTI->getCastInstrCost(E->getOpcode(), VecTy, SrcVecTy, 3532 TTI::getCastContextHint(VL0), CostKind, VL0); 3533 } 3534 return VecCost - ScalarCost; 3535 } 3536 case Instruction::FCmp: 3537 case Instruction::ICmp: 3538 case Instruction::Select: { 3539 // Calculate the cost of this instruction. 3540 int ScalarEltCost = TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, 3541 Builder.getInt1Ty(), 3542 CostKind, VL0); 3543 if (NeedToShuffleReuses) { 3544 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 3545 } 3546 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size()); 3547 int ScalarCost = VecTy->getNumElements() * ScalarEltCost; 3548 int VecCost = TTI->getCmpSelInstrCost(E->getOpcode(), VecTy, MaskTy, 3549 CostKind, VL0); 3550 return ReuseShuffleCost + VecCost - ScalarCost; 3551 } 3552 case Instruction::FNeg: 3553 case Instruction::Add: 3554 case Instruction::FAdd: 3555 case Instruction::Sub: 3556 case Instruction::FSub: 3557 case Instruction::Mul: 3558 case Instruction::FMul: 3559 case Instruction::UDiv: 3560 case Instruction::SDiv: 3561 case Instruction::FDiv: 3562 case Instruction::URem: 3563 case Instruction::SRem: 3564 case Instruction::FRem: 3565 case Instruction::Shl: 3566 case Instruction::LShr: 3567 case Instruction::AShr: 3568 case Instruction::And: 3569 case Instruction::Or: 3570 case Instruction::Xor: { 3571 // Certain instructions can be cheaper to vectorize if they have a 3572 // constant second vector operand. 3573 TargetTransformInfo::OperandValueKind Op1VK = 3574 TargetTransformInfo::OK_AnyValue; 3575 TargetTransformInfo::OperandValueKind Op2VK = 3576 TargetTransformInfo::OK_UniformConstantValue; 3577 TargetTransformInfo::OperandValueProperties Op1VP = 3578 TargetTransformInfo::OP_None; 3579 TargetTransformInfo::OperandValueProperties Op2VP = 3580 TargetTransformInfo::OP_PowerOf2; 3581 3582 // If all operands are exactly the same ConstantInt then set the 3583 // operand kind to OK_UniformConstantValue. 3584 // If instead not all operands are constants, then set the operand kind 3585 // to OK_AnyValue. If all operands are constants but not the same, 3586 // then set the operand kind to OK_NonUniformConstantValue. 3587 ConstantInt *CInt0 = nullptr; 3588 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 3589 const Instruction *I = cast<Instruction>(VL[i]); 3590 unsigned OpIdx = isa<BinaryOperator>(I) ? 
1 : 0; 3591 ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx)); 3592 if (!CInt) { 3593 Op2VK = TargetTransformInfo::OK_AnyValue; 3594 Op2VP = TargetTransformInfo::OP_None; 3595 break; 3596 } 3597 if (Op2VP == TargetTransformInfo::OP_PowerOf2 && 3598 !CInt->getValue().isPowerOf2()) 3599 Op2VP = TargetTransformInfo::OP_None; 3600 if (i == 0) { 3601 CInt0 = CInt; 3602 continue; 3603 } 3604 if (CInt0 != CInt) 3605 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 3606 } 3607 3608 SmallVector<const Value *, 4> Operands(VL0->operand_values()); 3609 int ScalarEltCost = TTI->getArithmeticInstrCost( 3610 E->getOpcode(), ScalarTy, CostKind, Op1VK, Op2VK, Op1VP, Op2VP, 3611 Operands, VL0); 3612 if (NeedToShuffleReuses) { 3613 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 3614 } 3615 int ScalarCost = VecTy->getNumElements() * ScalarEltCost; 3616 int VecCost = TTI->getArithmeticInstrCost( 3617 E->getOpcode(), VecTy, CostKind, Op1VK, Op2VK, Op1VP, Op2VP, 3618 Operands, VL0); 3619 return ReuseShuffleCost + VecCost - ScalarCost; 3620 } 3621 case Instruction::GetElementPtr: { 3622 TargetTransformInfo::OperandValueKind Op1VK = 3623 TargetTransformInfo::OK_AnyValue; 3624 TargetTransformInfo::OperandValueKind Op2VK = 3625 TargetTransformInfo::OK_UniformConstantValue; 3626 3627 int ScalarEltCost = 3628 TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, CostKind, 3629 Op1VK, Op2VK); 3630 if (NeedToShuffleReuses) { 3631 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 3632 } 3633 int ScalarCost = VecTy->getNumElements() * ScalarEltCost; 3634 int VecCost = 3635 TTI->getArithmeticInstrCost(Instruction::Add, VecTy, CostKind, 3636 Op1VK, Op2VK); 3637 return ReuseShuffleCost + VecCost - ScalarCost; 3638 } 3639 case Instruction::Load: { 3640 // Cost of wide load - cost of scalar loads. 3641 Align alignment = cast<LoadInst>(VL0)->getAlign(); 3642 int ScalarEltCost = 3643 TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0, 3644 CostKind, VL0); 3645 if (NeedToShuffleReuses) { 3646 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 3647 } 3648 int ScalarLdCost = VecTy->getNumElements() * ScalarEltCost; 3649 int VecLdCost = 3650 TTI->getMemoryOpCost(Instruction::Load, VecTy, alignment, 0, 3651 CostKind, VL0); 3652 if (!E->ReorderIndices.empty()) { 3653 // TODO: Merge this shuffle with the ReuseShuffleCost. 3654 VecLdCost += TTI->getShuffleCost( 3655 TargetTransformInfo::SK_PermuteSingleSrc, VecTy); 3656 } 3657 return ReuseShuffleCost + VecLdCost - ScalarLdCost; 3658 } 3659 case Instruction::Store: { 3660 // We know that we can merge the stores. Calculate the cost. 3661 bool IsReorder = !E->ReorderIndices.empty(); 3662 auto *SI = 3663 cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0); 3664 Align Alignment = SI->getAlign(); 3665 int ScalarEltCost = 3666 TTI->getMemoryOpCost(Instruction::Store, ScalarTy, Alignment, 0, 3667 CostKind, VL0); 3668 if (NeedToShuffleReuses) 3669 ReuseShuffleCost = -(ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 3670 int ScalarStCost = VecTy->getNumElements() * ScalarEltCost; 3671 int VecStCost = TTI->getMemoryOpCost(Instruction::Store, 3672 VecTy, Alignment, 0, CostKind, VL0); 3673 if (IsReorder) { 3674 // TODO: Merge this shuffle with the ReuseShuffleCost. 
3675 VecStCost += TTI->getShuffleCost( 3676 TargetTransformInfo::SK_PermuteSingleSrc, VecTy); 3677 } 3678 return ReuseShuffleCost + VecStCost - ScalarStCost; 3679 } 3680 case Instruction::Call: { 3681 CallInst *CI = cast<CallInst>(VL0); 3682 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3683 3684 // Calculate the cost of the scalar and vector calls. 3685 IntrinsicCostAttributes CostAttrs(ID, *CI, 1, 1); 3686 int ScalarEltCost = TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 3687 if (NeedToShuffleReuses) { 3688 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 3689 } 3690 int ScalarCallCost = VecTy->getNumElements() * ScalarEltCost; 3691 3692 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 3693 int VecCallCost = std::min(VecCallCosts.first, VecCallCosts.second); 3694 3695 LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost 3696 << " (" << VecCallCost << "-" << ScalarCallCost << ")" 3697 << " for " << *CI << "\n"); 3698 3699 return ReuseShuffleCost + VecCallCost - ScalarCallCost; 3700 } 3701 case Instruction::ShuffleVector: { 3702 assert(E->isAltShuffle() && 3703 ((Instruction::isBinaryOp(E->getOpcode()) && 3704 Instruction::isBinaryOp(E->getAltOpcode())) || 3705 (Instruction::isCast(E->getOpcode()) && 3706 Instruction::isCast(E->getAltOpcode()))) && 3707 "Invalid Shuffle Vector Operand"); 3708 int ScalarCost = 0; 3709 if (NeedToShuffleReuses) { 3710 for (unsigned Idx : E->ReuseShuffleIndices) { 3711 Instruction *I = cast<Instruction>(VL[Idx]); 3712 ReuseShuffleCost -= TTI->getInstructionCost(I, CostKind); 3713 } 3714 for (Value *V : VL) { 3715 Instruction *I = cast<Instruction>(V); 3716 ReuseShuffleCost += TTI->getInstructionCost(I, CostKind); 3717 } 3718 } 3719 for (Value *V : VL) { 3720 Instruction *I = cast<Instruction>(V); 3721 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 3722 ScalarCost += TTI->getInstructionCost(I, CostKind); 3723 } 3724 // VecCost is equal to sum of the cost of creating 2 vectors 3725 // and the cost of creating shuffle. 3726 int VecCost = 0; 3727 if (Instruction::isBinaryOp(E->getOpcode())) { 3728 VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind); 3729 VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy, 3730 CostKind); 3731 } else { 3732 Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType(); 3733 Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType(); 3734 auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size()); 3735 auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size()); 3736 VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty, 3737 TTI::CastContextHint::None, CostKind); 3738 VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty, 3739 TTI::CastContextHint::None, CostKind); 3740 } 3741 VecCost += TTI->getShuffleCost(TargetTransformInfo::SK_Select, VecTy, 0); 3742 return ReuseShuffleCost + VecCost - ScalarCost; 3743 } 3744 default: 3745 llvm_unreachable("Unknown instruction"); 3746 } 3747 } 3748 3749 bool BoUpSLP::isFullyVectorizableTinyTree() const { 3750 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height " 3751 << VectorizableTree.size() << " is fully vectorizable .\n"); 3752 3753 // We only handle trees of heights 1 and 2. 3754 if (VectorizableTree.size() == 1 && 3755 VectorizableTree[0]->State == TreeEntry::Vectorize) 3756 return true; 3757 3758 if (VectorizableTree.size() != 2) 3759 return false; 3760 3761 // Handle splat and all-constants stores. 
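// E.g. a two-entry tree whose first entry is vectorized (typically a bundle
// of stores) and whose second entry is a splat of one scalar or a set of
// constants is still treated as fully vectorizable, even if that second
// entry would otherwise have to be gathered.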
3762 if (VectorizableTree[0]->State == TreeEntry::Vectorize && 3763 (allConstant(VectorizableTree[1]->Scalars) || 3764 isSplat(VectorizableTree[1]->Scalars))) 3765 return true; 3766 3767 // Gathering cost would be too much for tiny trees. 3768 if (VectorizableTree[0]->State == TreeEntry::NeedToGather || 3769 VectorizableTree[1]->State == TreeEntry::NeedToGather) 3770 return false; 3771 3772 return true; 3773 } 3774 3775 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts, 3776 TargetTransformInfo *TTI) { 3777 // Look past the root to find a source value. Arbitrarily follow the 3778 // path through operand 0 of any 'or'. Also, peek through optional 3779 // shift-left-by-multiple-of-8-bits. 3780 Value *ZextLoad = Root; 3781 const APInt *ShAmtC; 3782 while (!isa<ConstantExpr>(ZextLoad) && 3783 (match(ZextLoad, m_Or(m_Value(), m_Value())) || 3784 (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) && 3785 ShAmtC->urem(8) == 0))) 3786 ZextLoad = cast<BinaryOperator>(ZextLoad)->getOperand(0); 3787 3788 // Check if the input is an extended load of the required or/shift expression. 3789 Value *LoadPtr; 3790 if (ZextLoad == Root || !match(ZextLoad, m_ZExt(m_Load(m_Value(LoadPtr))))) 3791 return false; 3792 3793 // Require that the total load bit width is a legal integer type. 3794 // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target. 3795 // But <16 x i8> --> i128 is not, so the backend probably can't reduce it. 3796 Type *SrcTy = LoadPtr->getType()->getPointerElementType(); 3797 unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts; 3798 if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth))) 3799 return false; 3800 3801 // Everything matched - assume that we can fold the whole sequence using 3802 // load combining. 3803 LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at " 3804 << *(cast<Instruction>(Root)) << "\n"); 3805 3806 return true; 3807 } 3808 3809 bool BoUpSLP::isLoadCombineReductionCandidate(unsigned RdxOpcode) const { 3810 if (RdxOpcode != Instruction::Or) 3811 return false; 3812 3813 unsigned NumElts = VectorizableTree[0]->Scalars.size(); 3814 Value *FirstReduced = VectorizableTree[0]->Scalars[0]; 3815 return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI); 3816 } 3817 3818 bool BoUpSLP::isLoadCombineCandidate() const { 3819 // Peek through a final sequence of stores and check if all operations are 3820 // likely to be load-combined. 3821 unsigned NumElts = VectorizableTree[0]->Scalars.size(); 3822 for (Value *Scalar : VectorizableTree[0]->Scalars) { 3823 Value *X; 3824 if (!match(Scalar, m_Store(m_Value(X), m_Value())) || 3825 !isLoadCombineCandidateImpl(X, NumElts, TTI)) 3826 return false; 3827 } 3828 return true; 3829 } 3830 3831 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() const { 3832 // We can vectorize the tree if its size is greater than or equal to the 3833 // minimum size specified by the MinTreeSize command line option. 3834 if (VectorizableTree.size() >= MinTreeSize) 3835 return false; 3836 3837 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we 3838 // can vectorize it if we can prove it fully vectorizable. 3839 if (isFullyVectorizableTinyTree()) 3840 return false; 3841 3842 assert(VectorizableTree.empty() 3843 ? ExternalUses.empty() 3844 : true && "We shouldn't have any external users"); 3845 3846 // Otherwise, we can't vectorize the tree. It is both tiny and not fully 3847 // vectorizable. 
3847 // vectorizable.
3848 return true; 3849 } 3850 3851 int BoUpSLP::getSpillCost() const { 3852 // Walk from the bottom of the tree to the top, tracking which values are 3853 // live. When we see a call instruction that is not part of our tree, 3854 // query TTI to see if there is a cost to keeping values live over it 3855 // (for example, if spills and fills are required). 3856 unsigned BundleWidth = VectorizableTree.front()->Scalars.size(); 3857 int Cost = 0; 3858 3859 SmallPtrSet<Instruction*, 4> LiveValues; 3860 Instruction *PrevInst = nullptr; 3861 3862 // The entries in VectorizableTree are not necessarily ordered by their 3863 // position in basic blocks. Collect them and order them by dominance so later 3864 // instructions are guaranteed to be visited first. For instructions in 3865 // different basic blocks, we only scan to the beginning of the block, so 3866 // their order does not matter, as long as all instructions in a basic block 3867 // are grouped together. Using dominance ensures a deterministic order. 3868 SmallVector<Instruction *, 16> OrderedScalars; 3869 for (const auto &TEPtr : VectorizableTree) { 3870 Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]); 3871 if (!Inst) 3872 continue; 3873 OrderedScalars.push_back(Inst); 3874 } 3875 llvm::stable_sort(OrderedScalars, [this](Instruction *A, Instruction *B) { 3876 return DT->dominates(B, A); 3877 }); 3878 3879 for (Instruction *Inst : OrderedScalars) { 3880 if (!PrevInst) { 3881 PrevInst = Inst; 3882 continue; 3883 } 3884 3885 // Update LiveValues. 3886 LiveValues.erase(PrevInst); 3887 for (auto &J : PrevInst->operands()) { 3888 if (isa<Instruction>(&*J) && getTreeEntry(&*J)) 3889 LiveValues.insert(cast<Instruction>(&*J)); 3890 } 3891 3892 LLVM_DEBUG({ 3893 dbgs() << "SLP: #LV: " << LiveValues.size(); 3894 for (auto *X : LiveValues) 3895 dbgs() << " " << X->getName(); 3896 dbgs() << ", Looking at "; 3897 Inst->dump(); 3898 }); 3899 3900 // Now find the sequence of instructions between PrevInst and Inst. 3901 unsigned NumCalls = 0; 3902 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(), 3903 PrevInstIt = 3904 PrevInst->getIterator().getReverse(); 3905 while (InstIt != PrevInstIt) { 3906 if (PrevInstIt == PrevInst->getParent()->rend()) { 3907 PrevInstIt = Inst->getParent()->rbegin(); 3908 continue; 3909 } 3910 3911 // Debug information does not impact spill cost. 3912 if ((isa<CallInst>(&*PrevInstIt) && 3913 !isa<DbgInfoIntrinsic>(&*PrevInstIt)) && 3914 &*PrevInstIt != PrevInst) 3915 NumCalls++; 3916 3917 ++PrevInstIt; 3918 } 3919 3920 if (NumCalls) { 3921 SmallVector<Type*, 4> V; 3922 for (auto *II : LiveValues) 3923 V.push_back(FixedVectorType::get(II->getType(), BundleWidth)); 3924 Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V); 3925 } 3926 3927 PrevInst = Inst; 3928 } 3929 3930 return Cost; 3931 } 3932 3933 int BoUpSLP::getTreeCost() { 3934 int Cost = 0; 3935 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size " 3936 << VectorizableTree.size() << ".\n"); 3937 3938 unsigned BundleWidth = VectorizableTree[0]->Scalars.size(); 3939 3940 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) { 3941 TreeEntry &TE = *VectorizableTree[I].get(); 3942 3943 // We create duplicate tree entries for gather sequences that have multiple 3944 // uses. However, we should not compute the cost of duplicate sequences. 
3945 // For example, if we have a build vector (i.e., insertelement sequence) 3946 // that is used by more than one vector instruction, we only need to 3947 // compute the cost of the insertelement instructions once. The redundant 3948 // instructions will be eliminated by CSE. 3949 // 3950 // We should consider not creating duplicate tree entries for gather 3951 // sequences, and instead add additional edges to the tree representing 3952 // their uses. Since such an approach results in fewer total entries, 3953 // existing heuristics based on tree size may yield different results. 3954 // 3955 if (TE.State == TreeEntry::NeedToGather && 3956 std::any_of(std::next(VectorizableTree.begin(), I + 1), 3957 VectorizableTree.end(), 3958 [TE](const std::unique_ptr<TreeEntry> &EntryPtr) { 3959 return EntryPtr->State == TreeEntry::NeedToGather && 3960 EntryPtr->isSame(TE.Scalars); 3961 })) 3962 continue; 3963 3964 int C = getEntryCost(&TE); 3965 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 3966 << " for bundle that starts with " << *TE.Scalars[0] 3967 << ".\n"); 3968 Cost += C; 3969 } 3970 3971 SmallPtrSet<Value *, 16> ExtractCostCalculated; 3972 int ExtractCost = 0; 3973 for (ExternalUser &EU : ExternalUses) { 3974 // We only add extract cost once for the same scalar. 3975 if (!ExtractCostCalculated.insert(EU.Scalar).second) 3976 continue; 3977 3978 // Uses by ephemeral values are free (because the ephemeral value will be 3979 // removed prior to code generation, and so the extraction will be 3980 // removed as well). 3981 if (EphValues.count(EU.User)) 3982 continue; 3983 3984 // If we plan to rewrite the tree in a smaller type, we will need to sign 3985 // extend the extracted value back to the original type. Here, we account 3986 // for the extract and the added cost of the sign extend if needed. 3987 auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth); 3988 auto *ScalarRoot = VectorizableTree[0]->Scalars[0]; 3989 if (MinBWs.count(ScalarRoot)) { 3990 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 3991 auto Extend = 3992 MinBWs[ScalarRoot].second ? 
Instruction::SExt : Instruction::ZExt; 3993 VecTy = FixedVectorType::get(MinTy, BundleWidth); 3994 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), 3995 VecTy, EU.Lane); 3996 } else { 3997 ExtractCost += 3998 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane); 3999 } 4000 } 4001 4002 int SpillCost = getSpillCost(); 4003 Cost += SpillCost + ExtractCost; 4004 4005 #ifndef NDEBUG 4006 SmallString<256> Str; 4007 { 4008 raw_svector_ostream OS(Str); 4009 OS << "SLP: Spill Cost = " << SpillCost << ".\n" 4010 << "SLP: Extract Cost = " << ExtractCost << ".\n" 4011 << "SLP: Total Cost = " << Cost << ".\n"; 4012 } 4013 LLVM_DEBUG(dbgs() << Str); 4014 if (ViewSLPTree) 4015 ViewGraph(this, "SLP" + F->getName(), false, Str); 4016 #endif 4017 4018 return Cost; 4019 } 4020 4021 int BoUpSLP::getGatherCost(FixedVectorType *Ty, 4022 const DenseSet<unsigned> &ShuffledIndices) const { 4023 unsigned NumElts = Ty->getNumElements(); 4024 APInt DemandedElts = APInt::getNullValue(NumElts); 4025 for (unsigned i = 0; i < NumElts; ++i) 4026 if (!ShuffledIndices.count(i)) 4027 DemandedElts.setBit(i); 4028 int Cost = TTI->getScalarizationOverhead(Ty, DemandedElts, /*Insert*/ true, 4029 /*Extract*/ false); 4030 if (!ShuffledIndices.empty()) 4031 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty); 4032 return Cost; 4033 } 4034 4035 int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const { 4036 // Find the type of the operands in VL. 4037 Type *ScalarTy = VL[0]->getType(); 4038 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 4039 ScalarTy = SI->getValueOperand()->getType(); 4040 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 4041 // Find the cost of inserting/extracting values from the vector. 4042 // Check if the same elements are inserted several times and count them as 4043 // shuffle candidates. 4044 DenseSet<unsigned> ShuffledElements; 4045 DenseSet<Value *> UniqueElements; 4046 // Iterate in reverse order to consider insert elements with the high cost. 4047 for (unsigned I = VL.size(); I > 0; --I) { 4048 unsigned Idx = I - 1; 4049 if (!UniqueElements.insert(VL[Idx]).second) 4050 ShuffledElements.insert(Idx); 4051 } 4052 return getGatherCost(VecTy, ShuffledElements); 4053 } 4054 4055 // Perform operand reordering on the instructions in VL and return the reordered 4056 // operands in Left and Right. 4057 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, 4058 SmallVectorImpl<Value *> &Left, 4059 SmallVectorImpl<Value *> &Right, 4060 const DataLayout &DL, 4061 ScalarEvolution &SE, 4062 const BoUpSLP &R) { 4063 if (VL.empty()) 4064 return; 4065 VLOperands Ops(VL, DL, SE, R); 4066 // Reorder the operands in place. 4067 Ops.reorder(); 4068 Left = Ops.getVL(0); 4069 Right = Ops.getVL(1); 4070 } 4071 4072 void BoUpSLP::setInsertPointAfterBundle(TreeEntry *E) { 4073 // Get the basic block this bundle is in. All instructions in the bundle 4074 // should be in this block. 4075 auto *Front = E->getMainOp(); 4076 auto *BB = Front->getParent(); 4077 assert(llvm::all_of(make_range(E->Scalars.begin(), E->Scalars.end()), 4078 [=](Value *V) -> bool { 4079 auto *I = cast<Instruction>(V); 4080 return !E->isOpcodeOrAlt(I) || I->getParent() == BB; 4081 })); 4082 4083 // The last instruction in the bundle in program order. 4084 Instruction *LastInst = nullptr; 4085 4086 // Find the last instruction. The common case should be that BB has been 4087 // scheduled, and the last instruction is VL.back(). 
So we start with 4088 // VL.back() and iterate over schedule data until we reach the end of the 4089 // bundle. The end of the bundle is marked by null ScheduleData. 4090 if (BlocksSchedules.count(BB)) { 4091 auto *Bundle = 4092 BlocksSchedules[BB]->getScheduleData(E->isOneOf(E->Scalars.back())); 4093 if (Bundle && Bundle->isPartOfBundle()) 4094 for (; Bundle; Bundle = Bundle->NextInBundle) 4095 if (Bundle->OpValue == Bundle->Inst) 4096 LastInst = Bundle->Inst; 4097 } 4098 4099 // LastInst can still be null at this point if there's either not an entry 4100 // for BB in BlocksSchedules or there's no ScheduleData available for 4101 // VL.back(). This can be the case if buildTree_rec aborts for various 4102 // reasons (e.g., the maximum recursion depth is reached, the maximum region 4103 // size is reached, etc.). ScheduleData is initialized in the scheduling 4104 // "dry-run". 4105 // 4106 // If this happens, we can still find the last instruction by brute force. We 4107 // iterate forwards from Front (inclusive) until we either see all 4108 // instructions in the bundle or reach the end of the block. If Front is the 4109 // last instruction in program order, LastInst will be set to Front, and we 4110 // will visit all the remaining instructions in the block. 4111 // 4112 // One of the reasons we exit early from buildTree_rec is to place an upper 4113 // bound on compile-time. Thus, taking an additional compile-time hit here is 4114 // not ideal. However, this should be exceedingly rare since it requires that 4115 // we both exit early from buildTree_rec and that the bundle be out-of-order 4116 // (causing us to iterate all the way to the end of the block). 4117 if (!LastInst) { 4118 SmallPtrSet<Value *, 16> Bundle(E->Scalars.begin(), E->Scalars.end()); 4119 for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) { 4120 if (Bundle.erase(&I) && E->isOpcodeOrAlt(&I)) 4121 LastInst = &I; 4122 if (Bundle.empty()) 4123 break; 4124 } 4125 } 4126 assert(LastInst && "Failed to find last instruction in bundle"); 4127 4128 // Set the insertion point after the last instruction in the bundle. Set the 4129 // debug location to Front. 4130 Builder.SetInsertPoint(BB, ++LastInst->getIterator()); 4131 Builder.SetCurrentDebugLocation(Front->getDebugLoc()); 4132 } 4133 4134 Value *BoUpSLP::Gather(ArrayRef<Value *> VL, FixedVectorType *Ty) { 4135 Value *Vec = UndefValue::get(Ty); 4136 // Generate the 'InsertElement' instruction. 4137 for (unsigned i = 0; i < Ty->getNumElements(); ++i) { 4138 Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i)); 4139 if (auto *Insrt = dyn_cast<InsertElementInst>(Vec)) { 4140 GatherSeq.insert(Insrt); 4141 CSEBlocks.insert(Insrt->getParent()); 4142 4143 // Add to our 'need-to-extract' list. 4144 if (TreeEntry *E = getTreeEntry(VL[i])) { 4145 // Find which lane we need to extract. 4146 int FoundLane = -1; 4147 for (unsigned Lane = 0, LE = E->Scalars.size(); Lane != LE; ++Lane) { 4148 // Is this the lane of the scalar that we are looking for ? 
4149 if (E->Scalars[Lane] == VL[i]) { 4150 FoundLane = Lane; 4151 break; 4152 } 4153 } 4154 assert(FoundLane >= 0 && "Could not find the correct lane"); 4155 if (!E->ReuseShuffleIndices.empty()) { 4156 FoundLane = 4157 std::distance(E->ReuseShuffleIndices.begin(), 4158 llvm::find(E->ReuseShuffleIndices, FoundLane)); 4159 } 4160 ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane)); 4161 } 4162 } 4163 } 4164 4165 return Vec; 4166 } 4167 4168 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) { 4169 InstructionsState S = getSameOpcode(VL); 4170 if (S.getOpcode()) { 4171 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 4172 if (E->isSame(VL)) { 4173 Value *V = vectorizeTree(E); 4174 if (VL.size() == E->Scalars.size() && !E->ReuseShuffleIndices.empty()) { 4175 // We need to get the vectorized value but without shuffle. 4176 if (auto *SV = dyn_cast<ShuffleVectorInst>(V)) { 4177 V = SV->getOperand(0); 4178 } else { 4179 // Reshuffle to get only unique values. 4180 SmallVector<int, 4> UniqueIdxs; 4181 SmallSet<int, 4> UsedIdxs; 4182 for (int Idx : E->ReuseShuffleIndices) 4183 if (UsedIdxs.insert(Idx).second) 4184 UniqueIdxs.emplace_back(Idx); 4185 V = Builder.CreateShuffleVector(V, UndefValue::get(V->getType()), 4186 UniqueIdxs); 4187 } 4188 } 4189 return V; 4190 } 4191 } 4192 } 4193 4194 Type *ScalarTy = S.OpValue->getType(); 4195 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 4196 ScalarTy = SI->getValueOperand()->getType(); 4197 4198 // Check that every instruction appears once in this bundle. 4199 SmallVector<int, 4> ReuseShuffleIndicies; 4200 SmallVector<Value *, 4> UniqueValues; 4201 if (VL.size() > 2) { 4202 DenseMap<Value *, unsigned> UniquePositions; 4203 for (Value *V : VL) { 4204 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 4205 ReuseShuffleIndicies.emplace_back(Res.first->second); 4206 if (Res.second || isa<Constant>(V)) 4207 UniqueValues.emplace_back(V); 4208 } 4209 // Do not shuffle single element or if number of unique values is not power 4210 // of 2. 
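// For example, VL = {a, b, a, b} has two unique values, so only <a, b> is
// gathered and ReuseShuffleIndicies = {0, 1, 0, 1} rebuilds the four-wide
// vector with one shuffle. A single unique value, or a unique count that is
// not a power of 2, keeps VL as-is and gathers every element.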
4211 if (UniqueValues.size() == VL.size() || UniqueValues.size() <= 1 || 4212 !llvm::isPowerOf2_32(UniqueValues.size())) 4213 ReuseShuffleIndicies.clear(); 4214 else 4215 VL = UniqueValues; 4216 } 4217 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 4218 4219 Value *V = Gather(VL, VecTy); 4220 if (!ReuseShuffleIndicies.empty()) { 4221 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4222 ReuseShuffleIndicies, "shuffle"); 4223 if (auto *I = dyn_cast<Instruction>(V)) { 4224 GatherSeq.insert(I); 4225 CSEBlocks.insert(I->getParent()); 4226 } 4227 } 4228 return V; 4229 } 4230 4231 Value *BoUpSLP::vectorizeTree(TreeEntry *E) { 4232 IRBuilder<>::InsertPointGuard Guard(Builder); 4233 4234 if (E->VectorizedValue) { 4235 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); 4236 return E->VectorizedValue; 4237 } 4238 4239 Instruction *VL0 = E->getMainOp(); 4240 Type *ScalarTy = VL0->getType(); 4241 if (StoreInst *SI = dyn_cast<StoreInst>(VL0)) 4242 ScalarTy = SI->getValueOperand()->getType(); 4243 auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size()); 4244 4245 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 4246 4247 if (E->State == TreeEntry::NeedToGather) { 4248 setInsertPointAfterBundle(E); 4249 auto *V = Gather(E->Scalars, VecTy); 4250 if (NeedToShuffleReuses) { 4251 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4252 E->ReuseShuffleIndices, "shuffle"); 4253 if (auto *I = dyn_cast<Instruction>(V)) { 4254 GatherSeq.insert(I); 4255 CSEBlocks.insert(I->getParent()); 4256 } 4257 } 4258 E->VectorizedValue = V; 4259 return V; 4260 } 4261 4262 assert(E->State == TreeEntry::Vectorize && "Unhandled state"); 4263 unsigned ShuffleOrOp = 4264 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode(); 4265 switch (ShuffleOrOp) { 4266 case Instruction::PHI: { 4267 auto *PH = cast<PHINode>(VL0); 4268 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI()); 4269 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 4270 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 4271 Value *V = NewPhi; 4272 if (NeedToShuffleReuses) { 4273 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4274 E->ReuseShuffleIndices, "shuffle"); 4275 } 4276 E->VectorizedValue = V; 4277 4278 // PHINodes may have multiple entries from the same block. We want to 4279 // visit every block once. 4280 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 4281 4282 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 4283 ValueList Operands; 4284 BasicBlock *IBB = PH->getIncomingBlock(i); 4285 4286 if (!VisitedBBs.insert(IBB).second) { 4287 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 4288 continue; 4289 } 4290 4291 Builder.SetInsertPoint(IBB->getTerminator()); 4292 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 4293 Value *Vec = vectorizeTree(E->getOperand(i)); 4294 NewPhi->addIncoming(Vec, IBB); 4295 } 4296 4297 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 4298 "Invalid number of incoming values"); 4299 return V; 4300 } 4301 4302 case Instruction::ExtractElement: { 4303 Value *V = E->getSingleOperand(0); 4304 if (!E->ReorderIndices.empty()) { 4305 SmallVector<int, 4> Mask; 4306 inversePermutation(E->ReorderIndices, Mask); 4307 Builder.SetInsertPoint(VL0); 4308 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), Mask, 4309 "reorder_shuffle"); 4310 } 4311 if (NeedToShuffleReuses) { 4312 // TODO: Merge this shuffle with the ReorderShuffleMask. 
4313 if (E->ReorderIndices.empty()) 4314 Builder.SetInsertPoint(VL0); 4315 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4316 E->ReuseShuffleIndices, "shuffle"); 4317 } 4318 E->VectorizedValue = V; 4319 return V; 4320 } 4321 case Instruction::ExtractValue: { 4322 LoadInst *LI = cast<LoadInst>(E->getSingleOperand(0)); 4323 Builder.SetInsertPoint(LI); 4324 PointerType *PtrTy = 4325 PointerType::get(VecTy, LI->getPointerAddressSpace()); 4326 Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy); 4327 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign()); 4328 Value *NewV = propagateMetadata(V, E->Scalars); 4329 if (!E->ReorderIndices.empty()) { 4330 SmallVector<int, 4> Mask; 4331 inversePermutation(E->ReorderIndices, Mask); 4332 NewV = Builder.CreateShuffleVector(NewV, UndefValue::get(VecTy), Mask, 4333 "reorder_shuffle"); 4334 } 4335 if (NeedToShuffleReuses) { 4336 // TODO: Merge this shuffle with the ReorderShuffleMask. 4337 NewV = Builder.CreateShuffleVector(NewV, UndefValue::get(VecTy), 4338 E->ReuseShuffleIndices, "shuffle"); 4339 } 4340 E->VectorizedValue = NewV; 4341 return NewV; 4342 } 4343 case Instruction::ZExt: 4344 case Instruction::SExt: 4345 case Instruction::FPToUI: 4346 case Instruction::FPToSI: 4347 case Instruction::FPExt: 4348 case Instruction::PtrToInt: 4349 case Instruction::IntToPtr: 4350 case Instruction::SIToFP: 4351 case Instruction::UIToFP: 4352 case Instruction::Trunc: 4353 case Instruction::FPTrunc: 4354 case Instruction::BitCast: { 4355 setInsertPointAfterBundle(E); 4356 4357 Value *InVec = vectorizeTree(E->getOperand(0)); 4358 4359 if (E->VectorizedValue) { 4360 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 4361 return E->VectorizedValue; 4362 } 4363 4364 auto *CI = cast<CastInst>(VL0); 4365 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy); 4366 if (NeedToShuffleReuses) { 4367 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4368 E->ReuseShuffleIndices, "shuffle"); 4369 } 4370 E->VectorizedValue = V; 4371 ++NumVectorInstructions; 4372 return V; 4373 } 4374 case Instruction::FCmp: 4375 case Instruction::ICmp: { 4376 setInsertPointAfterBundle(E); 4377 4378 Value *L = vectorizeTree(E->getOperand(0)); 4379 Value *R = vectorizeTree(E->getOperand(1)); 4380 4381 if (E->VectorizedValue) { 4382 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 4383 return E->VectorizedValue; 4384 } 4385 4386 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 4387 Value *V = Builder.CreateCmp(P0, L, R); 4388 propagateIRFlags(V, E->Scalars, VL0); 4389 if (NeedToShuffleReuses) { 4390 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4391 E->ReuseShuffleIndices, "shuffle"); 4392 } 4393 E->VectorizedValue = V; 4394 ++NumVectorInstructions; 4395 return V; 4396 } 4397 case Instruction::Select: { 4398 setInsertPointAfterBundle(E); 4399 4400 Value *Cond = vectorizeTree(E->getOperand(0)); 4401 Value *True = vectorizeTree(E->getOperand(1)); 4402 Value *False = vectorizeTree(E->getOperand(2)); 4403 4404 if (E->VectorizedValue) { 4405 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 4406 return E->VectorizedValue; 4407 } 4408 4409 Value *V = Builder.CreateSelect(Cond, True, False); 4410 if (NeedToShuffleReuses) { 4411 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4412 E->ReuseShuffleIndices, "shuffle"); 4413 } 4414 E->VectorizedValue = V; 4415 ++NumVectorInstructions; 4416 return V; 4417 } 4418 case Instruction::FNeg: { 4419 setInsertPointAfterBundle(E); 4420 
4421 Value *Op = vectorizeTree(E->getOperand(0)); 4422 4423 if (E->VectorizedValue) { 4424 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 4425 return E->VectorizedValue; 4426 } 4427 4428 Value *V = Builder.CreateUnOp( 4429 static_cast<Instruction::UnaryOps>(E->getOpcode()), Op); 4430 propagateIRFlags(V, E->Scalars, VL0); 4431 if (auto *I = dyn_cast<Instruction>(V)) 4432 V = propagateMetadata(I, E->Scalars); 4433 4434 if (NeedToShuffleReuses) { 4435 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4436 E->ReuseShuffleIndices, "shuffle"); 4437 } 4438 E->VectorizedValue = V; 4439 ++NumVectorInstructions; 4440 4441 return V; 4442 } 4443 case Instruction::Add: 4444 case Instruction::FAdd: 4445 case Instruction::Sub: 4446 case Instruction::FSub: 4447 case Instruction::Mul: 4448 case Instruction::FMul: 4449 case Instruction::UDiv: 4450 case Instruction::SDiv: 4451 case Instruction::FDiv: 4452 case Instruction::URem: 4453 case Instruction::SRem: 4454 case Instruction::FRem: 4455 case Instruction::Shl: 4456 case Instruction::LShr: 4457 case Instruction::AShr: 4458 case Instruction::And: 4459 case Instruction::Or: 4460 case Instruction::Xor: { 4461 setInsertPointAfterBundle(E); 4462 4463 Value *LHS = vectorizeTree(E->getOperand(0)); 4464 Value *RHS = vectorizeTree(E->getOperand(1)); 4465 4466 if (E->VectorizedValue) { 4467 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 4468 return E->VectorizedValue; 4469 } 4470 4471 Value *V = Builder.CreateBinOp( 4472 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, 4473 RHS); 4474 propagateIRFlags(V, E->Scalars, VL0); 4475 if (auto *I = dyn_cast<Instruction>(V)) 4476 V = propagateMetadata(I, E->Scalars); 4477 4478 if (NeedToShuffleReuses) { 4479 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4480 E->ReuseShuffleIndices, "shuffle"); 4481 } 4482 E->VectorizedValue = V; 4483 ++NumVectorInstructions; 4484 4485 return V; 4486 } 4487 case Instruction::Load: { 4488 // Loads are inserted at the head of the tree because we don't want to 4489 // sink them all the way down past store instructions. 4490 bool IsReorder = E->updateStateIfReorder(); 4491 if (IsReorder) 4492 VL0 = E->getMainOp(); 4493 setInsertPointAfterBundle(E); 4494 4495 LoadInst *LI = cast<LoadInst>(VL0); 4496 unsigned AS = LI->getPointerAddressSpace(); 4497 4498 Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(), 4499 VecTy->getPointerTo(AS)); 4500 4501 // The pointer operand uses an in-tree scalar so we add the new BitCast to 4502 // ExternalUses list to make sure that an extract will be generated in the 4503 // future. 4504 Value *PO = LI->getPointerOperand(); 4505 if (getTreeEntry(PO)) 4506 ExternalUses.push_back(ExternalUser(PO, cast<User>(VecPtr), 0)); 4507 4508 LI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign()); 4509 Value *V = propagateMetadata(LI, E->Scalars); 4510 if (IsReorder) { 4511 SmallVector<int, 4> Mask; 4512 inversePermutation(E->ReorderIndices, Mask); 4513 V = Builder.CreateShuffleVector(V, UndefValue::get(V->getType()), 4514 Mask, "reorder_shuffle"); 4515 } 4516 if (NeedToShuffleReuses) { 4517 // TODO: Merge this shuffle with the ReorderShuffleMask. 4518 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4519 E->ReuseShuffleIndices, "shuffle"); 4520 } 4521 E->VectorizedValue = V; 4522 ++NumVectorInstructions; 4523 return V; 4524 } 4525 case Instruction::Store: { 4526 bool IsReorder = !E->ReorderIndices.empty(); 4527 auto *SI = cast<StoreInst>( 4528 IsReorder ? 
E->Scalars[E->ReorderIndices.front()] : VL0); 4529 unsigned AS = SI->getPointerAddressSpace(); 4530 4531 setInsertPointAfterBundle(E); 4532 4533 Value *VecValue = vectorizeTree(E->getOperand(0)); 4534 if (IsReorder) { 4535 SmallVector<int, 4> Mask(E->ReorderIndices.begin(), 4536 E->ReorderIndices.end()); 4537 VecValue = Builder.CreateShuffleVector( 4538 VecValue, UndefValue::get(VecValue->getType()), Mask, 4539 "reorder_shuffle"); 4540 } 4541 Value *ScalarPtr = SI->getPointerOperand(); 4542 Value *VecPtr = Builder.CreateBitCast( 4543 ScalarPtr, VecValue->getType()->getPointerTo(AS)); 4544 StoreInst *ST = Builder.CreateAlignedStore(VecValue, VecPtr, 4545 SI->getAlign()); 4546 4547 // The pointer operand uses an in-tree scalar, so add the new BitCast to 4548 // ExternalUses to make sure that an extract will be generated in the 4549 // future. 4550 if (getTreeEntry(ScalarPtr)) 4551 ExternalUses.push_back(ExternalUser(ScalarPtr, cast<User>(VecPtr), 0)); 4552 4553 Value *V = propagateMetadata(ST, E->Scalars); 4554 if (NeedToShuffleReuses) { 4555 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4556 E->ReuseShuffleIndices, "shuffle"); 4557 } 4558 E->VectorizedValue = V; 4559 ++NumVectorInstructions; 4560 return V; 4561 } 4562 case Instruction::GetElementPtr: { 4563 setInsertPointAfterBundle(E); 4564 4565 Value *Op0 = vectorizeTree(E->getOperand(0)); 4566 4567 std::vector<Value *> OpVecs; 4568 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e; 4569 ++j) { 4570 ValueList &VL = E->getOperand(j); 4571 // Need to cast all elements to the same type before vectorization to 4572 // avoid crash. 4573 Type *VL0Ty = VL0->getOperand(j)->getType(); 4574 Type *Ty = llvm::all_of( 4575 VL, [VL0Ty](Value *V) { return VL0Ty == V->getType(); }) 4576 ? VL0Ty 4577 : DL->getIndexType(cast<GetElementPtrInst>(VL0) 4578 ->getPointerOperandType() 4579 ->getScalarType()); 4580 for (Value *&V : VL) { 4581 auto *CI = cast<ConstantInt>(V); 4582 V = ConstantExpr::getIntegerCast(CI, Ty, 4583 CI->getValue().isSignBitSet()); 4584 } 4585 Value *OpVec = vectorizeTree(VL); 4586 OpVecs.push_back(OpVec); 4587 } 4588 4589 Value *V = Builder.CreateGEP( 4590 cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs); 4591 if (Instruction *I = dyn_cast<Instruction>(V)) 4592 V = propagateMetadata(I, E->Scalars); 4593 4594 if (NeedToShuffleReuses) { 4595 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4596 E->ReuseShuffleIndices, "shuffle"); 4597 } 4598 E->VectorizedValue = V; 4599 ++NumVectorInstructions; 4600 4601 return V; 4602 } 4603 case Instruction::Call: { 4604 CallInst *CI = cast<CallInst>(VL0); 4605 setInsertPointAfterBundle(E); 4606 4607 Intrinsic::ID IID = Intrinsic::not_intrinsic; 4608 if (Function *FI = CI->getCalledFunction()) 4609 IID = FI->getIntrinsicID(); 4610 4611 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4612 4613 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 4614 bool UseIntrinsic = ID != Intrinsic::not_intrinsic && 4615 VecCallCosts.first <= VecCallCosts.second; 4616 4617 Value *ScalarArg = nullptr; 4618 std::vector<Value *> OpVecs; 4619 for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) { 4620 ValueList OpVL; 4621 // Some intrinsics have scalar arguments. This argument should not be 4622 // vectorized. 
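// For example, the exponent operand of llvm.powi stays scalar: the base is
// vectorized while the exponent is passed through unchanged, and if the
// exponent is itself part of the tree an extract is requested for it below.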
4623 if (UseIntrinsic && hasVectorInstrinsicScalarOpd(IID, j)) { 4624 CallInst *CEI = cast<CallInst>(VL0); 4625 ScalarArg = CEI->getArgOperand(j); 4626 OpVecs.push_back(CEI->getArgOperand(j)); 4627 continue; 4628 } 4629 4630 Value *OpVec = vectorizeTree(E->getOperand(j)); 4631 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); 4632 OpVecs.push_back(OpVec); 4633 } 4634 4635 Function *CF; 4636 if (!UseIntrinsic) { 4637 VFShape Shape = 4638 VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>( 4639 VecTy->getNumElements())), 4640 false /*HasGlobalPred*/); 4641 CF = VFDatabase(*CI).getVectorizedFunction(Shape); 4642 } else { 4643 Type *Tys[] = {FixedVectorType::get(CI->getType(), E->Scalars.size())}; 4644 CF = Intrinsic::getDeclaration(F->getParent(), ID, Tys); 4645 } 4646 4647 SmallVector<OperandBundleDef, 1> OpBundles; 4648 CI->getOperandBundlesAsDefs(OpBundles); 4649 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); 4650 4651 // The scalar argument uses an in-tree scalar so we add the new vectorized 4652 // call to ExternalUses list to make sure that an extract will be 4653 // generated in the future. 4654 if (ScalarArg && getTreeEntry(ScalarArg)) 4655 ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0)); 4656 4657 propagateIRFlags(V, E->Scalars, VL0); 4658 if (NeedToShuffleReuses) { 4659 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4660 E->ReuseShuffleIndices, "shuffle"); 4661 } 4662 E->VectorizedValue = V; 4663 ++NumVectorInstructions; 4664 return V; 4665 } 4666 case Instruction::ShuffleVector: { 4667 assert(E->isAltShuffle() && 4668 ((Instruction::isBinaryOp(E->getOpcode()) && 4669 Instruction::isBinaryOp(E->getAltOpcode())) || 4670 (Instruction::isCast(E->getOpcode()) && 4671 Instruction::isCast(E->getAltOpcode()))) && 4672 "Invalid Shuffle Vector Operand"); 4673 4674 Value *LHS = nullptr, *RHS = nullptr; 4675 if (Instruction::isBinaryOp(E->getOpcode())) { 4676 setInsertPointAfterBundle(E); 4677 LHS = vectorizeTree(E->getOperand(0)); 4678 RHS = vectorizeTree(E->getOperand(1)); 4679 } else { 4680 setInsertPointAfterBundle(E); 4681 LHS = vectorizeTree(E->getOperand(0)); 4682 } 4683 4684 if (E->VectorizedValue) { 4685 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 4686 return E->VectorizedValue; 4687 } 4688 4689 Value *V0, *V1; 4690 if (Instruction::isBinaryOp(E->getOpcode())) { 4691 V0 = Builder.CreateBinOp( 4692 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS); 4693 V1 = Builder.CreateBinOp( 4694 static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS); 4695 } else { 4696 V0 = Builder.CreateCast( 4697 static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy); 4698 V1 = Builder.CreateCast( 4699 static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy); 4700 } 4701 4702 // Create shuffle to take alternate operations from the vector. 4703 // Also, gather up main and alt scalar ops to propagate IR flags to 4704 // each vector operation. 
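    // For example, for a 4-wide bundle {add, sub, add, sub} (main opcode add,
    // alternate opcode sub), V0 holds the four adds, V1 the four subs, and the
    // mask built below is <0, 5, 2, 7>: lanes 0 and 2 are taken from V0,
    // lanes 1 and 3 from V1.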
4705 ValueList OpScalars, AltScalars; 4706 unsigned e = E->Scalars.size(); 4707 SmallVector<int, 8> Mask(e); 4708 for (unsigned i = 0; i < e; ++i) { 4709 auto *OpInst = cast<Instruction>(E->Scalars[i]); 4710 assert(E->isOpcodeOrAlt(OpInst) && "Unexpected main/alternate opcode"); 4711 if (OpInst->getOpcode() == E->getAltOpcode()) { 4712 Mask[i] = e + i; 4713 AltScalars.push_back(E->Scalars[i]); 4714 } else { 4715 Mask[i] = i; 4716 OpScalars.push_back(E->Scalars[i]); 4717 } 4718 } 4719 4720 propagateIRFlags(V0, OpScalars); 4721 propagateIRFlags(V1, AltScalars); 4722 4723 Value *V = Builder.CreateShuffleVector(V0, V1, Mask); 4724 if (Instruction *I = dyn_cast<Instruction>(V)) 4725 V = propagateMetadata(I, E->Scalars); 4726 if (NeedToShuffleReuses) { 4727 V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), 4728 E->ReuseShuffleIndices, "shuffle"); 4729 } 4730 E->VectorizedValue = V; 4731 ++NumVectorInstructions; 4732 4733 return V; 4734 } 4735 default: 4736 llvm_unreachable("unknown inst"); 4737 } 4738 return nullptr; 4739 } 4740 4741 Value *BoUpSLP::vectorizeTree() { 4742 ExtraValueToDebugLocsMap ExternallyUsedValues; 4743 return vectorizeTree(ExternallyUsedValues); 4744 } 4745 4746 Value * 4747 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { 4748 // All blocks must be scheduled before any instructions are inserted. 4749 for (auto &BSIter : BlocksSchedules) { 4750 scheduleBlock(BSIter.second.get()); 4751 } 4752 4753 Builder.SetInsertPoint(&F->getEntryBlock().front()); 4754 auto *VectorRoot = vectorizeTree(VectorizableTree[0].get()); 4755 4756 // If the vectorized tree can be rewritten in a smaller type, we truncate the 4757 // vectorized root. InstCombine will then rewrite the entire expression. We 4758 // sign extend the extracted values below. 4759 auto *ScalarRoot = VectorizableTree[0]->Scalars[0]; 4760 if (MinBWs.count(ScalarRoot)) { 4761 if (auto *I = dyn_cast<Instruction>(VectorRoot)) 4762 Builder.SetInsertPoint(&*++BasicBlock::iterator(I)); 4763 auto BundleWidth = VectorizableTree[0]->Scalars.size(); 4764 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 4765 auto *VecTy = FixedVectorType::get(MinTy, BundleWidth); 4766 auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy); 4767 VectorizableTree[0]->VectorizedValue = Trunc; 4768 } 4769 4770 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() 4771 << " values .\n"); 4772 4773 // If necessary, sign-extend or zero-extend ScalarRoot to the larger type 4774 // specified by ScalarType. 4775 auto extend = [&](Value *ScalarRoot, Value *Ex, Type *ScalarType) { 4776 if (!MinBWs.count(ScalarRoot)) 4777 return Ex; 4778 if (MinBWs[ScalarRoot].second) 4779 return Builder.CreateSExt(Ex, ScalarType); 4780 return Builder.CreateZExt(Ex, ScalarType); 4781 }; 4782 4783 // Extract all of the elements with the external uses. 4784 for (const auto &ExternalUse : ExternalUses) { 4785 Value *Scalar = ExternalUse.Scalar; 4786 llvm::User *User = ExternalUse.User; 4787 4788 // Skip users that we already RAUW. This happens when one instruction 4789 // has multiple uses of the same value. 
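    // For example, an out-of-tree user such as %r = add i32 %x, %x records two
    // ExternalUses entries for the in-tree scalar %x with the same User; after
    // the first extract has been substituted into that user, the second entry
    // no longer finds the user among %x's users and is skipped.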
4790 if (User && !is_contained(Scalar->users(), User)) 4791 continue; 4792 TreeEntry *E = getTreeEntry(Scalar); 4793 assert(E && "Invalid scalar"); 4794 assert(E->State == TreeEntry::Vectorize && "Extracting from a gather list"); 4795 4796 Value *Vec = E->VectorizedValue; 4797 assert(Vec && "Can't find vectorizable value"); 4798 4799 Value *Lane = Builder.getInt32(ExternalUse.Lane); 4800 // If User == nullptr, the Scalar is used as extra arg. Generate 4801 // ExtractElement instruction and update the record for this scalar in 4802 // ExternallyUsedValues. 4803 if (!User) { 4804 assert(ExternallyUsedValues.count(Scalar) && 4805 "Scalar with nullptr as an external user must be registered in " 4806 "ExternallyUsedValues map"); 4807 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 4808 Builder.SetInsertPoint(VecI->getParent(), 4809 std::next(VecI->getIterator())); 4810 } else { 4811 Builder.SetInsertPoint(&F->getEntryBlock().front()); 4812 } 4813 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 4814 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 4815 CSEBlocks.insert(cast<Instruction>(Scalar)->getParent()); 4816 auto &Locs = ExternallyUsedValues[Scalar]; 4817 ExternallyUsedValues.insert({Ex, Locs}); 4818 ExternallyUsedValues.erase(Scalar); 4819 // Required to update internally referenced instructions. 4820 Scalar->replaceAllUsesWith(Ex); 4821 continue; 4822 } 4823 4824 // Generate extracts for out-of-tree users. 4825 // Find the insertion point for the extractelement lane. 4826 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 4827 if (PHINode *PH = dyn_cast<PHINode>(User)) { 4828 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { 4829 if (PH->getIncomingValue(i) == Scalar) { 4830 Instruction *IncomingTerminator = 4831 PH->getIncomingBlock(i)->getTerminator(); 4832 if (isa<CatchSwitchInst>(IncomingTerminator)) { 4833 Builder.SetInsertPoint(VecI->getParent(), 4834 std::next(VecI->getIterator())); 4835 } else { 4836 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); 4837 } 4838 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 4839 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 4840 CSEBlocks.insert(PH->getIncomingBlock(i)); 4841 PH->setOperand(i, Ex); 4842 } 4843 } 4844 } else { 4845 Builder.SetInsertPoint(cast<Instruction>(User)); 4846 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 4847 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 4848 CSEBlocks.insert(cast<Instruction>(User)->getParent()); 4849 User->replaceUsesOfWith(Scalar, Ex); 4850 } 4851 } else { 4852 Builder.SetInsertPoint(&F->getEntryBlock().front()); 4853 Value *Ex = Builder.CreateExtractElement(Vec, Lane); 4854 Ex = extend(ScalarRoot, Ex, Scalar->getType()); 4855 CSEBlocks.insert(&F->getEntryBlock()); 4856 User->replaceUsesOfWith(Scalar, Ex); 4857 } 4858 4859 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 4860 } 4861 4862 // For each vectorized value: 4863 for (auto &TEPtr : VectorizableTree) { 4864 TreeEntry *Entry = TEPtr.get(); 4865 4866 // No need to handle users of gathered values. 
4867 if (Entry->State == TreeEntry::NeedToGather) 4868 continue; 4869 4870 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 4871 4872 // For each lane: 4873 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 4874 Value *Scalar = Entry->Scalars[Lane]; 4875 4876 #ifndef NDEBUG 4877 Type *Ty = Scalar->getType(); 4878 if (!Ty->isVoidTy()) { 4879 for (User *U : Scalar->users()) { 4880 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 4881 4882 // It is legal to delete users in the ignorelist. 4883 assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) && 4884 "Deleting out-of-tree value"); 4885 } 4886 } 4887 #endif 4888 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 4889 eraseInstruction(cast<Instruction>(Scalar)); 4890 } 4891 } 4892 4893 Builder.ClearInsertionPoint(); 4894 InstrElementSize.clear(); 4895 4896 return VectorizableTree[0]->VectorizedValue; 4897 } 4898 4899 void BoUpSLP::optimizeGatherSequence() { 4900 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size() 4901 << " gather sequences instructions.\n"); 4902 // LICM InsertElementInst sequences. 4903 for (Instruction *I : GatherSeq) { 4904 if (isDeleted(I)) 4905 continue; 4906 4907 // Check if this block is inside a loop. 4908 Loop *L = LI->getLoopFor(I->getParent()); 4909 if (!L) 4910 continue; 4911 4912 // Check if it has a preheader. 4913 BasicBlock *PreHeader = L->getLoopPreheader(); 4914 if (!PreHeader) 4915 continue; 4916 4917 // If the vector or the element that we insert into it are 4918 // instructions that are defined in this basic block then we can't 4919 // hoist this instruction. 4920 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); 4921 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); 4922 if (Op0 && L->contains(Op0)) 4923 continue; 4924 if (Op1 && L->contains(Op1)) 4925 continue; 4926 4927 // We can hoist this instruction. Move it to the pre-header. 4928 I->moveBefore(PreHeader->getTerminator()); 4929 } 4930 4931 // Make a list of all reachable blocks in our CSE queue. 4932 SmallVector<const DomTreeNode *, 8> CSEWorkList; 4933 CSEWorkList.reserve(CSEBlocks.size()); 4934 for (BasicBlock *BB : CSEBlocks) 4935 if (DomTreeNode *N = DT->getNode(BB)) { 4936 assert(DT->isReachableFromEntry(N)); 4937 CSEWorkList.push_back(N); 4938 } 4939 4940 // Sort blocks by domination. This ensures we visit a block after all blocks 4941 // dominating it are visited. 4942 llvm::stable_sort(CSEWorkList, 4943 [this](const DomTreeNode *A, const DomTreeNode *B) { 4944 return DT->properlyDominates(A, B); 4945 }); 4946 4947 // Perform O(N^2) search over the gather sequences and merge identical 4948 // instructions. TODO: We can further optimize this scan if we split the 4949 // instructions into different buckets based on the insert lane. 4950 SmallVector<Instruction *, 16> Visited; 4951 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 4952 assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 4953 "Worklist not sorted properly!"); 4954 BasicBlock *BB = (*I)->getBlock(); 4955 // For all instructions in blocks containing gather sequences: 4956 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) { 4957 Instruction *In = &*it++; 4958 if (isDeleted(In)) 4959 continue; 4960 if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In)) 4961 continue; 4962 4963 // Check if we can replace this instruction with any of the 4964 // visited instructions. 
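      // An instruction is merged only into an identical one (same operation
      // and operands, e.g. two equal insertelement chains built from the same
      // scalars) whose block dominates the current block, so all uses of the
      // erased copy remain dominated by the surviving copy.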
4965 for (Instruction *v : Visited) { 4966 if (In->isIdenticalTo(v) && 4967 DT->dominates(v->getParent(), In->getParent())) { 4968 In->replaceAllUsesWith(v); 4969 eraseInstruction(In); 4970 In = nullptr; 4971 break; 4972 } 4973 } 4974 if (In) { 4975 assert(!is_contained(Visited, In)); 4976 Visited.push_back(In); 4977 } 4978 } 4979 } 4980 CSEBlocks.clear(); 4981 GatherSeq.clear(); 4982 } 4983 4984 // Groups the instructions to a bundle (which is then a single scheduling entity) 4985 // and schedules instructions until the bundle gets ready. 4986 Optional<BoUpSLP::ScheduleData *> 4987 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 4988 const InstructionsState &S) { 4989 if (isa<PHINode>(S.OpValue)) 4990 return nullptr; 4991 4992 // Initialize the instruction bundle. 4993 Instruction *OldScheduleEnd = ScheduleEnd; 4994 ScheduleData *PrevInBundle = nullptr; 4995 ScheduleData *Bundle = nullptr; 4996 bool ReSchedule = false; 4997 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n"); 4998 4999 // Make sure that the scheduling region contains all 5000 // instructions of the bundle. 5001 for (Value *V : VL) { 5002 if (!extendSchedulingRegion(V, S)) 5003 return None; 5004 } 5005 5006 for (Value *V : VL) { 5007 ScheduleData *BundleMember = getScheduleData(V); 5008 assert(BundleMember && 5009 "no ScheduleData for bundle member (maybe not in same basic block)"); 5010 if (BundleMember->IsScheduled) { 5011 // A bundle member was scheduled as single instruction before and now 5012 // needs to be scheduled as part of the bundle. We just get rid of the 5013 // existing schedule. 5014 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember 5015 << " was already scheduled\n"); 5016 ReSchedule = true; 5017 } 5018 assert(BundleMember->isSchedulingEntity() && 5019 "bundle member already part of other bundle"); 5020 if (PrevInBundle) { 5021 PrevInBundle->NextInBundle = BundleMember; 5022 } else { 5023 Bundle = BundleMember; 5024 } 5025 BundleMember->UnscheduledDepsInBundle = 0; 5026 Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps; 5027 5028 // Group the instructions to a bundle. 5029 BundleMember->FirstInBundle = Bundle; 5030 PrevInBundle = BundleMember; 5031 } 5032 if (ScheduleEnd != OldScheduleEnd) { 5033 // The scheduling region got new instructions at the lower end (or it is a 5034 // new region for the first bundle). This makes it necessary to 5035 // recalculate all dependencies. 5036 // It is seldom that this needs to be done a second time after adding the 5037 // initial bundle to the region. 5038 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 5039 doForAllOpcodes(I, [](ScheduleData *SD) { 5040 SD->clearDependencies(); 5041 }); 5042 } 5043 ReSchedule = true; 5044 } 5045 if (ReSchedule) { 5046 resetSchedule(); 5047 initialFillReadyList(ReadyInsts); 5048 } 5049 assert(Bundle && "Failed to find schedule bundle"); 5050 5051 LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block " 5052 << BB->getName() << "\n"); 5053 5054 calculateDependencies(Bundle, true, SLP); 5055 5056 // Now try to schedule the new bundle. As soon as the bundle is "ready" it 5057 // means that there are no cyclic dependencies and we can schedule it. 5058 // Note that's important that we don't "schedule" the bundle yet (see 5059 // cancelScheduling). 
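  // Scheduling each already-ready instruction may clear dependencies of the
  // new bundle. If the ready list drains before the bundle becomes ready, the
  // bundle cannot be scheduled (e.g. because of a cyclic dependency) and is
  // cancelled below.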
5060 while (!Bundle->isReady() && !ReadyInsts.empty()) { 5061 5062 ScheduleData *pickedSD = ReadyInsts.back(); 5063 ReadyInsts.pop_back(); 5064 5065 if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) { 5066 schedule(pickedSD, ReadyInsts); 5067 } 5068 } 5069 if (!Bundle->isReady()) { 5070 cancelScheduling(VL, S.OpValue); 5071 return None; 5072 } 5073 return Bundle; 5074 } 5075 5076 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL, 5077 Value *OpValue) { 5078 if (isa<PHINode>(OpValue)) 5079 return; 5080 5081 ScheduleData *Bundle = getScheduleData(OpValue); 5082 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); 5083 assert(!Bundle->IsScheduled && 5084 "Can't cancel bundle which is already scheduled"); 5085 assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() && 5086 "tried to unbundle something which is not a bundle"); 5087 5088 // Un-bundle: make single instructions out of the bundle. 5089 ScheduleData *BundleMember = Bundle; 5090 while (BundleMember) { 5091 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); 5092 BundleMember->FirstInBundle = BundleMember; 5093 ScheduleData *Next = BundleMember->NextInBundle; 5094 BundleMember->NextInBundle = nullptr; 5095 BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps; 5096 if (BundleMember->UnscheduledDepsInBundle == 0) { 5097 ReadyInsts.insert(BundleMember); 5098 } 5099 BundleMember = Next; 5100 } 5101 } 5102 5103 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() { 5104 // Allocate a new ScheduleData for the instruction. 5105 if (ChunkPos >= ChunkSize) { 5106 ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize)); 5107 ChunkPos = 0; 5108 } 5109 return &(ScheduleDataChunks.back()[ChunkPos++]); 5110 } 5111 5112 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, 5113 const InstructionsState &S) { 5114 if (getScheduleData(V, isOneOf(S, V))) 5115 return true; 5116 Instruction *I = dyn_cast<Instruction>(V); 5117 assert(I && "bundle member must be an instruction"); 5118 assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled"); 5119 auto &&CheckSheduleForI = [this, &S](Instruction *I) -> bool { 5120 ScheduleData *ISD = getScheduleData(I); 5121 if (!ISD) 5122 return false; 5123 assert(isInSchedulingRegion(ISD) && 5124 "ScheduleData not in scheduling region"); 5125 ScheduleData *SD = allocateScheduleDataChunks(); 5126 SD->Inst = I; 5127 SD->init(SchedulingRegionID, S.OpValue); 5128 ExtraScheduleDataMap[I][S.OpValue] = SD; 5129 return true; 5130 }; 5131 if (CheckSheduleForI(I)) 5132 return true; 5133 if (!ScheduleStart) { 5134 // It's the first instruction in the new region. 5135 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 5136 ScheduleStart = I; 5137 ScheduleEnd = I->getNextNode(); 5138 if (isOneOf(S, I) != I) 5139 CheckSheduleForI(I); 5140 assert(ScheduleEnd && "tried to vectorize a terminator?"); 5141 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 5142 return true; 5143 } 5144 // Search up and down at the same time, because we don't know if the new 5145 // instruction is above or below the existing scheduling region. 
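  // For example, if the region currently covers [%c .. %f] and the new bundle
  // member is defined above %c, the upward scan finds it and ScheduleStart is
  // moved up to it; if it is defined below %f, the downward scan moves
  // ScheduleEnd past it instead.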
5146 BasicBlock::reverse_iterator UpIter = 5147 ++ScheduleStart->getIterator().getReverse(); 5148 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 5149 BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); 5150 BasicBlock::iterator LowerEnd = BB->end(); 5151 while (true) { 5152 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 5153 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 5154 return false; 5155 } 5156 5157 if (UpIter != UpperEnd) { 5158 if (&*UpIter == I) { 5159 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 5160 ScheduleStart = I; 5161 if (isOneOf(S, I) != I) 5162 CheckSheduleForI(I); 5163 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I 5164 << "\n"); 5165 return true; 5166 } 5167 ++UpIter; 5168 } 5169 if (DownIter != LowerEnd) { 5170 if (&*DownIter == I) { 5171 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 5172 nullptr); 5173 ScheduleEnd = I->getNextNode(); 5174 if (isOneOf(S, I) != I) 5175 CheckSheduleForI(I); 5176 assert(ScheduleEnd && "tried to vectorize a terminator?"); 5177 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I 5178 << "\n"); 5179 return true; 5180 } 5181 ++DownIter; 5182 } 5183 assert((UpIter != UpperEnd || DownIter != LowerEnd) && 5184 "instruction not found in block"); 5185 } 5186 return true; 5187 } 5188 5189 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 5190 Instruction *ToI, 5191 ScheduleData *PrevLoadStore, 5192 ScheduleData *NextLoadStore) { 5193 ScheduleData *CurrentLoadStore = PrevLoadStore; 5194 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 5195 ScheduleData *SD = ScheduleDataMap[I]; 5196 if (!SD) { 5197 SD = allocateScheduleDataChunks(); 5198 ScheduleDataMap[I] = SD; 5199 SD->Inst = I; 5200 } 5201 assert(!isInSchedulingRegion(SD) && 5202 "new ScheduleData already in scheduling region"); 5203 SD->init(SchedulingRegionID, I); 5204 5205 if (I->mayReadOrWriteMemory() && 5206 (!isa<IntrinsicInst>(I) || 5207 cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect)) { 5208 // Update the linked list of memory accessing instructions. 5209 if (CurrentLoadStore) { 5210 CurrentLoadStore->NextLoadStore = SD; 5211 } else { 5212 FirstLoadStoreInRegion = SD; 5213 } 5214 CurrentLoadStore = SD; 5215 } 5216 } 5217 if (NextLoadStore) { 5218 if (CurrentLoadStore) 5219 CurrentLoadStore->NextLoadStore = NextLoadStore; 5220 } else { 5221 LastLoadStoreInRegion = CurrentLoadStore; 5222 } 5223 } 5224 5225 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, 5226 bool InsertInReadyList, 5227 BoUpSLP *SLP) { 5228 assert(SD->isSchedulingEntity()); 5229 5230 SmallVector<ScheduleData *, 10> WorkList; 5231 WorkList.push_back(SD); 5232 5233 while (!WorkList.empty()) { 5234 ScheduleData *SD = WorkList.back(); 5235 WorkList.pop_back(); 5236 5237 ScheduleData *BundleMember = SD; 5238 while (BundleMember) { 5239 assert(isInSchedulingRegion(BundleMember)); 5240 if (!BundleMember->hasValidDependencies()) { 5241 5242 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember 5243 << "\n"); 5244 BundleMember->Dependencies = 0; 5245 BundleMember->resetUnscheduledDeps(); 5246 5247 // Handle def-use chain dependencies. 
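        // A bundle member depends on every in-region bundle that uses its
        // result: e.g. if member %mul feeds %add in another bundle inside the
        // region, this member gains one dependency and its bundle does not
        // become ready until %add's bundle has been scheduled.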
5248 if (BundleMember->OpValue != BundleMember->Inst) { 5249 ScheduleData *UseSD = getScheduleData(BundleMember->Inst); 5250 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 5251 BundleMember->Dependencies++; 5252 ScheduleData *DestBundle = UseSD->FirstInBundle; 5253 if (!DestBundle->IsScheduled) 5254 BundleMember->incrementUnscheduledDeps(1); 5255 if (!DestBundle->hasValidDependencies()) 5256 WorkList.push_back(DestBundle); 5257 } 5258 } else { 5259 for (User *U : BundleMember->Inst->users()) { 5260 if (isa<Instruction>(U)) { 5261 ScheduleData *UseSD = getScheduleData(U); 5262 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 5263 BundleMember->Dependencies++; 5264 ScheduleData *DestBundle = UseSD->FirstInBundle; 5265 if (!DestBundle->IsScheduled) 5266 BundleMember->incrementUnscheduledDeps(1); 5267 if (!DestBundle->hasValidDependencies()) 5268 WorkList.push_back(DestBundle); 5269 } 5270 } else { 5271 // I'm not sure if this can ever happen. But we need to be safe. 5272 // This lets the instruction/bundle never be scheduled and 5273 // eventually disable vectorization. 5274 BundleMember->Dependencies++; 5275 BundleMember->incrementUnscheduledDeps(1); 5276 } 5277 } 5278 } 5279 5280 // Handle the memory dependencies. 5281 ScheduleData *DepDest = BundleMember->NextLoadStore; 5282 if (DepDest) { 5283 Instruction *SrcInst = BundleMember->Inst; 5284 MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA); 5285 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory(); 5286 unsigned numAliased = 0; 5287 unsigned DistToSrc = 1; 5288 5289 while (DepDest) { 5290 assert(isInSchedulingRegion(DepDest)); 5291 5292 // We have two limits to reduce the complexity: 5293 // 1) AliasedCheckLimit: It's a small limit to reduce calls to 5294 // SLP->isAliased (which is the expensive part in this loop). 5295 // 2) MaxMemDepDistance: It's for very large blocks and it aborts 5296 // the whole loop (even if the loop is fast, it's quadratic). 5297 // It's important for the loop break condition (see below) to 5298 // check this limit even between two read-only instructions. 5299 if (DistToSrc >= MaxMemDepDistance || 5300 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) && 5301 (numAliased >= AliasedCheckLimit || 5302 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) { 5303 5304 // We increment the counter only if the locations are aliased 5305 // (instead of counting all alias checks). This gives a better 5306 // balance between reduced runtime and accurate dependencies. 5307 numAliased++; 5308 5309 DepDest->MemoryDependencies.push_back(BundleMember); 5310 BundleMember->Dependencies++; 5311 ScheduleData *DestBundle = DepDest->FirstInBundle; 5312 if (!DestBundle->IsScheduled) { 5313 BundleMember->incrementUnscheduledDeps(1); 5314 } 5315 if (!DestBundle->hasValidDependencies()) { 5316 WorkList.push_back(DestBundle); 5317 } 5318 } 5319 DepDest = DepDest->NextLoadStore; 5320 5321 // Example, explaining the loop break condition: Let's assume our 5322 // starting instruction is i0 and MaxMemDepDistance = 3. 5323 // 5324 // +--------v--v--v 5325 // i0,i1,i2,i3,i4,i5,i6,i7,i8 5326 // +--------^--^--^ 5327 // 5328 // MaxMemDepDistance let us stop alias-checking at i3 and we add 5329 // dependencies from i0 to i3,i4,.. (even if they are not aliased). 5330 // Previously we already added dependencies from i3 to i6,i7,i8 5331 // (because of MaxMemDepDistance). As we added a dependency from 5332 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 5333 // and we can abort this loop at i6. 
5334 if (DistToSrc >= 2 * MaxMemDepDistance) 5335 break; 5336 DistToSrc++; 5337 } 5338 } 5339 } 5340 BundleMember = BundleMember->NextInBundle; 5341 } 5342 if (InsertInReadyList && SD->isReady()) { 5343 ReadyInsts.push_back(SD); 5344 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst 5345 << "\n"); 5346 } 5347 } 5348 } 5349 5350 void BoUpSLP::BlockScheduling::resetSchedule() { 5351 assert(ScheduleStart && 5352 "tried to reset schedule on block which has not been scheduled"); 5353 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 5354 doForAllOpcodes(I, [&](ScheduleData *SD) { 5355 assert(isInSchedulingRegion(SD) && 5356 "ScheduleData not in scheduling region"); 5357 SD->IsScheduled = false; 5358 SD->resetUnscheduledDeps(); 5359 }); 5360 } 5361 ReadyInsts.clear(); 5362 } 5363 5364 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 5365 if (!BS->ScheduleStart) 5366 return; 5367 5368 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 5369 5370 BS->resetSchedule(); 5371 5372 // For the real scheduling we use a more sophisticated ready-list: it is 5373 // sorted by the original instruction location. This lets the final schedule 5374 // be as close as possible to the original instruction order. 5375 struct ScheduleDataCompare { 5376 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const { 5377 return SD2->SchedulingPriority < SD1->SchedulingPriority; 5378 } 5379 }; 5380 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 5381 5382 // Ensure that all dependency data is updated and fill the ready-list with 5383 // initial instructions. 5384 int Idx = 0; 5385 int NumToSchedule = 0; 5386 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 5387 I = I->getNextNode()) { 5388 BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) { 5389 assert(SD->isPartOfBundle() == 5390 (getTreeEntry(SD->Inst) != nullptr) && 5391 "scheduler and vectorizer bundle mismatch"); 5392 SD->FirstInBundle->SchedulingPriority = Idx++; 5393 if (SD->isSchedulingEntity()) { 5394 BS->calculateDependencies(SD, false, this); 5395 NumToSchedule++; 5396 } 5397 }); 5398 } 5399 BS->initialFillReadyList(ReadyInsts); 5400 5401 Instruction *LastScheduledInst = BS->ScheduleEnd; 5402 5403 // Do the "real" scheduling. 5404 while (!ReadyInsts.empty()) { 5405 ScheduleData *picked = *ReadyInsts.begin(); 5406 ReadyInsts.erase(ReadyInsts.begin()); 5407 5408 // Move the scheduled instruction(s) to their dedicated places, if not 5409 // there yet. 5410 ScheduleData *BundleMember = picked; 5411 while (BundleMember) { 5412 Instruction *pickedInst = BundleMember->Inst; 5413 if (LastScheduledInst->getNextNode() != pickedInst) { 5414 BS->BB->getInstList().remove(pickedInst); 5415 BS->BB->getInstList().insert(LastScheduledInst->getIterator(), 5416 pickedInst); 5417 } 5418 LastScheduledInst = pickedInst; 5419 BundleMember = BundleMember->NextInBundle; 5420 } 5421 5422 BS->schedule(picked, ReadyInsts); 5423 NumToSchedule--; 5424 } 5425 assert(NumToSchedule == 0 && "could not schedule all instructions"); 5426 5427 // Avoid duplicate scheduling of the block. 5428 BS->ScheduleStart = nullptr; 5429 } 5430 5431 unsigned BoUpSLP::getVectorElementSize(Value *V) { 5432 // If V is a store, just return the width of the stored value without 5433 // traversing the expression tree. This is the common case. 
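  // For example, for "%t = trunc i32 %x to i8; store i8 %t, i8* %p" the
  // element size is 8 bits, taken from the stored value rather than from the
  // wider computation feeding it.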
5434 if (auto *Store = dyn_cast<StoreInst>(V)) 5435 return DL->getTypeSizeInBits(Store->getValueOperand()->getType()); 5436 5437 auto E = InstrElementSize.find(V); 5438 if (E != InstrElementSize.end()) 5439 return E->second; 5440 5441 // If V is not a store, we can traverse the expression tree to find loads 5442 // that feed it. The type of the loaded value may indicate a more suitable 5443 // width than V's type. We want to base the vector element size on the width 5444 // of memory operations where possible. 5445 SmallVector<Instruction *, 16> Worklist; 5446 SmallPtrSet<Instruction *, 16> Visited; 5447 if (auto *I = dyn_cast<Instruction>(V)) { 5448 Worklist.push_back(I); 5449 Visited.insert(I); 5450 } 5451 5452 // Traverse the expression tree in bottom-up order looking for loads. If we 5453 // encounter an instruction we don't yet handle, we give up. 5454 auto MaxWidth = 0u; 5455 auto FoundUnknownInst = false; 5456 while (!Worklist.empty() && !FoundUnknownInst) { 5457 auto *I = Worklist.pop_back_val(); 5458 5459 // We should only be looking at scalar instructions here. If the current 5460 // instruction has a vector type, give up. 5461 auto *Ty = I->getType(); 5462 if (isa<VectorType>(Ty)) 5463 FoundUnknownInst = true; 5464 5465 // If the current instruction is a load, update MaxWidth to reflect the 5466 // width of the loaded value. 5467 else if (isa<LoadInst>(I)) 5468 MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty)); 5469 5470 // Otherwise, we need to visit the operands of the instruction. We only 5471 // handle the interesting cases from buildTree here. If an operand is an 5472 // instruction we haven't yet visited, we add it to the worklist. 5473 else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 5474 isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) { 5475 for (Use &U : I->operands()) 5476 if (auto *J = dyn_cast<Instruction>(U.get())) 5477 if (Visited.insert(J).second) 5478 Worklist.push_back(J); 5479 } 5480 5481 // If we don't yet handle the instruction, give up. 5482 else 5483 FoundUnknownInst = true; 5484 } 5485 5486 int Width = MaxWidth; 5487 // If we didn't encounter a memory access in the expression tree, or if we 5488 // gave up for some reason, just return the width of V. Otherwise, return the 5489 // maximum width we found. 5490 if (!MaxWidth || FoundUnknownInst) 5491 Width = DL->getTypeSizeInBits(V->getType()); 5492 5493 for (Instruction *I : Visited) 5494 InstrElementSize[I] = Width; 5495 5496 return Width; 5497 } 5498 5499 // Determine if a value V in a vectorizable expression Expr can be demoted to a 5500 // smaller type with a truncation. We collect the values that will be demoted 5501 // in ToDemote and additional roots that require investigating in Roots. 5502 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr, 5503 SmallVectorImpl<Value *> &ToDemote, 5504 SmallVectorImpl<Value *> &Roots) { 5505 // We can always demote constants. 5506 if (isa<Constant>(V)) { 5507 ToDemote.push_back(V); 5508 return true; 5509 } 5510 5511 // If the value is not an instruction in the expression with only one use, it 5512 // cannot be demoted. 5513 auto *I = dyn_cast<Instruction>(V); 5514 if (!I || !I->hasOneUse() || !Expr.count(I)) 5515 return false; 5516 5517 switch (I->getOpcode()) { 5518 5519 // We can always demote truncations and extensions. Since truncations can 5520 // seed additional demotion, we save the truncated value. 
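  // For example, demoting "%t = trunc i64 %x to i32" pushes %x onto Roots so
  // that the computation producing %x can be examined for further demotion
  // once the current roots have been handled.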
5521 case Instruction::Trunc: 5522 Roots.push_back(I->getOperand(0)); 5523 break; 5524 case Instruction::ZExt: 5525 case Instruction::SExt: 5526 break; 5527 5528 // We can demote certain binary operations if we can demote both of their 5529 // operands. 5530 case Instruction::Add: 5531 case Instruction::Sub: 5532 case Instruction::Mul: 5533 case Instruction::And: 5534 case Instruction::Or: 5535 case Instruction::Xor: 5536 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) || 5537 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots)) 5538 return false; 5539 break; 5540 5541 // We can demote selects if we can demote their true and false values. 5542 case Instruction::Select: { 5543 SelectInst *SI = cast<SelectInst>(I); 5544 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) || 5545 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots)) 5546 return false; 5547 break; 5548 } 5549 5550 // We can demote phis if we can demote all their incoming operands. Note that 5551 // we don't need to worry about cycles since we ensure single use above. 5552 case Instruction::PHI: { 5553 PHINode *PN = cast<PHINode>(I); 5554 for (Value *IncValue : PN->incoming_values()) 5555 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots)) 5556 return false; 5557 break; 5558 } 5559 5560 // Otherwise, conservatively give up. 5561 default: 5562 return false; 5563 } 5564 5565 // Record the value that we can demote. 5566 ToDemote.push_back(V); 5567 return true; 5568 } 5569 5570 void BoUpSLP::computeMinimumValueSizes() { 5571 // If there are no external uses, the expression tree must be rooted by a 5572 // store. We can't demote in-memory values, so there is nothing to do here. 5573 if (ExternalUses.empty()) 5574 return; 5575 5576 // We only attempt to truncate integer expressions. 5577 auto &TreeRoot = VectorizableTree[0]->Scalars; 5578 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 5579 if (!TreeRootIT) 5580 return; 5581 5582 // If the expression is not rooted by a store, these roots should have 5583 // external uses. We will rely on InstCombine to rewrite the expression in 5584 // the narrower type. However, InstCombine only rewrites single-use values. 5585 // This means that if a tree entry other than a root is used externally, it 5586 // must have multiple uses and InstCombine will not rewrite it. The code 5587 // below ensures that only the roots are used externally. 5588 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end()); 5589 for (auto &EU : ExternalUses) 5590 if (!Expr.erase(EU.Scalar)) 5591 return; 5592 if (!Expr.empty()) 5593 return; 5594 5595 // Collect the scalar values of the vectorizable expression. We will use this 5596 // context to determine which values can be demoted. If we see a truncation, 5597 // we mark it as seeding another demotion. 5598 for (auto &EntryPtr : VectorizableTree) 5599 Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end()); 5600 5601 // Ensure the roots of the vectorizable tree don't form a cycle. They must 5602 // have a single external user that is not in the vectorizable tree. 5603 for (auto *Root : TreeRoot) 5604 if (!Root->hasOneUse() || Expr.count(*Root->user_begin())) 5605 return; 5606 5607 // Conservatively determine if we can actually truncate the roots of the 5608 // expression. Collect the values that can be demoted in ToDemote and 5609 // additional roots that require investigating in Roots. 
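  // For example, if every root is only used by a single truncation to i16, the
  // demanded-bits query below reports just the low 16 bits as live,
  // MaxBitWidth becomes 16, and the demotable values are recorded in MinBWs
  // with that width.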
5610 SmallVector<Value *, 32> ToDemote; 5611 SmallVector<Value *, 4> Roots; 5612 for (auto *Root : TreeRoot) 5613 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots)) 5614 return; 5615 5616 // The maximum bit width required to represent all the values that can be 5617 // demoted without loss of precision. It would be safe to truncate the roots 5618 // of the expression to this width. 5619 auto MaxBitWidth = 8u; 5620 5621 // We first check if all the bits of the roots are demanded. If they're not, 5622 // we can truncate the roots to this narrower type. 5623 for (auto *Root : TreeRoot) { 5624 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 5625 MaxBitWidth = std::max<unsigned>( 5626 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth); 5627 } 5628 5629 // True if the roots can be zero-extended back to their original type, rather 5630 // than sign-extended. We know that if the leading bits are not demanded, we 5631 // can safely zero-extend. So we initialize IsKnownPositive to True. 5632 bool IsKnownPositive = true; 5633 5634 // If all the bits of the roots are demanded, we can try a little harder to 5635 // compute a narrower type. This can happen, for example, if the roots are 5636 // getelementptr indices. InstCombine promotes these indices to the pointer 5637 // width. Thus, all their bits are technically demanded even though the 5638 // address computation might be vectorized in a smaller type. 5639 // 5640 // We start by looking at each entry that can be demoted. We compute the 5641 // maximum bit width required to store the scalar by using ValueTracking to 5642 // compute the number of high-order bits we can truncate. 5643 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) && 5644 llvm::all_of(TreeRoot, [](Value *R) { 5645 assert(R->hasOneUse() && "Root should have only one use!"); 5646 return isa<GetElementPtrInst>(R->user_back()); 5647 })) { 5648 MaxBitWidth = 8u; 5649 5650 // Determine if the sign bit of all the roots is known to be zero. If not, 5651 // IsKnownPositive is set to False. 5652 IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) { 5653 KnownBits Known = computeKnownBits(R, *DL); 5654 return Known.isNonNegative(); 5655 }); 5656 5657 // Determine the maximum number of bits required to store the scalar 5658 // values. 5659 for (auto *Scalar : ToDemote) { 5660 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT); 5661 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType()); 5662 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth); 5663 } 5664 5665 // If we can't prove that the sign bit is zero, we must add one to the 5666 // maximum bit width to account for the unknown sign bit. This preserves 5667 // the existing sign bit so we can safely sign-extend the root back to the 5668 // original type. Otherwise, if we know the sign bit is zero, we will 5669 // zero-extend the root instead. 5670 // 5671 // FIXME: This is somewhat suboptimal, as there will be cases where adding 5672 // one to the maximum bit width will yield a larger-than-necessary 5673 // type. In general, we need to add an extra bit only if we can't 5674 // prove that the upper bit of the original type is equal to the 5675 // upper bit of the proposed smaller type. If these two bits are the 5676 // same (either zero or one) we know that sign-extending from the 5677 // smaller type will result in the same value. 
Here, since we can't 5678 // yet prove this, we are just making the proposed smaller type 5679 // larger to ensure correctness. 5680 if (!IsKnownPositive) 5681 ++MaxBitWidth; 5682 } 5683 5684 // Round MaxBitWidth up to the next power-of-two. 5685 if (!isPowerOf2_64(MaxBitWidth)) 5686 MaxBitWidth = NextPowerOf2(MaxBitWidth); 5687 5688 // If the maximum bit width we compute is less than the with of the roots' 5689 // type, we can proceed with the narrowing. Otherwise, do nothing. 5690 if (MaxBitWidth >= TreeRootIT->getBitWidth()) 5691 return; 5692 5693 // If we can truncate the root, we must collect additional values that might 5694 // be demoted as a result. That is, those seeded by truncations we will 5695 // modify. 5696 while (!Roots.empty()) 5697 collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots); 5698 5699 // Finally, map the values we can demote to the maximum bit with we computed. 5700 for (auto *Scalar : ToDemote) 5701 MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive); 5702 } 5703 5704 namespace { 5705 5706 /// The SLPVectorizer Pass. 5707 struct SLPVectorizer : public FunctionPass { 5708 SLPVectorizerPass Impl; 5709 5710 /// Pass identification, replacement for typeid 5711 static char ID; 5712 5713 explicit SLPVectorizer() : FunctionPass(ID) { 5714 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry()); 5715 } 5716 5717 bool doInitialization(Module &M) override { 5718 return false; 5719 } 5720 5721 bool runOnFunction(Function &F) override { 5722 if (skipFunction(F)) 5723 return false; 5724 5725 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 5726 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 5727 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 5728 auto *TLI = TLIP ? 
&TLIP->getTLI(F) : nullptr; 5729 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 5730 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 5731 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 5732 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 5733 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 5734 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 5735 5736 return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 5737 } 5738 5739 void getAnalysisUsage(AnalysisUsage &AU) const override { 5740 FunctionPass::getAnalysisUsage(AU); 5741 AU.addRequired<AssumptionCacheTracker>(); 5742 AU.addRequired<ScalarEvolutionWrapperPass>(); 5743 AU.addRequired<AAResultsWrapperPass>(); 5744 AU.addRequired<TargetTransformInfoWrapperPass>(); 5745 AU.addRequired<LoopInfoWrapperPass>(); 5746 AU.addRequired<DominatorTreeWrapperPass>(); 5747 AU.addRequired<DemandedBitsWrapperPass>(); 5748 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 5749 AU.addRequired<InjectTLIMappingsLegacy>(); 5750 AU.addPreserved<LoopInfoWrapperPass>(); 5751 AU.addPreserved<DominatorTreeWrapperPass>(); 5752 AU.addPreserved<AAResultsWrapperPass>(); 5753 AU.addPreserved<GlobalsAAWrapperPass>(); 5754 AU.setPreservesCFG(); 5755 } 5756 }; 5757 5758 } // end anonymous namespace 5759 5760 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) { 5761 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); 5762 auto *TTI = &AM.getResult<TargetIRAnalysis>(F); 5763 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); 5764 auto *AA = &AM.getResult<AAManager>(F); 5765 auto *LI = &AM.getResult<LoopAnalysis>(F); 5766 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 5767 auto *AC = &AM.getResult<AssumptionAnalysis>(F); 5768 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F); 5769 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 5770 5771 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 5772 if (!Changed) 5773 return PreservedAnalyses::all(); 5774 5775 PreservedAnalyses PA; 5776 PA.preserveSet<CFGAnalyses>(); 5777 PA.preserve<AAManager>(); 5778 PA.preserve<GlobalsAA>(); 5779 return PA; 5780 } 5781 5782 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, 5783 TargetTransformInfo *TTI_, 5784 TargetLibraryInfo *TLI_, AAResults *AA_, 5785 LoopInfo *LI_, DominatorTree *DT_, 5786 AssumptionCache *AC_, DemandedBits *DB_, 5787 OptimizationRemarkEmitter *ORE_) { 5788 if (!RunSLPVectorization) 5789 return false; 5790 SE = SE_; 5791 TTI = TTI_; 5792 TLI = TLI_; 5793 AA = AA_; 5794 LI = LI_; 5795 DT = DT_; 5796 AC = AC_; 5797 DB = DB_; 5798 DL = &F.getParent()->getDataLayout(); 5799 5800 Stores.clear(); 5801 GEPs.clear(); 5802 bool Changed = false; 5803 5804 // If the target claims to have no vector registers don't attempt 5805 // vectorization. 5806 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) 5807 return false; 5808 5809 // Don't vectorize when the attribute NoImplicitFloat is used. 5810 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 5811 return false; 5812 5813 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 5814 5815 // Use the bottom up slp vectorizer to construct chains that start with 5816 // store instructions. 5817 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_); 5818 5819 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 5820 // delete instructions. 
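  // eraseInstruction only marks an instruction as deleted; the actual removal
  // from the IR is deferred until the BoUpSLP object is torn down, so internal
  // maps that still refer to the instruction do not dangle while the pass is
  // running.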
5821 5822 // Scan the blocks in the function in post order. 5823 for (auto BB : post_order(&F.getEntryBlock())) { 5824 collectSeedInstructions(BB); 5825 5826 // Vectorize trees that end at stores. 5827 if (!Stores.empty()) { 5828 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() 5829 << " underlying objects.\n"); 5830 Changed |= vectorizeStoreChains(R); 5831 } 5832 5833 // Vectorize trees that end at reductions. 5834 Changed |= vectorizeChainsInBlock(BB, R); 5835 5836 // Vectorize the index computations of getelementptr instructions. This 5837 // is primarily intended to catch gather-like idioms ending at 5838 // non-consecutive loads. 5839 if (!GEPs.empty()) { 5840 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() 5841 << " underlying objects.\n"); 5842 Changed |= vectorizeGEPIndices(BB, R); 5843 } 5844 } 5845 5846 if (Changed) { 5847 R.optimizeGatherSequence(); 5848 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 5849 } 5850 return Changed; 5851 } 5852 5853 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, 5854 unsigned Idx) { 5855 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size() 5856 << "\n"); 5857 const unsigned Sz = R.getVectorElementSize(Chain[0]); 5858 const unsigned MinVF = R.getMinVecRegSize() / Sz; 5859 unsigned VF = Chain.size(); 5860 5861 if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF) 5862 return false; 5863 5864 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx 5865 << "\n"); 5866 5867 R.buildTree(Chain); 5868 Optional<ArrayRef<unsigned>> Order = R.bestOrder(); 5869 // TODO: Handle orders of size less than number of elements in the vector. 5870 if (Order && Order->size() == Chain.size()) { 5871 // TODO: reorder tree nodes without tree rebuilding. 5872 SmallVector<Value *, 4> ReorderedOps(Chain.rbegin(), Chain.rend()); 5873 llvm::transform(*Order, ReorderedOps.begin(), 5874 [Chain](const unsigned Idx) { return Chain[Idx]; }); 5875 R.buildTree(ReorderedOps); 5876 } 5877 if (R.isTreeTinyAndNotFullyVectorizable()) 5878 return false; 5879 if (R.isLoadCombineCandidate()) 5880 return false; 5881 5882 R.computeMinimumValueSizes(); 5883 5884 int Cost = R.getTreeCost(); 5885 5886 LLVM_DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n"); 5887 if (Cost < -SLPCostThreshold) { 5888 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n"); 5889 5890 using namespace ore; 5891 5892 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized", 5893 cast<StoreInst>(Chain[0])) 5894 << "Stores SLP vectorized with cost " << NV("Cost", Cost) 5895 << " and with tree size " 5896 << NV("TreeSize", R.getTreeSize())); 5897 5898 R.vectorizeTree(); 5899 return true; 5900 } 5901 5902 return false; 5903 } 5904 5905 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores, 5906 BoUpSLP &R) { 5907 // We may run into multiple chains that merge into a single chain. We mark the 5908 // stores that we vectorized so that we don't visit the same store twice. 
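  // For example, two chain heads may run into the same sequence of consecutive
  // stores; once the first chain's stores are in VectorizedStores, following
  // the second chain stops as soon as it reaches an already-vectorized store.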
5909 BoUpSLP::ValueSet VectorizedStores; 5910 bool Changed = false; 5911 5912 int E = Stores.size(); 5913 SmallBitVector Tails(E, false); 5914 SmallVector<int, 16> ConsecutiveChain(E, E + 1); 5915 int MaxIter = MaxStoreLookup.getValue(); 5916 int IterCnt; 5917 auto &&FindConsecutiveAccess = [this, &Stores, &Tails, &IterCnt, MaxIter, 5918 &ConsecutiveChain](int K, int Idx) { 5919 if (IterCnt >= MaxIter) 5920 return true; 5921 ++IterCnt; 5922 if (!isConsecutiveAccess(Stores[K], Stores[Idx], *DL, *SE)) 5923 return false; 5924 5925 Tails.set(Idx); 5926 ConsecutiveChain[K] = Idx; 5927 return true; 5928 }; 5929 // Do a quadratic search on all of the given stores in reverse order and find 5930 // all of the pairs of stores that follow each other. 5931 for (int Idx = E - 1; Idx >= 0; --Idx) { 5932 // If a store has multiple consecutive store candidates, search according 5933 // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ... 5934 // This is because usually pairing with immediate succeeding or preceding 5935 // candidate create the best chance to find slp vectorization opportunity. 5936 const int MaxLookDepth = std::max(E - Idx, Idx + 1); 5937 IterCnt = 0; 5938 for (int Offset = 1, F = MaxLookDepth; Offset < F; ++Offset) 5939 if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) || 5940 (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx))) 5941 break; 5942 } 5943 5944 // For stores that start but don't end a link in the chain: 5945 for (int Cnt = E; Cnt > 0; --Cnt) { 5946 int I = Cnt - 1; 5947 if (ConsecutiveChain[I] == E + 1 || Tails.test(I)) 5948 continue; 5949 // We found a store instr that starts a chain. Now follow the chain and try 5950 // to vectorize it. 5951 BoUpSLP::ValueList Operands; 5952 // Collect the chain into a list. 5953 while (I != E + 1 && !VectorizedStores.count(Stores[I])) { 5954 Operands.push_back(Stores[I]); 5955 // Move to the next value in the chain. 5956 I = ConsecutiveChain[I]; 5957 } 5958 5959 // If a vector register can't hold 1 element, we are done. 5960 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 5961 unsigned EltSize = R.getVectorElementSize(Stores[0]); 5962 if (MaxVecRegSize % EltSize != 0) 5963 continue; 5964 5965 unsigned MaxElts = MaxVecRegSize / EltSize; 5966 // FIXME: Is division-by-2 the correct step? Should we assert that the 5967 // register size is a power-of-2? 5968 unsigned StartIdx = 0; 5969 for (unsigned Size = llvm::PowerOf2Ceil(MaxElts); Size >= 2; Size /= 2) { 5970 for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) { 5971 ArrayRef<Value *> Slice = makeArrayRef(Operands).slice(Cnt, Size); 5972 if (!VectorizedStores.count(Slice.front()) && 5973 !VectorizedStores.count(Slice.back()) && 5974 vectorizeStoreChain(Slice, R, Cnt)) { 5975 // Mark the vectorized stores so that we don't vectorize them again. 5976 VectorizedStores.insert(Slice.begin(), Slice.end()); 5977 Changed = true; 5978 // If we vectorized initial block, no need to try to vectorize it 5979 // again. 5980 if (Cnt == StartIdx) 5981 StartIdx += Size; 5982 Cnt += Size; 5983 continue; 5984 } 5985 ++Cnt; 5986 } 5987 // Check if the whole array was vectorized already - exit. 5988 if (StartIdx >= Operands.size()) 5989 break; 5990 } 5991 } 5992 5993 return Changed; 5994 } 5995 5996 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) { 5997 // Initialize the collections. We will make a single pass over the block. 
5998 Stores.clear(); 5999 GEPs.clear(); 6000 6001 // Visit the store and getelementptr instructions in BB and organize them in 6002 // Stores and GEPs according to the underlying objects of their pointer 6003 // operands. 6004 for (Instruction &I : *BB) { 6005 // Ignore store instructions that are volatile or have a pointer operand 6006 // that doesn't point to a scalar type. 6007 if (auto *SI = dyn_cast<StoreInst>(&I)) { 6008 if (!SI->isSimple()) 6009 continue; 6010 if (!isValidElementType(SI->getValueOperand()->getType())) 6011 continue; 6012 Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI); 6013 } 6014 6015 // Ignore getelementptr instructions that have more than one index, a 6016 // constant index, or a pointer operand that doesn't point to a scalar 6017 // type. 6018 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 6019 auto Idx = GEP->idx_begin()->get(); 6020 if (GEP->getNumIndices() > 1 || isa<Constant>(Idx)) 6021 continue; 6022 if (!isValidElementType(Idx->getType())) 6023 continue; 6024 if (GEP->getType()->isVectorTy()) 6025 continue; 6026 GEPs[GEP->getPointerOperand()].push_back(GEP); 6027 } 6028 } 6029 } 6030 6031 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) { 6032 if (!A || !B) 6033 return false; 6034 Value *VL[] = {A, B}; 6035 return tryToVectorizeList(VL, R, /*AllowReorder=*/true); 6036 } 6037 6038 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 6039 bool AllowReorder, 6040 ArrayRef<Value *> InsertUses) { 6041 if (VL.size() < 2) 6042 return false; 6043 6044 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " 6045 << VL.size() << ".\n"); 6046 6047 // Check that all of the parts are instructions of the same type, 6048 // we permit an alternate opcode via InstructionsState. 6049 InstructionsState S = getSameOpcode(VL); 6050 if (!S.getOpcode()) 6051 return false; 6052 6053 Instruction *I0 = cast<Instruction>(S.OpValue); 6054 // Make sure invalid types (including vector type) are rejected before 6055 // determining vectorization factor for scalar instructions. 6056 for (Value *V : VL) { 6057 Type *Ty = V->getType(); 6058 if (!isValidElementType(Ty)) { 6059 // NOTE: the following will give user internal llvm type name, which may 6060 // not be useful. 
6061 R.getORE()->emit([&]() { 6062 std::string type_str; 6063 llvm::raw_string_ostream rso(type_str); 6064 Ty->print(rso); 6065 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0) 6066 << "Cannot SLP vectorize list: type " 6067 << rso.str() + " is unsupported by vectorizer"; 6068 }); 6069 return false; 6070 } 6071 } 6072 6073 unsigned Sz = R.getVectorElementSize(I0); 6074 unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz); 6075 unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF); 6076 if (MaxVF < 2) { 6077 R.getORE()->emit([&]() { 6078 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0) 6079 << "Cannot SLP vectorize list: vectorization factor " 6080 << "less than 2 is not supported"; 6081 }); 6082 return false; 6083 } 6084 6085 bool Changed = false; 6086 bool CandidateFound = false; 6087 int MinCost = SLPCostThreshold; 6088 6089 bool CompensateUseCost = 6090 !InsertUses.empty() && llvm::all_of(InsertUses, [](const Value *V) { 6091 return V && isa<InsertElementInst>(V); 6092 }); 6093 assert((!CompensateUseCost || InsertUses.size() == VL.size()) && 6094 "Each scalar expected to have an associated InsertElement user."); 6095 6096 unsigned NextInst = 0, MaxInst = VL.size(); 6097 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) { 6098 // No actual vectorization should happen, if number of parts is the same as 6099 // provided vectorization factor (i.e. the scalar type is used for vector 6100 // code during codegen). 6101 auto *VecTy = FixedVectorType::get(VL[0]->getType(), VF); 6102 if (TTI->getNumberOfParts(VecTy) == VF) 6103 continue; 6104 for (unsigned I = NextInst; I < MaxInst; ++I) { 6105 unsigned OpsWidth = 0; 6106 6107 if (I + VF > MaxInst) 6108 OpsWidth = MaxInst - I; 6109 else 6110 OpsWidth = VF; 6111 6112 if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2) 6113 break; 6114 6115 ArrayRef<Value *> Ops = VL.slice(I, OpsWidth); 6116 // Check that a previous iteration of this loop did not delete the Value. 6117 if (llvm::any_of(Ops, [&R](Value *V) { 6118 auto *I = dyn_cast<Instruction>(V); 6119 return I && R.isDeleted(I); 6120 })) 6121 continue; 6122 6123 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations " 6124 << "\n"); 6125 6126 R.buildTree(Ops); 6127 Optional<ArrayRef<unsigned>> Order = R.bestOrder(); 6128 // TODO: check if we can allow reordering for more cases. 6129 if (AllowReorder && Order) { 6130 // TODO: reorder tree nodes without tree rebuilding. 6131 // Conceptually, there is nothing actually preventing us from trying to 6132 // reorder a larger list. In fact, we do exactly this when vectorizing 6133 // reductions. However, at this point, we only expect to get here when 6134 // there are exactly two operations. 6135 assert(Ops.size() == 2); 6136 Value *ReorderedOps[] = {Ops[1], Ops[0]}; 6137 R.buildTree(ReorderedOps, None); 6138 } 6139 if (R.isTreeTinyAndNotFullyVectorizable()) 6140 continue; 6141 6142 R.computeMinimumValueSizes(); 6143 int Cost = R.getTreeCost(); 6144 CandidateFound = true; 6145 if (CompensateUseCost) { 6146 // TODO: Use TTI's getScalarizationOverhead for sequence of inserts 6147 // rather than sum of single inserts as the latter may overestimate 6148 // cost. This work should imply improving cost estimation for extracts 6149 // that added in for external (for vectorization tree) users,i.e. that 6150 // part should also switch to same interface. 
        // For example, the following case is projected code after SLP:
        //  %4 = extractelement <4 x i64> %3, i32 0
        //  %v0 = insertelement <4 x i64> undef, i64 %4, i32 0
        //  %5 = extractelement <4 x i64> %3, i32 1
        //  %v1 = insertelement <4 x i64> %v0, i64 %5, i32 1
        //  %6 = extractelement <4 x i64> %3, i32 2
        //  %v2 = insertelement <4 x i64> %v1, i64 %6, i32 2
        //  %7 = extractelement <4 x i64> %3, i32 3
        //  %v3 = insertelement <4 x i64> %v2, i64 %7, i32 3
        //
        // The extracts here are added by SLP to feed the users (the inserts)
        // of the original scalars and contribute to "ExtractCost" at cost
        // evaluation. The inserts in turn form a sequence that builds an
        // aggregate and is detected by the findBuildAggregate routine.
        // SLP assumes that such a sequence will be optimized away later
        // (by instcombine), so it tries to compensate ExtractCost with the
        // cost of the insert sequence.
        // The current per-element cost calculation is not quite accurate and
        // tends to bias toward favoring vectorization.
        // Switching to the TTI interface might help a bit.
        // An alternative solution could be to pattern-match and detect a
        // no-op or a shuffle.
        unsigned UserCost = 0;
        for (unsigned Lane = 0; Lane < OpsWidth; Lane++) {
          auto *IE = cast<InsertElementInst>(InsertUses[I + Lane]);
          if (auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2)))
            UserCost += TTI->getVectorInstrCost(
                Instruction::InsertElement, IE->getType(), CI->getZExtValue());
        }
        LLVM_DEBUG(dbgs() << "SLP: Compensate cost of users by: " << UserCost
                          << ".\n");
        Cost -= UserCost;
      }

      MinCost = std::min(MinCost, Cost);

      if (Cost < -SLPCostThreshold) {
        LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
        R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
                                            cast<Instruction>(Ops[0]))
                         << "SLP vectorized with cost " << ore::NV("Cost", Cost)
                         << " and with tree size "
                         << ore::NV("TreeSize", R.getTreeSize()));

        R.vectorizeTree();
        // Move to the next bundle.
        I += VF - 1;
        NextInst = I + 1;
        Changed = true;
      }
    }
  }

  if (!Changed && CandidateFound) {
    R.getORE()->emit([&]() {
      return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0)
             << "List vectorization was possible but not beneficial with cost "
             << ore::NV("Cost", MinCost) << " >= "
             << ore::NV("Threshold", -SLPCostThreshold);
    });
  } else if (!Changed) {
    R.getORE()->emit([&]() {
      return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0)
             << "Cannot SLP vectorize list: vectorization was impossible"
             << " with available vectorization factors";
    });
  }
  return Changed;
}

bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
  if (!I)
    return false;

  if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I))
    return false;

  Value *P = I->getParent();

  // Vectorize in current basic block only.
  auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
  auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
  if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
    return false;

  // Try to vectorize the pair of operands.
  if (tryToVectorizePair(Op0, Op1, R))
    return true;

  auto *A = dyn_cast<BinaryOperator>(Op0);
  auto *B = dyn_cast<BinaryOperator>(Op1);
  // Try to skip B.
6243 if (B && B->hasOneUse()) { 6244 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); 6245 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); 6246 if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R)) 6247 return true; 6248 if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R)) 6249 return true; 6250 } 6251 6252 // Try to skip A. 6253 if (A && A->hasOneUse()) { 6254 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 6255 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 6256 if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R)) 6257 return true; 6258 if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R)) 6259 return true; 6260 } 6261 return false; 6262 } 6263 6264 /// Generate a shuffle mask to be used in a reduction tree. 6265 /// 6266 /// \param VecLen The length of the vector to be reduced. 6267 /// \param NumEltsToRdx The number of elements that should be reduced in the 6268 /// vector. 6269 /// \param IsPairwise Whether the reduction is a pairwise or splitting 6270 /// reduction. A pairwise reduction will generate a mask of 6271 /// <0,2,...> or <1,3,..> while a splitting reduction will generate 6272 /// <2,3, undef,undef> for a vector of 4 and NumElts = 2. 6273 /// \param IsLeft True will generate a mask of even elements, odd otherwise. 6274 static SmallVector<int, 32> createRdxShuffleMask(unsigned VecLen, 6275 unsigned NumEltsToRdx, 6276 bool IsPairwise, bool IsLeft) { 6277 assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask"); 6278 6279 SmallVector<int, 32> ShuffleMask(VecLen, -1); 6280 6281 if (IsPairwise) 6282 // Build a mask of 0, 2, ... (left) or 1, 3, ... (right). 6283 for (unsigned i = 0; i != NumEltsToRdx; ++i) 6284 ShuffleMask[i] = 2 * i + !IsLeft; 6285 else 6286 // Move the upper half of the vector to the lower half. 6287 for (unsigned i = 0; i != NumEltsToRdx; ++i) 6288 ShuffleMask[i] = NumEltsToRdx + i; 6289 6290 return ShuffleMask; 6291 } 6292 6293 namespace { 6294 6295 /// Model horizontal reductions. 6296 /// 6297 /// A horizontal reduction is a tree of reduction operations (currently add and 6298 /// fadd) that has operations that can be put into a vector as its leaf. 6299 /// For example, this tree: 6300 /// 6301 /// mul mul mul mul 6302 /// \ / \ / 6303 /// + + 6304 /// \ / 6305 /// + 6306 /// This tree has "mul" as its reduced values and "+" as its reduction 6307 /// operations. A reduction might be feeding into a store or a binary operation 6308 /// feeding a phi. 6309 /// ... 6310 /// \ / 6311 /// + 6312 /// | 6313 /// phi += 6314 /// 6315 /// Or: 6316 /// ... 6317 /// \ / 6318 /// + 6319 /// | 6320 /// *p = 6321 /// 6322 class HorizontalReduction { 6323 using ReductionOpsType = SmallVector<Value *, 16>; 6324 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>; 6325 ReductionOpsListType ReductionOps; 6326 SmallVector<Value *, 32> ReducedVals; 6327 // Use map vector to make stable output. 6328 MapVector<Instruction *, Value *> ExtraArgs; 6329 6330 /// Kind of the reduction data. 6331 enum ReductionKind { 6332 RK_None, /// Not a reduction. 6333 RK_Arithmetic, /// Binary reduction data. 6334 RK_SMin, /// Signed minimum reduction data. 6335 RK_UMin, /// Unsigned minimum reduction data. 6336 RK_SMax, /// Signed maximum reduction data. 6337 RK_UMax, /// Unsigned maximum reduction data. 6338 }; 6339 6340 /// Contains info about operation, like its opcode, left and right operands. 6341 class OperationData { 6342 /// Opcode of the instruction. 
6343 unsigned Opcode = 0; 6344 6345 /// Left operand of the reduction operation. 6346 Value *LHS = nullptr; 6347 6348 /// Right operand of the reduction operation. 6349 Value *RHS = nullptr; 6350 6351 /// Kind of the reduction operation. 6352 ReductionKind Kind = RK_None; 6353 6354 /// Checks if the reduction operation can be vectorized. 6355 bool isVectorizable() const { 6356 return LHS && RHS && 6357 // We currently only support add/mul/logical && min/max reductions. 6358 ((Kind == RK_Arithmetic && 6359 (Opcode == Instruction::Add || Opcode == Instruction::FAdd || 6360 Opcode == Instruction::Mul || Opcode == Instruction::FMul || 6361 Opcode == Instruction::And || Opcode == Instruction::Or || 6362 Opcode == Instruction::Xor)) || 6363 (Opcode == Instruction::ICmp && 6364 (Kind == RK_SMin || Kind == RK_SMax || 6365 Kind == RK_UMin || Kind == RK_UMax))); 6366 } 6367 6368 /// Creates reduction operation with the current opcode. 6369 Value *createOp(IRBuilder<> &Builder, const Twine &Name) const { 6370 assert(isVectorizable() && 6371 "Expected add|fadd or min/max reduction operation."); 6372 Value *Cmp = nullptr; 6373 switch (Kind) { 6374 case RK_Arithmetic: 6375 return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, LHS, RHS, 6376 Name); 6377 case RK_SMin: 6378 assert(Opcode == Instruction::ICmp && "Expected integer types."); 6379 Cmp = Builder.CreateICmpSLT(LHS, RHS); 6380 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 6381 case RK_SMax: 6382 assert(Opcode == Instruction::ICmp && "Expected integer types."); 6383 Cmp = Builder.CreateICmpSGT(LHS, RHS); 6384 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 6385 case RK_UMin: 6386 assert(Opcode == Instruction::ICmp && "Expected integer types."); 6387 Cmp = Builder.CreateICmpULT(LHS, RHS); 6388 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 6389 case RK_UMax: 6390 assert(Opcode == Instruction::ICmp && "Expected integer types."); 6391 Cmp = Builder.CreateICmpUGT(LHS, RHS); 6392 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 6393 case RK_None: 6394 break; 6395 } 6396 llvm_unreachable("Unknown reduction operation."); 6397 } 6398 6399 public: 6400 explicit OperationData() = default; 6401 6402 /// Construction for reduced values. They are identified by opcode only and 6403 /// don't have associated LHS/RHS values. 6404 explicit OperationData(Value *V) { 6405 if (auto *I = dyn_cast<Instruction>(V)) 6406 Opcode = I->getOpcode(); 6407 } 6408 6409 /// Constructor for reduction operations with opcode and its left and 6410 /// right operands. 6411 OperationData(unsigned Opcode, Value *LHS, Value *RHS, ReductionKind Kind) 6412 : Opcode(Opcode), LHS(LHS), RHS(RHS), Kind(Kind) { 6413 assert(Kind != RK_None && "One of the reduction operations is expected."); 6414 } 6415 6416 explicit operator bool() const { return Opcode; } 6417 6418 /// Return true if this operation is any kind of minimum or maximum. 6419 bool isMinMax() const { 6420 switch (Kind) { 6421 case RK_Arithmetic: 6422 return false; 6423 case RK_SMin: 6424 case RK_SMax: 6425 case RK_UMin: 6426 case RK_UMax: 6427 return true; 6428 case RK_None: 6429 break; 6430 } 6431 llvm_unreachable("Reduction kind is not set"); 6432 } 6433 6434 /// Get the index of the first operand. 6435 unsigned getFirstOperandIndex() const { 6436 assert(!!*this && "The opcode is not set."); 6437 // We allow calling this before 'Kind' is set, so handle that specially. 6438 if (Kind == RK_None) 6439 return 0; 6440 return isMinMax() ? 1 : 0; 6441 } 6442 6443 /// Total number of operands in the reduction operation. 
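    /// (For a min/max the reduction operation is its select instruction, which
    /// has three operands: condition, true value and false value; an
    /// arithmetic reduction operation has two.)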
6444 unsigned getNumberOfOperands() const { 6445 assert(Kind != RK_None && !!*this && LHS && RHS && 6446 "Expected reduction operation."); 6447 return isMinMax() ? 3 : 2; 6448 } 6449 6450 /// Checks if the operation has the same parent as \p P. 6451 bool hasSameParent(Instruction *I, Value *P, bool IsRedOp) const { 6452 assert(Kind != RK_None && !!*this && LHS && RHS && 6453 "Expected reduction operation."); 6454 if (!IsRedOp) 6455 return I->getParent() == P; 6456 if (isMinMax()) { 6457 // SelectInst must be used twice while the condition op must have single 6458 // use only. 6459 auto *Cmp = cast<Instruction>(cast<SelectInst>(I)->getCondition()); 6460 return I->getParent() == P && Cmp && Cmp->getParent() == P; 6461 } 6462 // Arithmetic reduction operation must be used once only. 6463 return I->getParent() == P; 6464 } 6465 6466 /// Expected number of uses for reduction operations/reduced values. 6467 bool hasRequiredNumberOfUses(Instruction *I, bool IsReductionOp) const { 6468 assert(Kind != RK_None && !!*this && LHS && RHS && 6469 "Expected reduction operation."); 6470 if (isMinMax()) 6471 return I->hasNUses(2) && 6472 (!IsReductionOp || 6473 cast<SelectInst>(I)->getCondition()->hasOneUse()); 6474 return I->hasOneUse(); 6475 } 6476 6477 /// Initializes the list of reduction operations. 6478 void initReductionOps(ReductionOpsListType &ReductionOps) { 6479 assert(Kind != RK_None && !!*this && LHS && RHS && 6480 "Expected reduction operation."); 6481 if (isMinMax()) 6482 ReductionOps.assign(2, ReductionOpsType()); 6483 else 6484 ReductionOps.assign(1, ReductionOpsType()); 6485 } 6486 6487 /// Add all reduction operations for the reduction instruction \p I. 6488 void addReductionOps(Instruction *I, ReductionOpsListType &ReductionOps) { 6489 assert(Kind != RK_None && !!*this && LHS && RHS && 6490 "Expected reduction operation."); 6491 if (isMinMax()) { 6492 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition()); 6493 ReductionOps[1].emplace_back(I); 6494 } else { 6495 ReductionOps[0].emplace_back(I); 6496 } 6497 } 6498 6499 /// Checks if instruction is associative and can be vectorized. 6500 bool isAssociative(Instruction *I) const { 6501 assert(Kind != RK_None && *this && LHS && RHS && 6502 "Expected reduction operation."); 6503 switch (Kind) { 6504 case RK_Arithmetic: 6505 return I->isAssociative(); 6506 case RK_SMin: 6507 case RK_SMax: 6508 case RK_UMin: 6509 case RK_UMax: 6510 assert(Opcode == Instruction::ICmp && 6511 "Only integer compare operation is expected."); 6512 return true; 6513 case RK_None: 6514 break; 6515 } 6516 llvm_unreachable("Reduction kind is not set"); 6517 } 6518 6519 /// Checks if the reduction operation can be vectorized. 6520 bool isVectorizable(Instruction *I) const { 6521 return isVectorizable() && isAssociative(I); 6522 } 6523 6524 /// Checks if two operation data are both a reduction op or both a reduced 6525 /// value. 6526 bool operator==(const OperationData &OD) const { 6527 assert(((Kind != OD.Kind) || ((!LHS == !OD.LHS) && (!RHS == !OD.RHS))) && 6528 "One of the comparing operations is incorrect."); 6529 return this == &OD || (Kind == OD.Kind && Opcode == OD.Opcode); 6530 } 6531 bool operator!=(const OperationData &OD) const { return !(*this == OD); } 6532 void clear() { 6533 Opcode = 0; 6534 LHS = nullptr; 6535 RHS = nullptr; 6536 Kind = RK_None; 6537 } 6538 6539 /// Get the opcode of the reduction operation. 
6540 unsigned getOpcode() const { 6541 assert(isVectorizable() && "Expected vectorizable operation."); 6542 return Opcode; 6543 } 6544 6545 /// Get kind of reduction data. 6546 ReductionKind getKind() const { return Kind; } 6547 Value *getLHS() const { return LHS; } 6548 Value *getRHS() const { return RHS; } 6549 Type *getConditionType() const { 6550 return isMinMax() ? CmpInst::makeCmpResultType(LHS->getType()) : nullptr; 6551 } 6552 6553 /// Creates reduction operation with the current opcode with the IR flags 6554 /// from \p ReductionOps. 6555 Value *createOp(IRBuilder<> &Builder, const Twine &Name, 6556 const ReductionOpsListType &ReductionOps) const { 6557 assert(isVectorizable() && 6558 "Expected add|fadd or min/max reduction operation."); 6559 auto *Op = createOp(Builder, Name); 6560 switch (Kind) { 6561 case RK_Arithmetic: 6562 propagateIRFlags(Op, ReductionOps[0]); 6563 return Op; 6564 case RK_SMin: 6565 case RK_SMax: 6566 case RK_UMin: 6567 case RK_UMax: 6568 if (auto *SI = dyn_cast<SelectInst>(Op)) 6569 propagateIRFlags(SI->getCondition(), ReductionOps[0]); 6570 propagateIRFlags(Op, ReductionOps[1]); 6571 return Op; 6572 case RK_None: 6573 break; 6574 } 6575 llvm_unreachable("Unknown reduction operation."); 6576 } 6577 /// Creates reduction operation with the current opcode with the IR flags 6578 /// from \p I. 6579 Value *createOp(IRBuilder<> &Builder, const Twine &Name, 6580 Instruction *I) const { 6581 assert(isVectorizable() && 6582 "Expected add|fadd or min/max reduction operation."); 6583 auto *Op = createOp(Builder, Name); 6584 switch (Kind) { 6585 case RK_Arithmetic: 6586 propagateIRFlags(Op, I); 6587 return Op; 6588 case RK_SMin: 6589 case RK_SMax: 6590 case RK_UMin: 6591 case RK_UMax: 6592 if (auto *SI = dyn_cast<SelectInst>(Op)) { 6593 propagateIRFlags(SI->getCondition(), 6594 cast<SelectInst>(I)->getCondition()); 6595 } 6596 propagateIRFlags(Op, I); 6597 return Op; 6598 case RK_None: 6599 break; 6600 } 6601 llvm_unreachable("Unknown reduction operation."); 6602 } 6603 6604 TargetTransformInfo::ReductionFlags getFlags() const { 6605 TargetTransformInfo::ReductionFlags Flags; 6606 switch (Kind) { 6607 case RK_Arithmetic: 6608 break; 6609 case RK_SMin: 6610 Flags.IsSigned = true; 6611 Flags.IsMaxOp = false; 6612 break; 6613 case RK_SMax: 6614 Flags.IsSigned = true; 6615 Flags.IsMaxOp = true; 6616 break; 6617 case RK_UMin: 6618 Flags.IsSigned = false; 6619 Flags.IsMaxOp = false; 6620 break; 6621 case RK_UMax: 6622 Flags.IsSigned = false; 6623 Flags.IsMaxOp = true; 6624 break; 6625 case RK_None: 6626 llvm_unreachable("Reduction kind is not set"); 6627 } 6628 return Flags; 6629 } 6630 }; 6631 6632 WeakTrackingVH ReductionRoot; 6633 6634 /// The operation data of the reduction operation. 6635 OperationData ReductionData; 6636 6637 /// The operation data of the values we perform a reduction on. 6638 OperationData ReducedValueData; 6639 6640 /// Should we model this reduction as a pairwise reduction tree or a tree that 6641 /// splits the vector in halves and adds those halves. 6642 bool IsPairwiseReduction = false; 6643 6644 /// Checks if the ParentStackElem.first should be marked as a reduction 6645 /// operation with an extra argument or as extra argument itself. 6646 void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem, 6647 Value *ExtraArg) { 6648 if (ExtraArgs.count(ParentStackElem.first)) { 6649 ExtraArgs[ParentStackElem.first] = nullptr; 6650 // We ran into something like: 6651 // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg. 
6652 // The whole ParentStackElem.first should be considered as an extra value 6653 // in this case. 6654 // Do not perform analysis of remaining operands of ParentStackElem.first 6655 // instruction, this whole instruction is an extra argument. 6656 ParentStackElem.second = ParentStackElem.first->getNumOperands(); 6657 } else { 6658 // We ran into something like: 6659 // ParentStackElem.first += ... + ExtraArg + ... 6660 ExtraArgs[ParentStackElem.first] = ExtraArg; 6661 } 6662 } 6663 6664 static OperationData getOperationData(Value *V) { 6665 if (!V) 6666 return OperationData(); 6667 6668 Value *LHS; 6669 Value *RHS; 6670 if (m_BinOp(m_Value(LHS), m_Value(RHS)).match(V)) { 6671 return OperationData(cast<BinaryOperator>(V)->getOpcode(), LHS, RHS, 6672 RK_Arithmetic); 6673 } 6674 if (auto *Select = dyn_cast<SelectInst>(V)) { 6675 // Look for a min/max pattern. 6676 if (m_UMin(m_Value(LHS), m_Value(RHS)).match(Select)) { 6677 return OperationData(Instruction::ICmp, LHS, RHS, RK_UMin); 6678 } else if (m_SMin(m_Value(LHS), m_Value(RHS)).match(Select)) { 6679 return OperationData(Instruction::ICmp, LHS, RHS, RK_SMin); 6680 } else if (m_UMax(m_Value(LHS), m_Value(RHS)).match(Select)) { 6681 return OperationData(Instruction::ICmp, LHS, RHS, RK_UMax); 6682 } else if (m_SMax(m_Value(LHS), m_Value(RHS)).match(Select)) { 6683 return OperationData(Instruction::ICmp, LHS, RHS, RK_SMax); 6684 } else { 6685 // Try harder: look for min/max pattern based on instructions producing 6686 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2). 6687 // During the intermediate stages of SLP, it's very common to have 6688 // pattern like this (since optimizeGatherSequence is run only once 6689 // at the end): 6690 // %1 = extractelement <2 x i32> %a, i32 0 6691 // %2 = extractelement <2 x i32> %a, i32 1 6692 // %cond = icmp sgt i32 %1, %2 6693 // %3 = extractelement <2 x i32> %a, i32 0 6694 // %4 = extractelement <2 x i32> %a, i32 1 6695 // %select = select i1 %cond, i32 %3, i32 %4 6696 CmpInst::Predicate Pred; 6697 Instruction *L1; 6698 Instruction *L2; 6699 6700 LHS = Select->getTrueValue(); 6701 RHS = Select->getFalseValue(); 6702 Value *Cond = Select->getCondition(); 6703 6704 // TODO: Support inverse predicates. 
6705 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) { 6706 if (!isa<ExtractElementInst>(RHS) || 6707 !L2->isIdenticalTo(cast<Instruction>(RHS))) 6708 return OperationData(V); 6709 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) { 6710 if (!isa<ExtractElementInst>(LHS) || 6711 !L1->isIdenticalTo(cast<Instruction>(LHS))) 6712 return OperationData(V); 6713 } else { 6714 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS)) 6715 return OperationData(V); 6716 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) || 6717 !L1->isIdenticalTo(cast<Instruction>(LHS)) || 6718 !L2->isIdenticalTo(cast<Instruction>(RHS))) 6719 return OperationData(V); 6720 } 6721 switch (Pred) { 6722 default: 6723 return OperationData(V); 6724 6725 case CmpInst::ICMP_ULT: 6726 case CmpInst::ICMP_ULE: 6727 return OperationData(Instruction::ICmp, LHS, RHS, RK_UMin); 6728 6729 case CmpInst::ICMP_SLT: 6730 case CmpInst::ICMP_SLE: 6731 return OperationData(Instruction::ICmp, LHS, RHS, RK_SMin); 6732 6733 case CmpInst::ICMP_UGT: 6734 case CmpInst::ICMP_UGE: 6735 return OperationData(Instruction::ICmp, LHS, RHS, RK_UMax); 6736 6737 case CmpInst::ICMP_SGT: 6738 case CmpInst::ICMP_SGE: 6739 return OperationData(Instruction::ICmp, LHS, RHS, RK_SMax); 6740 } 6741 } 6742 } 6743 return OperationData(V); 6744 } 6745 6746 public: 6747 HorizontalReduction() = default; 6748 6749 /// Try to find a reduction tree. 6750 bool matchAssociativeReduction(PHINode *Phi, Instruction *B) { 6751 assert((!Phi || is_contained(Phi->operands(), B)) && 6752 "Thi phi needs to use the binary operator"); 6753 6754 ReductionData = getOperationData(B); 6755 6756 // We could have a initial reductions that is not an add. 6757 // r *= v1 + v2 + v3 + v4 6758 // In such a case start looking for a tree rooted in the first '+'. 6759 if (Phi) { 6760 if (ReductionData.getLHS() == Phi) { 6761 Phi = nullptr; 6762 B = dyn_cast<Instruction>(ReductionData.getRHS()); 6763 ReductionData = getOperationData(B); 6764 } else if (ReductionData.getRHS() == Phi) { 6765 Phi = nullptr; 6766 B = dyn_cast<Instruction>(ReductionData.getLHS()); 6767 ReductionData = getOperationData(B); 6768 } 6769 } 6770 6771 if (!ReductionData.isVectorizable(B)) 6772 return false; 6773 6774 Type *Ty = B->getType(); 6775 if (!isValidElementType(Ty)) 6776 return false; 6777 if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy()) 6778 return false; 6779 6780 ReducedValueData.clear(); 6781 ReductionRoot = B; 6782 6783 // Post order traverse the reduction tree starting at B. We only handle true 6784 // trees containing only binary operators. 6785 SmallVector<std::pair<Instruction *, unsigned>, 32> Stack; 6786 Stack.push_back(std::make_pair(B, ReductionData.getFirstOperandIndex())); 6787 ReductionData.initReductionOps(ReductionOps); 6788 while (!Stack.empty()) { 6789 Instruction *TreeN = Stack.back().first; 6790 unsigned EdgeToVist = Stack.back().second++; 6791 OperationData OpData = getOperationData(TreeN); 6792 bool IsReducedValue = OpData != ReductionData; 6793 6794 // Postorder vist. 6795 if (IsReducedValue || EdgeToVist == OpData.getNumberOfOperands()) { 6796 if (IsReducedValue) 6797 ReducedVals.push_back(TreeN); 6798 else { 6799 auto I = ExtraArgs.find(TreeN); 6800 if (I != ExtraArgs.end() && !I->second) { 6801 // Check if TreeN is an extra argument of its parent operation. 6802 if (Stack.size() <= 1) { 6803 // TreeN can't be an extra argument as it is a root reduction 6804 // operation. 
6805 return false; 6806 } 6807 // Yes, TreeN is an extra argument, do not add it to a list of 6808 // reduction operations. 6809 // Stack[Stack.size() - 2] always points to the parent operation. 6810 markExtraArg(Stack[Stack.size() - 2], TreeN); 6811 ExtraArgs.erase(TreeN); 6812 } else 6813 ReductionData.addReductionOps(TreeN, ReductionOps); 6814 } 6815 // Retract. 6816 Stack.pop_back(); 6817 continue; 6818 } 6819 6820 // Visit left or right. 6821 Value *NextV = TreeN->getOperand(EdgeToVist); 6822 if (NextV != Phi) { 6823 auto *I = dyn_cast<Instruction>(NextV); 6824 OpData = getOperationData(I); 6825 // Continue analysis if the next operand is a reduction operation or 6826 // (possibly) a reduced value. If the reduced value opcode is not set, 6827 // the first met operation != reduction operation is considered as the 6828 // reduced value class. 6829 if (I && (!ReducedValueData || OpData == ReducedValueData || 6830 OpData == ReductionData)) { 6831 const bool IsReductionOperation = OpData == ReductionData; 6832 // Only handle trees in the current basic block. 6833 if (!ReductionData.hasSameParent(I, B->getParent(), 6834 IsReductionOperation)) { 6835 // I is an extra argument for TreeN (its parent operation). 6836 markExtraArg(Stack.back(), I); 6837 continue; 6838 } 6839 6840 // Each tree node needs to have minimal number of users except for the 6841 // ultimate reduction. 6842 if (!ReductionData.hasRequiredNumberOfUses(I, 6843 OpData == ReductionData) && 6844 I != B) { 6845 // I is an extra argument for TreeN (its parent operation). 6846 markExtraArg(Stack.back(), I); 6847 continue; 6848 } 6849 6850 if (IsReductionOperation) { 6851 // We need to be able to reassociate the reduction operations. 6852 if (!OpData.isAssociative(I)) { 6853 // I is an extra argument for TreeN (its parent operation). 6854 markExtraArg(Stack.back(), I); 6855 continue; 6856 } 6857 } else if (ReducedValueData && 6858 ReducedValueData != OpData) { 6859 // Make sure that the opcodes of the operations that we are going to 6860 // reduce match. 6861 // I is an extra argument for TreeN (its parent operation). 6862 markExtraArg(Stack.back(), I); 6863 continue; 6864 } else if (!ReducedValueData) 6865 ReducedValueData = OpData; 6866 6867 Stack.push_back(std::make_pair(I, OpData.getFirstOperandIndex())); 6868 continue; 6869 } 6870 } 6871 // NextV is an extra argument for TreeN (its parent operation). 6872 markExtraArg(Stack.back(), NextV); 6873 } 6874 return true; 6875 } 6876 6877 /// Attempt to vectorize the tree found by matchAssociativeReduction. 6878 bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) { 6879 // If there are a sufficient number of reduction values, reduce 6880 // to a nearby power-of-2. We can safely generate oversized 6881 // vectors and rely on the backend to split them to legal sizes. 6882 unsigned NumReducedVals = ReducedVals.size(); 6883 if (NumReducedVals < 4) 6884 return false; 6885 6886 // FIXME: Fast-math-flags should be set based on the instructions in the 6887 // reduction (not all of 'fast' are required). 6888 IRBuilder<> Builder(cast<Instruction>(ReductionRoot)); 6889 FastMathFlags Unsafe; 6890 Unsafe.setFast(); 6891 Builder.setFastMathFlags(Unsafe); 6892 6893 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues; 6894 // The same extra argument may be used several times, so log each attempt 6895 // to use it. 
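    // A sketch of the mechanism (hypothetical IR, not a complete profitable
    // reduction): in
    //   %t0 = add i32 %x, %v0
    //   %t1 = add i32 %t0, %v1
    //   %t2 = add i32 %t1, %x
    // the value %x is an extra argument of two different reduction operations,
    // so two (instruction, value) entries are recorded here and each one is
    // added back to the reduced value once the tree has been vectorized.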
6896 for (std::pair<Instruction *, Value *> &Pair : ExtraArgs) { 6897 assert(Pair.first && "DebugLoc must be set."); 6898 ExternallyUsedValues[Pair.second].push_back(Pair.first); 6899 } 6900 6901 // The compare instruction of a min/max is the insertion point for new 6902 // instructions and may be replaced with a new compare instruction. 6903 auto getCmpForMinMaxReduction = [](Instruction *RdxRootInst) { 6904 assert(isa<SelectInst>(RdxRootInst) && 6905 "Expected min/max reduction to have select root instruction"); 6906 Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition(); 6907 assert(isa<Instruction>(ScalarCond) && 6908 "Expected min/max reduction to have compare condition"); 6909 return cast<Instruction>(ScalarCond); 6910 }; 6911 6912 // The reduction root is used as the insertion point for new instructions, 6913 // so set it as externally used to prevent it from being deleted. 6914 ExternallyUsedValues[ReductionRoot]; 6915 SmallVector<Value *, 16> IgnoreList; 6916 for (ReductionOpsType &RdxOp : ReductionOps) 6917 IgnoreList.append(RdxOp.begin(), RdxOp.end()); 6918 6919 unsigned ReduxWidth = PowerOf2Floor(NumReducedVals); 6920 if (NumReducedVals > ReduxWidth) { 6921 // In the loop below, we are building a tree based on a window of 6922 // 'ReduxWidth' values. 6923 // If the operands of those values have common traits (compare predicate, 6924 // constant operand, etc), then we want to group those together to 6925 // minimize the cost of the reduction. 6926 6927 // TODO: This should be extended to count common operands for 6928 // compares and binops. 6929 6930 // Step 1: Count the number of times each compare predicate occurs. 6931 SmallDenseMap<unsigned, unsigned> PredCountMap; 6932 for (Value *RdxVal : ReducedVals) { 6933 CmpInst::Predicate Pred; 6934 if (match(RdxVal, m_Cmp(Pred, m_Value(), m_Value()))) 6935 ++PredCountMap[Pred]; 6936 } 6937 // Step 2: Sort the values so the most common predicates come first. 6938 stable_sort(ReducedVals, [&PredCountMap](Value *A, Value *B) { 6939 CmpInst::Predicate PredA, PredB; 6940 if (match(A, m_Cmp(PredA, m_Value(), m_Value())) && 6941 match(B, m_Cmp(PredB, m_Value(), m_Value()))) { 6942 return PredCountMap[PredA] > PredCountMap[PredB]; 6943 } 6944 return false; 6945 }); 6946 } 6947 6948 Value *VectorizedTree = nullptr; 6949 unsigned i = 0; 6950 while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) { 6951 ArrayRef<Value *> VL = makeArrayRef(&ReducedVals[i], ReduxWidth); 6952 V.buildTree(VL, ExternallyUsedValues, IgnoreList); 6953 Optional<ArrayRef<unsigned>> Order = V.bestOrder(); 6954 if (Order) { 6955 assert(Order->size() == VL.size() && 6956 "Order size must be the same as number of vectorized " 6957 "instructions."); 6958 // TODO: reorder tree nodes without tree rebuilding. 6959 SmallVector<Value *, 4> ReorderedOps(VL.size()); 6960 llvm::transform(*Order, ReorderedOps.begin(), 6961 [VL](const unsigned Idx) { return VL[Idx]; }); 6962 V.buildTree(ReorderedOps, ExternallyUsedValues, IgnoreList); 6963 } 6964 if (V.isTreeTinyAndNotFullyVectorizable()) 6965 break; 6966 if (V.isLoadCombineReductionCandidate(ReductionData.getOpcode())) 6967 break; 6968 6969 V.computeMinimumValueSizes(); 6970 6971 // Estimate cost. 
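      // Note that both costs are deltas (vector cost minus scalar cost), so a
      // negative value means the vector form is cheaper; the candidate is only
      // vectorized below when Cost < -SLPCostThreshold.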
      int TreeCost = V.getTreeCost();
      int ReductionCost = getReductionCost(TTI, ReducedVals[i], ReduxWidth);
      int Cost = TreeCost + ReductionCost;
      if (Cost >= -SLPCostThreshold) {
        V.getORE()->emit([&]() {
          return OptimizationRemarkMissed(SV_NAME, "HorSLPNotBeneficial",
                                          cast<Instruction>(VL[0]))
                 << "Vectorizing horizontal reduction is possible "
                 << "but not beneficial with cost " << ore::NV("Cost", Cost)
                 << " and threshold "
                 << ore::NV("Threshold", -SLPCostThreshold);
        });
        break;
      }

      LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
                        << Cost << ". (HorRdx)\n");
      V.getORE()->emit([&]() {
        return OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction",
                                  cast<Instruction>(VL[0]))
               << "Vectorized horizontal reduction with cost "
               << ore::NV("Cost", Cost) << " and with tree size "
               << ore::NV("TreeSize", V.getTreeSize());
      });

      // Vectorize a tree.
      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
      Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);

      // Emit a reduction. For min/max, the root is a select, but the insertion
      // point is the compare condition of that select.
      Instruction *RdxRootInst = cast<Instruction>(ReductionRoot);
      if (ReductionData.isMinMax())
        Builder.SetInsertPoint(getCmpForMinMaxReduction(RdxRootInst));
      else
        Builder.SetInsertPoint(RdxRootInst);

      Value *ReducedSubTree =
          emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);

      if (!VectorizedTree) {
        // Initialize the final value in the reduction.
        VectorizedTree = ReducedSubTree;
      } else {
        // Update the final value in the reduction.
        Builder.SetCurrentDebugLocation(Loc);
        OperationData VectReductionData(ReductionData.getOpcode(),
                                        VectorizedTree, ReducedSubTree,
                                        ReductionData.getKind());
        VectorizedTree =
            VectReductionData.createOp(Builder, "op.rdx", ReductionOps);
      }
      i += ReduxWidth;
      ReduxWidth = PowerOf2Floor(NumReducedVals - i);
    }

    if (VectorizedTree) {
      // Finish the reduction.
      for (; i < NumReducedVals; ++i) {
        auto *I = cast<Instruction>(ReducedVals[i]);
        Builder.SetCurrentDebugLocation(I->getDebugLoc());
        OperationData VectReductionData(ReductionData.getOpcode(),
                                        VectorizedTree, I,
                                        ReductionData.getKind());
        VectorizedTree = VectReductionData.createOp(Builder, "", ReductionOps);
      }
      for (auto &Pair : ExternallyUsedValues) {
        // Add each externally used value to the final reduction.
        for (auto *I : Pair.second) {
          Builder.SetCurrentDebugLocation(I->getDebugLoc());
          OperationData VectReductionData(ReductionData.getOpcode(),
                                          VectorizedTree, Pair.first,
                                          ReductionData.getKind());
          VectorizedTree = VectReductionData.createOp(Builder, "op.extra", I);
        }
      }

      // Update users. For a min/max reduction that ends with a compare and
      // select, we also have to RAUW for the compare instruction feeding the
      // reduction root. That's because the original compare may have extra
      // uses besides the final select of the reduction.
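      // For illustration (hypothetical IR): given a scalar min/max root
      //   %cmp = icmp sgt i32 %a, %b        ; may have users besides %sel
      //   %sel = select i1 %cmp, i32 %a, i32 %b
      // the extra users of %cmp are rewritten below to use the condition of
      // the vector select that replaces %sel.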
7053 if (ReductionData.isMinMax()) { 7054 if (auto *VecSelect = dyn_cast<SelectInst>(VectorizedTree)) { 7055 Instruction *ScalarCmp = 7056 getCmpForMinMaxReduction(cast<Instruction>(ReductionRoot)); 7057 ScalarCmp->replaceAllUsesWith(VecSelect->getCondition()); 7058 } 7059 } 7060 ReductionRoot->replaceAllUsesWith(VectorizedTree); 7061 7062 // Mark all scalar reduction ops for deletion, they are replaced by the 7063 // vector reductions. 7064 V.eraseInstructions(IgnoreList); 7065 } 7066 return VectorizedTree != nullptr; 7067 } 7068 7069 unsigned numReductionValues() const { 7070 return ReducedVals.size(); 7071 } 7072 7073 private: 7074 /// Calculate the cost of a reduction. 7075 int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal, 7076 unsigned ReduxWidth) { 7077 Type *ScalarTy = FirstReducedVal->getType(); 7078 auto *VecTy = FixedVectorType::get(ScalarTy, ReduxWidth); 7079 7080 int PairwiseRdxCost; 7081 int SplittingRdxCost; 7082 switch (ReductionData.getKind()) { 7083 case RK_Arithmetic: 7084 PairwiseRdxCost = 7085 TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy, 7086 /*IsPairwiseForm=*/true); 7087 SplittingRdxCost = 7088 TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy, 7089 /*IsPairwiseForm=*/false); 7090 break; 7091 case RK_SMin: 7092 case RK_SMax: 7093 case RK_UMin: 7094 case RK_UMax: { 7095 auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VecTy)); 7096 bool IsUnsigned = ReductionData.getKind() == RK_UMin || 7097 ReductionData.getKind() == RK_UMax; 7098 PairwiseRdxCost = 7099 TTI->getMinMaxReductionCost(VecTy, VecCondTy, 7100 /*IsPairwiseForm=*/true, IsUnsigned); 7101 SplittingRdxCost = 7102 TTI->getMinMaxReductionCost(VecTy, VecCondTy, 7103 /*IsPairwiseForm=*/false, IsUnsigned); 7104 break; 7105 } 7106 case RK_None: 7107 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 7108 } 7109 7110 IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost; 7111 int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost; 7112 7113 int ScalarReduxCost = 0; 7114 switch (ReductionData.getKind()) { 7115 case RK_Arithmetic: 7116 ScalarReduxCost = 7117 TTI->getArithmeticInstrCost(ReductionData.getOpcode(), ScalarTy); 7118 break; 7119 case RK_SMin: 7120 case RK_SMax: 7121 case RK_UMin: 7122 case RK_UMax: 7123 ScalarReduxCost = 7124 TTI->getCmpSelInstrCost(ReductionData.getOpcode(), ScalarTy) + 7125 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 7126 CmpInst::makeCmpResultType(ScalarTy)); 7127 break; 7128 case RK_None: 7129 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 7130 } 7131 ScalarReduxCost *= (ReduxWidth - 1); 7132 7133 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost 7134 << " for reduction that starts with " << *FirstReducedVal 7135 << " (It is a " 7136 << (IsPairwiseReduction ? "pairwise" : "splitting") 7137 << " reduction)\n"); 7138 7139 return VecReduxCost - ScalarReduxCost; 7140 } 7141 7142 /// Emit a horizontal reduction of the vectorized value. 7143 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 7144 unsigned ReduxWidth, const TargetTransformInfo *TTI) { 7145 assert(VectorizedValue && "Need to have a vectorized tree node"); 7146 assert(isPowerOf2_32(ReduxWidth) && 7147 "We only handle power-of-two reductions for now"); 7148 7149 if (!IsPairwiseReduction) { 7150 // FIXME: The builder should use an FMF guard. It should not be hard-coded 7151 // to 'fast'. 
7152 assert(Builder.getFastMathFlags().isFast() && "Expected 'fast' FMF"); 7153 return createSimpleTargetReduction( 7154 Builder, TTI, ReductionData.getOpcode(), VectorizedValue, 7155 ReductionData.getFlags(), ReductionOps.back()); 7156 } 7157 7158 Value *TmpVec = VectorizedValue; 7159 for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) { 7160 auto LeftMask = createRdxShuffleMask(ReduxWidth, i, true, true); 7161 auto RightMask = createRdxShuffleMask(ReduxWidth, i, true, false); 7162 7163 Value *LeftShuf = Builder.CreateShuffleVector( 7164 TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l"); 7165 Value *RightShuf = Builder.CreateShuffleVector( 7166 TmpVec, UndefValue::get(TmpVec->getType()), (RightMask), 7167 "rdx.shuf.r"); 7168 OperationData VectReductionData(ReductionData.getOpcode(), LeftShuf, 7169 RightShuf, ReductionData.getKind()); 7170 TmpVec = VectReductionData.createOp(Builder, "op.rdx", ReductionOps); 7171 } 7172 7173 // The result is in the first element of the vector. 7174 return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 7175 } 7176 }; 7177 7178 } // end anonymous namespace 7179 7180 static Optional<unsigned> getAggregateSize(Instruction *InsertInst) { 7181 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) 7182 return cast<FixedVectorType>(IE->getType())->getNumElements(); 7183 7184 unsigned AggregateSize = 1; 7185 auto *IV = cast<InsertValueInst>(InsertInst); 7186 Type *CurrentType = IV->getType(); 7187 do { 7188 if (auto *ST = dyn_cast<StructType>(CurrentType)) { 7189 for (auto *Elt : ST->elements()) 7190 if (Elt != ST->getElementType(0)) // check homogeneity 7191 return None; 7192 AggregateSize *= ST->getNumElements(); 7193 CurrentType = ST->getElementType(0); 7194 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) { 7195 AggregateSize *= AT->getNumElements(); 7196 CurrentType = AT->getElementType(); 7197 } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) { 7198 AggregateSize *= VT->getNumElements(); 7199 return AggregateSize; 7200 } else if (CurrentType->isSingleValueType()) { 7201 return AggregateSize; 7202 } else { 7203 return None; 7204 } 7205 } while (true); 7206 } 7207 7208 static Optional<unsigned> getOperandIndex(Instruction *InsertInst, 7209 unsigned OperandOffset) { 7210 unsigned OperandIndex = OperandOffset; 7211 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) { 7212 if (auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2))) { 7213 auto *VT = cast<FixedVectorType>(IE->getType()); 7214 OperandIndex *= VT->getNumElements(); 7215 OperandIndex += CI->getZExtValue(); 7216 return OperandIndex; 7217 } 7218 return None; 7219 } 7220 7221 auto *IV = cast<InsertValueInst>(InsertInst); 7222 Type *CurrentType = IV->getType(); 7223 for (unsigned int Index : IV->indices()) { 7224 if (auto *ST = dyn_cast<StructType>(CurrentType)) { 7225 OperandIndex *= ST->getNumElements(); 7226 CurrentType = ST->getElementType(Index); 7227 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) { 7228 OperandIndex *= AT->getNumElements(); 7229 CurrentType = AT->getElementType(); 7230 } else { 7231 return None; 7232 } 7233 OperandIndex += Index; 7234 } 7235 return OperandIndex; 7236 } 7237 7238 static bool findBuildAggregate_rec(Instruction *LastInsertInst, 7239 TargetTransformInfo *TTI, 7240 SmallVectorImpl<Value *> &BuildVectorOpds, 7241 SmallVectorImpl<Value *> &InsertElts, 7242 unsigned OperandOffset) { 7243 do { 7244 Value *InsertedOperand = LastInsertInst->getOperand(1); 7245 Optional<unsigned> OperandIndex = 7246 
getOperandIndex(LastInsertInst, OperandOffset); 7247 if (!OperandIndex) 7248 return false; 7249 if (isa<InsertElementInst>(InsertedOperand) || 7250 isa<InsertValueInst>(InsertedOperand)) { 7251 if (!findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI, 7252 BuildVectorOpds, InsertElts, *OperandIndex)) 7253 return false; 7254 } else { 7255 BuildVectorOpds[*OperandIndex] = InsertedOperand; 7256 InsertElts[*OperandIndex] = LastInsertInst; 7257 } 7258 if (isa<UndefValue>(LastInsertInst->getOperand(0))) 7259 return true; 7260 LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0)); 7261 } while (LastInsertInst != nullptr && 7262 (isa<InsertValueInst>(LastInsertInst) || 7263 isa<InsertElementInst>(LastInsertInst)) && 7264 LastInsertInst->hasOneUse()); 7265 return false; 7266 } 7267 7268 /// Recognize construction of vectors like 7269 /// %ra = insertelement <4 x float> undef, float %s0, i32 0 7270 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 7271 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 7272 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 7273 /// starting from the last insertelement or insertvalue instruction. 7274 /// 7275 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>}, 7276 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on. 7277 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples. 7278 /// 7279 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type. 7280 /// 7281 /// \return true if it matches. 7282 static bool findBuildAggregate(Instruction *LastInsertInst, 7283 TargetTransformInfo *TTI, 7284 SmallVectorImpl<Value *> &BuildVectorOpds, 7285 SmallVectorImpl<Value *> &InsertElts) { 7286 7287 assert((isa<InsertElementInst>(LastInsertInst) || 7288 isa<InsertValueInst>(LastInsertInst)) && 7289 "Expected insertelement or insertvalue instruction!"); 7290 7291 assert((BuildVectorOpds.empty() && InsertElts.empty()) && 7292 "Expected empty result vectors!"); 7293 7294 Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst); 7295 if (!AggregateSize) 7296 return false; 7297 BuildVectorOpds.resize(*AggregateSize); 7298 InsertElts.resize(*AggregateSize); 7299 7300 if (findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 7301 0)) { 7302 llvm::erase_if(BuildVectorOpds, 7303 [](const Value *V) { return V == nullptr; }); 7304 llvm::erase_if(InsertElts, [](const Value *V) { return V == nullptr; }); 7305 if (BuildVectorOpds.size() >= 2) 7306 return true; 7307 } 7308 7309 return false; 7310 } 7311 7312 static bool PhiTypeSorterFunc(Value *V, Value *V2) { 7313 return V->getType() < V2->getType(); 7314 } 7315 7316 /// Try and get a reduction value from a phi node. 7317 /// 7318 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 7319 /// if they come from either \p ParentBB or a containing loop latch. 7320 /// 7321 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 7322 /// if not possible. 7323 static Value *getReductionValue(const DominatorTree *DT, PHINode *P, 7324 BasicBlock *ParentBB, LoopInfo *LI) { 7325 // There are situations where the reduction value is not dominated by the 7326 // reduction phi. Vectorizing such cases has been reported to cause 7327 // miscompiles. See PR25787. 
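  // A sketch of the common case handled here (hypothetical IR): a loop-carried
  // reduction phi whose incoming value from the latch is the reduction
  // operation itself:
  //   loop:
  //     %sum = phi i32 [ 0, %entry ], [ %sum.next, %loop ]
  //     ...
  //     %sum.next = add i32 %sum, %x
  // For P = %sum this returns %sum.next, provided its block is dominated by
  // the phi's block.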
7328 auto DominatedReduxValue = [&](Value *R) { 7329 return isa<Instruction>(R) && 7330 DT->dominates(P->getParent(), cast<Instruction>(R)->getParent()); 7331 }; 7332 7333 Value *Rdx = nullptr; 7334 7335 // Return the incoming value if it comes from the same BB as the phi node. 7336 if (P->getIncomingBlock(0) == ParentBB) { 7337 Rdx = P->getIncomingValue(0); 7338 } else if (P->getIncomingBlock(1) == ParentBB) { 7339 Rdx = P->getIncomingValue(1); 7340 } 7341 7342 if (Rdx && DominatedReduxValue(Rdx)) 7343 return Rdx; 7344 7345 // Otherwise, check whether we have a loop latch to look at. 7346 Loop *BBL = LI->getLoopFor(ParentBB); 7347 if (!BBL) 7348 return nullptr; 7349 BasicBlock *BBLatch = BBL->getLoopLatch(); 7350 if (!BBLatch) 7351 return nullptr; 7352 7353 // There is a loop latch, return the incoming value if it comes from 7354 // that. This reduction pattern occasionally turns up. 7355 if (P->getIncomingBlock(0) == BBLatch) { 7356 Rdx = P->getIncomingValue(0); 7357 } else if (P->getIncomingBlock(1) == BBLatch) { 7358 Rdx = P->getIncomingValue(1); 7359 } 7360 7361 if (Rdx && DominatedReduxValue(Rdx)) 7362 return Rdx; 7363 7364 return nullptr; 7365 } 7366 7367 /// Attempt to reduce a horizontal reduction. 7368 /// If it is legal to match a horizontal reduction feeding the phi node \a P 7369 /// with reduction operators \a Root (or one of its operands) in a basic block 7370 /// \a BB, then check if it can be done. If horizontal reduction is not found 7371 /// and root instruction is a binary operation, vectorization of the operands is 7372 /// attempted. 7373 /// \returns true if a horizontal reduction was matched and reduced or operands 7374 /// of one of the binary instruction were vectorized. 7375 /// \returns false if a horizontal reduction was not matched (or not possible) 7376 /// or no vectorization of any binary operation feeding \a Root instruction was 7377 /// performed. 7378 static bool tryToVectorizeHorReductionOrInstOperands( 7379 PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R, 7380 TargetTransformInfo *TTI, 7381 const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) { 7382 if (!ShouldVectorizeHor) 7383 return false; 7384 7385 if (!Root) 7386 return false; 7387 7388 if (Root->getParent() != BB || isa<PHINode>(Root)) 7389 return false; 7390 // Start analysis starting from Root instruction. If horizontal reduction is 7391 // found, try to vectorize it. If it is not a horizontal reduction or 7392 // vectorization is not possible or not effective, and currently analyzed 7393 // instruction is a binary operation, try to vectorize the operands, using 7394 // pre-order DFS traversal order. If the operands were not vectorized, repeat 7395 // the same procedure considering each operand as a possible root of the 7396 // horizontal reduction. 7397 // Interrupt the process if the Root instruction itself was vectorized or all 7398 // sub-trees not higher that RecursionMaxDepth were analyzed/vectorized. 
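  // For illustration (hypothetical source): for `s = (a * b + c * d) + e`, the
  // outer add is tried as a horizontal-reduction root first; if that fails and
  // the add is not vectorized directly, its instruction operands are pushed on
  // the stack and analyzed in the same way (so the inner add, and in turn the
  // multiplies, are considered as possible roots), up to RecursionMaxDepth
  // levels deep.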
7399 SmallVector<std::pair<Instruction *, unsigned>, 8> Stack(1, {Root, 0}); 7400 SmallPtrSet<Value *, 8> VisitedInstrs; 7401 bool Res = false; 7402 while (!Stack.empty()) { 7403 Instruction *Inst; 7404 unsigned Level; 7405 std::tie(Inst, Level) = Stack.pop_back_val(); 7406 auto *BI = dyn_cast<BinaryOperator>(Inst); 7407 auto *SI = dyn_cast<SelectInst>(Inst); 7408 if (BI || SI) { 7409 HorizontalReduction HorRdx; 7410 if (HorRdx.matchAssociativeReduction(P, Inst)) { 7411 if (HorRdx.tryToReduce(R, TTI)) { 7412 Res = true; 7413 // Set P to nullptr to avoid re-analysis of phi node in 7414 // matchAssociativeReduction function unless this is the root node. 7415 P = nullptr; 7416 continue; 7417 } 7418 } 7419 if (P && BI) { 7420 Inst = dyn_cast<Instruction>(BI->getOperand(0)); 7421 if (Inst == P) 7422 Inst = dyn_cast<Instruction>(BI->getOperand(1)); 7423 if (!Inst) { 7424 // Set P to nullptr to avoid re-analysis of phi node in 7425 // matchAssociativeReduction function unless this is the root node. 7426 P = nullptr; 7427 continue; 7428 } 7429 } 7430 } 7431 // Set P to nullptr to avoid re-analysis of phi node in 7432 // matchAssociativeReduction function unless this is the root node. 7433 P = nullptr; 7434 if (Vectorize(Inst, R)) { 7435 Res = true; 7436 continue; 7437 } 7438 7439 // Try to vectorize operands. 7440 // Continue analysis for the instruction from the same basic block only to 7441 // save compile time. 7442 if (++Level < RecursionMaxDepth) 7443 for (auto *Op : Inst->operand_values()) 7444 if (VisitedInstrs.insert(Op).second) 7445 if (auto *I = dyn_cast<Instruction>(Op)) 7446 if (!isa<PHINode>(I) && !R.isDeleted(I) && I->getParent() == BB) 7447 Stack.emplace_back(I, Level); 7448 } 7449 return Res; 7450 } 7451 7452 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V, 7453 BasicBlock *BB, BoUpSLP &R, 7454 TargetTransformInfo *TTI) { 7455 if (!V) 7456 return false; 7457 auto *I = dyn_cast<Instruction>(V); 7458 if (!I) 7459 return false; 7460 7461 if (!isa<BinaryOperator>(I)) 7462 P = nullptr; 7463 // Try to match and vectorize a horizontal reduction. 7464 auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool { 7465 return tryToVectorize(I, R); 7466 }; 7467 return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI, 7468 ExtraVectorization); 7469 } 7470 7471 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI, 7472 BasicBlock *BB, BoUpSLP &R) { 7473 const DataLayout &DL = BB->getModule()->getDataLayout(); 7474 if (!R.canMapToVector(IVI->getType(), DL)) 7475 return false; 7476 7477 SmallVector<Value *, 16> BuildVectorOpds; 7478 SmallVector<Value *, 16> BuildVectorInsts; 7479 if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts)) 7480 return false; 7481 7482 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n"); 7483 // Aggregate value is unlikely to be processed in vector register, we need to 7484 // extract scalars into scalar registers, so NeedExtraction is set true. 
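  // For illustration (hypothetical IR): an aggregate build such as
  //   %i0 = insertvalue { float, float } undef, float %a, 0
  //   %i1 = insertvalue { float, float } %i0, float %b, 1
  // is matched by findBuildAggregate, and its scalar operands (%a, %b) form
  // the list handed to tryToVectorizeList below, with the insert instructions
  // passed along as InsertUses.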
7485 return tryToVectorizeList(BuildVectorOpds, R, /*AllowReorder=*/false, 7486 BuildVectorInsts); 7487 } 7488 7489 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI, 7490 BasicBlock *BB, BoUpSLP &R) { 7491 SmallVector<Value *, 16> BuildVectorInsts; 7492 SmallVector<Value *, 16> BuildVectorOpds; 7493 if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) || 7494 (llvm::all_of(BuildVectorOpds, 7495 [](Value *V) { return isa<ExtractElementInst>(V); }) && 7496 isShuffle(BuildVectorOpds))) 7497 return false; 7498 7499 // Vectorize starting with the build vector operands ignoring the BuildVector 7500 // instructions for the purpose of scheduling and user extraction. 7501 return tryToVectorizeList(BuildVectorOpds, R, /*AllowReorder=*/false, 7502 BuildVectorInsts); 7503 } 7504 7505 bool SLPVectorizerPass::vectorizeCmpInst(CmpInst *CI, BasicBlock *BB, 7506 BoUpSLP &R) { 7507 if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) 7508 return true; 7509 7510 bool OpsChanged = false; 7511 for (int Idx = 0; Idx < 2; ++Idx) { 7512 OpsChanged |= 7513 vectorizeRootInstruction(nullptr, CI->getOperand(Idx), BB, R, TTI); 7514 } 7515 return OpsChanged; 7516 } 7517 7518 bool SLPVectorizerPass::vectorizeSimpleInstructions( 7519 SmallVectorImpl<Instruction *> &Instructions, BasicBlock *BB, BoUpSLP &R) { 7520 bool OpsChanged = false; 7521 for (auto *I : reverse(Instructions)) { 7522 if (R.isDeleted(I)) 7523 continue; 7524 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) 7525 OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R); 7526 else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) 7527 OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R); 7528 else if (auto *CI = dyn_cast<CmpInst>(I)) 7529 OpsChanged |= vectorizeCmpInst(CI, BB, R); 7530 } 7531 Instructions.clear(); 7532 return OpsChanged; 7533 } 7534 7535 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { 7536 bool Changed = false; 7537 SmallVector<Value *, 4> Incoming; 7538 SmallPtrSet<Value *, 16> VisitedInstrs; 7539 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 7540 7541 bool HaveVectorizedPhiNodes = true; 7542 while (HaveVectorizedPhiNodes) { 7543 HaveVectorizedPhiNodes = false; 7544 7545 // Collect the incoming values from the PHIs. 7546 Incoming.clear(); 7547 for (Instruction &I : *BB) { 7548 PHINode *P = dyn_cast<PHINode>(&I); 7549 if (!P) 7550 break; 7551 7552 if (!VisitedInstrs.count(P) && !R.isDeleted(P)) 7553 Incoming.push_back(P); 7554 } 7555 7556 // Sort by type. 7557 llvm::stable_sort(Incoming, PhiTypeSorterFunc); 7558 7559 // Try to vectorize elements base on their type. 7560 for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(), 7561 E = Incoming.end(); 7562 IncIt != E;) { 7563 7564 // Look for the next elements with the same type. 7565 SmallVector<Value *, 4>::iterator SameTypeIt = IncIt; 7566 Type *EltTy = (*IncIt)->getType(); 7567 7568 assert(EltTy->isSized() && 7569 "Instructions should all be sized at this point"); 7570 TypeSize EltTS = DL->getTypeSizeInBits(EltTy); 7571 if (EltTS.isScalable()) { 7572 // For now, just ignore vectorizing scalable types. 
7573 ++IncIt; 7574 continue; 7575 } 7576 7577 unsigned EltSize = EltTS.getFixedSize(); 7578 unsigned MaxNumElts = MaxVecRegSize / EltSize; 7579 if (MaxNumElts < 2) { 7580 ++IncIt; 7581 continue; 7582 } 7583 7584 while (SameTypeIt != E && 7585 (*SameTypeIt)->getType() == EltTy && 7586 static_cast<unsigned>(SameTypeIt - IncIt) < MaxNumElts) { 7587 VisitedInstrs.insert(*SameTypeIt); 7588 ++SameTypeIt; 7589 } 7590 7591 // Try to vectorize them. 7592 unsigned NumElts = (SameTypeIt - IncIt); 7593 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs (" 7594 << NumElts << ")\n"); 7595 // The order in which the phi nodes appear in the program does not matter. 7596 // So allow tryToVectorizeList to reorder them if it is beneficial. This 7597 // is done when there are exactly two elements since tryToVectorizeList 7598 // asserts that there are only two values when AllowReorder is true. 7599 bool AllowReorder = NumElts == 2; 7600 if (NumElts > 1 && 7601 tryToVectorizeList(makeArrayRef(IncIt, NumElts), R, AllowReorder)) { 7602 // Success start over because instructions might have been changed. 7603 HaveVectorizedPhiNodes = true; 7604 Changed = true; 7605 break; 7606 } 7607 7608 // Start over at the next instruction of a different type (or the end). 7609 IncIt = SameTypeIt; 7610 } 7611 } 7612 7613 VisitedInstrs.clear(); 7614 7615 SmallVector<Instruction *, 8> PostProcessInstructions; 7616 SmallDenseSet<Instruction *, 4> KeyNodes; 7617 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { 7618 // Skip instructions with scalable type. The num of elements is unknown at 7619 // compile-time for scalable type. 7620 if (isa<ScalableVectorType>(it->getType())) 7621 continue; 7622 7623 // Skip instructions marked for the deletion. 7624 if (R.isDeleted(&*it)) 7625 continue; 7626 // We may go through BB multiple times so skip the one we have checked. 7627 if (!VisitedInstrs.insert(&*it).second) { 7628 if (it->use_empty() && KeyNodes.count(&*it) > 0 && 7629 vectorizeSimpleInstructions(PostProcessInstructions, BB, R)) { 7630 // We would like to start over since some instructions are deleted 7631 // and the iterator may become invalid value. 7632 Changed = true; 7633 it = BB->begin(); 7634 e = BB->end(); 7635 } 7636 continue; 7637 } 7638 7639 if (isa<DbgInfoIntrinsic>(it)) 7640 continue; 7641 7642 // Try to vectorize reductions that use PHINodes. 7643 if (PHINode *P = dyn_cast<PHINode>(it)) { 7644 // Check that the PHI is a reduction PHI. 7645 if (P->getNumIncomingValues() != 2) 7646 return Changed; 7647 7648 // Try to match and vectorize a horizontal reduction. 7649 if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R, 7650 TTI)) { 7651 Changed = true; 7652 it = BB->begin(); 7653 e = BB->end(); 7654 continue; 7655 } 7656 continue; 7657 } 7658 7659 // Ran into an instruction without users, like terminator, or function call 7660 // with ignored return value, store. Ignore unused instructions (basing on 7661 // instruction type, except for CallInst and InvokeInst). 7662 if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) || 7663 isa<InvokeInst>(it))) { 7664 KeyNodes.insert(&*it); 7665 bool OpsChanged = false; 7666 if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) { 7667 for (auto *V : it->operand_values()) { 7668 // Try to match and vectorize a horizontal reduction. 
7669 OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI); 7670 } 7671 } 7672 // Start vectorization of post-process list of instructions from the 7673 // top-tree instructions to try to vectorize as many instructions as 7674 // possible. 7675 OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R); 7676 if (OpsChanged) { 7677 // We would like to start over since some instructions are deleted 7678 // and the iterator may become invalid value. 7679 Changed = true; 7680 it = BB->begin(); 7681 e = BB->end(); 7682 continue; 7683 } 7684 } 7685 7686 if (isa<InsertElementInst>(it) || isa<CmpInst>(it) || 7687 isa<InsertValueInst>(it)) 7688 PostProcessInstructions.push_back(&*it); 7689 } 7690 7691 return Changed; 7692 } 7693 7694 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) { 7695 auto Changed = false; 7696 for (auto &Entry : GEPs) { 7697 // If the getelementptr list has fewer than two elements, there's nothing 7698 // to do. 7699 if (Entry.second.size() < 2) 7700 continue; 7701 7702 LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length " 7703 << Entry.second.size() << ".\n"); 7704 7705 // Process the GEP list in chunks suitable for the target's supported 7706 // vector size. If a vector register can't hold 1 element, we are done. We 7707 // are trying to vectorize the index computations, so the maximum number of 7708 // elements is based on the size of the index expression, rather than the 7709 // size of the GEP itself (the target's pointer size). 7710 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 7711 unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin()); 7712 if (MaxVecRegSize < EltSize) 7713 continue; 7714 7715 unsigned MaxElts = MaxVecRegSize / EltSize; 7716 for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) { 7717 auto Len = std::min<unsigned>(BE - BI, MaxElts); 7718 auto GEPList = makeArrayRef(&Entry.second[BI], Len); 7719 7720 // Initialize a set a candidate getelementptrs. Note that we use a 7721 // SetVector here to preserve program order. If the index computations 7722 // are vectorizable and begin with loads, we want to minimize the chance 7723 // of having to reorder them later. 7724 SetVector<Value *> Candidates(GEPList.begin(), GEPList.end()); 7725 7726 // Some of the candidates may have already been vectorized after we 7727 // initially collected them. If so, they are marked as deleted, so remove 7728 // them from the set of candidates. 7729 Candidates.remove_if( 7730 [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); }); 7731 7732 // Remove from the set of candidates all pairs of getelementptrs with 7733 // constant differences. Such getelementptrs are likely not good 7734 // candidates for vectorization in a bottom-up phase since one can be 7735 // computed from the other. We also ensure all candidate getelementptr 7736 // indices are unique. 
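      // For illustration (hypothetical IR): for
      //   %g1 = getelementptr i32, i32* %p, i64 %i
      //   %g2 = getelementptr i32, i32* %p, i64 %j   ; where %j = add i64 %i, 4
      // SCEV(%g2) - SCEV(%g1) is a constant, so one address can be computed
      // from the other and both are dropped from the candidate set.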
7737 for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) { 7738 auto *GEPI = GEPList[I]; 7739 if (!Candidates.count(GEPI)) 7740 continue; 7741 auto *SCEVI = SE->getSCEV(GEPList[I]); 7742 for (int J = I + 1; J < E && Candidates.size() > 1; ++J) { 7743 auto *GEPJ = GEPList[J]; 7744 auto *SCEVJ = SE->getSCEV(GEPList[J]); 7745 if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) { 7746 Candidates.remove(GEPI); 7747 Candidates.remove(GEPJ); 7748 } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) { 7749 Candidates.remove(GEPJ); 7750 } 7751 } 7752 } 7753 7754 // We break out of the above computation as soon as we know there are 7755 // fewer than two candidates remaining. 7756 if (Candidates.size() < 2) 7757 continue; 7758 7759 // Add the single, non-constant index of each candidate to the bundle. We 7760 // ensured the indices met these constraints when we originally collected 7761 // the getelementptrs. 7762 SmallVector<Value *, 16> Bundle(Candidates.size()); 7763 auto BundleIndex = 0u; 7764 for (auto *V : Candidates) { 7765 auto *GEP = cast<GetElementPtrInst>(V); 7766 auto *GEPIdx = GEP->idx_begin()->get(); 7767 assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx)); 7768 Bundle[BundleIndex++] = GEPIdx; 7769 } 7770 7771 // Try and vectorize the indices. We are currently only interested in 7772 // gather-like cases of the form: 7773 // 7774 // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ... 7775 // 7776 // where the loads of "a", the loads of "b", and the subtractions can be 7777 // performed in parallel. It's likely that detecting this pattern in a 7778 // bottom-up phase will be simpler and less costly than building a 7779 // full-blown top-down phase beginning at the consecutive loads. 7780 Changed |= tryToVectorizeList(Bundle, R); 7781 } 7782 } 7783 return Changed; 7784 } 7785 7786 bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) { 7787 bool Changed = false; 7788 // Attempt to sort and vectorize each of the store-groups. 7789 for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e; 7790 ++it) { 7791 if (it->second.size() < 2) 7792 continue; 7793 7794 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " 7795 << it->second.size() << ".\n"); 7796 7797 Changed |= vectorizeStores(it->second, R); 7798 } 7799 return Changed; 7800 } 7801 7802 char SLPVectorizer::ID = 0; 7803 7804 static const char lv_name[] = "SLP Vectorizer"; 7805 7806 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false) 7807 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7808 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7809 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7810 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7811 INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 7812 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7813 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7814 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7815 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false) 7816 7817 Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); } 7818