//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h" 88 #include "llvm/Support/raw_ostream.h" 89 #include "llvm/Transforms/Utils/InjectTLIMappings.h" 90 #include "llvm/Transforms/Utils/LoopUtils.h" 91 #include "llvm/Transforms/Vectorize.h" 92 #include <algorithm> 93 #include <cassert> 94 #include <cstdint> 95 #include <iterator> 96 #include <memory> 97 #include <set> 98 #include <string> 99 #include <tuple> 100 #include <utility> 101 #include <vector> 102 103 using namespace llvm; 104 using namespace llvm::PatternMatch; 105 using namespace slpvectorizer; 106 107 #define SV_NAME "slp-vectorizer" 108 #define DEBUG_TYPE "SLP" 109 110 STATISTIC(NumVectorInstructions, "Number of vector instructions generated"); 111 112 cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden, 113 cl::desc("Run the SLP vectorization passes")); 114 115 static cl::opt<int> 116 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden, 117 cl::desc("Only vectorize if you gain more than this " 118 "number ")); 119 120 static cl::opt<bool> 121 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden, 122 cl::desc("Attempt to vectorize horizontal reductions")); 123 124 static cl::opt<bool> ShouldStartVectorizeHorAtStore( 125 "slp-vectorize-hor-store", cl::init(false), cl::Hidden, 126 cl::desc( 127 "Attempt to vectorize horizontal reductions feeding into a store")); 128 129 static cl::opt<int> 130 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden, 131 cl::desc("Attempt to vectorize for this register size in bits")); 132 133 static cl::opt<unsigned> 134 MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden, 135 cl::desc("Maximum SLP vectorization factor (0=unlimited)")); 136 137 static cl::opt<int> 138 MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden, 139 cl::desc("Maximum depth of the lookup for consecutive stores.")); 140 141 /// Limits the size of scheduling regions in a block. 142 /// It avoid long compile times for _very_ large blocks where vector 143 /// instructions are spread over a wide range. 144 /// This limit is way higher than needed by real-world functions. 145 static cl::opt<int> 146 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden, 147 cl::desc("Limit the size of the SLP scheduling region per block")); 148 149 static cl::opt<int> MinVectorRegSizeOption( 150 "slp-min-reg-size", cl::init(128), cl::Hidden, 151 cl::desc("Attempt to vectorize for this register size in bits")); 152 153 static cl::opt<unsigned> RecursionMaxDepth( 154 "slp-recursion-max-depth", cl::init(12), cl::Hidden, 155 cl::desc("Limit the recursion depth when building a vectorizable tree")); 156 157 static cl::opt<unsigned> MinTreeSize( 158 "slp-min-tree-size", cl::init(3), cl::Hidden, 159 cl::desc("Only vectorize small trees if they are fully vectorizable")); 160 161 // The maximum depth that the look-ahead score heuristic will explore. 162 // The higher this value, the higher the compilation time overhead. 163 static cl::opt<int> LookAheadMaxDepth( 164 "slp-max-look-ahead-depth", cl::init(2), cl::Hidden, 165 cl::desc("The maximum look-ahead depth for operand reordering scores")); 166 167 // The Look-ahead heuristic goes through the users of the bundle to calculate 168 // the users cost in getExternalUsesCost(). To avoid compilation time increase 169 // we limit the number of users visited to this value. 
static cl::opt<unsigned> LookAheadUsersBudget(
    "slp-look-ahead-users-budget", cl::init(2), cl::Hidden,
    cl::desc("The maximum number of users to visit while visiting the "
             "predecessors. This prevents a compilation time increase."));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns True if the value is a constant (but not globals/constant
/// expressions).
static bool isConstant(Value *V) {
  return isa<Constant>(V) && !isa<ConstantExpr>(V) && !isa<GlobalValue>(V);
}

/// Checks if \p V is one of the vector-like instructions, i.e. undef,
/// insertelement/extractelement with constant indices for a fixed vector type,
/// or an extractvalue instruction.
static bool isVectorLikeInstWithConstOps(Value *V) {
  if (!isa<InsertElementInst, ExtractElementInst>(V) &&
      !isa<ExtractValueInst, UndefValue>(V))
    return false;
  auto *I = dyn_cast<Instruction>(V);
  if (!I || isa<ExtractValueInst>(I))
    return true;
  if (!isa<FixedVectorType>(I->getOperand(0)->getType()))
    return false;
  if (isa<ExtractElementInst>(I))
    return isConstant(I->getOperand(1));
  assert(isa<InsertElementInst>(V) && "Expected only insertelement.");
  return isConstant(I->getOperand(2));
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  if (all_of(VL, isVectorLikeInstWithConstOps))
    return true;

  BasicBlock *BB = I0->getParent();
  for (int I = 1, E = VL.size(); I < E; I++) {
    auto *II = dyn_cast<Instruction>(VL[I]);
    if (!II)
      return false;

    if (BB != II->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants (but not
/// globals/constant expressions).
static bool allConstant(ArrayRef<Value *> VL) {
  // Constant expressions and globals can't be vectorized like normal
  // integer/FP constants.
  return all_of(VL, isConstant);
}

/// \returns True if all of the values in \p VL are identical.
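/// For example, {%a, %a, %a, %a} is a splat, while {%a, %b, %a, %b} is not.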
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns True if \p I is commutative; handles CmpInst and BinaryOperator.
static bool isCommutative(Instruction *I) {
  if (auto *Cmp = dyn_cast<CmpInst>(I))
    return Cmp->isCommutative();
  if (auto *BO = dyn_cast<BinaryOperator>(I))
    return BO->isCommutative();
  // TODO: This should check for generic Instruction::isCommutative(), but
  // we need to confirm that the caller code correctly handles Intrinsics
  // for example (does not have 2 operands).
  return false;
}

/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> poison, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> poison, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul.
/// Mask will return the Shuffle Mask equivalent to the extracted elements.
/// TODO: Can we split off and reuse the shuffle mask detection from
/// TargetTransformInfo::getInstructionThroughput?
static Optional<TargetTransformInfo::ShuffleKind>
isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
  auto *EI0 = cast<ExtractElementInst>(VL[0]);
  if (isa<ScalableVectorType>(EI0->getVectorOperandType()))
    return None;
  unsigned Size =
      cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, Select, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    auto *EI = cast<ExtractElementInst>(VL[I]);
    auto *Vec = EI->getVectorOperand();
    // All vector operands must have the same number of vector elements.
    if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
      return None;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size)) {
      Mask.push_back(UndefMaskElem);
      continue;
    }
    unsigned IntIdx = Idx->getValue().getZExtValue();
    Mask.push_back(IntIdx);
    // We can extractelement from an undef or poison vector.
    if (isa<UndefValue>(Vec))
      continue;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
    if (!Vec1 || Vec1 == Vec)
      Vec1 = Vec;
    else if (!Vec2 || Vec2 == Vec)
      Vec2 = Vec;
    else
      return None;
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector, otherwise
  // we have a permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
  bool isAltShuffle() const { return getOpcode() != getAltOpcode(); }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace

/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}

/// \returns true if \p Opcode is allowed as part of the main/alternate
/// instruction for SLP vectorization.
///
/// Example of an unsupported opcode is SDIV, which can potentially cause UB if
/// the "shuffled out" lane would result in division by zero.
static bool isValidForAlternation(unsigned Opcode) {
  if (Instruction::isIntDivRem(Opcode))
    return false;

  return true;
}

/// \returns analysis of the instructions in \p VL described in
/// InstructionsState: the opcode with which we suppose the whole list
/// could be vectorized, even if its structure is diverse.
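/// For example, for the list {add, sub, add, sub} the main opcode is Add and
/// the alternate opcode is Sub.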
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex = 0) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
  // TODO - generalize to support all operators (types, calls etc.).
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
    if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
      if (InstOpcode == Opcode || InstOpcode == AltOpcode)
        continue;
      if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
          isValidForAlternation(Opcode)) {
        AltOpcode = InstOpcode;
        AltIndex = Cnt;
        continue;
      }
    } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
      Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
      Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        if (InstOpcode == Opcode || InstOpcode == AltOpcode)
          continue;
        if (Opcode == AltOpcode) {
          assert(isValidForAlternation(Opcode) &&
                 isValidForAlternation(InstOpcode) &&
                 "Cast isn't safe for alternation, logic needs to be updated!");
          AltOpcode = InstOpcode;
          AltIndex = Cnt;
          continue;
        }
      }
    } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
      continue;
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }

  return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
                           cast<Instruction>(VL[AltIndex]));
}

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if an Extract{Value,Element} instruction extracts element Idx.
static Optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return None;
    return CI->getZExtValue();
  }
  ExtractValueInst *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return None;
  return *EI->idx_begin();
}

/// \returns True if an in-tree use also needs an extract. This refers to a
/// possible scalar operand in a vectorized instruction.
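/// For example, the pointer operand of a load stays scalar even when the load
/// itself is vectorized, so a scalar used that way must still be extracted
/// from the vector value.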
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
      if (hasVectorInstrinsicScalarOpd(ID, i))
        return (CI->getArgOperand(i) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AAResults *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

/// Shuffles \p Mask in accordance with the given \p SubMask.
static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask) {
  if (SubMask.empty())
    return;
  if (Mask.empty()) {
    Mask.append(SubMask.begin(), SubMask.end());
    return;
  }
  SmallVector<int> NewMask(SubMask.size(), UndefMaskElem);
  int TermValue = std::min(Mask.size(), SubMask.size());
  for (int I = 0, E = SubMask.size(); I < E; ++I) {
    if (SubMask[I] >= TermValue || SubMask[I] == UndefMaskElem ||
        Mask[SubMask[I]] >= TermValue)
      continue;
    NewMask[I] = Mask[SubMask[I]];
  }
  Mask.swap(NewMask);
}

/// Order may have elements assigned a special value (the size of the order)
/// which is out of bounds. Such indices only appear in places which correspond
/// to undef values (see canReuseExtract for details) and are used to keep
/// undef values from affecting the operand ordering.
/// The first loop below simply finds all unused indices and then the next loop
/// nest assigns these indices to the positions of the undef values.
/// In the example below, Order has two undef positions, which are assigned the
/// values 3 and 7 respectively:
/// before:  6 9 5 4 9 2 1 0
/// after:   6 3 5 4 7 2 1 0
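/// (3 and 7 are precisely the indices in [0, 8) that the original order left
/// unused; the unused indices are assigned to the undef positions in
/// ascending order.)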
/// \returns Fixed ordering.
static void fixupOrderingIndices(SmallVectorImpl<unsigned> &Order) {
  const unsigned Sz = Order.size();
  SmallBitVector UsedIndices(Sz);
  SmallVector<int> MaskedIndices;
  for (unsigned I = 0; I < Sz; ++I) {
    if (Order[I] < Sz)
      UsedIndices.set(Order[I]);
    else
      MaskedIndices.push_back(I);
  }
  if (MaskedIndices.empty())
    return;
  SmallVector<int> AvailableIndices(MaskedIndices.size());
  unsigned Cnt = 0;
  int Idx = UsedIndices.find_first_unset();
  do {
    AvailableIndices[Cnt] = Idx;
    Idx = UsedIndices.find_next_unset(Idx);
    ++Cnt;
  } while (Idx > 0);
  assert(Cnt == MaskedIndices.size() && "Non-synced masked/available indices.");
  for (int I = 0, E = MaskedIndices.size(); I < E; ++I)
    Order[MaskedIndices[I]] = AvailableIndices[I];
}

namespace llvm {

static void inversePermutation(ArrayRef<unsigned> Indices,
                               SmallVectorImpl<int> &Mask) {
  Mask.clear();
  const unsigned E = Indices.size();
  Mask.resize(E, UndefMaskElem);
  for (unsigned I = 0; I < E; ++I)
    Mask[Indices[I]] = I;
}

/// \returns the inserting index of an InsertElement or InsertValue
/// instruction, using \p Offset as the base offset for the index.
static Optional<int> getInsertIndex(Value *InsertInst, unsigned Offset) {
  int Index = Offset;
  if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
    if (auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2))) {
      auto *VT = cast<FixedVectorType>(IE->getType());
      if (CI->getValue().uge(VT->getNumElements()))
        return UndefMaskElem;
      Index *= VT->getNumElements();
      Index += CI->getZExtValue();
      return Index;
    }
    if (isa<UndefValue>(IE->getOperand(2)))
      return UndefMaskElem;
    return None;
  }

  auto *IV = cast<InsertValueInst>(InsertInst);
  Type *CurrentType = IV->getType();
  for (unsigned I : IV->indices()) {
    if (auto *ST = dyn_cast<StructType>(CurrentType)) {
      Index *= ST->getNumElements();
      CurrentType = ST->getElementType(I);
    } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
      Index *= AT->getNumElements();
      CurrentType = AT->getElementType();
    } else {
      return None;
    }
    Index += I;
  }
  return Index;
}

/// Reorders the list of scalars in accordance with the given \p Mask: the
/// scalar at position I is moved to position Mask[I], i.e. the mask acts as a
/// scatter of the original element order.
static void reorderScalars(SmallVectorImpl<Value *> &Scalars,
                           ArrayRef<int> Mask) {
  assert(!Mask.empty() && "Expected non-empty mask.");
  SmallVector<Value *> Prev(Scalars.size(),
                            UndefValue::get(Scalars.front()->getType()));
  Prev.swap(Scalars);
  for (unsigned I = 0, E = Prev.size(); I < E; ++I)
    if (Mask[I] != UndefMaskElem)
      Scalars[Mask[I]] = Prev[I];
}

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
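///
/// Detects groups of isomorphic scalar instructions (seeded, e.g., by
/// consecutive stores), builds a tree of vectorizable bundles out of their
/// use-def chains, estimates the cost of the tree against the scalar code,
/// and emits vector instructions when profitable.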
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  using OrdersType = SmallVector<unsigned, 4>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC),
        DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize =
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize();

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractvalue instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  InstructionCost getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Builds external uses of the vectorized scalars, i.e. the list of
  /// vectorized scalars to be extracted, their lanes and their scalar users.
  /// \p ExternallyUsedValues contains an additional list of external uses to
  /// handle vectorization of reductions.
  void
  buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {});

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
    InstrElementSize.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// Reorders the current graph to the most profitable order starting from the
  /// root node to the leaf nodes. The best order is chosen only from the nodes
  /// of the same size (vectorization factor). Smaller nodes are considered
  /// parts of subgraphs with a smaller VF and are reordered independently. We
  /// can do this because we still need to extend smaller nodes to the wider VF
  /// and we can merge reordering shuffles with the widening shuffles.
  void reorderTopToBottom();

  /// Reorders the current graph to the most profitable order starting from the
  /// leaves to the root. This allows rotating small subgraphs and reducing the
  /// number of reshuffles if the leaf nodes use the same order. In this case
  /// we can merge the orders and just shuffle the user node instead of
  /// shuffling its operands. Plus, even if the leaf nodes have different
  /// orders, it allows sinking reordering in the graph closer to the root
  /// node, where it can be merged later during analysis.
  void reorderBottomToTop();

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  unsigned getMinVF(unsigned Sz) const {
    return std::max(2U, getMinVecRegSize() / Sz);
  }

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
    unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
        MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
    return MaxVF ? MaxVF : UINT_MAX;
  }

  /// Check if a homogeneous aggregate is isomorphic to some VectorType.
  /// Accepts homogeneous multidimensional aggregates of scalars/vectors like
  /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
  /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable() const;

  /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
  /// can be load combined in the backend.
  /// Load combining may not be allowed in the IR optimizer, so we do not want
  /// to alter the pattern. For example, partially transforming a scalar
  /// bswap() pattern into vector code is effectively impossible for the
  /// backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  ///       may not be necessary.
  bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;

  /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  ///       may not be necessary.
  bool isLoadCombineCandidate() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }

  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
  };

  /// A helper data structure to hold the operands of a vector of instructions.
  /// This supports a fixed vector length for all operand vectors.
  class VLOperands {
    /// For each operand we need (i) the value, and (ii) the opcode that it
    /// would be attached to if the expression was in a left-linearized form.
    /// This is required to avoid illegal operand reordering.
    /// For example:
    /// \verbatim
    ///                         0 Op1
    ///                         |/
    /// Op1 Op2   Linearized    + Op2
    ///   \ /     ---------->   |/
    ///    -                    -
    ///
    /// Op1 - Op2            (0 + Op1) - Op2
    /// \endverbatim
    ///
    /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
    ///
    /// Another way to think of this is to track all the operations across the
    /// path from the operand all the way to the root of the tree and to
    /// calculate the operation that corresponds to this path. For example, the
    /// path from Op2 to the root crosses the RHS of the '-', therefore the
    /// corresponding operation is a '-' (which matches the one in the
    /// linearized tree, as shown above).
    ///
    /// For lack of a better term, we refer to this operation as Accumulated
    /// Path Operation (APO).
    struct OperandData {
      OperandData() = default;
      OperandData(Value *V, bool APO, bool IsUsed)
          : V(V), APO(APO), IsUsed(IsUsed) {}
      /// The operand value.
      Value *V = nullptr;
      /// TreeEntries only allow a single opcode, or an alternate sequence of
      /// them (e.g., +, -). Therefore, we can safely use a boolean value for
      /// the APO.
      /// It is set to 'true' if 'V' is attached to an inverse operation in the
      /// left-linearized form (e.g., Sub/Div), and 'false' otherwise (e.g.,
      /// Add/Mul).
      bool APO = false;
      /// Helper data for the reordering function.
      bool IsUsed = false;
    };

    /// During operand reordering, we are trying to select the operand at the
    /// lane that matches best with the operand at the neighboring lane. Our
    /// selection is based on the type of value we are looking for. For
    /// example, if the neighboring lane has a load, we need to look for a load
    /// that is accessing a consecutive address. These strategies are
    /// summarized in the 'ReorderingMode' enumerator.
    enum class ReorderingMode {
      Load,     ///< Matching loads to consecutive memory addresses
      Opcode,   ///< Matching instructions based on opcode (same or alternate)
      Constant, ///< Matching constants
      Splat,    ///< Matching the same instruction multiple times (broadcast)
      Failed,   ///< We failed to create a vectorizable group
    };

    using OperandDataVec = SmallVector<OperandData, 2>;

    /// A vector of operand vectors.
    SmallVector<OperandDataVec, 4> OpsVec;

    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;

    /// \returns the operand data at \p OpIdx and \p Lane.
    OperandData &getData(unsigned OpIdx, unsigned Lane) {
      return OpsVec[OpIdx][Lane];
    }

    /// \returns the operand data at \p OpIdx and \p Lane. Const version.
    const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
      return OpsVec[OpIdx][Lane];
    }

    /// Clears the used flag for all entries.
    void clearUsed() {
      for (unsigned OpIdx = 0, NumOperands = getNumOperands();
           OpIdx != NumOperands; ++OpIdx)
        for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
             ++Lane)
          OpsVec[OpIdx][Lane].IsUsed = false;
    }

    /// Swap the operand at \p OpIdx1 with the one at \p OpIdx2 in \p Lane.
    void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
      std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
    }

    // The hard-coded scores listed here are not very important. When computing
    // the scores of matching one sub-tree with another, we are basically
    // counting the number of values that are matching. So even if all scores
    // are set to 1, we would still get a decent matching result.
    // However, sometimes we have to break ties. For example we may have to
    // choose between matching loads vs matching opcodes. This is what these
    // scores are helping us with: they provide the order of preference.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 3;
    /// ExtractElementInsts from the same vector and consecutive indexes.
    static const int ScoreConsecutiveExtracts = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g, add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// User external to the vectorized code.
    static const int ExternalUseCost = 1;
    /// The user is internal but in a different lane.
    static const int UserInDiffLaneCost = ExternalUseCost;

    /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
    static int getShallowScore(Value *V1, Value *V2, const DataLayout &DL,
                               ScalarEvolution &SE) {
      auto *LI1 = dyn_cast<LoadInst>(V1);
      auto *LI2 = dyn_cast<LoadInst>(V2);
      if (LI1 && LI2) {
        if (LI1->getParent() != LI2->getParent())
          return VLOperands::ScoreFail;

        Optional<int> Dist = getPointersDiff(
            LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
            LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
        return (Dist && *Dist == 1) ? VLOperands::ScoreConsecutiveLoads
                                    : VLOperands::ScoreFail;
      }

      auto *C1 = dyn_cast<Constant>(V1);
      auto *C2 = dyn_cast<Constant>(V2);
      if (C1 && C2)
        return VLOperands::ScoreConstants;

      // Extracts from consecutive indexes of the same vector score better, as
      // the extracts could be optimized away.
      Value *EV;
      ConstantInt *Ex1Idx, *Ex2Idx;
      if (match(V1, m_ExtractElt(m_Value(EV), m_ConstantInt(Ex1Idx))) &&
          match(V2, m_ExtractElt(m_Deferred(EV), m_ConstantInt(Ex2Idx))) &&
          Ex1Idx->getZExtValue() + 1 == Ex2Idx->getZExtValue())
        return VLOperands::ScoreConsecutiveExtracts;

      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (I1 && I2) {
        if (I1 == I2)
          return VLOperands::ScoreSplat;
        InstructionsState S = getSameOpcode({I1, I2});
        // Note: Only consider instructions with <= 2 operands to avoid
        // complexity explosion.
        if (S.getOpcode() && S.MainOp->getNumOperands() <= 2)
          return S.isAltShuffle() ? VLOperands::ScoreAltOpcodes
                                  : VLOperands::ScoreSameOpcode;
      }

      if (isa<UndefValue>(V2))
        return VLOperands::ScoreUndef;

      return VLOperands::ScoreFail;
    }

    /// Holds the values and their lanes that take part in the look-ahead score
    /// calculation. This is used in the external uses cost calculation.
    SmallDenseMap<Value *, int> InLookAheadValues;

    /// \Returns the additional cost due to uses of \p LHS and \p RHS that are
    /// either external to the vectorized code, or require shuffling.
    int getExternalUsesCost(const std::pair<Value *, int> &LHS,
                            const std::pair<Value *, int> &RHS) {
      int Cost = 0;
      std::array<std::pair<Value *, int>, 2> Values = {{LHS, RHS}};
      for (int Idx = 0, IdxE = Values.size(); Idx != IdxE; ++Idx) {
        Value *V = Values[Idx].first;
        if (isa<Constant>(V)) {
          // Since this is a function pass, it doesn't make semantic sense to
          // walk the users of a subclass of Constant. The users could be in
          // another function, or even another module that happens to be in
          // the same LLVMContext.
          continue;
        }

        // Calculate the absolute lane, using the minimum relative lane of LHS
        // and RHS as base and Idx as the offset.
        int Ln = std::min(LHS.second, RHS.second) + Idx;
        assert(Ln >= 0 && "Bad lane calculation");
        unsigned UsersBudget = LookAheadUsersBudget;
        for (User *U : V->users()) {
          if (const TreeEntry *UserTE = R.getTreeEntry(U)) {
            // The user is in the VectorizableTree. Check if we need to insert.
            auto It = llvm::find(UserTE->Scalars, U);
            assert(It != UserTE->Scalars.end() && "U is in UserTE");
            int UserLn = std::distance(UserTE->Scalars.begin(), It);
            assert(UserLn >= 0 && "Bad lane");
            if (UserLn != Ln)
              Cost += UserInDiffLaneCost;
          } else {
            // Check if the user is in the look-ahead code.
            auto It2 = InLookAheadValues.find(U);
            if (It2 != InLookAheadValues.end()) {
              // The user is in the look-ahead code. Check the lane.
              if (It2->second != Ln)
                Cost += UserInDiffLaneCost;
            } else {
              // The user is neither in the SLP tree nor in the look-ahead
              // code.
              Cost += ExternalUseCost;
            }
          }
          // Limit the number of visited uses to cap compilation time.
          if (--UsersBudget == 0)
            break;
        }
      }
      return Cost;
    }

    /// Go through the operands of \p LHS and \p RHS recursively until \p
    /// MaxLevel, and return the cumulative score. For example:
    /// \verbatim
    ///  A[0]  B[0]  A[1]  B[1]  C[0] D[0]  B[1] A[1]
    ///     \ /         \ /         \ /        \ /
    ///      +           +           +          +
    ///     G1          G2          G3         G4
    /// \endverbatim
    /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
    /// each level recursively, accumulating the score. It starts from matching
    /// the additions at level 0, then moves on to the loads (level 1). The
    /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
    /// {B[0],B[1]} match with VLOperands::ScoreConsecutiveLoads, while
    /// {A[0],C[0]} has a score of VLOperands::ScoreFail.
    /// Please note that the order of the operands does not matter, as we
    /// evaluate the score of all profitable combinations of operands. In
    /// other words the score of G1 and G4 is the same as G1 and G2. This
    /// heuristic is based on ideas described in:
    ///   Look-ahead SLP: Auto-vectorization in the presence of commutative
    ///   operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
    ///   Luís F. W. Góes
    int getScoreAtLevelRec(const std::pair<Value *, int> &LHS,
                           const std::pair<Value *, int> &RHS, int CurrLevel,
                           int MaxLevel) {

      Value *V1 = LHS.first;
      Value *V2 = RHS.first;
      // Get the shallow score of V1 and V2.
      int ShallowScoreAtThisLevel =
          std::max((int)ScoreFail, getShallowScore(V1, V2, DL, SE) -
                                       getExternalUsesCost(LHS, RHS));
      int Lane1 = LHS.second;
      int Lane2 = RHS.second;

      // If reached MaxLevel,
      //  or if V1 and V2 are not instructions,
      //  or if they are SPLAT,
      //  or if they are not consecutive, early return the current cost.
      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
          ShallowScoreAtThisLevel == VLOperands::ScoreFail ||
          (isa<LoadInst>(I1) && isa<LoadInst>(I2) && ShallowScoreAtThisLevel))
        return ShallowScoreAtThisLevel;
      assert(I1 && I2 && "Should have early exited.");

      // Keep track of in-tree values for determining the external-use cost.
      InLookAheadValues[V1] = Lane1;
      InLookAheadValues[V2] = Lane2;

      // Contains the I2 operand indexes that got matched with I1 operands.
      SmallSet<unsigned, 4> Op2Used;

      // Recursion towards the operands of I1 and I2. We are trying all
      // possible operand pairs, and keeping track of the best score.
      for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
           OpIdx1 != NumOperands1; ++OpIdx1) {
        // Try to pair I1's operand at OpIdx1 with the best operand of I2.
        int MaxTmpScore = 0;
        unsigned MaxOpIdx2 = 0;
        bool FoundBest = false;
        // If I2 is commutative try all combinations.
        unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
        unsigned ToIdx = isCommutative(I2)
                             ? I2->getNumOperands()
                             : std::min(I2->getNumOperands(), OpIdx1 + 1);
        assert(FromIdx <= ToIdx && "Bad index");
        for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
          // Skip operands already paired with OpIdx1.
          if (Op2Used.count(OpIdx2))
            continue;
          // Recursively calculate the cost at each level.
          int TmpScore = getScoreAtLevelRec({I1->getOperand(OpIdx1), Lane1},
                                            {I2->getOperand(OpIdx2), Lane2},
                                            CurrLevel + 1, MaxLevel);
          // Look for the best score.
          if (TmpScore > VLOperands::ScoreFail && TmpScore > MaxTmpScore) {
            MaxTmpScore = TmpScore;
            MaxOpIdx2 = OpIdx2;
            FoundBest = true;
          }
        }
        if (FoundBest) {
          // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
          Op2Used.insert(MaxOpIdx2);
          ShallowScoreAtThisLevel += MaxTmpScore;
        }
      }
      return ShallowScoreAtThisLevel;
    }

    /// \Returns the look-ahead score, which tells us how much the sub-trees
    /// rooted at \p LHS and \p RHS match; the more they match the higher the
    /// score. This helps break ties in an informed way when we cannot decide
    /// on the order of the operands by just considering the immediate
    /// predecessors.
    int getLookAheadScore(const std::pair<Value *, int> &LHS,
                          const std::pair<Value *, int> &RHS) {
      InLookAheadValues.clear();
      return getScoreAtLevelRec(LHS, RHS, 1, LookAheadMaxDepth);
    }

    // Search all operands in Ops[*][Lane] for the one that best matches
    // Ops[OpIdx][LastLane] and return its operand index.
    // If no good match can be found, return None.
    Optional<unsigned>
    getBestOperand(unsigned OpIdx, int Lane, int LastLane,
                   ArrayRef<ReorderingMode> ReorderingModes) {
      unsigned NumOperands = getNumOperands();

      // The operand of the previous lane at OpIdx.
      Value *OpLastLane = getData(OpIdx, LastLane).V;

      // Our strategy mode for OpIdx.
      ReorderingMode RMode = ReorderingModes[OpIdx];

      // The linearized opcode of the operand at OpIdx, Lane.
      bool OpIdxAPO = getData(OpIdx, Lane).APO;

      // The best operand index and its score.
      // Sometimes we have more than one option (e.g., Opcode and Undefs), so
      // we are using the score to differentiate between the two.
      struct BestOpData {
        Optional<unsigned> Idx = None;
        unsigned Score = 0;
      } BestOp;

      // Iterate through all unused operands and look for the best.
      for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
        // Get the operand at Idx and Lane.
        OperandData &OpData = getData(Idx, Lane);
        Value *Op = OpData.V;
        bool OpAPO = OpData.APO;

        // Skip already selected operands.
        if (OpData.IsUsed)
          continue;

        // Skip if we are trying to move the operand to a position with a
        // different opcode in the linearized tree form. This would break the
        // semantics.
        if (OpAPO != OpIdxAPO)
          continue;

        // Look for an operand that matches the current mode.
        switch (RMode) {
        case ReorderingMode::Load:
        case ReorderingMode::Constant:
        case ReorderingMode::Opcode: {
          bool LeftToRight = Lane > LastLane;
          Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
          Value *OpRight = (LeftToRight) ? Op : OpLastLane;
          unsigned Score =
              getLookAheadScore({OpLeft, LastLane}, {OpRight, Lane});
          if (Score > BestOp.Score) {
            BestOp.Idx = Idx;
            BestOp.Score = Score;
          }
          break;
        }
        case ReorderingMode::Splat:
          if (Op == OpLastLane)
            BestOp.Idx = Idx;
          break;
        case ReorderingMode::Failed:
          return None;
        }
      }

      if (BestOp.Idx) {
        getData(BestOp.Idx.getValue(), Lane).IsUsed = true;
        return BestOp.Idx;
      }
      // If we could not find a good match return None.
      return None;
    }

    /// Helper for reorderOperandVecs.
    /// \Returns the lane that we should start reordering from. This is the one
    /// which has the least number of operands that can freely move about.
    unsigned getBestLaneToStartReordering() const {
      unsigned BestLane = 0;
      unsigned Min = UINT_MAX;
      for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
           ++Lane) {
        unsigned NumFreeOps = getMaxNumOperandsThatCanBeReordered(Lane);
        if (NumFreeOps < Min) {
          Min = NumFreeOps;
          BestLane = Lane;
        }
      }
      return BestLane;
    }

    /// \Returns the maximum number of operands that are allowed to be
    /// reordered for \p Lane. This is used as a heuristic for selecting the
    /// first lane to start operand reordering.
    unsigned getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
      unsigned CntTrue = 0;
      unsigned NumOperands = getNumOperands();
      // Operands with the same APO can be reordered. We therefore need to
      // count how many of them we have for each APO, like this: Cnt[APO] = x.
      // Since we only have two APOs, namely true and false, we can avoid using
      // a map. Instead we can simply count the number of operands that
      // correspond to one of them (in this case the 'true' APO), and calculate
      // the other by subtracting it from the total number of operands.
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx)
        if (getData(OpIdx, Lane).APO)
          ++CntTrue;
      unsigned CntFalse = NumOperands - CntTrue;
      return std::max(CntTrue, CntFalse);
    }

    /// Go through the instructions in VL and append their operands.
    void appendOperandsOfVL(ArrayRef<Value *> VL) {
      assert(!VL.empty() && "Bad VL");
      assert((empty() || VL.size() == getNumLanes()) &&
             "Expected same number of lanes");
      assert(isa<Instruction>(VL[0]) && "Expected instruction");
      unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
      OpsVec.resize(NumOperands);
      unsigned NumLanes = VL.size();
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
        OpsVec[OpIdx].resize(NumLanes);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
          // Our tree has just 3 nodes: the root and two operands.
          // It is therefore trivial to get the APO. We only need to check the
          // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
          // RHS operand. The LHS operand of both add and sub is never attached
          // to an inverse operation in the linearized form, therefore its APO
          // is false. The RHS is true only if VL[Lane] is an inverse operation.
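          // For example, for VL[Lane] = (a - b): 'a' (OpIdx 0) gets APO ==
          // false, while 'b' (OpIdx 1) gets APO == true, since the RHS of a
          // subtraction is attached to an inverse operation.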

          // Since operand reordering is performed on groups of commutative
          // operations or alternating sequences (e.g., +, -), we can safely
          // tell the inverse operations by checking commutativity.
          bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
          bool APO = (OpIdx == 0) ? false : IsInverseOperation;
          OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
                                 APO, false};
        }
      }
    }

    /// \returns the number of operands.
    unsigned getNumOperands() const { return OpsVec.size(); }

    /// \returns the number of lanes.
    unsigned getNumLanes() const { return OpsVec[0].size(); }

    /// \returns the operand value at \p OpIdx and \p Lane.
    Value *getValue(unsigned OpIdx, unsigned Lane) const {
      return getData(OpIdx, Lane).V;
    }

    /// \returns true if the data structure is empty.
    bool empty() const { return OpsVec.empty(); }

    /// Clears the data.
    void clear() { OpsVec.clear(); }

    /// \Returns true if there are enough operands identical to \p Op to fill
    /// the whole vector.
    /// Note: This modifies the 'IsUsed' flag, so a cleanUsed() must follow.
    bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
      bool OpAPO = getData(OpIdx, Lane).APO;
      for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
        if (Ln == Lane)
          continue;
        // This is set to true if we found a candidate for broadcast at Lane.
        bool FoundCandidate = false;
        for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
          OperandData &Data = getData(OpI, Ln);
          if (Data.APO != OpAPO || Data.IsUsed)
            continue;
          if (Data.V == Op) {
            FoundCandidate = true;
            Data.IsUsed = true;
            break;
          }
        }
        if (!FoundCandidate)
          return false;
      }
      return true;
    }

  public:
    /// Initialize with all the operands of the instruction vector \p RootVL.
    VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
               ScalarEvolution &SE, const BoUpSLP &R)
        : DL(DL), SE(SE), R(R) {
      // Append all the operands of RootVL.
      appendOperandsOfVL(RootVL);
    }

    /// \Returns a value vector with the operands across all lanes for the
    /// operand at \p OpIdx.
    ValueList getVL(unsigned OpIdx) const {
      ValueList OpVL(OpsVec[OpIdx].size());
      assert(OpsVec[OpIdx].size() == getNumLanes() &&
             "Expected same num of lanes across all operands");
      for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
        OpVL[Lane] = OpsVec[OpIdx][Lane].V;
      return OpVL;
    }

    // Performs operand reordering for 2 or more operands.
    // The original operands are in OrigOps[OpIdx][Lane].
    // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'.
    void reorder() {
      unsigned NumOperands = getNumOperands();
      unsigned NumLanes = getNumLanes();
      // Each operand has its own mode. We are using this mode to help us
      // select the instructions for each lane, so that they match best with
      // the ones we have selected so far.
      SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);

      // This is a greedy single-pass algorithm. We are going over each lane
      // once and deciding on the best order right away with no back-tracking.
      // However, in order to increase its effectiveness, we start with the
      // lane that has operands that can move the least.
For example, given the
1421 // following lanes:
1422 // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd
1423 // Lane 1 : A[1] = C[1] - B[1] // Visited 1st
1424 // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd
1425 // Lane 3 : A[3] = C[3] - B[3] // Visited 4th
1426 // we will start at Lane 1, since the operands of the subtraction cannot
1427 // be reordered. Then we will visit the rest of the lanes in a circular
1428 // fashion. That is, Lane 2, then Lane 0, and finally Lane 3.
1429
1430 // Find the first lane that we will start our search from.
1431 unsigned FirstLane = getBestLaneToStartReordering();
1432
1433 // Initialize the modes.
1434 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1435 Value *OpLane0 = getValue(OpIdx, FirstLane);
1436 // Keep track if we have instructions with all the same opcode on one
1437 // side.
1438 if (isa<LoadInst>(OpLane0))
1439 ReorderingModes[OpIdx] = ReorderingMode::Load;
1440 else if (isa<Instruction>(OpLane0)) {
1441 // Check if OpLane0 should be broadcast.
1442 if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
1443 ReorderingModes[OpIdx] = ReorderingMode::Splat;
1444 else
1445 ReorderingModes[OpIdx] = ReorderingMode::Opcode;
1446 }
1447 else if (isa<Constant>(OpLane0))
1448 ReorderingModes[OpIdx] = ReorderingMode::Constant;
1449 else if (isa<Argument>(OpLane0))
1450 // Our best hope is a Splat. It may save some cost in some cases.
1451 ReorderingModes[OpIdx] = ReorderingMode::Splat;
1452 else
1453 // NOTE: This should be unreachable.
1454 ReorderingModes[OpIdx] = ReorderingMode::Failed;
1455 }
1456
1457 // If the initial strategy fails for any of the operand indexes, then we
1458 // perform reordering again in a second pass. This helps avoid assigning
1459 // high priority to the failed strategy, and should improve reordering for
1460 // the non-failed operand indexes.
1461 for (int Pass = 0; Pass != 2; ++Pass) {
1462 // Skip the second pass if the first pass did not fail.
1463 bool StrategyFailed = false;
1464 // Mark all operand data as free to use.
1465 clearUsed();
1466 // We keep the original operand order for the FirstLane, so reorder the
1467 // rest of the lanes. We are visiting the nodes in a circular fashion,
1468 // using FirstLane as the center point and increasing the radius
1469 // distance.
1470 for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
1471 // Visit the lane on the right and then the lane on the left.
1472 for (int Direction : {+1, -1}) {
1473 int Lane = FirstLane + Direction * Distance;
1474 if (Lane < 0 || Lane >= (int)NumLanes)
1475 continue;
1476 int LastLane = Lane - Direction;
1477 assert(LastLane >= 0 && LastLane < (int)NumLanes &&
1478 "Out of bounds");
1479 // Look for a good match for each operand.
1480 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
1481 // Search for the operand that best matches the one chosen for LastLane.
1482 Optional<unsigned> BestIdx =
1483 getBestOperand(OpIdx, Lane, LastLane, ReorderingModes);
1484 // By not selecting a value, we allow the operands that follow to
1485 // select a better matching value. We will get a non-null value in
1486 // the next run of getBestOperand().
1487 if (BestIdx) {
1488 // Swap the current operand with the one returned by
1489 // getBestOperand().
1490 swap(OpIdx, BestIdx.getValue(), Lane);
1491 } else {
1492 // We failed to find a best operand; set the mode to 'Failed'.
1493 ReorderingModes[OpIdx] = ReorderingMode::Failed;
1494 // Enable the second pass.
1495 StrategyFailed = true;
1496 }
1497 }
1498 }
1499 }
1500 // Skip second pass if the strategy did not fail.
1501 if (!StrategyFailed)
1502 break;
1503 }
1504 }
1505
1506 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1507 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
1508 switch (RMode) {
1509 case ReorderingMode::Load:
1510 return "Load";
1511 case ReorderingMode::Opcode:
1512 return "Opcode";
1513 case ReorderingMode::Constant:
1514 return "Constant";
1515 case ReorderingMode::Splat:
1516 return "Splat";
1517 case ReorderingMode::Failed:
1518 return "Failed";
1519 }
1520 llvm_unreachable("Unimplemented Reordering Type");
1521 }
1522
1523 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
1524 raw_ostream &OS) {
1525 return OS << getModeStr(RMode);
1526 }
1527
1528 /// Debug print.
1529 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
1530 printMode(RMode, dbgs());
1531 }
1532
1533 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
1534 return printMode(RMode, OS);
1535 }
1536
1537 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
1538 const unsigned Indent = 2;
1539 unsigned Cnt = 0;
1540 for (const OperandDataVec &OpDataVec : OpsVec) {
1541 OS << "Operand " << Cnt++ << "\n";
1542 for (const OperandData &OpData : OpDataVec) {
1543 OS.indent(Indent) << "{";
1544 if (Value *V = OpData.V)
1545 OS << *V;
1546 else
1547 OS << "null";
1548 OS << ", APO:" << OpData.APO << "}\n";
1549 }
1550 OS << "\n";
1551 }
1552 return OS;
1553 }
1554
1555 /// Debug print.
1556 LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
1557 #endif
1558 };
1559
1560 /// Checks if the instruction is marked for deletion.
1561 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); }
1562
1563 /// Marks the operands of the given values for later deletion by replacing them with Undefs.
1564 void eraseInstructions(ArrayRef<Value *> AV);
1565
1566 ~BoUpSLP();
1567
1568 private:
1569 /// Checks if all users of \p I are part of the vectorization tree.
1570 bool areAllUsersVectorized(Instruction *I,
1571 ArrayRef<Value *> VectorizedVals) const;
1572
1573 /// \returns the cost of the vectorizable entry.
1574 InstructionCost getEntryCost(const TreeEntry *E,
1575 ArrayRef<Value *> VectorizedVals);
1576
1577 /// This is the recursive part of buildTree.
1578 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
1579 const EdgeInfo &EI);
1580
1581 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
1582 /// be vectorized to use the original vector (or aggregate "bitcast" to a
1583 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
1584 /// returns false, setting \p CurrentOrder to either an empty vector or a
1585 /// non-identity permutation that allows reusing extract instructions.
1586 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
1587 SmallVectorImpl<unsigned> &CurrentOrder) const;
1588
1589 /// Vectorize a single entry in the tree.
1590 Value *vectorizeTree(TreeEntry *E);
1591
1592 /// Vectorize a single entry in the tree, starting at \p VL.
1593 Value *vectorizeTree(ArrayRef<Value *> VL);
1594
1595 /// \returns the scalarization cost for this type. Scalarization in this
1596 /// context means the creation of vectors from a group of scalars.
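/// For illustration only (a rough sketch of the model, not the exact
/// computation): building a <4 x i32> from four scalars is charged roughly
/// one insertelement per lane, while lanes listed in \p ShuffledIndices are
/// assumed to come from a shuffle of an already-vectorized value and are
/// not charged an insert.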
1597 InstructionCost
1598 getGatherCost(FixedVectorType *Ty,
1599 const DenseSet<unsigned> &ShuffledIndices) const;
1600
1601 /// Checks if the gathered \p VL can be represented as shuffle(s) of previous
1602 /// tree entries.
1603 /// \returns ShuffleKind, if gathered values can be represented as shuffles of
1604 /// previous tree entries. \p Mask is filled with the shuffle mask.
1605 Optional<TargetTransformInfo::ShuffleKind>
1606 isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
1607 SmallVectorImpl<const TreeEntry *> &Entries);
1608
1609 /// \returns the scalarization cost for this list of values. Assuming that
1610 /// this subtree gets vectorized, we may need to extract the values from the
1611 /// roots. This method calculates the cost of extracting the values.
1612 InstructionCost getGatherCost(ArrayRef<Value *> VL) const;
1613
1614 /// Set the Builder insert point to one after the last instruction in
1615 /// the bundle.
1616 void setInsertPointAfterBundle(const TreeEntry *E);
1617
1618 /// \returns a vector from a collection of scalars in \p VL.
1619 Value *gather(ArrayRef<Value *> VL);
1620
1621 /// \returns whether the VectorizableTree is fully vectorizable and will
1622 /// be beneficial even if the tree height is tiny.
1623 bool isFullyVectorizableTinyTree() const;
1624
1625 /// Reorder commutative or alt operands to get better probability of
1626 /// generating vectorized code.
1627 static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
1628 SmallVectorImpl<Value *> &Left,
1629 SmallVectorImpl<Value *> &Right,
1630 const DataLayout &DL,
1631 ScalarEvolution &SE,
1632 const BoUpSLP &R);
1633 struct TreeEntry {
1634 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
1635 TreeEntry(VecTreeTy &Container) : Container(Container) {}
1636
1637 /// \returns true if the scalars in VL are equal to this entry.
1638 bool isSame(ArrayRef<Value *> VL) const {
1639 auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) {
1640 if (Mask.size() != VL.size() && VL.size() == Scalars.size())
1641 return std::equal(VL.begin(), VL.end(), Scalars.begin());
1642 return VL.size() == Mask.size() &&
1643 std::equal(
1644 VL.begin(), VL.end(), Mask.begin(),
1645 [Scalars](Value *V, int Idx) { return V == Scalars[Idx]; });
1646 };
1647 if (!ReorderIndices.empty()) {
1648 // TODO: implement matching if the nodes are just reordered; we can
1649 // still treat the vector as the same if the list of scalars matches VL
1650 // directly, without reordering.
1651 SmallVector<int> Mask;
1652 inversePermutation(ReorderIndices, Mask);
1653 if (VL.size() == Scalars.size())
1654 return IsSame(Scalars, Mask);
1655 if (VL.size() == ReuseShuffleIndices.size()) {
1656 ::addMask(Mask, ReuseShuffleIndices);
1657 return IsSame(Scalars, Mask);
1658 }
1659 return false;
1660 }
1661 return IsSame(Scalars, ReuseShuffleIndices);
1662 }
1663
1664 /// A vector of scalars.
1665 ValueList Scalars;
1666
1667 /// The Scalars are vectorized into this value. It is initialized to Null.
1668 Value *VectorizedValue = nullptr;
1669
1670 /// Do we need to gather this sequence or vectorize it
1671 /// (either with vector instructions or with scatter/gather
1672 /// intrinsics for store/load)?
1673 enum EntryState { Vectorize, ScatterVectorize, NeedToGather };
1674 EntryState State;
1675
1676 /// Does this sequence require some shuffling?
1677 SmallVector<int, 4> ReuseShuffleIndices;
1678
1679 /// Does this entry require reordering?
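/// For illustration: if buildTree_rec() is handed VL = {a, b, c} and picks
/// ReorderIndices = {2, 0, 1}, then Scalars ends up as {c, a, b} (see
/// newTreeEntry() below), and findLaneForValue() maps a position in Scalars
/// back through ReorderIndices to the original lane.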
1680 SmallVector<unsigned, 4> ReorderIndices; 1681 1682 /// Points back to the VectorizableTree. 1683 /// 1684 /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has 1685 /// to be a pointer and needs to be able to initialize the child iterator. 1686 /// Thus we need a reference back to the container to translate the indices 1687 /// to entries. 1688 VecTreeTy &Container; 1689 1690 /// The TreeEntry index containing the user of this entry. We can actually 1691 /// have multiple users so the data structure is not truly a tree. 1692 SmallVector<EdgeInfo, 1> UserTreeIndices; 1693 1694 /// The index of this treeEntry in VectorizableTree. 1695 int Idx = -1; 1696 1697 private: 1698 /// The operands of each instruction in each lane Operands[op_index][lane]. 1699 /// Note: This helps avoid the replication of the code that performs the 1700 /// reordering of operands during buildTree_rec() and vectorizeTree(). 1701 SmallVector<ValueList, 2> Operands; 1702 1703 /// The main/alternate instruction. 1704 Instruction *MainOp = nullptr; 1705 Instruction *AltOp = nullptr; 1706 1707 public: 1708 /// Set this bundle's \p OpIdx'th operand to \p OpVL. 1709 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) { 1710 if (Operands.size() < OpIdx + 1) 1711 Operands.resize(OpIdx + 1); 1712 assert(Operands[OpIdx].empty() && "Already resized?"); 1713 Operands[OpIdx].resize(Scalars.size()); 1714 for (unsigned Lane = 0, E = Scalars.size(); Lane != E; ++Lane) 1715 Operands[OpIdx][Lane] = OpVL[Lane]; 1716 } 1717 1718 /// Set the operands of this bundle in their original order. 1719 void setOperandsInOrder() { 1720 assert(Operands.empty() && "Already initialized?"); 1721 auto *I0 = cast<Instruction>(Scalars[0]); 1722 Operands.resize(I0->getNumOperands()); 1723 unsigned NumLanes = Scalars.size(); 1724 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands(); 1725 OpIdx != NumOperands; ++OpIdx) { 1726 Operands[OpIdx].resize(NumLanes); 1727 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { 1728 auto *I = cast<Instruction>(Scalars[Lane]); 1729 assert(I->getNumOperands() == NumOperands && 1730 "Expected same number of operands"); 1731 Operands[OpIdx][Lane] = I->getOperand(OpIdx); 1732 } 1733 } 1734 } 1735 1736 /// Reorders operands of the node to the given mask \p Mask. 1737 void reorderOperands(ArrayRef<int> Mask) { 1738 for (ValueList &Operand : Operands) 1739 reorderScalars(Operand, Mask); 1740 } 1741 1742 /// \returns the \p OpIdx operand of this TreeEntry. 1743 ValueList &getOperand(unsigned OpIdx) { 1744 assert(OpIdx < Operands.size() && "Off bounds"); 1745 return Operands[OpIdx]; 1746 } 1747 1748 /// \returns the number of operands. 1749 unsigned getNumOperands() const { return Operands.size(); } 1750 1751 /// \return the single \p OpIdx operand. 1752 Value *getSingleOperand(unsigned OpIdx) const { 1753 assert(OpIdx < Operands.size() && "Off bounds"); 1754 assert(!Operands[OpIdx].empty() && "No operand available"); 1755 return Operands[OpIdx][0]; 1756 } 1757 1758 /// Some of the instructions in the list have alternate opcodes. 1759 bool isAltShuffle() const { 1760 return getOpcode() != getAltOpcode(); 1761 } 1762 1763 bool isOpcodeOrAlt(Instruction *I) const { 1764 unsigned CheckedOpcode = I->getOpcode(); 1765 return (getOpcode() == CheckedOpcode || 1766 getAltOpcode() == CheckedOpcode); 1767 } 1768 1769 /// Chooses the correct key for scheduling data. If \p Op has the same (or 1770 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is 1771 /// \p OpValue. 
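/// E.g., for an alternating bundle {fadd, fsub, fadd, fsub} (a sketch; the
/// actual main/alternate split is computed by getSameOpcode()), MainOp has
/// the fadd opcode and AltOp the fsub opcode, and isOpcodeOrAlt() accepts
/// instructions with either opcode.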
1772 Value *isOneOf(Value *Op) const {
1773 auto *I = dyn_cast<Instruction>(Op);
1774 if (I && isOpcodeOrAlt(I))
1775 return Op;
1776 return MainOp;
1777 }
1778
1779 void setOperations(const InstructionsState &S) {
1780 MainOp = S.MainOp;
1781 AltOp = S.AltOp;
1782 }
1783
1784 Instruction *getMainOp() const {
1785 return MainOp;
1786 }
1787
1788 Instruction *getAltOp() const {
1789 return AltOp;
1790 }
1791
1792 /// The main/alternate opcodes for the list of instructions.
1793 unsigned getOpcode() const {
1794 return MainOp ? MainOp->getOpcode() : 0;
1795 }
1796
1797 unsigned getAltOpcode() const {
1798 return AltOp ? AltOp->getOpcode() : 0;
1799 }
1800
1801 /// When ReuseShuffleIndices is empty it just returns the position of \p
1802 /// V within the vector of Scalars. Otherwise, it tries to remap V via its reuse index.
1803 int findLaneForValue(Value *V) const {
1804 unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V));
1805 assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
1806 if (!ReorderIndices.empty())
1807 FoundLane = ReorderIndices[FoundLane];
1808 assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
1809 if (!ReuseShuffleIndices.empty()) {
1810 FoundLane = std::distance(ReuseShuffleIndices.begin(),
1811 find(ReuseShuffleIndices, FoundLane));
1812 }
1813 return FoundLane;
1814 }
1815
1816 #ifndef NDEBUG
1817 /// Debug printer.
1818 LLVM_DUMP_METHOD void dump() const {
1819 dbgs() << Idx << ".\n";
1820 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) {
1821 dbgs() << "Operand " << OpI << ":\n";
1822 for (const Value *V : Operands[OpI])
1823 dbgs().indent(2) << *V << "\n";
1824 }
1825 dbgs() << "Scalars: \n";
1826 for (Value *V : Scalars)
1827 dbgs().indent(2) << *V << "\n";
1828 dbgs() << "State: ";
1829 switch (State) {
1830 case Vectorize:
1831 dbgs() << "Vectorize\n";
1832 break;
1833 case ScatterVectorize:
1834 dbgs() << "ScatterVectorize\n";
1835 break;
1836 case NeedToGather:
1837 dbgs() << "NeedToGather\n";
1838 break;
1839 }
1840 dbgs() << "MainOp: ";
1841 if (MainOp)
1842 dbgs() << *MainOp << "\n";
1843 else
1844 dbgs() << "NULL\n";
1845 dbgs() << "AltOp: ";
1846 if (AltOp)
1847 dbgs() << *AltOp << "\n";
1848 else
1849 dbgs() << "NULL\n";
1850 dbgs() << "VectorizedValue: ";
1851 if (VectorizedValue)
1852 dbgs() << *VectorizedValue << "\n";
1853 else
1854 dbgs() << "NULL\n";
1855 dbgs() << "ReuseShuffleIndices: ";
1856 if (ReuseShuffleIndices.empty())
1857 dbgs() << "Empty";
1858 else
1859 for (unsigned ReuseIdx : ReuseShuffleIndices)
1860 dbgs() << ReuseIdx << ", ";
1861 dbgs() << "\n";
1862 dbgs() << "ReorderIndices: ";
1863 for (unsigned ReorderIdx : ReorderIndices)
1864 dbgs() << ReorderIdx << ", ";
1865 dbgs() << "\n";
1866 dbgs() << "UserTreeIndices: ";
1867 for (const auto &EInfo : UserTreeIndices)
1868 dbgs() << EInfo << ", ";
1869 dbgs() << "\n";
1870 }
1871 #endif
1872 };
1873
1874 #ifndef NDEBUG
1875 void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost,
1876 InstructionCost VecCost,
1877 InstructionCost ScalarCost) const {
1878 dbgs() << "SLP: Calculated costs for Tree:\n"; E->dump();
1879 dbgs() << "SLP: Costs:\n";
1880 dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n";
1881 dbgs() << "SLP: VectorCost = " << VecCost << "\n";
1882 dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n";
1883 dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = " <<
1884 ReuseShuffleCost + VecCost - ScalarCost << "\n";
1885 }
1886 #endif
1887
1888 /// Create a new VectorizableTree entry.
1889 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle, 1890 const InstructionsState &S, 1891 const EdgeInfo &UserTreeIdx, 1892 ArrayRef<int> ReuseShuffleIndices = None, 1893 ArrayRef<unsigned> ReorderIndices = None) { 1894 TreeEntry::EntryState EntryState = 1895 Bundle ? TreeEntry::Vectorize : TreeEntry::NeedToGather; 1896 return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx, 1897 ReuseShuffleIndices, ReorderIndices); 1898 } 1899 1900 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, 1901 TreeEntry::EntryState EntryState, 1902 Optional<ScheduleData *> Bundle, 1903 const InstructionsState &S, 1904 const EdgeInfo &UserTreeIdx, 1905 ArrayRef<int> ReuseShuffleIndices = None, 1906 ArrayRef<unsigned> ReorderIndices = None) { 1907 assert(((!Bundle && EntryState == TreeEntry::NeedToGather) || 1908 (Bundle && EntryState != TreeEntry::NeedToGather)) && 1909 "Need to vectorize gather entry?"); 1910 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree)); 1911 TreeEntry *Last = VectorizableTree.back().get(); 1912 Last->Idx = VectorizableTree.size() - 1; 1913 Last->State = EntryState; 1914 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(), 1915 ReuseShuffleIndices.end()); 1916 if (ReorderIndices.empty()) { 1917 Last->Scalars.assign(VL.begin(), VL.end()); 1918 Last->setOperations(S); 1919 } else { 1920 // Reorder scalars and build final mask. 1921 Last->Scalars.assign(VL.size(), nullptr); 1922 transform(ReorderIndices, Last->Scalars.begin(), 1923 [VL](unsigned Idx) -> Value * { 1924 if (Idx >= VL.size()) 1925 return UndefValue::get(VL.front()->getType()); 1926 return VL[Idx]; 1927 }); 1928 InstructionsState S = getSameOpcode(Last->Scalars); 1929 Last->setOperations(S); 1930 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end()); 1931 } 1932 if (Last->State != TreeEntry::NeedToGather) { 1933 for (Value *V : VL) { 1934 assert(!getTreeEntry(V) && "Scalar already in tree!"); 1935 ScalarToTreeEntry[V] = Last; 1936 } 1937 // Update the scheduler bundle to point to this TreeEntry. 1938 unsigned Lane = 0; 1939 for (ScheduleData *BundleMember = Bundle.getValue(); BundleMember; 1940 BundleMember = BundleMember->NextInBundle) { 1941 BundleMember->TE = Last; 1942 BundleMember->Lane = Lane; 1943 ++Lane; 1944 } 1945 assert((!Bundle.getValue() || Lane == VL.size()) && 1946 "Bundle and VL out of sync"); 1947 } else { 1948 MustGather.insert(VL.begin(), VL.end()); 1949 } 1950 1951 if (UserTreeIdx.UserTE) 1952 Last->UserTreeIndices.push_back(UserTreeIdx); 1953 1954 return Last; 1955 } 1956 1957 /// -- Vectorization State -- 1958 /// Holds all of the tree entries. 1959 TreeEntry::VecTreeTy VectorizableTree; 1960 1961 #ifndef NDEBUG 1962 /// Debug printer. 1963 LLVM_DUMP_METHOD void dumpVectorizableTree() const { 1964 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) { 1965 VectorizableTree[Id]->dump(); 1966 dbgs() << "\n"; 1967 } 1968 } 1969 #endif 1970 1971 TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); } 1972 1973 const TreeEntry *getTreeEntry(Value *V) const { 1974 return ScalarToTreeEntry.lookup(V); 1975 } 1976 1977 /// Maps a specific scalar to its tree entry. 1978 SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry; 1979 1980 /// Maps a value to the proposed vectorizable size. 1981 SmallDenseMap<Value *, unsigned> InstrElementSize; 1982 1983 /// A list of scalars that we found that we need to keep as scalars. 
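/// E.g., the scalars of every NeedToGather entry are inserted here by
/// newTreeEntry() above, so later checks can tell that such values remain
/// scalar and have to be combined into a vector via gather().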
1984 ValueSet MustGather;
1985
1986 /// This POD struct describes one external user in the vectorized tree.
1987 struct ExternalUser {
1988 ExternalUser(Value *S, llvm::User *U, int L)
1989 : Scalar(S), User(U), Lane(L) {}
1990
1991 // Which scalar in our function.
1992 Value *Scalar;
1993
1994 // Which user uses the scalar.
1995 llvm::User *User;
1996
1997 // Which lane the scalar belongs to.
1998 int Lane;
1999 };
2000 using UserList = SmallVector<ExternalUser, 16>;
2001
2002 /// Checks if two instructions may access the same memory.
2003 ///
2004 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
2005 /// is invariant in the calling loop.
2006 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
2007 Instruction *Inst2) {
2008 // First check if the result is already in the cache.
2009 AliasCacheKey key = std::make_pair(Inst1, Inst2);
2010 Optional<bool> &result = AliasCache[key];
2011 if (result.hasValue()) {
2012 return result.getValue();
2013 }
2014 bool aliased = true;
2015 if (Loc1.Ptr && isSimple(Inst1))
2016 aliased = isModOrRefSet(AA->getModRefInfo(Inst2, Loc1));
2017 // Store the result in the cache.
2018 result = aliased;
2019 return aliased;
2020 }
2021
2022 using AliasCacheKey = std::pair<Instruction *, Instruction *>;
2023
2024 /// Cache for alias results.
2025 /// TODO: consider moving this to the AliasAnalysis itself.
2026 DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
2027
2028 /// Removes an instruction from its block and eventually deletes it.
2029 /// It's like Instruction::eraseFromParent() except that the actual deletion
2030 /// is delayed until BoUpSLP is destructed.
2031 /// This is required to ensure that there are no incorrect collisions in the
2032 /// AliasCache, which can happen if a new instruction is allocated at the
2033 /// same address as a previously deleted instruction.
2034 void eraseInstruction(Instruction *I, bool ReplaceOpsWithUndef = false) {
2035 auto It = DeletedInstructions.try_emplace(I, ReplaceOpsWithUndef).first;
2036 It->getSecond() = It->getSecond() && ReplaceOpsWithUndef;
2037 }
2038
2039 /// Temporary store for deleted instructions. Instructions will be deleted
2040 /// eventually when the BoUpSLP is destructed.
2041 DenseMap<Instruction *, bool> DeletedInstructions;
2042
2043 /// A list of values that need to be extracted out of the tree.
2044 /// This list holds pairs of (Internal Scalar : External User). External User
2045 /// can be nullptr; that means this Internal Scalar will be used later,
2046 /// after vectorization.
2047 UserList ExternalUses;
2048
2049 /// Values used only by @llvm.assume calls.
2050 SmallPtrSet<const Value *, 32> EphValues;
2051
2052 /// Holds all of the instructions that we gathered.
2053 SetVector<Instruction *> GatherSeq;
2054
2055 /// A list of blocks that we are going to CSE.
2056 SetVector<BasicBlock *> CSEBlocks;
2057
2058 /// Contains all scheduling relevant data for an instruction.
2059 /// A ScheduleData either represents a single instruction or a member of an
2060 /// instruction bundle (= a group of instructions which is combined into a
2061 /// vector instruction).
2062 struct ScheduleData {
2063 // The initial value for the dependency counters. It means that the
2064 // dependencies are not calculated yet.
2065 enum { InvalidDeps = -1 };
2066
2067 ScheduleData() = default;
2068
2069 void init(int BlockSchedulingRegionID, Value *OpVal) {
2070 FirstInBundle = this;
2071 NextInBundle = nullptr;
2072 NextLoadStore = nullptr;
2073 IsScheduled = false;
2074 SchedulingRegionID = BlockSchedulingRegionID;
2075 UnscheduledDepsInBundle = UnscheduledDeps;
2076 clearDependencies();
2077 OpValue = OpVal;
2078 TE = nullptr;
2079 Lane = -1;
2080 }
2081
2082 /// Returns true if the dependency information has been calculated.
2083 bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
2084
2085 /// Returns true for single instructions and for bundle representatives
2086 /// (= the head of a bundle).
2087 bool isSchedulingEntity() const { return FirstInBundle == this; }
2088
2089 /// Returns true if it represents an instruction bundle and not only a
2090 /// single instruction.
2091 bool isPartOfBundle() const {
2092 return NextInBundle != nullptr || FirstInBundle != this;
2093 }
2094
2095 /// Returns true if it is ready for scheduling, i.e. it has no more
2096 /// unscheduled dependent instructions/bundles.
2097 bool isReady() const {
2098 assert(isSchedulingEntity() &&
2099 "can't consider non-scheduling entity for ready list");
2100 return UnscheduledDepsInBundle == 0 && !IsScheduled;
2101 }
2102
2103 /// Modifies the number of unscheduled dependencies, also updating it for
2104 /// the whole bundle.
2105 int incrementUnscheduledDeps(int Incr) {
2106 UnscheduledDeps += Incr;
2107 return FirstInBundle->UnscheduledDepsInBundle += Incr;
2108 }
2109
2110 /// Sets the number of unscheduled dependencies to the number of
2111 /// dependencies.
2112 void resetUnscheduledDeps() {
2113 incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
2114 }
2115
2116 /// Clears all dependency information.
2117 void clearDependencies() {
2118 Dependencies = InvalidDeps;
2119 resetUnscheduledDeps();
2120 MemoryDependencies.clear();
2121 }
2122
2123 void dump(raw_ostream &os) const {
2124 if (!isSchedulingEntity()) {
2125 os << "/ " << *Inst;
2126 } else if (NextInBundle) {
2127 os << '[' << *Inst;
2128 ScheduleData *SD = NextInBundle;
2129 while (SD) {
2130 os << ';' << *SD->Inst;
2131 SD = SD->NextInBundle;
2132 }
2133 os << ']';
2134 } else {
2135 os << *Inst;
2136 }
2137 }
2138
2139 Instruction *Inst = nullptr;
2140
2141 /// Points to the head in an instruction bundle (and always to this for
2142 /// single instructions).
2143 ScheduleData *FirstInBundle = nullptr;
2144
2145 /// Singly linked list of all instructions in a bundle. Null if it is a
2146 /// single instruction.
2147 ScheduleData *NextInBundle = nullptr;
2148
2149 /// Singly linked list of all memory instructions (e.g. load, store, call)
2150 /// in the block - until the end of the scheduling region.
2151 ScheduleData *NextLoadStore = nullptr;
2152
2153 /// The dependent memory instructions.
2154 /// This list is derived on demand in calculateDependencies().
2155 SmallVector<ScheduleData *, 4> MemoryDependencies;
2156
2157 /// This ScheduleData is in the current scheduling region if this matches
2158 /// the current SchedulingRegionID of BlockScheduling.
2159 int SchedulingRegionID = 0;
2160
2161 /// Used for getting a "good" final ordering of instructions.
2162 int SchedulingPriority = 0;
2163
2164 /// The number of dependencies. It consists of the number of users of the
2165 /// instruction plus the number of dependent memory instructions (if any).
2166 /// This value is calculated on demand.
2167 /// If InvalidDeps, the number of dependencies is not calculated yet. 2168 int Dependencies = InvalidDeps; 2169 2170 /// The number of dependencies minus the number of dependencies of scheduled 2171 /// instructions. As soon as this is zero, the instruction/bundle gets ready 2172 /// for scheduling. 2173 /// Note that this is negative as long as Dependencies is not calculated. 2174 int UnscheduledDeps = InvalidDeps; 2175 2176 /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for 2177 /// single instructions. 2178 int UnscheduledDepsInBundle = InvalidDeps; 2179 2180 /// True if this instruction is scheduled (or considered as scheduled in the 2181 /// dry-run). 2182 bool IsScheduled = false; 2183 2184 /// Opcode of the current instruction in the schedule data. 2185 Value *OpValue = nullptr; 2186 2187 /// The TreeEntry that this instruction corresponds to. 2188 TreeEntry *TE = nullptr; 2189 2190 /// The lane of this node in the TreeEntry. 2191 int Lane = -1; 2192 }; 2193 2194 #ifndef NDEBUG 2195 friend inline raw_ostream &operator<<(raw_ostream &os, 2196 const BoUpSLP::ScheduleData &SD) { 2197 SD.dump(os); 2198 return os; 2199 } 2200 #endif 2201 2202 friend struct GraphTraits<BoUpSLP *>; 2203 friend struct DOTGraphTraits<BoUpSLP *>; 2204 2205 /// Contains all scheduling data for a basic block. 2206 struct BlockScheduling { 2207 BlockScheduling(BasicBlock *BB) 2208 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {} 2209 2210 void clear() { 2211 ReadyInsts.clear(); 2212 ScheduleStart = nullptr; 2213 ScheduleEnd = nullptr; 2214 FirstLoadStoreInRegion = nullptr; 2215 LastLoadStoreInRegion = nullptr; 2216 2217 // Reduce the maximum schedule region size by the size of the 2218 // previous scheduling run. 2219 ScheduleRegionSizeLimit -= ScheduleRegionSize; 2220 if (ScheduleRegionSizeLimit < MinScheduleRegionSize) 2221 ScheduleRegionSizeLimit = MinScheduleRegionSize; 2222 ScheduleRegionSize = 0; 2223 2224 // Make a new scheduling region, i.e. all existing ScheduleData is not 2225 // in the new region yet. 2226 ++SchedulingRegionID; 2227 } 2228 2229 ScheduleData *getScheduleData(Value *V) { 2230 ScheduleData *SD = ScheduleDataMap[V]; 2231 if (SD && SD->SchedulingRegionID == SchedulingRegionID) 2232 return SD; 2233 return nullptr; 2234 } 2235 2236 ScheduleData *getScheduleData(Value *V, Value *Key) { 2237 if (V == Key) 2238 return getScheduleData(V); 2239 auto I = ExtraScheduleDataMap.find(V); 2240 if (I != ExtraScheduleDataMap.end()) { 2241 ScheduleData *SD = I->second[Key]; 2242 if (SD && SD->SchedulingRegionID == SchedulingRegionID) 2243 return SD; 2244 } 2245 return nullptr; 2246 } 2247 2248 bool isInSchedulingRegion(ScheduleData *SD) const { 2249 return SD->SchedulingRegionID == SchedulingRegionID; 2250 } 2251 2252 /// Marks an instruction as scheduled and puts all dependent ready 2253 /// instructions into the ready-list. 2254 template <typename ReadyListType> 2255 void schedule(ScheduleData *SD, ReadyListType &ReadyList) { 2256 SD->IsScheduled = true; 2257 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); 2258 2259 ScheduleData *BundleMember = SD; 2260 while (BundleMember) { 2261 if (BundleMember->Inst != BundleMember->OpValue) { 2262 BundleMember = BundleMember->NextInBundle; 2263 continue; 2264 } 2265 // Handle the def-use chain dependencies. 2266 2267 // Decrement the unscheduled counter and insert to ready list if ready. 
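// For illustration: if %x is used by two instructions inside the scheduling
// region, %x's ScheduleData starts with UnscheduledDeps == 2; scheduling
// each user calls incrementUnscheduledDeps(-1) on %x, and once the count
// hits 0 the bundle headed by %x's FirstInBundle becomes ready.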
2268 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) {
2269 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
2270 if (OpDef && OpDef->hasValidDependencies() &&
2271 OpDef->incrementUnscheduledDeps(-1) == 0) {
2272 // There are no more unscheduled dependencies after
2273 // decrementing, so we can put the dependent instruction
2274 // into the ready list.
2275 ScheduleData *DepBundle = OpDef->FirstInBundle;
2276 assert(!DepBundle->IsScheduled &&
2277 "already scheduled bundle gets ready");
2278 ReadyList.insert(DepBundle);
2279 LLVM_DEBUG(dbgs()
2280 << "SLP: gets ready (def): " << *DepBundle << "\n");
2281 }
2282 });
2283 };
2284
2285 // If BundleMember is a vector bundle, its operands may have been
2286 // reordered during buildTree(). We therefore need to get its operands
2287 // through the TreeEntry.
2288 if (TreeEntry *TE = BundleMember->TE) {
2289 int Lane = BundleMember->Lane;
2290 assert(Lane >= 0 && "Lane not set");
2291
2292 // Since the vectorization tree is built recursively, this assertion
2293 // ensures that the tree entry has all operands set before reaching
2294 // this code. A couple of exceptions known at the moment are extracts
2295 // where their second (immediate) operand is not added. Since
2296 // immediates do not affect scheduler behavior this is considered
2297 // okay.
2298 auto *In = TE->getMainOp();
2299 assert(In &&
2300 (isa<ExtractValueInst>(In) || isa<ExtractElementInst>(In) ||
2301 In->getNumOperands() == TE->getNumOperands()) &&
2302 "Missed TreeEntry operands?");
2303 (void)In; // fake use to avoid build failure when assertions disabled
2304
2305 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands();
2306 OpIdx != NumOperands; ++OpIdx)
2307 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane]))
2308 DecrUnsched(I);
2309 } else {
2310 // If BundleMember is a stand-alone instruction, no operand reordering
2311 // has taken place, so we directly access its operands.
2312 for (Use &U : BundleMember->Inst->operands())
2313 if (auto *I = dyn_cast<Instruction>(U.get()))
2314 DecrUnsched(I);
2315 }
2316 // Handle the memory dependencies.
2317 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
2318 if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
2319 // There are no more unscheduled dependencies after decrementing,
2320 // so we can put the dependent instruction into the ready list.
2321 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
2322 assert(!DepBundle->IsScheduled &&
2323 "already scheduled bundle gets ready");
2324 ReadyList.insert(DepBundle);
2325 LLVM_DEBUG(dbgs()
2326 << "SLP: gets ready (mem): " << *DepBundle << "\n");
2327 }
2328 }
2329 BundleMember = BundleMember->NextInBundle;
2330 }
2331 }
2332
2333 void doForAllOpcodes(Value *V,
2334 function_ref<void(ScheduleData *SD)> Action) {
2335 if (ScheduleData *SD = getScheduleData(V))
2336 Action(SD);
2337 auto I = ExtraScheduleDataMap.find(V);
2338 if (I != ExtraScheduleDataMap.end())
2339 for (auto &P : I->second)
2340 if (P.second->SchedulingRegionID == SchedulingRegionID)
2341 Action(P.second);
2342 }
2343
2344 /// Put all instructions into the ReadyList which are ready for scheduling.
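/// E.g., right after calculateDependencies(), only bundles whose members
/// have no unscheduled dependencies left (UnscheduledDepsInBundle == 0)
/// seed the list; everything else becomes ready later via schedule().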
2345 template <typename ReadyListType>
2346 void initialFillReadyList(ReadyListType &ReadyList) {
2347 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
2348 doForAllOpcodes(I, [&](ScheduleData *SD) {
2349 if (SD->isSchedulingEntity() && SD->isReady()) {
2350 ReadyList.insert(SD);
2351 LLVM_DEBUG(dbgs()
2352 << "SLP: initially in ready list: " << *I << "\n");
2353 }
2354 });
2355 }
2356 }
2357
2358 /// Checks if a bundle of instructions can be scheduled, i.e. has no
2359 /// cyclic dependencies. This is only a dry-run, no instructions are
2360 /// actually moved at this stage.
2361 /// \returns the scheduling bundle. The returned Optional value is non-None
2362 /// if \p VL is allowed to be scheduled.
2363 Optional<ScheduleData *>
2364 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
2365 const InstructionsState &S);
2366
2367 /// Un-bundles a group of instructions.
2368 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);
2369
2370 /// Allocates schedule data chunk.
2371 ScheduleData *allocateScheduleDataChunks();
2372
2373 /// Extends the scheduling region so that V is inside the region.
2374 /// \returns true if the region size is within the limit.
2375 bool extendSchedulingRegion(Value *V, const InstructionsState &S);
2376
2377 /// Initialize the ScheduleData structures for new instructions in the
2378 /// scheduling region.
2379 void initScheduleData(Instruction *FromI, Instruction *ToI,
2380 ScheduleData *PrevLoadStore,
2381 ScheduleData *NextLoadStore);
2382
2383 /// Updates the dependency information of a bundle and of all instructions/
2384 /// bundles which depend on the original bundle.
2385 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
2386 BoUpSLP *SLP);
2387
2388 /// Sets all instructions in the scheduling region to un-scheduled.
2389 void resetSchedule();
2390
2391 BasicBlock *BB;
2392
2393 /// Simple memory allocation for ScheduleData.
2394 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
2395
2396 /// The size of a ScheduleData array in ScheduleDataChunks.
2397 int ChunkSize;
2398
2399 /// The allocator position in the current chunk, which is the last entry
2400 /// of ScheduleDataChunks.
2401 int ChunkPos;
2402
2403 /// Attaches ScheduleData to Instruction.
2404 /// Note that the mapping survives during all vectorization iterations, i.e.
2405 /// ScheduleData structures are recycled.
2406 DenseMap<Value *, ScheduleData *> ScheduleDataMap;
2407
2408 /// Attaches ScheduleData to Instruction with the leading key.
2409 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>>
2410 ExtraScheduleDataMap;
2411
2412 struct ReadyList : SmallVector<ScheduleData *, 8> {
2413 void insert(ScheduleData *SD) { push_back(SD); }
2414 };
2415
2416 /// The ready-list for scheduling (only used for the dry-run).
2417 ReadyList ReadyInsts;
2418
2419 /// The first instruction of the scheduling region.
2420 Instruction *ScheduleStart = nullptr;
2421
2422 /// The first instruction _after_ the scheduling region.
2423 Instruction *ScheduleEnd = nullptr;
2424
2425 /// The first memory accessing instruction in the scheduling region
2426 /// (can be null).
2427 ScheduleData *FirstLoadStoreInRegion = nullptr;
2428
2429 /// The last memory accessing instruction in the scheduling region
2430 /// (can be null).
2431 ScheduleData *LastLoadStoreInRegion = nullptr;
2432
2433 /// The current size of the scheduling region.
2434 int ScheduleRegionSize = 0;
2435
2436 /// The maximum size allowed for the scheduling region.
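/// For illustration, assuming the default budget of 100000: if a previous
/// region in this block consumed 60000 instructions, clear() lowers the
/// limit so that later regions may cover at most 40000 instructions, but
/// never less than MinScheduleRegionSize.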
2437 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget; 2438 2439 /// The ID of the scheduling region. For a new vectorization iteration this 2440 /// is incremented which "removes" all ScheduleData from the region. 2441 // Make sure that the initial SchedulingRegionID is greater than the 2442 // initial SchedulingRegionID in ScheduleData (which is 0). 2443 int SchedulingRegionID = 1; 2444 }; 2445 2446 /// Attaches the BlockScheduling structures to basic blocks. 2447 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules; 2448 2449 /// Performs the "real" scheduling. Done before vectorization is actually 2450 /// performed in a basic block. 2451 void scheduleBlock(BlockScheduling *BS); 2452 2453 /// List of users to ignore during scheduling and that don't need extracting. 2454 ArrayRef<Value *> UserIgnoreList; 2455 2456 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of 2457 /// sorted SmallVectors of unsigned. 2458 struct OrdersTypeDenseMapInfo { 2459 static OrdersType getEmptyKey() { 2460 OrdersType V; 2461 V.push_back(~1U); 2462 return V; 2463 } 2464 2465 static OrdersType getTombstoneKey() { 2466 OrdersType V; 2467 V.push_back(~2U); 2468 return V; 2469 } 2470 2471 static unsigned getHashValue(const OrdersType &V) { 2472 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); 2473 } 2474 2475 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) { 2476 return LHS == RHS; 2477 } 2478 }; 2479 2480 // Analysis and block reference. 2481 Function *F; 2482 ScalarEvolution *SE; 2483 TargetTransformInfo *TTI; 2484 TargetLibraryInfo *TLI; 2485 AAResults *AA; 2486 LoopInfo *LI; 2487 DominatorTree *DT; 2488 AssumptionCache *AC; 2489 DemandedBits *DB; 2490 const DataLayout *DL; 2491 OptimizationRemarkEmitter *ORE; 2492 2493 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. 2494 unsigned MinVecRegSize; // Set by cl::opt (default: 128). 2495 2496 /// Instruction builder to construct the vectorized tree. 2497 IRBuilder<> Builder; 2498 2499 /// A map of scalar integer values to the smallest bit width with which they 2500 /// can legally be represented. The values map to (width, signed) pairs, 2501 /// where "width" indicates the minimum bit width and "signed" is True if the 2502 /// value must be signed-extended, rather than zero-extended, back to its 2503 /// original width. 2504 MapVector<Value *, std::pair<uint64_t, bool>> MinBWs; 2505 }; 2506 2507 } // end namespace slpvectorizer 2508 2509 template <> struct GraphTraits<BoUpSLP *> { 2510 using TreeEntry = BoUpSLP::TreeEntry; 2511 2512 /// NodeRef has to be a pointer per the GraphWriter. 2513 using NodeRef = TreeEntry *; 2514 2515 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy; 2516 2517 /// Add the VectorizableTree to the index iterator to be able to return 2518 /// TreeEntry pointers. 
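/// With these traits (plus the DOTGraphTraits below) the SLP graph can be
/// rendered through the generic GraphWriter machinery, e.g. as a sketch in
/// a debugging session: ViewGraph(SLP, "slp-tree"), where SLP is a
/// BoUpSLP * (debug/graphviz-enabled builds only).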
2519 struct ChildIteratorType
2520 : public iterator_adaptor_base<
2521 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> {
2522 ContainerTy &VectorizableTree;
2523
2524 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W,
2525 ContainerTy &VT)
2526 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}
2527
2528 NodeRef operator*() { return I->UserTE; }
2529 };
2530
2531 static NodeRef getEntryNode(BoUpSLP &R) {
2532 return R.VectorizableTree[0].get();
2533 }
2534
2535 static ChildIteratorType child_begin(NodeRef N) {
2536 return {N->UserTreeIndices.begin(), N->Container};
2537 }
2538
2539 static ChildIteratorType child_end(NodeRef N) {
2540 return {N->UserTreeIndices.end(), N->Container};
2541 }
2542
2543 /// For the node iterator we just need to turn the TreeEntry iterator into a
2544 /// TreeEntry* iterator so that it dereferences to NodeRef.
2545 class nodes_iterator {
2546 using ItTy = ContainerTy::iterator;
2547 ItTy It;
2548
2549 public:
2550 nodes_iterator(const ItTy &It2) : It(It2) {}
2551 NodeRef operator*() { return It->get(); }
2552 nodes_iterator operator++() {
2553 ++It;
2554 return *this;
2555 }
2556 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; }
2557 };
2558
2559 static nodes_iterator nodes_begin(BoUpSLP *R) {
2560 return nodes_iterator(R->VectorizableTree.begin());
2561 }
2562
2563 static nodes_iterator nodes_end(BoUpSLP *R) {
2564 return nodes_iterator(R->VectorizableTree.end());
2565 }
2566
2567 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
2568 };
2569
2570 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
2571 using TreeEntry = BoUpSLP::TreeEntry;
2572
2573 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
2574
2575 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
2576 std::string Str;
2577 raw_string_ostream OS(Str);
2578 if (isSplat(Entry->Scalars)) {
2579 OS << "<splat> " << *Entry->Scalars[0];
2580 return Str;
2581 }
2582 for (auto V : Entry->Scalars) {
2583 OS << *V;
2584 if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) {
2585 return EU.Scalar == V;
2586 }))
2587 OS << " <extract>";
2588 OS << "\n";
2589 }
2590 return Str;
2591 }
2592
2593 static std::string getNodeAttributes(const TreeEntry *Entry,
2594 const BoUpSLP *) {
2595 if (Entry->State == TreeEntry::NeedToGather)
2596 return "color=red";
2597 return "";
2598 }
2599 };
2600
2601 } // end namespace llvm
2602
2603 BoUpSLP::~BoUpSLP() {
2604 for (const auto &Pair : DeletedInstructions) {
2605 // Replace operands of ignored instructions with Undefs in case they were
2606 // marked for deletion.
2607 if (Pair.getSecond()) {
2608 Value *Undef = UndefValue::get(Pair.getFirst()->getType());
2609 Pair.getFirst()->replaceAllUsesWith(Undef);
2610 }
2611 Pair.getFirst()->dropAllReferences();
2612 }
2613 for (const auto &Pair : DeletedInstructions) {
2614 assert(Pair.getFirst()->use_empty() &&
2615 "trying to erase instruction with users.");
2616 Pair.getFirst()->eraseFromParent();
2617 }
2618 #ifdef EXPENSIVE_CHECKS
2619 // If we could guarantee that this call is not extremely slow, we could
2620 // remove the ifdef limitation (see PR47712).
2621 assert(!verifyFunction(*F, &dbgs()));
2622 #endif
2623 }
2624
2625 void BoUpSLP::eraseInstructions(ArrayRef<Value *> AV) {
2626 for (auto *V : AV) {
2627 if (auto *I = dyn_cast<Instruction>(V))
2628 eraseInstruction(I, /*ReplaceOpsWithUndef=*/true);
2629 }
2630 }
2631
2632 /// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses
2633 /// contains the original mask for the scalars reused in the node. The
2634 /// procedure transforms this mask in accordance with the given \p Mask.
2635 static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) {
2636 assert(!Mask.empty() && Reuses.size() == Mask.size() &&
2637 "Expected non-empty mask.");
2638 SmallVector<int> Prev(Reuses.begin(), Reuses.end());
2639 Prev.swap(Reuses);
2640 for (unsigned I = 0, E = Prev.size(); I < E; ++I)
2641 if (Mask[I] != UndefMaskElem)
2642 Reuses[Mask[I]] = Prev[I];
2643 }
2644
2645 /// Reorders the given \p Order according to the given \p Mask. \p Order is
2646 /// the original order of the scalars. The procedure transforms the provided
2647 /// order in accordance with the given \p Mask. If the resulting \p Order is
2648 /// just an identity order, \p Order is cleared.
2649 static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) {
2650 assert(!Mask.empty() && "Expected non-empty mask.");
2651 SmallVector<int> MaskOrder;
2652 if (Order.empty()) {
2653 MaskOrder.resize(Mask.size());
2654 std::iota(MaskOrder.begin(), MaskOrder.end(), 0);
2655 } else {
2656 inversePermutation(Order, MaskOrder);
2657 }
2658 reorderReuses(MaskOrder, Mask);
2659 if (ShuffleVectorInst::isIdentityMask(MaskOrder)) {
2660 Order.clear();
2661 return;
2662 }
2663 Order.assign(Mask.size(), Mask.size());
2664 for (unsigned I = 0, E = Mask.size(); I < E; ++I)
2665 if (MaskOrder[I] != UndefMaskElem)
2666 Order[MaskOrder[I]] = I;
2667 fixupOrderingIndices(Order);
2668 }
2669
2670 void BoUpSLP::reorderTopToBottom() {
2671 // Maps VF to the graph nodes.
2672 DenseMap<unsigned, SmallPtrSet<TreeEntry *, 4>> VFToOrderedEntries;
2673 // ExtractElement gather nodes which can be vectorized and need to handle
2674 // their ordering.
2675 DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
2676 // Find all reorderable nodes with the given VF.
2677 // Currently these are vectorized loads, extracts, plus some gathering of extracts.
2678 for_each(VectorizableTree, [this, &VFToOrderedEntries, &GathersToOrders](
2679 const std::unique_ptr<TreeEntry> &TE) {
2680 // No need to reorder if we need to shuffle reuses; we still need to
2681 // shuffle the node.
2682 if (!TE->ReuseShuffleIndices.empty())
2683 return;
2684 if (TE->State == TreeEntry::Vectorize &&
2685 isa<LoadInst, ExtractElementInst, ExtractValueInst, StoreInst,
2686 InsertElementInst>(TE->getMainOp()) &&
2687 !TE->isAltShuffle()) {
2688 VFToOrderedEntries[TE->Scalars.size()].insert(TE.get());
2689 } else if (TE->State == TreeEntry::NeedToGather &&
2690 TE->getOpcode() == Instruction::ExtractElement &&
2691 !TE->isAltShuffle() &&
2692 isa<FixedVectorType>(cast<ExtractElementInst>(TE->getMainOp())
2693 ->getVectorOperandType()) &&
2694 allSameType(TE->Scalars) && allSameBlock(TE->Scalars)) {
2695 // Check that gather of extractelements can be represented as
2696 // just a shuffle of a single vector.
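// E.g., a gather of extractelement(%v, 2), (%v, 0), (%v, 1), (%v, 3) can be
// expressed as one shufflevector of %v with mask <2, 0, 1, 3>;
// canReuseExtract() reports whether this is possible and fills CurrentOrder
// accordingly (an illustrative sketch).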
2697 OrdersType CurrentOrder;
2698 bool Reuse = canReuseExtract(TE->Scalars, TE->getMainOp(), CurrentOrder);
2699 if (Reuse || !CurrentOrder.empty()) {
2700 VFToOrderedEntries[TE->Scalars.size()].insert(TE.get());
2701 GathersToOrders.try_emplace(TE.get(), CurrentOrder);
2702 }
2703 }
2704 });
2705
2706 // Reorder the graph nodes according to their vectorization factor.
2707 for (unsigned VF = VectorizableTree.front()->Scalars.size(); VF > 1;
2708 VF /= 2) {
2709 auto It = VFToOrderedEntries.find(VF);
2710 if (It == VFToOrderedEntries.end())
2711 continue;
2712 // Try to find the most profitable order. We are just looking for the
2713 // most used order and reorder the scalar elements in the nodes according
2714 // to this most used order.
2715 const SmallPtrSetImpl<TreeEntry *> &OrderedEntries = It->getSecond();
2716 // All operands are reordered and used only in this node - propagate the
2717 // most used order to the user node.
2718 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo> OrdersUses;
2719 SmallPtrSet<const TreeEntry *, 4> VisitedOps;
2720 for (const TreeEntry *OpTE : OrderedEntries) {
2721 // No need to reorder these nodes; we still need to extend and use a
2722 // shuffle, just merging the reordering shuffle and the reuse shuffle.
2723 if (!OpTE->ReuseShuffleIndices.empty())
2724 continue;
2725 // Count the number of uses of each order.
2726 const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & {
2727 if (OpTE->State == TreeEntry::NeedToGather)
2728 return GathersToOrders.find(OpTE)->second;
2729 return OpTE->ReorderIndices;
2730 }();
2731 // Stores actually store the mask, not the order; we need to invert it.
2732 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() &&
2733 OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
2734 SmallVector<int> Mask;
2735 inversePermutation(Order, Mask);
2736 unsigned E = Order.size();
2737 OrdersType CurrentOrder(E, E);
2738 transform(Mask, CurrentOrder.begin(), [E](int Idx) {
2739 return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx);
2740 });
2741 fixupOrderingIndices(CurrentOrder);
2742 ++OrdersUses.try_emplace(CurrentOrder).first->getSecond();
2743 } else {
2744 ++OrdersUses.try_emplace(Order).first->getSecond();
2745 }
2746 }
2747 // If no candidate orders were collected, skip this VF.
2748 if (OrdersUses.empty())
2749 continue;
2750 // Choose the most used order.
2751 ArrayRef<unsigned> BestOrder = OrdersUses.begin()->first;
2752 unsigned Cnt = OrdersUses.begin()->second;
2753 for (const auto &Pair : llvm::drop_begin(OrdersUses)) {
2754 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) {
2755 BestOrder = Pair.first;
2756 Cnt = Pair.second;
2757 }
2758 }
2759 // Set order of the user node.
2760 if (BestOrder.empty())
2761 continue;
2762 SmallVector<int> Mask;
2763 inversePermutation(BestOrder, Mask);
2764 SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem);
2765 unsigned E = BestOrder.size();
2766 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) {
2767 return I < E ? static_cast<int>(I) : UndefMaskElem;
2768 });
2769 // Do an actual reordering, if profitable.
2770 for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
2771 // Just do the reordering for the nodes with the given VF.
2772 if (TE->Scalars.size() != VF) {
2773 if (TE->ReuseShuffleIndices.size() == VF) {
2774 // Need to reorder the reuses masks of the operands with smaller VF to
2775 // be able to find the match between the graph nodes and scalar
2776 // operands of the given node during vectorization/cost estimation.
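// Worked example for reorderReuses() above: with ReuseShuffleIndices
// {0, 1, 1, 0} and Mask {1, 0, 3, 2}, each Prev[I] moves to slot Mask[I],
// yielding {1, 0, 0, 1}.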
2777 assert(all_of(TE->UserTreeIndices,
2778 [VF, &TE](const EdgeInfo &EI) {
2779 return EI.UserTE->Scalars.size() == VF ||
2780 EI.UserTE->Scalars.size() ==
2781 TE->Scalars.size();
2782 }) &&
2783 "All users must be of VF size.");
2784 // Update ordering of the operands with the smaller VF than the given
2785 // one.
2786 reorderReuses(TE->ReuseShuffleIndices, Mask);
2787 }
2788 continue;
2789 }
2790 if (TE->State == TreeEntry::Vectorize &&
2791 isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst,
2792 InsertElementInst>(TE->getMainOp()) &&
2793 !TE->isAltShuffle()) {
2794 // Build correct orders for extract{element,value}, loads and
2795 // stores.
2796 reorderOrder(TE->ReorderIndices, Mask);
2797 if (isa<InsertElementInst, StoreInst>(TE->getMainOp()))
2798 TE->reorderOperands(Mask);
2799 } else {
2800 // Reorder the node and its operands.
2801 TE->reorderOperands(Mask);
2802 assert(TE->ReorderIndices.empty() &&
2803 "Expected empty reorder sequence.");
2804 reorderScalars(TE->Scalars, Mask);
2805 }
2806 if (!TE->ReuseShuffleIndices.empty()) {
2807 // Apply reversed order to keep the original ordering of the reused
2808 // elements to avoid extra reorder indices shuffling.
2809 OrdersType CurrentOrder;
2810 reorderOrder(CurrentOrder, MaskOrder);
2811 SmallVector<int> NewReuses;
2812 inversePermutation(CurrentOrder, NewReuses);
2813 addMask(NewReuses, TE->ReuseShuffleIndices);
2814 TE->ReuseShuffleIndices.swap(NewReuses);
2815 }
2816 }
2817 }
2818 }
2819
2820 void BoUpSLP::reorderBottomToTop() {
2821 SetVector<TreeEntry *> OrderedEntries;
2822 DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
2823 // Find all reorderable leaf nodes with the given VF.
2824 // Currently these are vectorized loads and extracts without alternate
2825 // operands, plus some gathering of extracts.
2826 SmallVector<TreeEntry *> NonVectorized;
2827 for_each(VectorizableTree, [this, &OrderedEntries, &GathersToOrders,
2828 &NonVectorized](
2829 const std::unique_ptr<TreeEntry> &TE) {
2830 // No need to reorder if we need to shuffle reuses; we still need to
2831 // shuffle the node.
2832 if (!TE->ReuseShuffleIndices.empty())
2833 return;
2834 if (TE->State == TreeEntry::Vectorize &&
2835 isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE->getMainOp()) &&
2836 !TE->isAltShuffle()) {
2837 OrderedEntries.insert(TE.get());
2838 } else if (TE->State == TreeEntry::NeedToGather &&
2839 TE->getOpcode() == Instruction::ExtractElement &&
2840 !TE->isAltShuffle() &&
2841 isa<FixedVectorType>(cast<ExtractElementInst>(TE->getMainOp())
2842 ->getVectorOperandType()) &&
2843 allSameType(TE->Scalars) && allSameBlock(TE->Scalars)) {
2844 // Check that gather of extractelements can be represented as
2845 // just a shuffle of a single vector with a single user only.
2846 OrdersType CurrentOrder;
2847 bool Reuse = canReuseExtract(TE->Scalars, TE->getMainOp(), CurrentOrder);
2848 if ((Reuse || !CurrentOrder.empty()) &&
2849 !any_of(
2850 VectorizableTree, [&TE](const std::unique_ptr<TreeEntry> &Entry) {
2851 return Entry->State == TreeEntry::NeedToGather &&
2852 Entry.get() != TE.get() && Entry->isSame(TE->Scalars);
2853 })) {
2854 OrderedEntries.insert(TE.get());
2855 GathersToOrders.try_emplace(TE.get(), CurrentOrder);
2856 }
2857 }
2858 if (TE->State != TreeEntry::Vectorize)
2859 NonVectorized.push_back(TE.get());
2860 });
2861
2862 // Checks if the operands of the users are reorderable and have only a
2863 // single use.
2864 auto &&CheckOperands =
2865 [this, &NonVectorized](const auto &Data,
2866 SmallVectorImpl<TreeEntry *> &GatherOps) {
2867 for (unsigned I = 0, E = Data.first->getNumOperands(); I < E; ++I) {
2868 if (any_of(Data.second,
2869 [I](const std::pair<unsigned, TreeEntry *> &OpData) {
2870 return OpData.first == I &&
2871 OpData.second->State == TreeEntry::Vectorize;
2872 }))
2873 continue;
2874 ArrayRef<Value *> VL = Data.first->getOperand(I);
2875 const TreeEntry *TE = nullptr;
2876 const auto *It = find_if(VL, [this, &TE](Value *V) {
2877 TE = getTreeEntry(V);
2878 return TE;
2879 });
2880 if (It != VL.end() && TE->isSame(VL))
2881 return false;
2882 TreeEntry *Gather = nullptr;
2883 if (count_if(NonVectorized, [VL, &Gather](TreeEntry *TE) {
2884 assert(TE->State != TreeEntry::Vectorize &&
2885 "Only non-vectorized nodes are expected.");
2886 if (TE->isSame(VL)) {
2887 Gather = TE;
2888 return true;
2889 }
2890 return false;
2891 }) > 1)
2892 return false;
2893 if (Gather)
2894 GatherOps.push_back(Gather);
2895 }
2896 return true;
2897 };
2898 // 1. Propagate order to the graph nodes, which use only reordered nodes.
2899 // I.e., if the node has operands that are reordered, try to keep at least
2900 // one operand order in the natural order and reorder the others, then
2901 // reorder the user node itself.
2902 SmallPtrSet<const TreeEntry *, 4> Visited;
2903 while (!OrderedEntries.empty()) {
2904 // 1. Filter out only reordered nodes.
2905 // 2. If the entry has multiple uses - skip it and jump to the next node.
2906 MapVector<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users;
2907 SmallVector<TreeEntry *> Filtered;
2908 for (TreeEntry *TE : OrderedEntries) {
2909 if (!(TE->State == TreeEntry::Vectorize ||
2910 (TE->State == TreeEntry::NeedToGather &&
2911 TE->getOpcode() == Instruction::ExtractElement)) ||
2912 TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
2913 !all_of(drop_begin(TE->UserTreeIndices),
2914 [TE](const EdgeInfo &EI) {
2915 return EI.UserTE == TE->UserTreeIndices.front().UserTE;
2916 }) ||
2917 !Visited.insert(TE).second) {
2918 Filtered.push_back(TE);
2919 continue;
2920 }
2921 // Build a map between user nodes and their operand orders to speed up
2922 // the search. The graph currently does not provide this dependency directly.
2923 for (EdgeInfo &EI : TE->UserTreeIndices) {
2924 TreeEntry *UserTE = EI.UserTE;
2925 auto It = Users.find(UserTE);
2926 if (It == Users.end())
2927 It = Users.insert({UserTE, {}}).first;
2928 It->second.emplace_back(EI.EdgeIdx, TE);
2929 }
2930 }
2931 // Erase filtered entries.
2932 for_each(Filtered,
2933 [&OrderedEntries](TreeEntry *TE) { OrderedEntries.remove(TE); });
2934 for (const auto &Data : Users) {
2935 // Check that operands are used only in the User node.
2936 SmallVector<TreeEntry *> GatherOps;
2937 if (!CheckOperands(Data, GatherOps)) {
2938 for_each(Data.second,
2939 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
2940 OrderedEntries.remove(Op.second);
2941 });
2942 continue;
2943 }
2944 // All operands are reordered and used only in this node - propagate the
2945 // most used order to the user node.
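// E.g., if two operands vote for the order {1, 0} and one votes for the
// identity, {1, 0} wins with two votes; on a tie the identity (empty) order
// is preferred, as the selection loop further down shows.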
2946       DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo> OrdersUses;
2947       SmallPtrSet<const TreeEntry *, 4> VisitedOps;
2948       for (const auto &Op : Data.second) {
2949         TreeEntry *OpTE = Op.second;
2950         if (!OpTE->ReuseShuffleIndices.empty())
2951           continue;
2952         const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & {
2953           if (OpTE->State == TreeEntry::NeedToGather)
2954             return GathersToOrders.find(OpTE)->second;
2955           return OpTE->ReorderIndices;
2956         }();
2957         // Stores actually store the mask, not the order; we need to invert it.
2958         if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() &&
2959             OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
2960           SmallVector<int> Mask;
2961           inversePermutation(Order, Mask);
2962           unsigned E = Order.size();
2963           OrdersType CurrentOrder(E, E);
2964           transform(Mask, CurrentOrder.begin(), [E](int Idx) {
2965             return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx);
2966           });
2967           fixupOrderingIndices(CurrentOrder);
2968           ++OrdersUses.try_emplace(CurrentOrder).first->getSecond();
2969         } else {
2970           ++OrdersUses.try_emplace(Order).first->getSecond();
2971         }
2972         if (VisitedOps.insert(OpTE).second)
2973           OrdersUses.try_emplace({}, 0).first->getSecond() +=
2974               OpTE->UserTreeIndices.size();
2975         --OrdersUses[{}];
2976       }
2977       // If there are no orders, skip the current node and jump to the next one, if any.
2978       if (OrdersUses.empty()) {
2979         for_each(Data.second,
2980                  [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
2981                    OrderedEntries.remove(Op.second);
2982                  });
2983         continue;
2984       }
2985       // Choose the best order.
2986       ArrayRef<unsigned> BestOrder = OrdersUses.begin()->first;
2987       unsigned Cnt = OrdersUses.begin()->second;
2988       for (const auto &Pair : llvm::drop_begin(OrdersUses)) {
2989         if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) {
2990           BestOrder = Pair.first;
2991           Cnt = Pair.second;
2992         }
2993       }
2994       // Set the order of the user node (reordering of operands and user nodes).
2995       if (BestOrder.empty()) {
2996         for_each(Data.second,
2997                  [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) {
2998                    OrderedEntries.remove(Op.second);
2999                  });
3000         continue;
3001       }
3002       // Erase operands from the OrderedEntries list and adjust their orders.
3003       VisitedOps.clear();
3004       SmallVector<int> Mask;
3005       inversePermutation(BestOrder, Mask);
3006       SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem);
3007       unsigned E = BestOrder.size();
3008       transform(BestOrder, MaskOrder.begin(), [E](unsigned I) {
3009         return I < E ? static_cast<int>(I) : UndefMaskElem;
3010       });
3011       for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) {
3012         TreeEntry *TE = Op.second;
3013         OrderedEntries.remove(TE);
3014         if (!VisitedOps.insert(TE).second)
3015           continue;
3016         if (!TE->ReuseShuffleIndices.empty() && TE->ReorderIndices.empty()) {
3017           // Just reorder the reuse indices.
3018           reorderReuses(TE->ReuseShuffleIndices, Mask);
3019           continue;
3020         }
3021         // Gathers are processed separately.
3022         if (TE->State != TreeEntry::Vectorize)
3023           continue;
3024         assert((BestOrder.size() == TE->ReorderIndices.size() ||
3025                 TE->ReorderIndices.empty()) &&
3026                "Non-matching sizes of user/operand entries.");
3027         reorderOrder(TE->ReorderIndices, Mask);
3028       }
3029       // For gathers, we just need to reorder their scalars.
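      // Reordering a gather is free: its scalars are permuted in place with
      // the same mask, so the materialized build vector already matches the
      // chosen order and no extra shuffle needs to be emitted for it.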
3030 for (TreeEntry *Gather : GatherOps) { 3031 if (!Gather->ReuseShuffleIndices.empty()) 3032 continue; 3033 assert(Gather->ReorderIndices.empty() && 3034 "Unexpected reordering of gathers."); 3035 reorderScalars(Gather->Scalars, Mask); 3036 OrderedEntries.remove(Gather); 3037 } 3038 // Reorder operands of the user node and set the ordering for the user 3039 // node itself. 3040 if (Data.first->State != TreeEntry::Vectorize || 3041 !isa<ExtractElementInst, ExtractValueInst, LoadInst>( 3042 Data.first->getMainOp()) || 3043 Data.first->isAltShuffle()) 3044 Data.first->reorderOperands(Mask); 3045 if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) || 3046 Data.first->isAltShuffle()) { 3047 reorderScalars(Data.first->Scalars, Mask); 3048 reorderOrder(Data.first->ReorderIndices, MaskOrder); 3049 if (Data.first->ReuseShuffleIndices.empty() && 3050 !Data.first->ReorderIndices.empty() && 3051 !Data.first->isAltShuffle()) { 3052 // Insert user node to the list to try to sink reordering deeper in 3053 // the graph. 3054 OrderedEntries.insert(Data.first); 3055 } 3056 } else { 3057 reorderOrder(Data.first->ReorderIndices, Mask); 3058 } 3059 } 3060 } 3061 } 3062 3063 void BoUpSLP::buildExternalUses( 3064 const ExtraValueToDebugLocsMap &ExternallyUsedValues) { 3065 // Collect the values that we need to extract from the tree. 3066 for (auto &TEPtr : VectorizableTree) { 3067 TreeEntry *Entry = TEPtr.get(); 3068 3069 // No need to handle users of gathered values. 3070 if (Entry->State == TreeEntry::NeedToGather) 3071 continue; 3072 3073 // For each lane: 3074 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 3075 Value *Scalar = Entry->Scalars[Lane]; 3076 int FoundLane = Entry->findLaneForValue(Scalar); 3077 3078 // Check if the scalar is externally used as an extra arg. 3079 auto ExtI = ExternallyUsedValues.find(Scalar); 3080 if (ExtI != ExternallyUsedValues.end()) { 3081 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " 3082 << Lane << " from " << *Scalar << ".\n"); 3083 ExternalUses.emplace_back(Scalar, nullptr, FoundLane); 3084 } 3085 for (User *U : Scalar->users()) { 3086 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); 3087 3088 Instruction *UserInst = dyn_cast<Instruction>(U); 3089 if (!UserInst) 3090 continue; 3091 3092 if (isDeleted(UserInst)) 3093 continue; 3094 3095 // Skip in-tree scalars that become vectors 3096 if (TreeEntry *UseEntry = getTreeEntry(U)) { 3097 Value *UseScalar = UseEntry->Scalars[0]; 3098 // Some in-tree scalars will remain as scalar in vectorized 3099 // instructions. If that is the case, the one in Lane 0 will 3100 // be used. 3101 if (UseScalar != U || 3102 UseEntry->State == TreeEntry::ScatterVectorize || 3103 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) { 3104 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U 3105 << ".\n"); 3106 assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state"); 3107 continue; 3108 } 3109 } 3110 3111 // Ignore users in the user ignore list. 
3112       if (is_contained(UserIgnoreList, UserInst))
3113         continue;
3114 
3115       LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane "
3116                         << Lane << " from " << *Scalar << ".\n");
3117       ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane));
3118     }
3119   }
3120   }
3121 }
3122 
3123 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
3124                         ArrayRef<Value *> UserIgnoreLst) {
3125   deleteTree();
3126   UserIgnoreList = UserIgnoreLst;
3127   if (!allSameType(Roots))
3128     return;
3129   buildTree_rec(Roots, 0, EdgeInfo());
3130 }
3131 
3132 namespace {
3133 /// Tracks the state in which we can represent the loads of the given sequence.
3134 enum class LoadsState { Gather, Vectorize, ScatterVectorize };
3135 } // anonymous namespace
3136 
3137 /// Checks if the given array of loads can be represented as a vector load,
3138 /// a scatter-vectorized load or just a simple gather.
3139 static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0,
3140                                     const TargetTransformInfo &TTI,
3141                                     const DataLayout &DL, ScalarEvolution &SE,
3142                                     SmallVectorImpl<unsigned> &Order,
3143                                     SmallVectorImpl<Value *> &PointerOps) {
3144   // Check that a vectorized load would load the same memory as a scalar
3145   // load. For example, we don't want to vectorize loads that are smaller
3146   // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
3147   // treats loading/storing it as an i8 struct. If we vectorize loads/stores
3148   // from such a struct, we read/write packed bits disagreeing with the
3149   // unvectorized version.
3150   Type *ScalarTy = VL0->getType();
3151 
3152   if (DL.getTypeSizeInBits(ScalarTy) != DL.getTypeAllocSizeInBits(ScalarTy))
3153     return LoadsState::Gather;
3154 
3155   // Make sure all loads in the bundle are simple - we can't vectorize
3156   // atomic or volatile loads.
3157   PointerOps.clear();
3158   PointerOps.resize(VL.size());
3159   auto *POIter = PointerOps.begin();
3160   for (Value *V : VL) {
3161     auto *L = cast<LoadInst>(V);
3162     if (!L->isSimple())
3163       return LoadsState::Gather;
3164     *POIter = L->getPointerOperand();
3165     ++POIter;
3166   }
3167 
3168   Order.clear();
3169   // Check the order of pointer operands.
3170   if (llvm::sortPtrAccesses(PointerOps, ScalarTy, DL, SE, Order)) {
3171     Value *Ptr0;
3172     Value *PtrN;
3173     if (Order.empty()) {
3174       Ptr0 = PointerOps.front();
3175       PtrN = PointerOps.back();
3176     } else {
3177       Ptr0 = PointerOps[Order.front()];
3178       PtrN = PointerOps[Order.back()];
3179     }
3180     Optional<int> Diff =
3181         getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE);
3182     // Check that the sorted loads are consecutive.
3183     if (static_cast<unsigned>(*Diff) == VL.size() - 1)
3184       return LoadsState::Vectorize;
3185     Align CommonAlignment = cast<LoadInst>(VL0)->getAlign();
3186     for (Value *V : VL)
3187       CommonAlignment =
3188           commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign());
3189     if (TTI.isLegalMaskedGather(FixedVectorType::get(ScalarTy, VL.size()),
3190                                 CommonAlignment))
3191       return LoadsState::ScatterVectorize;
3192   }
3193 
3194   return LoadsState::Gather;
3195 }
3196 
3197 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
3198                             const EdgeInfo &UserTreeIdx) {
3199   assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
3200 
3201   SmallVector<int> ReuseShuffleIndicies;
3202   SmallVector<Value *> UniqueValues;
3203   auto &&TryToFindDuplicates = [&VL, &ReuseShuffleIndicies, &UniqueValues,
3204                                 &UserTreeIdx,
3205                                 this](const InstructionsState &S) {
3206     // Check that every instruction appears once in this bundle.
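    // For example, VL = {A, B, A, B} yields UniqueValues = {A, B} and
    // ReuseShuffleIndicies = {0, 1, 0, 1}; undef values are recorded as -1.
    // If the number of unique scalars is 1 or is not a power of 2, the
    // bundle is gathered instead.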
3207 DenseMap<Value *, unsigned> UniquePositions; 3208 for (Value *V : VL) { 3209 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 3210 ReuseShuffleIndicies.emplace_back(isa<UndefValue>(V) ? -1 3211 : Res.first->second); 3212 if (Res.second) 3213 UniqueValues.emplace_back(V); 3214 } 3215 size_t NumUniqueScalarValues = UniqueValues.size(); 3216 if (NumUniqueScalarValues == VL.size()) { 3217 ReuseShuffleIndicies.clear(); 3218 } else { 3219 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); 3220 if (NumUniqueScalarValues <= 1 || 3221 !llvm::isPowerOf2_32(NumUniqueScalarValues)) { 3222 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 3223 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3224 return false; 3225 } 3226 VL = UniqueValues; 3227 } 3228 return true; 3229 }; 3230 3231 InstructionsState S = getSameOpcode(VL); 3232 if (Depth == RecursionMaxDepth) { 3233 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); 3234 if (TryToFindDuplicates(S)) 3235 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3236 ReuseShuffleIndicies); 3237 return; 3238 } 3239 3240 // Don't handle scalable vectors 3241 if (S.getOpcode() == Instruction::ExtractElement && 3242 isa<ScalableVectorType>( 3243 cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) { 3244 LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n"); 3245 if (TryToFindDuplicates(S)) 3246 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3247 ReuseShuffleIndicies); 3248 return; 3249 } 3250 3251 // Don't handle vectors. 3252 if (S.OpValue->getType()->isVectorTy() && 3253 !isa<InsertElementInst>(S.OpValue)) { 3254 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); 3255 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3256 return; 3257 } 3258 3259 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 3260 if (SI->getValueOperand()->getType()->isVectorTy()) { 3261 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); 3262 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3263 return; 3264 } 3265 3266 // If all of the operands are identical or constant we have a simple solution. 3267 // If we deal with insert/extract instructions, they all must have constant 3268 // indices, otherwise we should gather them, not try to vectorize. 3269 if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode() || 3270 (isa<InsertElementInst, ExtractValueInst, ExtractElementInst>(S.MainOp) && 3271 !all_of(VL, isVectorLikeInstWithConstOps))) { 3272 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n"); 3273 if (TryToFindDuplicates(S)) 3274 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3275 ReuseShuffleIndicies); 3276 return; 3277 } 3278 3279 // We now know that this is a vector of instructions of the same type from 3280 // the same block. 3281 3282 // Don't vectorize ephemeral values. 3283 for (Value *V : VL) { 3284 if (EphValues.count(V)) { 3285 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 3286 << ") is ephemeral.\n"); 3287 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3288 return; 3289 } 3290 } 3291 3292 // Check if this is a duplicate of another entry. 
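  // A "perfect diamond" match arises when two user bundles share the same
  // operand bundle, e.g. {a0 + b0, a1 + b1} feeding both a multiply bundle
  // and a subtract bundle; the second visit only records an extra user edge
  // instead of building a duplicate node.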
3293 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 3294 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); 3295 if (!E->isSame(VL)) { 3296 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); 3297 if (TryToFindDuplicates(S)) 3298 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3299 ReuseShuffleIndicies); 3300 return; 3301 } 3302 // Record the reuse of the tree node. FIXME, currently this is only used to 3303 // properly draw the graph rather than for the actual vectorization. 3304 E->UserTreeIndices.push_back(UserTreeIdx); 3305 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue 3306 << ".\n"); 3307 return; 3308 } 3309 3310 // Check that none of the instructions in the bundle are already in the tree. 3311 for (Value *V : VL) { 3312 auto *I = dyn_cast<Instruction>(V); 3313 if (!I) 3314 continue; 3315 if (getTreeEntry(I)) { 3316 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 3317 << ") is already in tree.\n"); 3318 if (TryToFindDuplicates(S)) 3319 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3320 ReuseShuffleIndicies); 3321 return; 3322 } 3323 } 3324 3325 // If any of the scalars is marked as a value that needs to stay scalar, then 3326 // we need to gather the scalars. 3327 // The reduction nodes (stored in UserIgnoreList) also should stay scalar. 3328 for (Value *V : VL) { 3329 if (MustGather.count(V) || is_contained(UserIgnoreList, V)) { 3330 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); 3331 if (TryToFindDuplicates(S)) 3332 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3333 ReuseShuffleIndicies); 3334 return; 3335 } 3336 } 3337 3338 // Check that all of the users of the scalars that we want to vectorize are 3339 // schedulable. 3340 auto *VL0 = cast<Instruction>(S.OpValue); 3341 BasicBlock *BB = VL0->getParent(); 3342 3343 if (!DT->isReachableFromEntry(BB)) { 3344 // Don't go into unreachable blocks. They may contain instructions with 3345 // dependency cycles which confuse the final scheduling. 3346 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); 3347 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3348 return; 3349 } 3350 3351 // Check that every instruction appears once in this bundle. 3352 if (!TryToFindDuplicates(S)) 3353 return; 3354 3355 auto &BSRef = BlocksSchedules[BB]; 3356 if (!BSRef) 3357 BSRef = std::make_unique<BlockScheduling>(BB); 3358 3359 BlockScheduling &BS = *BSRef.get(); 3360 3361 Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S); 3362 if (!Bundle) { 3363 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); 3364 assert((!BS.getScheduleData(VL0) || 3365 !BS.getScheduleData(VL0)->isPartOfBundle()) && 3366 "tryScheduleBundle should cancelScheduling on failure"); 3367 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3368 ReuseShuffleIndicies); 3369 return; 3370 } 3371 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 3372 3373 unsigned ShuffleOrOp = S.isAltShuffle() ? 3374 (unsigned) Instruction::ShuffleVector : S.getOpcode(); 3375 switch (ShuffleOrOp) { 3376 case Instruction::PHI: { 3377 auto *PH = cast<PHINode>(VL0); 3378 3379 // Check for terminator values (e.g. invoke). 
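    // For example, an incoming value produced by an invoke is the terminator
    // of its predecessor block, so there is no room to emit vectorized code
    // after it; such bundles have to be gathered.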
3380 for (Value *V : VL) 3381 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { 3382 Instruction *Term = dyn_cast<Instruction>( 3383 cast<PHINode>(V)->getIncomingValueForBlock( 3384 PH->getIncomingBlock(I))); 3385 if (Term && Term->isTerminator()) { 3386 LLVM_DEBUG(dbgs() 3387 << "SLP: Need to swizzle PHINodes (terminator use).\n"); 3388 BS.cancelScheduling(VL, VL0); 3389 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3390 ReuseShuffleIndicies); 3391 return; 3392 } 3393 } 3394 3395 TreeEntry *TE = 3396 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies); 3397 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 3398 3399 // Keeps the reordered operands to avoid code duplication. 3400 SmallVector<ValueList, 2> OperandsVec; 3401 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { 3402 if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) { 3403 ValueList Operands(VL.size(), PoisonValue::get(PH->getType())); 3404 TE->setOperand(I, Operands); 3405 OperandsVec.push_back(Operands); 3406 continue; 3407 } 3408 ValueList Operands; 3409 // Prepare the operand vector. 3410 for (Value *V : VL) 3411 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock( 3412 PH->getIncomingBlock(I))); 3413 TE->setOperand(I, Operands); 3414 OperandsVec.push_back(Operands); 3415 } 3416 for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx) 3417 buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx}); 3418 return; 3419 } 3420 case Instruction::ExtractValue: 3421 case Instruction::ExtractElement: { 3422 OrdersType CurrentOrder; 3423 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder); 3424 if (Reuse) { 3425 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n"); 3426 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3427 ReuseShuffleIndicies); 3428 // This is a special case, as it does not gather, but at the same time 3429 // we are not extending buildTree_rec() towards the operands. 3430 ValueList Op0; 3431 Op0.assign(VL.size(), VL0->getOperand(0)); 3432 VectorizableTree.back()->setOperand(0, Op0); 3433 return; 3434 } 3435 if (!CurrentOrder.empty()) { 3436 LLVM_DEBUG({ 3437 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence " 3438 "with order"; 3439 for (unsigned Idx : CurrentOrder) 3440 dbgs() << " " << Idx; 3441 dbgs() << "\n"; 3442 }); 3443 fixupOrderingIndices(CurrentOrder); 3444 // Insert new order with initial value 0, if it does not exist, 3445 // otherwise return the iterator to the existing one. 3446 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3447 ReuseShuffleIndicies, CurrentOrder); 3448 // This is a special case, as it does not gather, but at the same time 3449 // we are not extending buildTree_rec() towards the operands. 3450 ValueList Op0; 3451 Op0.assign(VL.size(), VL0->getOperand(0)); 3452 VectorizableTree.back()->setOperand(0, Op0); 3453 return; 3454 } 3455 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n"); 3456 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3457 ReuseShuffleIndicies); 3458 BS.cancelScheduling(VL, VL0); 3459 return; 3460 } 3461 case Instruction::InsertElement: { 3462 assert(ReuseShuffleIndicies.empty() && "All inserts should be unique"); 3463 3464 // Check that we have a buildvector and not a shuffle of 2 or more 3465 // different vectors. 
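      // For example, an acceptable chain looks like:
      //   %v0 = insertelement <4 x float> poison, float %a, i32 0
      //   %v1 = insertelement <4 x float> %v0, float %b, i32 1
      //   %v2 = insertelement <4 x float> %v1, float %c, i32 2
      //   %v3 = insertelement <4 x float> %v2, float %d, i32 3
      // Here every insert but the last is itself a source vector (operand 0
      // of the next insert), so at most one scalar in VL is missing from
      // SourceVectors.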
3466       ValueSet SourceVectors;
3467       int MinIdx = std::numeric_limits<int>::max();
3468       for (Value *V : VL) {
3469         SourceVectors.insert(cast<Instruction>(V)->getOperand(0));
3470         Optional<int> Idx = getInsertIndex(V, 0);
3471         if (!Idx || *Idx == UndefMaskElem)
3472           continue;
3473         MinIdx = std::min(MinIdx, *Idx);
3474       }
3475 
3476       if (count_if(VL, [&SourceVectors](Value *V) {
3477             return !SourceVectors.contains(V);
3478           }) >= 2) {
3479         // Found 2nd source vector - cancel.
3480         LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with "
3481                              "different source vectors.\n");
3482         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
3483         BS.cancelScheduling(VL, VL0);
3484         return;
3485       }
3486 
3487       auto OrdCompare = [](const std::pair<int, int> &P1,
3488                            const std::pair<int, int> &P2) {
3489         return P1.first > P2.first;
3490       };
3491       PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>,
3492                     decltype(OrdCompare)>
3493           Indices(OrdCompare);
3494       for (int I = 0, E = VL.size(); I < E; ++I) {
3495         Optional<int> Idx = getInsertIndex(VL[I], 0);
3496         if (!Idx || *Idx == UndefMaskElem)
3497           continue;
3498         Indices.emplace(*Idx, I);
3499       }
3500       OrdersType CurrentOrder(VL.size(), VL.size());
3501       bool IsIdentity = true;
3502       for (int I = 0, E = VL.size(); I < E; ++I) {
3503         CurrentOrder[Indices.top().second] = I;
3504         IsIdentity &= Indices.top().second == I;
3505         Indices.pop();
3506       }
3507       if (IsIdentity)
3508         CurrentOrder.clear();
3509       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3510                                    None, CurrentOrder);
3511       LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n");
3512 
3513       constexpr int NumOps = 2;
3514       ValueList VectorOperands[NumOps];
3515       for (int I = 0; I < NumOps; ++I) {
3516         for (Value *V : VL)
3517           VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I));
3518 
3519         TE->setOperand(I, VectorOperands[I]);
3520       }
3521       buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1});
3522       return;
3523     }
3524     case Instruction::Load: {
3525       // Check that a vectorized load would load the same memory as a scalar
3526       // load. For example, we don't want to vectorize loads that are smaller
3527       // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
3528       // treats loading/storing it as an i8 struct. If we vectorize loads/stores
3529       // from such a struct, we read/write packed bits disagreeing with the
3530       // unvectorized version.
3531       SmallVector<Value *> PointerOps;
3532       OrdersType CurrentOrder;
3533       TreeEntry *TE = nullptr;
3534       switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, CurrentOrder,
3535                                 PointerOps)) {
3536       case LoadsState::Vectorize:
3537         if (CurrentOrder.empty()) {
3538           // Original loads are consecutive and do not require reordering.
3539           TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3540                             ReuseShuffleIndicies);
3541           LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
3542         } else {
3543           fixupOrderingIndices(CurrentOrder);
3544           // Need to reorder.
3545           TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3546                             ReuseShuffleIndicies, CurrentOrder);
3547           LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
3548         }
3549         TE->setOperandsInOrder();
3550         break;
3551       case LoadsState::ScatterVectorize:
3552         // Vectorizing non-consecutive loads with `llvm.masked.gather`.
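        // For example, loads of a[0], a[17], a[4] and a[9] have no common
        // stride, but if the target reports isLegalMaskedGather for the
        // vector type they can still be emitted as one gather of the pointer
        // bundle, which is itself vectorized as an operand below.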
3553 TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S, 3554 UserTreeIdx, ReuseShuffleIndicies); 3555 TE->setOperandsInOrder(); 3556 buildTree_rec(PointerOps, Depth + 1, {TE, 0}); 3557 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); 3558 break; 3559 case LoadsState::Gather: 3560 BS.cancelScheduling(VL, VL0); 3561 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3562 ReuseShuffleIndicies); 3563 #ifndef NDEBUG 3564 Type *ScalarTy = VL0->getType(); 3565 if (DL->getTypeSizeInBits(ScalarTy) != 3566 DL->getTypeAllocSizeInBits(ScalarTy)) 3567 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); 3568 else if (any_of(VL, [](Value *V) { 3569 return !cast<LoadInst>(V)->isSimple(); 3570 })) 3571 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); 3572 else 3573 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); 3574 #endif // NDEBUG 3575 break; 3576 } 3577 return; 3578 } 3579 case Instruction::ZExt: 3580 case Instruction::SExt: 3581 case Instruction::FPToUI: 3582 case Instruction::FPToSI: 3583 case Instruction::FPExt: 3584 case Instruction::PtrToInt: 3585 case Instruction::IntToPtr: 3586 case Instruction::SIToFP: 3587 case Instruction::UIToFP: 3588 case Instruction::Trunc: 3589 case Instruction::FPTrunc: 3590 case Instruction::BitCast: { 3591 Type *SrcTy = VL0->getOperand(0)->getType(); 3592 for (Value *V : VL) { 3593 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType(); 3594 if (Ty != SrcTy || !isValidElementType(Ty)) { 3595 BS.cancelScheduling(VL, VL0); 3596 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3597 ReuseShuffleIndicies); 3598 LLVM_DEBUG(dbgs() 3599 << "SLP: Gathering casts with different src types.\n"); 3600 return; 3601 } 3602 } 3603 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3604 ReuseShuffleIndicies); 3605 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); 3606 3607 TE->setOperandsInOrder(); 3608 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 3609 ValueList Operands; 3610 // Prepare the operand vector. 3611 for (Value *V : VL) 3612 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 3613 3614 buildTree_rec(Operands, Depth + 1, {TE, i}); 3615 } 3616 return; 3617 } 3618 case Instruction::ICmp: 3619 case Instruction::FCmp: { 3620 // Check that all of the compares have the same predicate. 3621 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 3622 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0); 3623 Type *ComparedTy = VL0->getOperand(0)->getType(); 3624 for (Value *V : VL) { 3625 CmpInst *Cmp = cast<CmpInst>(V); 3626 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) || 3627 Cmp->getOperand(0)->getType() != ComparedTy) { 3628 BS.cancelScheduling(VL, VL0); 3629 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3630 ReuseShuffleIndicies); 3631 LLVM_DEBUG(dbgs() 3632 << "SLP: Gathering cmp with different predicate.\n"); 3633 return; 3634 } 3635 } 3636 3637 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3638 ReuseShuffleIndicies); 3639 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 3640 3641 ValueList Left, Right; 3642 if (cast<CmpInst>(VL0)->isCommutative()) { 3643 // Commutative predicate - collect + sort operands of the instructions 3644 // so that each side is more likely to have the same opcode. 
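        // For example, in {icmp eq (a + b), x; icmp eq y, (c + d)} the
        // operands of the second compare can be swapped per lane so that
        // both adds end up in Left and both plain values in Right.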
3645 assert(P0 == SwapP0 && "Commutative Predicate mismatch"); 3646 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 3647 } else { 3648 // Collect operands - commute if it uses the swapped predicate. 3649 for (Value *V : VL) { 3650 auto *Cmp = cast<CmpInst>(V); 3651 Value *LHS = Cmp->getOperand(0); 3652 Value *RHS = Cmp->getOperand(1); 3653 if (Cmp->getPredicate() != P0) 3654 std::swap(LHS, RHS); 3655 Left.push_back(LHS); 3656 Right.push_back(RHS); 3657 } 3658 } 3659 TE->setOperand(0, Left); 3660 TE->setOperand(1, Right); 3661 buildTree_rec(Left, Depth + 1, {TE, 0}); 3662 buildTree_rec(Right, Depth + 1, {TE, 1}); 3663 return; 3664 } 3665 case Instruction::Select: 3666 case Instruction::FNeg: 3667 case Instruction::Add: 3668 case Instruction::FAdd: 3669 case Instruction::Sub: 3670 case Instruction::FSub: 3671 case Instruction::Mul: 3672 case Instruction::FMul: 3673 case Instruction::UDiv: 3674 case Instruction::SDiv: 3675 case Instruction::FDiv: 3676 case Instruction::URem: 3677 case Instruction::SRem: 3678 case Instruction::FRem: 3679 case Instruction::Shl: 3680 case Instruction::LShr: 3681 case Instruction::AShr: 3682 case Instruction::And: 3683 case Instruction::Or: 3684 case Instruction::Xor: { 3685 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3686 ReuseShuffleIndicies); 3687 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n"); 3688 3689 // Sort operands of the instructions so that each side is more likely to 3690 // have the same opcode. 3691 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 3692 ValueList Left, Right; 3693 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 3694 TE->setOperand(0, Left); 3695 TE->setOperand(1, Right); 3696 buildTree_rec(Left, Depth + 1, {TE, 0}); 3697 buildTree_rec(Right, Depth + 1, {TE, 1}); 3698 return; 3699 } 3700 3701 TE->setOperandsInOrder(); 3702 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 3703 ValueList Operands; 3704 // Prepare the operand vector. 3705 for (Value *V : VL) 3706 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 3707 3708 buildTree_rec(Operands, Depth + 1, {TE, i}); 3709 } 3710 return; 3711 } 3712 case Instruction::GetElementPtr: { 3713 // We don't combine GEPs with complicated (nested) indexing. 3714 for (Value *V : VL) { 3715 if (cast<Instruction>(V)->getNumOperands() != 2) { 3716 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 3717 BS.cancelScheduling(VL, VL0); 3718 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3719 ReuseShuffleIndicies); 3720 return; 3721 } 3722 } 3723 3724 // We can't combine several GEPs into one vector if they operate on 3725 // different types. 3726 Type *Ty0 = VL0->getOperand(0)->getType(); 3727 for (Value *V : VL) { 3728 Type *CurTy = cast<Instruction>(V)->getOperand(0)->getType(); 3729 if (Ty0 != CurTy) { 3730 LLVM_DEBUG(dbgs() 3731 << "SLP: not-vectorizable GEP (different types).\n"); 3732 BS.cancelScheduling(VL, VL0); 3733 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3734 ReuseShuffleIndicies); 3735 return; 3736 } 3737 } 3738 3739 // We don't combine GEPs with non-constant indexes. 
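      // For example, {gep %p, 1; gep %p, 2} is acceptable, while a bundle
      // where some index is a loop-variant value is rejected below, since
      // only ConstantInt indices (of a suitable width) are supported here.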
3740       Type *Ty1 = VL0->getOperand(1)->getType();
3741       for (Value *V : VL) {
3742         auto *Op = cast<Instruction>(V)->getOperand(1);
3743         if (!isa<ConstantInt>(Op) ||
3744             (Op->getType() != Ty1 &&
3745              Op->getType()->getScalarSizeInBits() >
3746                  DL->getIndexSizeInBits(
3747                      V->getType()->getPointerAddressSpace()))) {
3748           LLVM_DEBUG(dbgs()
3749                      << "SLP: not-vectorizable GEP (non-constant indexes).\n");
3750           BS.cancelScheduling(VL, VL0);
3751           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3752                        ReuseShuffleIndicies);
3753           return;
3754         }
3755       }
3756 
3757       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3758                                    ReuseShuffleIndicies);
3759       LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
3760       TE->setOperandsInOrder();
3761       for (unsigned i = 0, e = 2; i < e; ++i) {
3762         ValueList Operands;
3763         // Prepare the operand vector.
3764         for (Value *V : VL)
3765           Operands.push_back(cast<Instruction>(V)->getOperand(i));
3766 
3767         buildTree_rec(Operands, Depth + 1, {TE, i});
3768       }
3769       return;
3770     }
3771     case Instruction::Store: {
3772       // Check if the stores are consecutive or if we need to swizzle them.
3773       llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType();
3774       // Avoid types that are padded when being allocated as scalars, while
3775       // being packed together in a vector (such as i1).
3776       if (DL->getTypeSizeInBits(ScalarTy) !=
3777           DL->getTypeAllocSizeInBits(ScalarTy)) {
3778         BS.cancelScheduling(VL, VL0);
3779         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3780                      ReuseShuffleIndicies);
3781         LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n");
3782         return;
3783       }
3784       // Make sure all stores in the bundle are simple - we can't vectorize
3785       // atomic or volatile stores.
3786       SmallVector<Value *, 4> PointerOps(VL.size());
3787       ValueList Operands(VL.size());
3788       auto POIter = PointerOps.begin();
3789       auto OIter = Operands.begin();
3790       for (Value *V : VL) {
3791         auto *SI = cast<StoreInst>(V);
3792         if (!SI->isSimple()) {
3793           BS.cancelScheduling(VL, VL0);
3794           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3795                        ReuseShuffleIndicies);
3796           LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n");
3797           return;
3798         }
3799         *POIter = SI->getPointerOperand();
3800         *OIter = SI->getValueOperand();
3801         ++POIter;
3802         ++OIter;
3803       }
3804 
3805       OrdersType CurrentOrder;
3806       // Check the order of pointer operands.
3807       if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) {
3808         Value *Ptr0;
3809         Value *PtrN;
3810         if (CurrentOrder.empty()) {
3811           Ptr0 = PointerOps.front();
3812           PtrN = PointerOps.back();
3813         } else {
3814           Ptr0 = PointerOps[CurrentOrder.front()];
3815           PtrN = PointerOps[CurrentOrder.back()];
3816         }
3817         Optional<int> Dist =
3818             getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE);
3819         // Check that the sorted pointer operands are consecutive.
3820         if (static_cast<unsigned>(*Dist) == VL.size() - 1) {
3821           if (CurrentOrder.empty()) {
3822             // Original stores are consecutive and do not require reordering.
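            // For example, stores to p+0, p+1, p+2, p+3 take this path
            // directly, while stores seen as p+2, p+0, p+1, p+3 are handled
            // by the jumbled branch below once fixupOrderingIndices has
            // normalized the order.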
3823             TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
3824                                          UserTreeIdx, ReuseShuffleIndicies);
3825             TE->setOperandsInOrder();
3826             buildTree_rec(Operands, Depth + 1, {TE, 0});
3827             LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
3828           } else {
3829             fixupOrderingIndices(CurrentOrder);
3830             TreeEntry *TE =
3831                 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3832                              ReuseShuffleIndicies, CurrentOrder);
3833             TE->setOperandsInOrder();
3834             buildTree_rec(Operands, Depth + 1, {TE, 0});
3835             LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n");
3836           }
3837           return;
3838         }
3839       }
3840 
3841       BS.cancelScheduling(VL, VL0);
3842       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3843                    ReuseShuffleIndicies);
3844       LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
3845       return;
3846     }
3847     case Instruction::Call: {
3848       // Check if the calls are all to the same vectorizable intrinsic or
3849       // library function.
3850       CallInst *CI = cast<CallInst>(VL0);
3851       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3852 
3853       VFShape Shape = VFShape::get(
3854           *CI, ElementCount::getFixed(static_cast<unsigned int>(VL.size())),
3855           false /*HasGlobalPred*/);
3856       Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3857 
3858       if (!VecFunc && !isTriviallyVectorizable(ID)) {
3859         BS.cancelScheduling(VL, VL0);
3860         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3861                      ReuseShuffleIndicies);
3862         LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
3863         return;
3864       }
3865       Function *F = CI->getCalledFunction();
3866       unsigned NumArgs = CI->arg_size();
3867       SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr);
3868       for (unsigned j = 0; j != NumArgs; ++j)
3869         if (hasVectorInstrinsicScalarOpd(ID, j))
3870           ScalarArgs[j] = CI->getArgOperand(j);
3871       for (Value *V : VL) {
3872         CallInst *CI2 = dyn_cast<CallInst>(V);
3873         if (!CI2 || CI2->getCalledFunction() != F ||
3874             getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
3875             (VecFunc &&
3876              VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) ||
3877             !CI->hasIdenticalOperandBundleSchema(*CI2)) {
3878           BS.cancelScheduling(VL, VL0);
3879           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3880                        ReuseShuffleIndicies);
3881           LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V
3882                             << "\n");
3883           return;
3884         }
3885         // Some intrinsics have scalar arguments, and these must be the same
3886         // across the whole bundle for the calls to be vectorized.
3887         for (unsigned j = 0; j != NumArgs; ++j) {
3888           if (hasVectorInstrinsicScalarOpd(ID, j)) {
3889             Value *A1J = CI2->getArgOperand(j);
3890             if (ScalarArgs[j] != A1J) {
3891               BS.cancelScheduling(VL, VL0);
3892               newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3893                            ReuseShuffleIndicies);
3894               LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
3895                                 << " argument " << ScalarArgs[j] << "!=" << A1J
3896                                 << "\n");
3897               return;
3898             }
3899           }
3900         }
3901         // Verify that the bundle operands are identical between the two calls.
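        // For example, a call carrying a "deopt" operand bundle must not be
        // combined with an otherwise identical call that has no bundle (or a
        // bundle with different contents).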
3902         if (CI->hasOperandBundles() &&
3903             !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
3904                         CI->op_begin() + CI->getBundleOperandsEndIndex(),
3905                         CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
3906           BS.cancelScheduling(VL, VL0);
3907           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3908                        ReuseShuffleIndicies);
3909           LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
3910                             << *CI << "!=" << *V << '\n');
3911           return;
3912         }
3913       }
3914 
3915       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3916                                    ReuseShuffleIndicies);
3917       TE->setOperandsInOrder();
3918       for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
3919         ValueList Operands;
3920         // Prepare the operand vector.
3921         for (Value *V : VL) {
3922           auto *CI2 = cast<CallInst>(V);
3923           Operands.push_back(CI2->getArgOperand(i));
3924         }
3925         buildTree_rec(Operands, Depth + 1, {TE, i});
3926       }
3927       return;
3928     }
3929     case Instruction::ShuffleVector: {
3930       // If this is not an alternate sequence of opcodes like add-sub
3931       // then do not vectorize this instruction.
3932       if (!S.isAltShuffle()) {
3933         BS.cancelScheduling(VL, VL0);
3934         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3935                      ReuseShuffleIndicies);
3936         LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
3937         return;
3938       }
3939       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3940                                    ReuseShuffleIndicies);
3941       LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
3942 
3943       // Reorder operands if reordering would enable vectorization.
3944       if (isa<BinaryOperator>(VL0)) {
3945         ValueList Left, Right;
3946         reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
3947         TE->setOperand(0, Left);
3948         TE->setOperand(1, Right);
3949         buildTree_rec(Left, Depth + 1, {TE, 0});
3950         buildTree_rec(Right, Depth + 1, {TE, 1});
3951         return;
3952       }
3953 
3954       TE->setOperandsInOrder();
3955       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
3956         ValueList Operands;
3957         // Prepare the operand vector.
3958         for (Value *V : VL)
3959           Operands.push_back(cast<Instruction>(V)->getOperand(i));
3960 
3961         buildTree_rec(Operands, Depth + 1, {TE, i});
3962       }
3963       return;
3964     }
3965     default:
3966       BS.cancelScheduling(VL, VL0);
3967       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3968                    ReuseShuffleIndicies);
3969       LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
3970       return;
3971   }
3972 }
3973 
3974 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
3975   unsigned N = 1;
3976   Type *EltTy = T;
3977 
3978   while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) ||
3979          isa<VectorType>(EltTy)) {
3980     if (auto *ST = dyn_cast<StructType>(EltTy)) {
3981       // Check that struct is homogeneous.
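      // For example, {i32, i32, i32, i32} maps to 4 lanes of i32, while
      // {i32, float} is rejected because its element types differ.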
3982 for (const auto *Ty : ST->elements()) 3983 if (Ty != *ST->element_begin()) 3984 return 0; 3985 N *= ST->getNumElements(); 3986 EltTy = *ST->element_begin(); 3987 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) { 3988 N *= AT->getNumElements(); 3989 EltTy = AT->getElementType(); 3990 } else { 3991 auto *VT = cast<FixedVectorType>(EltTy); 3992 N *= VT->getNumElements(); 3993 EltTy = VT->getElementType(); 3994 } 3995 } 3996 3997 if (!isValidElementType(EltTy)) 3998 return 0; 3999 uint64_t VTSize = DL.getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N)); 4000 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T)) 4001 return 0; 4002 return N; 4003 } 4004 4005 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 4006 SmallVectorImpl<unsigned> &CurrentOrder) const { 4007 Instruction *E0 = cast<Instruction>(OpValue); 4008 assert(E0->getOpcode() == Instruction::ExtractElement || 4009 E0->getOpcode() == Instruction::ExtractValue); 4010 assert(E0->getOpcode() == getSameOpcode(VL).getOpcode() && "Invalid opcode"); 4011 // Check if all of the extracts come from the same vector and from the 4012 // correct offset. 4013 Value *Vec = E0->getOperand(0); 4014 4015 CurrentOrder.clear(); 4016 4017 // We have to extract from a vector/aggregate with the same number of elements. 4018 unsigned NElts; 4019 if (E0->getOpcode() == Instruction::ExtractValue) { 4020 const DataLayout &DL = E0->getModule()->getDataLayout(); 4021 NElts = canMapToVector(Vec->getType(), DL); 4022 if (!NElts) 4023 return false; 4024 // Check if load can be rewritten as load of vector. 4025 LoadInst *LI = dyn_cast<LoadInst>(Vec); 4026 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 4027 return false; 4028 } else { 4029 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); 4030 } 4031 4032 if (NElts != VL.size()) 4033 return false; 4034 4035 // Check that all of the indices extract from the correct offset. 4036 bool ShouldKeepOrder = true; 4037 unsigned E = VL.size(); 4038 // Assign to all items the initial value E + 1 so we can check if the extract 4039 // instruction index was used already. 4040 // Also, later we can check that all the indices are used and we have a 4041 // consecutive access in the extract instructions, by checking that no 4042 // element of CurrentOrder still has value E + 1. 
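  // For example, extracts of lanes {1, 0, 3, 2} from one source vector yield
  // CurrentOrder = {1, 0, 3, 2} and a false result (a reorder is required),
  // while in-order extracts of lanes {0, 1, 2, 3} yield the identity order
  // and a true result.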
4043   CurrentOrder.assign(E, E + 1);
4044   unsigned I = 0;
4045   for (; I < E; ++I) {
4046     auto *Inst = cast<Instruction>(VL[I]);
4047     if (Inst->getOperand(0) != Vec)
4048       break;
4049     Optional<unsigned> Idx = getExtractIndex(Inst);
4050     if (!Idx)
4051       break;
4052     const unsigned ExtIdx = *Idx;
4053     if (ExtIdx != I) {
4054       if (ExtIdx >= E || CurrentOrder[ExtIdx] != E + 1)
4055         break;
4056       ShouldKeepOrder = false;
4057       CurrentOrder[ExtIdx] = I;
4058     } else {
4059       if (CurrentOrder[I] != E + 1)
4060         break;
4061       CurrentOrder[I] = I;
4062     }
4063   }
4064   if (I < E) {
4065     CurrentOrder.clear();
4066     return false;
4067   }
4068 
4069   return ShouldKeepOrder;
4070 }
4071 
4072 bool BoUpSLP::areAllUsersVectorized(Instruction *I,
4073                                     ArrayRef<Value *> VectorizedVals) const {
4074   return (I->hasOneUse() && is_contained(VectorizedVals, I)) ||
4075          llvm::all_of(I->users(), [this](User *U) {
4076            return ScalarToTreeEntry.count(U) > 0;
4077          });
4078 }
4079 
4080 static std::pair<InstructionCost, InstructionCost>
4081 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy,
4082                    TargetTransformInfo *TTI, TargetLibraryInfo *TLI) {
4083   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4084 
4085   // Calculate the cost of the scalar and vector calls.
4086   SmallVector<Type *, 4> VecTys;
4087   for (Use &Arg : CI->args())
4088     VecTys.push_back(
4089         FixedVectorType::get(Arg->getType(), VecTy->getNumElements()));
4090   FastMathFlags FMF;
4091   if (auto *FPCI = dyn_cast<FPMathOperator>(CI))
4092     FMF = FPCI->getFastMathFlags();
4093   SmallVector<const Value *> Arguments(CI->args());
4094   IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF,
4095                                     dyn_cast<IntrinsicInst>(CI));
4096   auto IntrinsicCost =
4097       TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput);
4098 
4099   auto Shape = VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
4100                                      VecTy->getNumElements())),
4101                             false /*HasGlobalPred*/);
4102   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
4103   auto LibCost = IntrinsicCost;
4104   if (!CI->isNoBuiltin() && VecFunc) {
4105     // Calculate the cost of the vector library call.
4106     // If the corresponding vector call is cheaper, return its cost.
4107     LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys,
4108                                     TTI::TCK_RecipThroughput);
4109   }
4110   return {IntrinsicCost, LibCost};
4111 }
4112 
4113 /// Compute the cost of creating a vector of type \p VecTy containing the
4114 /// extracted values from \p VL.
4115 static InstructionCost
4116 computeExtractCost(ArrayRef<Value *> VL, FixedVectorType *VecTy,
4117                    TargetTransformInfo::ShuffleKind ShuffleKind,
4118                    ArrayRef<int> Mask, TargetTransformInfo &TTI) {
4119   unsigned NumOfParts = TTI.getNumberOfParts(VecTy);
4120 
4121   if (ShuffleKind != TargetTransformInfo::SK_PermuteSingleSrc || !NumOfParts ||
4122       VecTy->getNumElements() < NumOfParts)
4123     return TTI.getShuffleCost(ShuffleKind, VecTy, Mask);
4124 
4125   bool AllConsecutive = true;
4126   unsigned EltsPerVector = VecTy->getNumElements() / NumOfParts;
4127   unsigned Idx = -1; // Wraps to the maximum value; the first ++Idx yields 0.
4128   InstructionCost Cost = 0;
4129 
4130   // Process extracts in blocks of EltsPerVector to check if the source vector
4131   // operand can be re-used directly. If not, add the cost of creating a shuffle
4132   // to extract the values into a vector register.
4133   for (auto *V : VL) {
4134     ++Idx;
4135 
4136     // Reached the start of a new vector register.
4137     if (Idx % EltsPerVector == 0) {
4138       AllConsecutive = true;
4139       continue;
4140     }
4141 
4142     // Check that all extracts for a vector register on the target directly
4143     // extract values in order.
4144     unsigned CurrentIdx = *getExtractIndex(cast<Instruction>(V));
4145     unsigned PrevIdx = *getExtractIndex(cast<Instruction>(VL[Idx - 1]));
4146     AllConsecutive &= PrevIdx + 1 == CurrentIdx &&
4147                       CurrentIdx % EltsPerVector == Idx % EltsPerVector;
4148 
4149     if (AllConsecutive)
4150       continue;
4151 
4152     // Skip all indices except the last index per vector block.
4153     if ((Idx + 1) % EltsPerVector != 0 && Idx + 1 != VL.size())
4154       continue;
4155 
4156     // If we have a series of extracts which are not consecutive and hence
4157     // cannot re-use the source vector register directly, compute the shuffle
4158     // cost to extract a vector with EltsPerVector elements.
4159     Cost += TTI.getShuffleCost(
4160         TargetTransformInfo::SK_PermuteSingleSrc,
4161         FixedVectorType::get(VecTy->getElementType(), EltsPerVector));
4162   }
4163   return Cost;
4164 }
4165 
4166 /// Builds the shuffle mask for a shuffle graph entry and the lists of main
4167 /// and alternate operation operands.
4168 static void
4169 buildSuffleEntryMask(ArrayRef<Value *> VL, ArrayRef<unsigned> ReorderIndices,
4170                      ArrayRef<int> ReusesIndices,
4171                      const function_ref<bool(Instruction *)> IsAltOp,
4172                      SmallVectorImpl<int> &Mask,
4173                      SmallVectorImpl<Value *> *OpScalars = nullptr,
4174                      SmallVectorImpl<Value *> *AltScalars = nullptr) {
4175   unsigned Sz = VL.size();
4176   Mask.assign(Sz, UndefMaskElem);
4177   SmallVector<int> OrderMask;
4178   if (!ReorderIndices.empty())
4179     inversePermutation(ReorderIndices, OrderMask);
4180   for (unsigned I = 0; I < Sz; ++I) {
4181     unsigned Idx = I;
4182     if (!ReorderIndices.empty())
4183       Idx = OrderMask[I];
4184     auto *OpInst = cast<Instruction>(VL[Idx]);
4185     if (IsAltOp(OpInst)) {
4186       Mask[I] = Sz + Idx;
4187       if (AltScalars)
4188         AltScalars->push_back(OpInst);
4189     } else {
4190       Mask[I] = Idx;
4191       if (OpScalars)
4192         OpScalars->push_back(OpInst);
4193     }
4194   }
4195   if (!ReusesIndices.empty()) {
4196     SmallVector<int> NewMask(ReusesIndices.size(), UndefMaskElem);
4197     transform(ReusesIndices, NewMask.begin(), [&Mask](int Idx) {
4198       return Idx != UndefMaskElem ? Mask[Idx] : UndefMaskElem;
4199     });
4200     Mask.swap(NewMask);
4201   }
4202 }
4203 
4204 InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
4205                                       ArrayRef<Value *> VectorizedVals) {
4206   ArrayRef<Value*> VL = E->Scalars;
4207 
4208   Type *ScalarTy = VL[0]->getType();
4209   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
4210     ScalarTy = SI->getValueOperand()->getType();
4211   else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0]))
4212     ScalarTy = CI->getOperand(0)->getType();
4213   else if (auto *IE = dyn_cast<InsertElementInst>(VL[0]))
4214     ScalarTy = IE->getOperand(1)->getType();
4215   auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
4216   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4217 
4218   // If we have computed a smaller type for the expression, update VecTy so
4219   // that the costs will be accurate.
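  // For example, if the expression was proven to need only 8 bits of an i32
  // value, the costs below are queried for <N x i8> rather than <N x i32>.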
4220 if (MinBWs.count(VL[0])) 4221 VecTy = FixedVectorType::get( 4222 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size()); 4223 auto *FinalVecTy = VecTy; 4224 4225 unsigned ReuseShuffleNumbers = E->ReuseShuffleIndices.size(); 4226 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 4227 if (NeedToShuffleReuses) 4228 FinalVecTy = 4229 FixedVectorType::get(VecTy->getElementType(), ReuseShuffleNumbers); 4230 // FIXME: it tries to fix a problem with MSVC buildbots. 4231 TargetTransformInfo &TTIRef = *TTI; 4232 auto &&AdjustExtractsCost = [this, &TTIRef, CostKind, VL, VecTy, 4233 VectorizedVals](InstructionCost &Cost, 4234 bool IsGather) { 4235 DenseMap<Value *, int> ExtractVectorsTys; 4236 for (auto *V : VL) { 4237 // If all users of instruction are going to be vectorized and this 4238 // instruction itself is not going to be vectorized, consider this 4239 // instruction as dead and remove its cost from the final cost of the 4240 // vectorized tree. 4241 if (!areAllUsersVectorized(cast<Instruction>(V), VectorizedVals) || 4242 (IsGather && ScalarToTreeEntry.count(V))) 4243 continue; 4244 auto *EE = cast<ExtractElementInst>(V); 4245 unsigned Idx = *getExtractIndex(EE); 4246 if (TTIRef.getNumberOfParts(VecTy) != 4247 TTIRef.getNumberOfParts(EE->getVectorOperandType())) { 4248 auto It = 4249 ExtractVectorsTys.try_emplace(EE->getVectorOperand(), Idx).first; 4250 It->getSecond() = std::min<int>(It->second, Idx); 4251 } 4252 // Take credit for instruction that will become dead. 4253 if (EE->hasOneUse()) { 4254 Instruction *Ext = EE->user_back(); 4255 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 4256 all_of(Ext->users(), 4257 [](User *U) { return isa<GetElementPtrInst>(U); })) { 4258 // Use getExtractWithExtendCost() to calculate the cost of 4259 // extractelement/ext pair. 4260 Cost -= 4261 TTIRef.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(), 4262 EE->getVectorOperandType(), Idx); 4263 // Add back the cost of s|zext which is subtracted separately. 4264 Cost += TTIRef.getCastInstrCost( 4265 Ext->getOpcode(), Ext->getType(), EE->getType(), 4266 TTI::getCastContextHint(Ext), CostKind, Ext); 4267 continue; 4268 } 4269 } 4270 Cost -= TTIRef.getVectorInstrCost(Instruction::ExtractElement, 4271 EE->getVectorOperandType(), Idx); 4272 } 4273 // Add a cost for subvector extracts/inserts if required. 4274 for (const auto &Data : ExtractVectorsTys) { 4275 auto *EEVTy = cast<FixedVectorType>(Data.first->getType()); 4276 unsigned NumElts = VecTy->getNumElements(); 4277 if (TTIRef.getNumberOfParts(EEVTy) > TTIRef.getNumberOfParts(VecTy)) { 4278 unsigned Idx = (Data.second / NumElts) * NumElts; 4279 unsigned EENumElts = EEVTy->getNumElements(); 4280 if (Idx + NumElts <= EENumElts) { 4281 Cost += 4282 TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 4283 EEVTy, None, Idx, VecTy); 4284 } else { 4285 // Need to round up the subvector type vectorization factor to avoid a 4286 // crash in cost model functions. Make SubVT so that Idx + VF of SubVT 4287 // <= EENumElts. 
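        // For example, with an 8-element source and a 4-element VecTy, an
        // extract at Idx = 6 would cover elements 6..9; clamping SubVT to
        // 8 - 6 = 2 elements keeps the query within bounds.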
4288         auto *SubVT =
4289             FixedVectorType::get(VecTy->getElementType(), EENumElts - Idx);
4290         Cost +=
4291             TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
4292                                   EEVTy, None, Idx, SubVT);
4293       }
4294     } else {
4295       Cost += TTIRef.getShuffleCost(TargetTransformInfo::SK_InsertSubvector,
4296                                     VecTy, None, 0, EEVTy);
4297     }
4298   }
4299   };
4300   if (E->State == TreeEntry::NeedToGather) {
4301     if (allConstant(VL))
4302       return 0;
4303     if (isa<InsertElementInst>(VL[0]))
4304       return InstructionCost::getInvalid();
4305     SmallVector<int> Mask;
4306     SmallVector<const TreeEntry *> Entries;
4307     Optional<TargetTransformInfo::ShuffleKind> Shuffle =
4308         isGatherShuffledEntry(E, Mask, Entries);
4309     if (Shuffle.hasValue()) {
4310       InstructionCost GatherCost = 0;
4311       if (ShuffleVectorInst::isIdentityMask(Mask)) {
4312         // Perfect match in the graph, will reuse the previously vectorized
4313         // node. Cost is 0.
4314         LLVM_DEBUG(
4315             dbgs()
4316             << "SLP: perfect diamond match for gather bundle that starts with "
4317             << *VL.front() << ".\n");
4318         if (NeedToShuffleReuses)
4319           GatherCost =
4320               TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
4321                                   FinalVecTy, E->ReuseShuffleIndices);
4322       } else {
4323         LLVM_DEBUG(dbgs() << "SLP: shuffled " << Entries.size()
4324                           << " entries for bundle that starts with "
4325                           << *VL.front() << ".\n");
4326         // Detected that instead of a gather we can emit a shuffle of one or
4327         // two previously vectorized nodes. Add the cost of the permutation
4328         // rather than the cost of the gather.
4329         ::addMask(Mask, E->ReuseShuffleIndices);
4330         GatherCost = TTI->getShuffleCost(*Shuffle, FinalVecTy, Mask);
4331       }
4332       return GatherCost;
4333     }
4334     if (isSplat(VL)) {
4335       // Found a broadcast of a single scalar; calculate the cost as a
4336       // broadcast.
4337       return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy);
4338     }
4339     if (E->getOpcode() == Instruction::ExtractElement && allSameType(VL) &&
4340         allSameBlock(VL) &&
4341         !isa<ScalableVectorType>(
4342             cast<ExtractElementInst>(E->getMainOp())->getVectorOperandType())) {
4343       // Check that a gather of extractelements can be represented as just a
4344       // shuffle of the single/two vectors the scalars are extracted from.
4345       SmallVector<int> Mask;
4346       Optional<TargetTransformInfo::ShuffleKind> ShuffleKind =
4347           isFixedVectorShuffle(VL, Mask);
4348       if (ShuffleKind.hasValue()) {
4349         // Found a group of extractelement instructions that must be gathered
4350         // into a vector and can be represented as a permutation of elements
4351         // from a single input vector or from 2 input vectors.
4352         InstructionCost Cost =
4353             computeExtractCost(VL, VecTy, *ShuffleKind, Mask, *TTI);
4354         AdjustExtractsCost(Cost, /*IsGather=*/true);
4355         if (NeedToShuffleReuses)
4356           Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
4357                                       FinalVecTy, E->ReuseShuffleIndices);
4358         return Cost;
4359       }
4360     }
4361     InstructionCost ReuseShuffleCost = 0;
4362     if (NeedToShuffleReuses)
4363       ReuseShuffleCost = TTI->getShuffleCost(
4364           TTI::SK_PermuteSingleSrc, FinalVecTy, E->ReuseShuffleIndices);
4365     // Improve the gather cost for a gather of loads if we can group some of
4366     // the loads into vector loads.
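    // For example, if lanes 0-3 of an 8-lane gather are consecutive loads,
    // they are priced (roughly) as one vector load plus subvector shuffles,
    // while only the remaining lanes are priced as a true gather.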
4367 if (VL.size() > 2 && E->getOpcode() == Instruction::Load && 4368 !E->isAltShuffle()) { 4369 BoUpSLP::ValueSet VectorizedLoads; 4370 unsigned StartIdx = 0; 4371 unsigned VF = VL.size() / 2; 4372 unsigned VectorizedCnt = 0; 4373 unsigned ScatterVectorizeCnt = 0; 4374 const unsigned Sz = DL->getTypeSizeInBits(E->getMainOp()->getType()); 4375 for (unsigned MinVF = getMinVF(2 * Sz); VF >= MinVF; VF /= 2) { 4376 for (unsigned Cnt = StartIdx, End = VL.size(); Cnt + VF <= End; 4377 Cnt += VF) { 4378 ArrayRef<Value *> Slice = VL.slice(Cnt, VF); 4379 if (!VectorizedLoads.count(Slice.front()) && 4380 !VectorizedLoads.count(Slice.back()) && allSameBlock(Slice)) { 4381 SmallVector<Value *> PointerOps; 4382 OrdersType CurrentOrder; 4383 LoadsState LS = canVectorizeLoads(Slice, Slice.front(), *TTI, *DL, 4384 *SE, CurrentOrder, PointerOps); 4385 switch (LS) { 4386 case LoadsState::Vectorize: 4387 case LoadsState::ScatterVectorize: 4388 // Mark the vectorized loads so that we don't vectorize them 4389 // again. 4390 if (LS == LoadsState::Vectorize) 4391 ++VectorizedCnt; 4392 else 4393 ++ScatterVectorizeCnt; 4394 VectorizedLoads.insert(Slice.begin(), Slice.end()); 4395 // If we vectorized initial block, no need to try to vectorize it 4396 // again. 4397 if (Cnt == StartIdx) 4398 StartIdx += VF; 4399 break; 4400 case LoadsState::Gather: 4401 break; 4402 } 4403 } 4404 } 4405 // Check if the whole array was vectorized already - exit. 4406 if (StartIdx >= VL.size()) 4407 break; 4408 // Found vectorizable parts - exit. 4409 if (!VectorizedLoads.empty()) 4410 break; 4411 } 4412 if (!VectorizedLoads.empty()) { 4413 InstructionCost GatherCost = 0; 4414 // Get the cost for gathered loads. 4415 for (unsigned I = 0, End = VL.size(); I < End; I += VF) { 4416 if (VectorizedLoads.contains(VL[I])) 4417 continue; 4418 GatherCost += getGatherCost(VL.slice(I, VF)); 4419 } 4420 // The cost for vectorized loads. 4421 InstructionCost ScalarsCost = 0; 4422 for (Value *V : VectorizedLoads) { 4423 auto *LI = cast<LoadInst>(V); 4424 ScalarsCost += TTI->getMemoryOpCost( 4425 Instruction::Load, LI->getType(), LI->getAlign(), 4426 LI->getPointerAddressSpace(), CostKind, LI); 4427 } 4428 auto *LI = cast<LoadInst>(E->getMainOp()); 4429 auto *LoadTy = FixedVectorType::get(LI->getType(), VF); 4430 Align Alignment = LI->getAlign(); 4431 GatherCost += 4432 VectorizedCnt * 4433 TTI->getMemoryOpCost(Instruction::Load, LoadTy, Alignment, 4434 LI->getPointerAddressSpace(), CostKind, LI); 4435 GatherCost += ScatterVectorizeCnt * 4436 TTI->getGatherScatterOpCost( 4437 Instruction::Load, LoadTy, LI->getPointerOperand(), 4438 /*VariableMask=*/false, Alignment, CostKind, LI); 4439 // Add the cost for the subvectors shuffling. 4440 GatherCost += ((VL.size() - VF) / VF) * 4441 TTI->getShuffleCost(TTI::SK_Select, VecTy); 4442 return ReuseShuffleCost + GatherCost - ScalarsCost; 4443 } 4444 } 4445 return ReuseShuffleCost + getGatherCost(VL); 4446 } 4447 InstructionCost CommonCost = 0; 4448 SmallVector<int> Mask; 4449 if (!E->ReorderIndices.empty()) { 4450 SmallVector<int> NewMask; 4451 if (E->getOpcode() == Instruction::Store) { 4452 // For stores the order is actually a mask. 
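      // I.e. a store entry's ReorderIndices can be copied into the shuffle
      // mask directly, whereas for other entries the order must first be
      // turned into a mask via inversePermutation.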
4453       NewMask.resize(E->ReorderIndices.size());
4454       copy(E->ReorderIndices, NewMask.begin());
4455     } else {
4456       inversePermutation(E->ReorderIndices, NewMask);
4457     }
4458     ::addMask(Mask, NewMask);
4459   }
4460   if (NeedToShuffleReuses)
4461     ::addMask(Mask, E->ReuseShuffleIndices);
4462   if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask))
4463     CommonCost =
4464         TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask);
4465   assert((E->State == TreeEntry::Vectorize ||
4466           E->State == TreeEntry::ScatterVectorize) &&
4467          "Unhandled state");
4468   assert(E->getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
4469   Instruction *VL0 = E->getMainOp();
4470   unsigned ShuffleOrOp =
4471       E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
4472   switch (ShuffleOrOp) {
4473   case Instruction::PHI:
4474     return 0;
4475 
4476   case Instruction::ExtractValue:
4477   case Instruction::ExtractElement: {
4478     // The common cost of removing the ExtractElement/ExtractValue instructions
4479     // plus the cost of shuffles, if required to reshuffle the original vector.
4480     if (NeedToShuffleReuses) {
4481       unsigned Idx = 0;
4482       for (unsigned I : E->ReuseShuffleIndices) {
4483         if (ShuffleOrOp == Instruction::ExtractElement) {
4484           auto *EE = cast<ExtractElementInst>(VL[I]);
4485           CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
4486                                                 EE->getVectorOperandType(),
4487                                                 *getExtractIndex(EE));
4488         } else {
4489           CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
4490                                                 VecTy, Idx);
4491           ++Idx;
4492         }
4493       }
4494       Idx = ReuseShuffleNumbers;
4495       for (Value *V : VL) {
4496         if (ShuffleOrOp == Instruction::ExtractElement) {
4497           auto *EE = cast<ExtractElementInst>(V);
4498           CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
4499                                                 EE->getVectorOperandType(),
4500                                                 *getExtractIndex(EE));
4501         } else {
4502           --Idx;
4503           CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
4504                                                 VecTy, Idx);
4505         }
4506       }
4507     }
4508     if (ShuffleOrOp == Instruction::ExtractValue) {
4509       for (unsigned I = 0, E = VL.size(); I < E; ++I) {
4510         auto *EI = cast<Instruction>(VL[I]);
4511         // Take credit for the instruction that will become dead.
4512         if (EI->hasOneUse()) {
4513           Instruction *Ext = EI->user_back();
4514           if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
4515               all_of(Ext->users(),
4516                      [](User *U) { return isa<GetElementPtrInst>(U); })) {
4517             // Use getExtractWithExtendCost() to calculate the cost of
4518             // extractelement/ext pair.
4519             CommonCost -= TTI->getExtractWithExtendCost(
4520                 Ext->getOpcode(), Ext->getType(), VecTy, I);
4521             // Add back the cost of s|zext which is subtracted separately.
4522             CommonCost += TTI->getCastInstrCost(
4523                 Ext->getOpcode(), Ext->getType(), EI->getType(),
4524                 TTI::getCastContextHint(Ext), CostKind, Ext);
4525             continue;
4526           }
4527         }
4528         CommonCost -=
4529             TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, I);
4530       }
4531     } else {
4532       AdjustExtractsCost(CommonCost, /*IsGather=*/false);
4533     }
4534     return CommonCost;
4535   }
4536   case Instruction::InsertElement: {
4537     assert(E->ReuseShuffleIndices.empty() &&
4538            "Unique insertelements only are expected.");
4539     auto *SrcVecTy = cast<FixedVectorType>(VL0->getType());
4540 
4541     unsigned const NumElts = SrcVecTy->getNumElements();
4542     unsigned const NumScalars = VL.size();
4543     APInt DemandedElts = APInt::getZero(NumElts);
4544     // TODO: Add support for Instruction::InsertValue.
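    // For example, inserts into lanes 0..3 of an 8-lane vector that arrive
    // in order give Offset = 0 with an identity mask, so the entry is only
    // credited with the scalarization overhead it avoids and no extra
    // permute cost is added below.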
    SmallVector<int> Mask;
    if (!E->ReorderIndices.empty()) {
      inversePermutation(E->ReorderIndices, Mask);
      Mask.append(NumElts - NumScalars, UndefMaskElem);
    } else {
      Mask.assign(NumElts, UndefMaskElem);
      std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0);
    }
    unsigned Offset = *getInsertIndex(VL0, 0);
    bool IsIdentity = true;
    SmallVector<int> PrevMask(NumElts, UndefMaskElem);
    Mask.swap(PrevMask);
    for (unsigned I = 0; I < NumScalars; ++I) {
      Optional<int> InsertIdx = getInsertIndex(VL[PrevMask[I]], 0);
      if (!InsertIdx || *InsertIdx == UndefMaskElem)
        continue;
      DemandedElts.setBit(*InsertIdx);
      IsIdentity &= *InsertIdx - Offset == I;
      Mask[*InsertIdx - Offset] = I;
    }
    assert(Offset < NumElts && "Failed to find vector index offset");

    InstructionCost Cost = 0;
    Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts,
                                          /*Insert*/ true, /*Extract*/ false);

    if (IsIdentity && NumElts != NumScalars && Offset % NumScalars != 0) {
      // FIXME: Replace with SK_InsertSubvector once it is properly supported.
      unsigned Sz = PowerOf2Ceil(Offset + NumScalars);
      Cost += TTI->getShuffleCost(
          TargetTransformInfo::SK_PermuteSingleSrc,
          FixedVectorType::get(SrcVecTy->getElementType(), Sz));
    } else if (!IsIdentity) {
      auto *FirstInsert =
          cast<Instruction>(*find_if(E->Scalars, [E](Value *V) {
            return !is_contained(E->Scalars,
                                 cast<Instruction>(V)->getOperand(0));
          }));
      if (isa<UndefValue>(FirstInsert->getOperand(0))) {
        Cost += TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, SrcVecTy, Mask);
      } else {
        SmallVector<int> InsertMask(NumElts);
        std::iota(InsertMask.begin(), InsertMask.end(), 0);
        for (unsigned I = 0; I < NumElts; I++) {
          if (Mask[I] != UndefMaskElem)
            InsertMask[Offset + I] = NumElts + I;
        }
        Cost +=
            TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVecTy, InsertMask);
      }
    }

    return Cost;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();
    InstructionCost ScalarEltCost =
        TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy,
                              TTI::getCastContextHint(VL0), CostKind, VL0);
    if (NeedToShuffleReuses) {
      CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
    }

    // Calculate the cost of this instruction.
    InstructionCost ScalarCost = VL.size() * ScalarEltCost;

    auto *SrcVecTy = FixedVectorType::get(SrcTy, VL.size());
    InstructionCost VecCost = 0;
    // Check if the values are candidates to demote.
    if (!MinBWs.count(VL0) || VecTy != SrcVecTy) {
      VecCost = CommonCost + TTI->getCastInstrCost(
                                 E->getOpcode(), VecTy, SrcVecTy,
                                 TTI::getCastContextHint(VL0), CostKind, VL0);
    }
    LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
    return VecCost - ScalarCost;
  }
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select: {
    // Calculate the cost of this instruction.
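    // The scalar baseline below is one compare/select per lane; it is weighed
    // against a single wide compare/select or, when the whole bundle can be
    // converted, a min/max intrinsic.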
    InstructionCost ScalarEltCost =
        TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, Builder.getInt1Ty(),
                                CmpInst::BAD_ICMP_PREDICATE, CostKind, VL0);
    if (NeedToShuffleReuses) {
      CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
    }
    auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size());
    InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;

    // Check if all entries in VL are either compares or selects with compares
    // as the condition, all of them with the same predicate.
    CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE;
    bool First = true;
    for (auto *V : VL) {
      CmpInst::Predicate CurrentPred;
      auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value());
      if ((!match(V, m_Select(MatchCmp, m_Value(), m_Value())) &&
           !match(V, MatchCmp)) ||
          (!First && VecPred != CurrentPred)) {
        VecPred = CmpInst::BAD_ICMP_PREDICATE;
        break;
      }
      First = false;
      VecPred = CurrentPred;
    }

    InstructionCost VecCost = TTI->getCmpSelInstrCost(
        E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0);
    // Check if it is possible and profitable to use min/max for selects in
    // VL.
    auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL);
    if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) {
      IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy,
                                        {VecTy, VecTy});
      InstructionCost IntrinsicCost =
          TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
      // If the selects are the only uses of the compares, the compares will
      // be dead and we can subtract their cost.
      if (IntrinsicAndUse.second)
        IntrinsicCost -=
            TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy, MaskTy,
                                    CmpInst::BAD_ICMP_PREDICATE, CostKind);
      VecCost = std::min(VecCost, IntrinsicCost);
    }
    LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
    return CommonCost + VecCost - ScalarCost;
  }
  case Instruction::FNeg:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Certain instructions can be cheaper to vectorize if they have a
    // constant second vector operand.
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_UniformConstantValue;
    TargetTransformInfo::OperandValueProperties Op1VP =
        TargetTransformInfo::OP_None;
    TargetTransformInfo::OperandValueProperties Op2VP =
        TargetTransformInfo::OP_PowerOf2;

    // If all operands are exactly the same ConstantInt, then set the operand
    // kind to OK_UniformConstantValue. If instead not all operands are
    // constants, then set the operand kind to OK_AnyValue. If all operands
    // are constants but not the same, then set the operand kind to
    // OK_NonUniformConstantValue.
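    // E.g., {add x, 2; add y, 2} keeps OK_UniformConstantValue,
    // {add x, 2; add y, 4} becomes OK_NonUniformConstantValue, and
    // {add x, 2; add y, z} degrades to OK_AnyValue. OP_PowerOf2 survives only
    // if every constant operand is a power of two.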
    ConstantInt *CInt0 = nullptr;
    for (unsigned i = 0, e = VL.size(); i < e; ++i) {
      const Instruction *I = cast<Instruction>(VL[i]);
      unsigned OpIdx = isa<BinaryOperator>(I) ? 1 : 0;
      ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx));
      if (!CInt) {
        Op2VK = TargetTransformInfo::OK_AnyValue;
        Op2VP = TargetTransformInfo::OP_None;
        break;
      }
      if (Op2VP == TargetTransformInfo::OP_PowerOf2 &&
          !CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_None;
      if (i == 0) {
        CInt0 = CInt;
        continue;
      }
      if (CInt0 != CInt)
        Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
    }

    SmallVector<const Value *, 4> Operands(VL0->operand_values());
    InstructionCost ScalarEltCost =
        TTI->getArithmeticInstrCost(E->getOpcode(), ScalarTy, CostKind, Op1VK,
                                    Op2VK, Op1VP, Op2VP, Operands, VL0);
    if (NeedToShuffleReuses) {
      CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
    }
    InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
    InstructionCost VecCost =
        TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind, Op1VK,
                                    Op2VK, Op1VP, Op2VP, Operands, VL0);
    LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
    return CommonCost + VecCost - ScalarCost;
  }
  case Instruction::GetElementPtr: {
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_UniformConstantValue;

    InstructionCost ScalarEltCost = TTI->getArithmeticInstrCost(
        Instruction::Add, ScalarTy, CostKind, Op1VK, Op2VK);
    if (NeedToShuffleReuses) {
      CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
    }
    InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
    InstructionCost VecCost = TTI->getArithmeticInstrCost(
        Instruction::Add, VecTy, CostKind, Op1VK, Op2VK);
    LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
    return CommonCost + VecCost - ScalarCost;
  }
  case Instruction::Load: {
    // Cost of wide load - cost of scalar loads.
    Align Alignment = cast<LoadInst>(VL0)->getAlign();
    InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
        Instruction::Load, ScalarTy, Alignment, 0, CostKind, VL0);
    if (NeedToShuffleReuses) {
      CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
    }
    InstructionCost ScalarLdCost = VecTy->getNumElements() * ScalarEltCost;
    InstructionCost VecLdCost;
    if (E->State == TreeEntry::Vectorize) {
      VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, Alignment, 0,
                                       CostKind, VL0);
    } else {
      assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState");
      Align CommonAlignment = Alignment;
      for (Value *V : VL)
        CommonAlignment =
            commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign());
      VecLdCost = TTI->getGatherScatterOpCost(
          Instruction::Load, VecTy, cast<LoadInst>(VL0)->getPointerOperand(),
          /*VariableMask=*/false, CommonAlignment, CostKind, VL0);
    }
    LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecLdCost, ScalarLdCost));
    return CommonCost + VecLdCost - ScalarLdCost;
  }
  case Instruction::Store: {
    // We know that we can merge the stores. Calculate the cost.
    bool IsReorder = !E->ReorderIndices.empty();
    auto *SI =
        cast<StoreInst>(IsReorder ?
                            VL[E->ReorderIndices.front()] : VL0);
    Align Alignment = SI->getAlign();
    InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
        Instruction::Store, ScalarTy, Alignment, 0, CostKind, VL0);
    InstructionCost ScalarStCost = VecTy->getNumElements() * ScalarEltCost;
    InstructionCost VecStCost = TTI->getMemoryOpCost(
        Instruction::Store, VecTy, Alignment, 0, CostKind, VL0);
    LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecStCost, ScalarStCost));
    return CommonCost + VecStCost - ScalarStCost;
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

    // Calculate the cost of the scalar and vector calls.
    IntrinsicCostAttributes CostAttrs(ID, *CI, 1);
    InstructionCost ScalarEltCost =
        TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
    if (NeedToShuffleReuses) {
      CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
    }
    InstructionCost ScalarCallCost = VecTy->getNumElements() * ScalarEltCost;

    auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
    InstructionCost VecCallCost =
        std::min(VecCallCosts.first, VecCallCosts.second);

    LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
                      << " (" << VecCallCost << "-" << ScalarCallCost << ")"
                      << " for " << *CI << "\n");

    return CommonCost + VecCallCost - ScalarCallCost;
  }
  case Instruction::ShuffleVector: {
    assert(E->isAltShuffle() &&
           ((Instruction::isBinaryOp(E->getOpcode()) &&
             Instruction::isBinaryOp(E->getAltOpcode())) ||
            (Instruction::isCast(E->getOpcode()) &&
             Instruction::isCast(E->getAltOpcode()))) &&
           "Invalid Shuffle Vector Operand");
    InstructionCost ScalarCost = 0;
    if (NeedToShuffleReuses) {
      for (unsigned Idx : E->ReuseShuffleIndices) {
        Instruction *I = cast<Instruction>(VL[Idx]);
        CommonCost -= TTI->getInstructionCost(I, CostKind);
      }
      for (Value *V : VL) {
        Instruction *I = cast<Instruction>(V);
        CommonCost += TTI->getInstructionCost(I, CostKind);
      }
    }
    for (Value *V : VL) {
      Instruction *I = cast<Instruction>(V);
      assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
      ScalarCost += TTI->getInstructionCost(I, CostKind);
    }
    // VecCost is the sum of the cost of creating the two vectors and the
    // cost of creating the shuffle.
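    // E.g., for an add/sub alternating bundle this is one vector add, one
    // vector sub, and one SK_Select shuffle that picks, per lane, the result
    // matching that lane's original opcode.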
    InstructionCost VecCost = 0;
    if (Instruction::isBinaryOp(E->getOpcode())) {
      VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind);
      VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy,
                                             CostKind);
    } else {
      Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType();
      Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType();
      auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size());
      auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size());
      VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty,
                                      TTI::CastContextHint::None, CostKind);
      VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty,
                                       TTI::CastContextHint::None, CostKind);
    }

    SmallVector<int> Mask;
    buildSuffleEntryMask(
        E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices,
        [E](Instruction *I) {
          assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
          return I->getOpcode() == E->getAltOpcode();
        },
        Mask);
    CommonCost =
        TTI->getShuffleCost(TargetTransformInfo::SK_Select, FinalVecTy, Mask);
    LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
    return CommonCost + VecCost - ScalarCost;
  }
  default:
    llvm_unreachable("Unknown instruction");
  }
}

bool BoUpSLP::isFullyVectorizableTinyTree() const {
  LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
                    << VectorizableTree.size() << " is fully vectorizable.\n");

  // We only handle trees of heights 1 and 2.
  if (VectorizableTree.size() == 1 &&
      VectorizableTree[0]->State == TreeEntry::Vectorize)
    return true;

  if (VectorizableTree.size() != 2)
    return false;

  // Handle splat and all-constants stores. Also try to vectorize tiny trees
  // whose second node is a gather with fewer scalar operands than the initial
  // tree element (it may be profitable to shuffle the second gather), or
  // whose scalars are extractelements that form a shuffle.
  SmallVector<int> Mask;
  if (VectorizableTree[0]->State == TreeEntry::Vectorize &&
      (allConstant(VectorizableTree[1]->Scalars) ||
       isSplat(VectorizableTree[1]->Scalars) ||
       (VectorizableTree[1]->State == TreeEntry::NeedToGather &&
        VectorizableTree[1]->Scalars.size() <
            VectorizableTree[0]->Scalars.size()) ||
       (VectorizableTree[1]->State == TreeEntry::NeedToGather &&
        VectorizableTree[1]->getOpcode() == Instruction::ExtractElement &&
        isFixedVectorShuffle(VectorizableTree[1]->Scalars, Mask))))
    return true;

  // Gathering cost would be too much for tiny trees.
  if (VectorizableTree[0]->State == TreeEntry::NeedToGather ||
      VectorizableTree[1]->State == TreeEntry::NeedToGather)
    return false;

  return true;
}

static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts,
                                       TargetTransformInfo *TTI,
                                       bool MustMatchOrInst) {
  // Look past the root to find a source value. Arbitrarily follow the
  // path through operand 0 of any 'or'. Also, peek through optional
  // shift-left-by-multiple-of-8-bits.
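  // E.g., starting from the root of
  //   %r = or (shl (zext (load %p)), 8), %lowbyte
  // the loop below peels the 'or' and the byte-multiple 'shl' via operand 0
  // and stops at the zext'ed load.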
  Value *ZextLoad = Root;
  const APInt *ShAmtC;
  bool FoundOr = false;
  while (!isa<ConstantExpr>(ZextLoad) &&
         (match(ZextLoad, m_Or(m_Value(), m_Value())) ||
          (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) &&
           ShAmtC->urem(8) == 0))) {
    auto *BinOp = cast<BinaryOperator>(ZextLoad);
    ZextLoad = BinOp->getOperand(0);
    if (BinOp->getOpcode() == Instruction::Or)
      FoundOr = true;
  }
  // Check if the input is an extended load of the required or/shift
  // expression.
  Value *LoadPtr;
  if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root ||
      !match(ZextLoad, m_ZExt(m_Load(m_Value(LoadPtr)))))
    return false;

  // Require that the total load bit width is a legal integer type.
  // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target.
  // But <16 x i8> --> i128 is not, so the backend probably can't reduce it.
  Type *SrcTy = LoadPtr->getType()->getPointerElementType();
  unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts;
  if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth)))
    return false;

  // Everything matched - assume that we can fold the whole sequence using
  // load combining.
  LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at "
                    << *(cast<Instruction>(Root)) << "\n");

  return true;
}

bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const {
  if (RdxKind != RecurKind::Or)
    return false;

  unsigned NumElts = VectorizableTree[0]->Scalars.size();
  Value *FirstReduced = VectorizableTree[0]->Scalars[0];
  return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI,
                                    /* MatchOr */ false);
}

bool BoUpSLP::isLoadCombineCandidate() const {
  // Peek through a final sequence of stores and check if all operations are
  // likely to be load-combined.
  unsigned NumElts = VectorizableTree[0]->Scalars.size();
  for (Value *Scalar : VectorizableTree[0]->Scalars) {
    Value *X;
    if (!match(Scalar, m_Store(m_Value(X), m_Value())) ||
        !isLoadCombineCandidateImpl(X, NumElts, TTI, /* MatchOr */ true))
      return false;
  }
  return true;
}

bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() const {
  // No need to vectorize inserts of gathered values.
  if (VectorizableTree.size() == 2 &&
      isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) &&
      VectorizableTree[1]->State == TreeEntry::NeedToGather)
    return true;

  // We can vectorize the tree if its size is greater than or equal to the
  // minimum size specified by the MinTreeSize command line option.
  if (VectorizableTree.size() >= MinTreeSize)
    return false;

  // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
  // can vectorize it if we can prove it fully vectorizable.
  if (isFullyVectorizableTinyTree())
    return false;

  assert((!VectorizableTree.empty() || ExternalUses.empty()) &&
         "We shouldn't have any external users");

  // Otherwise, we can't vectorize the tree. It is both tiny and not fully
  // vectorizable.
  return true;
}

InstructionCost BoUpSLP::getSpillCost() const {
  // Walk from the bottom of the tree to the top, tracking which values are
  // live.
  // When we see a call instruction that is not part of our tree, query TTI
  // to see if there is a cost to keeping values live over it (for example,
  // if spills and fills are required).
  unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
  InstructionCost Cost = 0;

  SmallPtrSet<Instruction *, 4> LiveValues;
  Instruction *PrevInst = nullptr;

  // The entries in VectorizableTree are not necessarily ordered by their
  // position in basic blocks. Collect them and order them by dominance so
  // later instructions are guaranteed to be visited first. For instructions
  // in different basic blocks, we only scan to the beginning of the block, so
  // their order does not matter, as long as all instructions in a basic block
  // are grouped together. Using dominance ensures a deterministic order.
  SmallVector<Instruction *, 16> OrderedScalars;
  for (const auto &TEPtr : VectorizableTree) {
    Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]);
    if (!Inst)
      continue;
    OrderedScalars.push_back(Inst);
  }
  llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) {
    auto *NodeA = DT->getNode(A->getParent());
    auto *NodeB = DT->getNode(B->getParent());
    assert(NodeA && "Should only process reachable instructions");
    assert(NodeB && "Should only process reachable instructions");
    assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
           "Different nodes should have different DFS numbers");
    if (NodeA != NodeB)
      return NodeA->getDFSNumIn() < NodeB->getDFSNumIn();
    return B->comesBefore(A);
  });

  for (Instruction *Inst : OrderedScalars) {
    if (!PrevInst) {
      PrevInst = Inst;
      continue;
    }

    // Update LiveValues.
    LiveValues.erase(PrevInst);
    for (auto &J : PrevInst->operands()) {
      if (isa<Instruction>(&*J) && getTreeEntry(&*J))
        LiveValues.insert(cast<Instruction>(&*J));
    }

    LLVM_DEBUG({
      dbgs() << "SLP: #LV: " << LiveValues.size();
      for (auto *X : LiveValues)
        dbgs() << " " << X->getName();
      dbgs() << ", Looking at ";
      Inst->dump();
    });

    // Now find the sequence of instructions between PrevInst and Inst.
    unsigned NumCalls = 0;
    BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
                                 PrevInstIt =
                                     PrevInst->getIterator().getReverse();
    while (InstIt != PrevInstIt) {
      if (PrevInstIt == PrevInst->getParent()->rend()) {
        PrevInstIt = Inst->getParent()->rbegin();
        continue;
      }

      // Debug information does not impact spill cost.
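      // Each real call found here may force the values in LiveValues to be
      // spilled and reloaded around it; the target reports that price via
      // getCostOfKeepingLiveOverCall below.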
      if ((isa<CallInst>(&*PrevInstIt) &&
           !isa<DbgInfoIntrinsic>(&*PrevInstIt)) &&
          &*PrevInstIt != PrevInst)
        NumCalls++;

      ++PrevInstIt;
    }

    if (NumCalls) {
      SmallVector<Type *, 4> V;
      for (auto *II : LiveValues) {
        auto *ScalarTy = II->getType();
        if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy))
          ScalarTy = VectorTy->getElementType();
        V.push_back(FixedVectorType::get(ScalarTy, BundleWidth));
      }
      Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
    }

    PrevInst = Inst;
  }

  return Cost;
}

InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
  InstructionCost Cost = 0;
  LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
                    << VectorizableTree.size() << ".\n");

  unsigned BundleWidth = VectorizableTree[0]->Scalars.size();

  for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
    TreeEntry &TE = *VectorizableTree[I].get();

    InstructionCost C = getEntryCost(&TE, VectorizedVals);
    Cost += C;
    LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
                      << " for bundle that starts with " << *TE.Scalars[0]
                      << ".\n"
                      << "SLP: Current total cost = " << Cost << "\n");
  }

  SmallPtrSet<Value *, 16> ExtractCostCalculated;
  InstructionCost ExtractCost = 0;
  SmallVector<unsigned> VF;
  SmallVector<SmallVector<int>> ShuffleMask;
  SmallVector<Value *> FirstUsers;
  SmallVector<APInt> DemandedElts;
  for (ExternalUser &EU : ExternalUses) {
    // We only add extract cost once for the same scalar.
    if (!ExtractCostCalculated.insert(EU.Scalar).second)
      continue;

    // Uses by ephemeral values are free (because the ephemeral value will be
    // removed prior to code generation, and so the extraction will be
    // removed as well).
    if (EphValues.count(EU.User))
      continue;

    // No extract cost for vector "scalar".
    if (isa<FixedVectorType>(EU.Scalar->getType()))
      continue;

    // The cost for external uses was already counted when we tried to adjust
    // the cost for extractelements, so there is no need to add it again.
    if (isa<ExtractElementInst>(EU.Scalar))
      continue;

    // If the found user is an insertelement, do not calculate extract cost
    // but try to detect it as a final shuffled/identity match.
    if (EU.User && isa<InsertElementInst>(EU.User)) {
      if (auto *FTy = dyn_cast<FixedVectorType>(EU.User->getType())) {
        Optional<int> InsertIdx = getInsertIndex(EU.User, 0);
        if (!InsertIdx || *InsertIdx == UndefMaskElem)
          continue;
        Value *VU = EU.User;
        auto *It = find_if(FirstUsers, [VU](Value *V) {
          // Checks if 2 insertelements are from the same buildvector.
          if (VU->getType() != V->getType())
            return false;
          auto *IE1 = cast<InsertElementInst>(VU);
          auto *IE2 = cast<InsertElementInst>(V);
          // Go through the chain of insertelement instructions trying to
          // find either VU as the original vector for IE2 or V as the
          // original vector for IE1.
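          // E.g., in the chain %v1 = insertelement %v0, ...;
          // %v2 = insertelement %v1, ..., walking operand 0 from %v2
          // eventually reaches %v1, so both belong to one buildvector.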
          do {
            if (IE1 == VU || IE2 == V)
              return true;
            if (IE1)
              IE1 = dyn_cast<InsertElementInst>(IE1->getOperand(0));
            if (IE2)
              IE2 = dyn_cast<InsertElementInst>(IE2->getOperand(0));
          } while (IE1 || IE2);
          return false;
        });
        int VecId = -1;
        if (It == FirstUsers.end()) {
          VF.push_back(FTy->getNumElements());
          ShuffleMask.emplace_back(VF.back(), UndefMaskElem);
          FirstUsers.push_back(EU.User);
          DemandedElts.push_back(APInt::getZero(VF.back()));
          VecId = FirstUsers.size() - 1;
        } else {
          VecId = std::distance(FirstUsers.begin(), It);
        }
        int Idx = *InsertIdx;
        ShuffleMask[VecId][Idx] = EU.Lane;
        DemandedElts[VecId].setBit(Idx);
      }
    }

    // If we plan to rewrite the tree in a smaller type, we will need to sign
    // extend the extracted value back to the original type. Here, we account
    // for the extract and the added cost of the sign extend if needed.
    auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth);
    auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
    if (MinBWs.count(ScalarRoot)) {
      auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
      auto Extend =
          MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
      VecTy = FixedVectorType::get(MinTy, BundleWidth);
      ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
                                                   VecTy, EU.Lane);
    } else {
      ExtractCost +=
          TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
    }
  }

  InstructionCost SpillCost = getSpillCost();
  Cost += SpillCost + ExtractCost;
  for (int I = 0, E = FirstUsers.size(); I < E; ++I) {
    // For the very first element - simple shuffle of the source vector.
    int Limit = ShuffleMask[I].size() * 2;
    if (I == 0 &&
        all_of(ShuffleMask[I], [Limit](int Idx) { return Idx < Limit; }) &&
        !ShuffleVectorInst::isIdentityMask(ShuffleMask[I])) {
      InstructionCost C = TTI->getShuffleCost(
          TTI::SK_PermuteSingleSrc,
          cast<FixedVectorType>(FirstUsers[I]->getType()), ShuffleMask[I]);
      LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
                        << " for final shuffle of insertelement external users "
                        << *VectorizableTree.front()->Scalars.front() << ".\n"
                        << "SLP: Current total cost = " << Cost << "\n");
      Cost += C;
      continue;
    }
    // Other elements - permutation of 2 vectors (the initial one and the
    // next Ith incoming vector).
    unsigned VF = ShuffleMask[I].size();
    for (unsigned Idx = 0; Idx < VF; ++Idx) {
      int &Mask = ShuffleMask[I][Idx];
      Mask = Mask == UndefMaskElem ?
                 Idx : VF + Mask;
    }
    InstructionCost C = TTI->getShuffleCost(
        TTI::SK_PermuteTwoSrc, cast<FixedVectorType>(FirstUsers[I]->getType()),
        ShuffleMask[I]);
    LLVM_DEBUG(
        dbgs()
        << "SLP: Adding cost " << C
        << " for final shuffle of vector node and external insertelement users "
        << *VectorizableTree.front()->Scalars.front() << ".\n"
        << "SLP: Current total cost = " << Cost << "\n");
    Cost += C;
    InstructionCost InsertCost = TTI->getScalarizationOverhead(
        cast<FixedVectorType>(FirstUsers[I]->getType()), DemandedElts[I],
        /*Insert*/ true,
        /*Extract*/ false);
    Cost -= InsertCost;
    LLVM_DEBUG(dbgs() << "SLP: subtracting the cost " << InsertCost
                      << " for insertelements gather.\n"
                      << "SLP: Current total cost = " << Cost << "\n");
  }

#ifndef NDEBUG
  SmallString<256> Str;
  {
    raw_svector_ostream OS(Str);
    OS << "SLP: Spill Cost = " << SpillCost << ".\n"
       << "SLP: Extract Cost = " << ExtractCost << ".\n"
       << "SLP: Total Cost = " << Cost << ".\n";
  }
  LLVM_DEBUG(dbgs() << Str);
  if (ViewSLPTree)
    ViewGraph(this, "SLP" + F->getName(), false, Str);
#endif

  return Cost;
}

Optional<TargetTransformInfo::ShuffleKind>
BoUpSLP::isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
                               SmallVectorImpl<const TreeEntry *> &Entries) {
  // TODO: currently checking only for Scalars in the tree entry, need to
  // count reused elements too for better cost estimation.
  Mask.assign(TE->Scalars.size(), UndefMaskElem);
  Entries.clear();
  // Build a list mapping values to tree entries.
  DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>> ValueToTEs;
  for (const std::unique_ptr<TreeEntry> &EntryPtr : VectorizableTree) {
    if (EntryPtr.get() == TE)
      break;
    if (EntryPtr->State != TreeEntry::NeedToGather)
      continue;
    for (Value *V : EntryPtr->Scalars)
      ValueToTEs.try_emplace(V).first->getSecond().insert(EntryPtr.get());
  }
  // Find all tree entries used by the gathered values. If no common entries
  // are found - not a shuffle.
  // Here we build a set of tree nodes for each gathered value and try to find
  // the intersection between these sets. If we have at least one common tree
  // node for each gathered value - we have just a permutation of a single
  // vector. If we have 2 different sets, we're in a situation where we have a
  // permutation of 2 input vectors.
  SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs;
  DenseMap<Value *, int> UsedValuesEntry;
  for (Value *V : TE->Scalars) {
    if (isa<UndefValue>(V))
      continue;
    // Build a list of tree entries where V is used.
    SmallPtrSet<const TreeEntry *, 4> VToTEs;
    auto It = ValueToTEs.find(V);
    if (It != ValueToTEs.end())
      VToTEs = It->second;
    if (const TreeEntry *VTE = getTreeEntry(V))
      VToTEs.insert(VTE);
    if (VToTEs.empty())
      return None;
    if (UsedTEs.empty()) {
      // The first iteration, just insert the list of nodes to vector.
      UsedTEs.push_back(VToTEs);
    } else {
      // Need to check if there are any previously used tree nodes which use
      // V. If there are no such nodes, consider that we have one more input
      // vector.
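      // E.g., if a previous scalar was available from entries {T1, T2} and
      // the current one from {T2, T3}, the intersection {T2} means both can
      // still be taken from the same source vector.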
      SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs);
      unsigned Idx = 0;
      for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) {
        // Do we have a non-empty intersection of previously listed tree
        // entries and tree entries using current V?
        set_intersect(VToTEs, Set);
        if (!VToTEs.empty()) {
          // Yes, write the new subset and continue analysis for the next
          // scalar.
          Set.swap(VToTEs);
          break;
        }
        VToTEs = SavedVToTEs;
        ++Idx;
      }
      // No non-empty intersection found - need to add a second set of
      // possible source vectors.
      if (Idx == UsedTEs.size()) {
        // If the number of input vectors is greater than 2 - not a
        // permutation, fall back to the regular gather.
        if (UsedTEs.size() == 2)
          return None;
        UsedTEs.push_back(SavedVToTEs);
        Idx = UsedTEs.size() - 1;
      }
      UsedValuesEntry.try_emplace(V, Idx);
    }
  }

  unsigned VF = 0;
  if (UsedTEs.size() == 1) {
    // First, try to find a perfect match in another gather node.
    auto It = find_if(UsedTEs.front(), [TE](const TreeEntry *EntryPtr) {
      return EntryPtr->isSame(TE->Scalars);
    });
    if (It != UsedTEs.front().end()) {
      Entries.push_back(*It);
      std::iota(Mask.begin(), Mask.end(), 0);
      return TargetTransformInfo::SK_PermuteSingleSrc;
    }
    // No perfect match, just shuffle, so choose the first tree node.
    Entries.push_back(*UsedTEs.front().begin());
  } else {
    // Try to find nodes with the same vector factor.
    assert(UsedTEs.size() == 2 && "Expected at most 2 permuted entries.");
    // FIXME: Shall be replaced by GetVF function once non-power-2 patch is
    // landed.
    auto &&GetVF = [](const TreeEntry *TE) {
      if (!TE->ReuseShuffleIndices.empty())
        return TE->ReuseShuffleIndices.size();
      return TE->Scalars.size();
    };
    DenseMap<int, const TreeEntry *> VFToTE;
    for (const TreeEntry *TE : UsedTEs.front())
      VFToTE.try_emplace(GetVF(TE), TE);
    for (const TreeEntry *TE : UsedTEs.back()) {
      auto It = VFToTE.find(GetVF(TE));
      if (It != VFToTE.end()) {
        VF = It->first;
        Entries.push_back(It->second);
        Entries.push_back(TE);
        break;
      }
    }
    // No 2 source vectors with the same vector factor - give up and do a
    // regular gather.
    if (Entries.empty())
      return None;
  }

  // Build a shuffle mask for better cost estimation and vector emission.
  for (int I = 0, E = TE->Scalars.size(); I < E; ++I) {
    Value *V = TE->Scalars[I];
    if (isa<UndefValue>(V))
      continue;
    unsigned Idx = UsedValuesEntry.lookup(V);
    const TreeEntry *VTE = Entries[Idx];
    int FoundLane = VTE->findLaneForValue(V);
    Mask[I] = Idx * VF + FoundLane;
    // Extra check required by the isSingleSourceMaskImpl function (called by
    // ShuffleVectorInst::isSingleSourceMask).
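    // Lanes are encoded as Idx * VF + FoundLane, so lanes taken from the
    // second source vector fall into [VF, 2 * VF).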
    if (Mask[I] >= 2 * E)
      return None;
  }
  switch (Entries.size()) {
  case 1:
    return TargetTransformInfo::SK_PermuteSingleSrc;
  case 2:
    return TargetTransformInfo::SK_PermuteTwoSrc;
  default:
    break;
  }
  return None;
}

InstructionCost
BoUpSLP::getGatherCost(FixedVectorType *Ty,
                       const DenseSet<unsigned> &ShuffledIndices) const {
  unsigned NumElts = Ty->getNumElements();
  APInt DemandedElts = APInt::getZero(NumElts);
  for (unsigned I = 0; I < NumElts; ++I)
    if (!ShuffledIndices.count(I))
      DemandedElts.setBit(I);
  InstructionCost Cost =
      TTI->getScalarizationOverhead(Ty, DemandedElts, /*Insert*/ true,
                                    /*Extract*/ false);
  if (!ShuffledIndices.empty())
    Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty);
  return Cost;
}

InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  // Check if the same elements are inserted several times and count them as
  // shuffle candidates.
  DenseSet<unsigned> ShuffledElements;
  DenseSet<Value *> UniqueElements;
  // Iterate in reverse order so that the insertelements with the higher cost
  // are considered first.
  for (unsigned I = VL.size(); I > 0; --I) {
    unsigned Idx = I - 1;
    if (isConstant(VL[Idx]))
      continue;
    if (!UniqueElements.insert(VL[Idx]).second)
      ShuffledElements.insert(Idx);
  }
  return getGatherCost(VecTy, ShuffledElements);
}

// Perform operand reordering on the instructions in VL and return the
// reordered operands in Left and Right.
void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                             SmallVectorImpl<Value *> &Left,
                                             SmallVectorImpl<Value *> &Right,
                                             const DataLayout &DL,
                                             ScalarEvolution &SE,
                                             const BoUpSLP &R) {
  if (VL.empty())
    return;
  VLOperands Ops(VL, DL, SE, R);
  // Reorder the operands in place.
  Ops.reorder();
  Left = Ops.getVL(0);
  Right = Ops.getVL(1);
}

void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) {
  // Get the basic block this bundle is in. All instructions in the bundle
  // should be in this block.
  auto *Front = E->getMainOp();
  auto *BB = Front->getParent();
  assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool {
    auto *I = cast<Instruction>(V);
    return !E->isOpcodeOrAlt(I) || I->getParent() == BB;
  }));

  // The last instruction in the bundle in program order.
  Instruction *LastInst = nullptr;

  // Find the last instruction. The common case should be that BB has been
  // scheduled, and the last instruction is VL.back(). So we start with
  // VL.back() and iterate over schedule data until we reach the end of the
  // bundle. The end of the bundle is marked by null ScheduleData.
  if (BlocksSchedules.count(BB)) {
    auto *Bundle =
        BlocksSchedules[BB]->getScheduleData(E->isOneOf(E->Scalars.back()));
    if (Bundle && Bundle->isPartOfBundle())
      for (; Bundle; Bundle = Bundle->NextInBundle)
        if (Bundle->OpValue == Bundle->Inst)
          LastInst = Bundle->Inst;
  }

  // LastInst can still be null at this point if there's either not an entry
  // for BB in BlocksSchedules or there's no ScheduleData available for
  // VL.back(). This can be the case if buildTree_rec aborts for various
  // reasons (e.g., the maximum recursion depth is reached, the maximum region
  // size is reached, etc.). ScheduleData is initialized in the scheduling
  // "dry-run".
  //
  // If this happens, we can still find the last instruction by brute force. We
  // iterate forwards from Front (inclusive) until we either see all
  // instructions in the bundle or reach the end of the block. If Front is the
  // last instruction in program order, LastInst will be set to Front, and we
  // will visit all the remaining instructions in the block.
  //
  // One of the reasons we exit early from buildTree_rec is to place an upper
  // bound on compile-time. Thus, taking an additional compile-time hit here is
  // not ideal. However, this should be exceedingly rare since it requires that
  // we both exit early from buildTree_rec and that the bundle be out-of-order
  // (causing us to iterate all the way to the end of the block).
  if (!LastInst) {
    SmallPtrSet<Value *, 16> Bundle(E->Scalars.begin(), E->Scalars.end());
    for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
      if (Bundle.erase(&I) && E->isOpcodeOrAlt(&I))
        LastInst = &I;
      if (Bundle.empty())
        break;
    }
  }
  assert(LastInst && "Failed to find last instruction in bundle");

  // Set the insertion point after the last instruction in the bundle. Set the
  // debug location to Front.
  Builder.SetInsertPoint(BB, ++LastInst->getIterator());
  Builder.SetCurrentDebugLocation(Front->getDebugLoc());
}

Value *BoUpSLP::gather(ArrayRef<Value *> VL) {
  // List of instructions/lanes from the current block and/or the blocks which
  // are part of the current loop. These instructions will be inserted at the
  // end to make it possible to optimize loops and hoist invariant
  // instructions out of the loop's body with better chances for success.
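  // The emission order below is: constants first, then the other
  // non-postponed values, and finally the postponed values, so the
  // loop-invariant prefix of the buildvector sequence stays hoistable.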
  SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts;
  SmallSet<int, 4> PostponedIndices;
  Loop *L = LI->getLoopFor(Builder.GetInsertBlock());
  auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) {
    SmallPtrSet<BasicBlock *, 4> Visited;
    while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second)
      InsertBB = InsertBB->getSinglePredecessor();
    return InsertBB && InsertBB == InstBB;
  };
  for (int I = 0, E = VL.size(); I < E; ++I) {
    if (auto *Inst = dyn_cast<Instruction>(VL[I]))
      if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) ||
           getTreeEntry(Inst) || (L && (L->contains(Inst)))) &&
          PostponedIndices.insert(I).second)
        PostponedInsts.emplace_back(Inst, I);
  }

  auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) {
    Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos));
    auto *InsElt = dyn_cast<InsertElementInst>(Vec);
    if (!InsElt)
      return Vec;
    GatherSeq.insert(InsElt);
    CSEBlocks.insert(InsElt->getParent());
    // Add to our 'need-to-extract' list.
    if (TreeEntry *Entry = getTreeEntry(V)) {
      // Find which lane we need to extract.
      unsigned FoundLane = Entry->findLaneForValue(V);
      ExternalUses.emplace_back(V, InsElt, FoundLane);
    }
    return Vec;
  };
  Value *Val0 =
      isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0];
  FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size());
  Value *Vec = PoisonValue::get(VecTy);
  SmallVector<int> NonConsts;
  // Insert the constant values first.
  for (int I = 0, E = VL.size(); I < E; ++I) {
    if (PostponedIndices.contains(I))
      continue;
    if (!isConstant(VL[I])) {
      NonConsts.push_back(I);
      continue;
    }
    Vec = CreateInsertElement(Vec, VL[I], I);
  }
  // Insert the non-constant values.
  for (int I : NonConsts)
    Vec = CreateInsertElement(Vec, VL[I], I);
  // Append the instructions which are, or may be, part of the loop at the
  // end to make it possible to hoist non-loop-based instructions.
  for (const std::pair<Value *, unsigned> &Pair : PostponedInsts)
    Vec = CreateInsertElement(Vec, Pair.first, Pair.second);

  return Vec;
}

namespace {
/// Merges shuffle masks and emits the final shuffle instruction, if required.
class ShuffleInstructionBuilder {
  IRBuilderBase &Builder;
  const unsigned VF = 0;
  bool IsFinalized = false;
  SmallVector<int, 4> Mask;

public:
  ShuffleInstructionBuilder(IRBuilderBase &Builder, unsigned VF)
      : Builder(Builder), VF(VF) {}

  /// Adds a mask, inverting it before applying.
  void addInversedMask(ArrayRef<unsigned> SubMask) {
    if (SubMask.empty())
      return;
    SmallVector<int, 4> NewMask;
    inversePermutation(SubMask, NewMask);
    addMask(NewMask);
  }

  /// Adds masks, merging them into a single one.
  void addMask(ArrayRef<unsigned> SubMask) {
    SmallVector<int, 4> NewMask(SubMask.begin(), SubMask.end());
    addMask(NewMask);
  }

  void addMask(ArrayRef<int> SubMask) { ::addMask(Mask, SubMask); }

  Value *finalize(Value *V) {
    IsFinalized = true;
    unsigned ValueVF = cast<FixedVectorType>(V->getType())->getNumElements();
    if (VF == ValueVF && Mask.empty())
      return V;
    SmallVector<int, 4> NormalizedMask(VF, UndefMaskElem);
    std::iota(NormalizedMask.begin(), NormalizedMask.end(), 0);
    addMask(NormalizedMask);

    if (VF == ValueVF && ShuffleVectorInst::isIdentityMask(Mask))
      return V;
    return Builder.CreateShuffleVector(V, Mask, "shuffle");
  }

  ~ShuffleInstructionBuilder() {
    assert((IsFinalized || Mask.empty()) &&
           "Shuffle construction must be finalized.");
  }
};
} // namespace

Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  unsigned VF = VL.size();
  InstructionsState S = getSameOpcode(VL);
  if (S.getOpcode()) {
    if (TreeEntry *E = getTreeEntry(S.OpValue))
      if (E->isSame(VL)) {
        Value *V = vectorizeTree(E);
        if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) {
          if (!E->ReuseShuffleIndices.empty()) {
            // Reshuffle to get only unique values.
            // If some of the scalars are duplicated in the vectorization tree
            // entry, we do not vectorize them but instead generate a mask for
            // the reuses. But if there are several users of the same entry,
            // they may have different vectorization factors. This is
            // especially important for PHI nodes. In this case, we need to
            // adapt the resulting instruction for the user vectorization
            // factor and have to reshuffle it again to take only the unique
            // elements of the vector. Without this code the function would
            // incorrectly return a reduced vector instruction with the same
            // elements, not with the unique ones.

            // block:
            // %phi = phi <2 x > { .., %entry} {%shuffle, %block}
            // %2 = shuffle <2 x > %phi, %poison, <4 x > <0, 0, 1, 1>
            // ... (use %2)
            // %shuffle = shuffle <2 x> %2, poison, <2 x> {0, 2}
            // br %block
            SmallVector<int> UniqueIdxs;
            SmallSet<int, 4> UsedIdxs;
            int Pos = 0;
            int Sz = VL.size();
            for (int Idx : E->ReuseShuffleIndices) {
              if (Idx != Sz && UsedIdxs.insert(Idx).second)
                UniqueIdxs.emplace_back(Pos);
              ++Pos;
            }
            assert(VF >= UsedIdxs.size() &&
                   "Expected vectorization factor not smaller than the "
                   "number of unique values.");
            UniqueIdxs.append(VF - UsedIdxs.size(), UndefMaskElem);
            V = Builder.CreateShuffleVector(V, UniqueIdxs, "shrink.shuffle");
          } else {
            assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() &&
                   "Expected vectorization factor less "
                   "than original vector size.");
            SmallVector<int> UniformMask(VF, 0);
            std::iota(UniformMask.begin(), UniformMask.end(), 0);
            V = Builder.CreateShuffleVector(V, UniformMask, "shrink.shuffle");
          }
        }
        return V;
      }
  }

  // Check that every instruction appears once in this bundle.
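  // E.g., VL = {%a, %b, %a, undef} yields UniqueValues = {%a, %b, poison,
  // poison} with ReuseShuffleIndicies = <0, 1, 0, undef>.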
  SmallVector<int> ReuseShuffleIndicies;
  SmallVector<Value *> UniqueValues;
  if (VL.size() > 2) {
    DenseMap<Value *, unsigned> UniquePositions;
    unsigned NumValues =
        std::distance(VL.begin(), find_if(reverse(VL), [](Value *V) {
                                    return !isa<UndefValue>(V);
                                  }).base());
    VF = std::max<unsigned>(VF, PowerOf2Ceil(NumValues));
    int UniqueVals = 0;
    for (Value *V : VL.drop_back(VL.size() - VF)) {
      if (isa<UndefValue>(V)) {
        ReuseShuffleIndicies.emplace_back(UndefMaskElem);
        continue;
      }
      if (isConstant(V)) {
        ReuseShuffleIndicies.emplace_back(UniqueValues.size());
        UniqueValues.emplace_back(V);
        continue;
      }
      auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
      ReuseShuffleIndicies.emplace_back(Res.first->second);
      if (Res.second) {
        UniqueValues.emplace_back(V);
        ++UniqueVals;
      }
    }
    if (UniqueVals == 1 && UniqueValues.size() == 1) {
      // Emit pure splat vector.
      ReuseShuffleIndicies.append(VF - ReuseShuffleIndicies.size(),
                                  UndefMaskElem);
    } else if (UniqueValues.size() >= VF - 1 || UniqueValues.size() <= 1) {
      ReuseShuffleIndicies.clear();
      UniqueValues.clear();
      UniqueValues.append(VL.begin(), std::next(VL.begin(), NumValues));
    }
    UniqueValues.append(VF - UniqueValues.size(),
                        PoisonValue::get(VL[0]->getType()));
    VL = UniqueValues;
  }

  ShuffleInstructionBuilder ShuffleBuilder(Builder, VF);
  Value *Vec = gather(VL);
  if (!ReuseShuffleIndicies.empty()) {
    ShuffleBuilder.addMask(ReuseShuffleIndicies);
    Vec = ShuffleBuilder.finalize(Vec);
    if (auto *I = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(I);
      CSEBlocks.insert(I->getParent());
    }
  }
  return Vec;
}

Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
  unsigned VF = E->Scalars.size();
  if (NeedToShuffleReuses)
    VF = E->ReuseShuffleIndices.size();
  ShuffleInstructionBuilder ShuffleBuilder(Builder, VF);
  if (E->State == TreeEntry::NeedToGather) {
    setInsertPointAfterBundle(E);
    Value *Vec;
    SmallVector<int> Mask;
    SmallVector<const TreeEntry *> Entries;
    Optional<TargetTransformInfo::ShuffleKind> Shuffle =
        isGatherShuffledEntry(E, Mask, Entries);
    if (Shuffle.hasValue()) {
      assert((Entries.size() == 1 || Entries.size() == 2) &&
             "Expected shuffle of 1 or 2 entries.");
      Vec = Builder.CreateShuffleVector(Entries.front()->VectorizedValue,
                                        Entries.back()->VectorizedValue, Mask);
    } else {
      Vec = gather(E->Scalars);
    }
    if (NeedToShuffleReuses) {
      ShuffleBuilder.addMask(E->ReuseShuffleIndices);
      Vec = ShuffleBuilder.finalize(Vec);
      if (auto *I = dyn_cast<Instruction>(Vec)) {
        GatherSeq.insert(I);
        CSEBlocks.insert(I->getParent());
      }
    }
    E->VectorizedValue = Vec;
    return Vec;
  }

  assert((E->State == TreeEntry::Vectorize ||
          E->State == TreeEntry::ScatterVectorize) &&
         "Unhandled state");
  unsigned ShuffleOrOp =
      E->isAltShuffle() ?
                          (unsigned)Instruction::ShuffleVector : E->getOpcode();
  Instruction *VL0 = E->getMainOp();
  Type *ScalarTy = VL0->getType();
  if (auto *Store = dyn_cast<StoreInst>(VL0))
    ScalarTy = Store->getValueOperand()->getType();
  else if (auto *IE = dyn_cast<InsertElementInst>(VL0))
    ScalarTy = IE->getOperand(1)->getType();
  auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size());
  switch (ShuffleOrOp) {
  case Instruction::PHI: {
    assert(
        (E->ReorderIndices.empty() || E != VectorizableTree.front().get()) &&
        "PHI reordering is free.");
    auto *PH = cast<PHINode>(VL0);
    Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
    Builder.SetCurrentDebugLocation(PH->getDebugLoc());
    PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
    Value *V = NewPhi;
    ShuffleBuilder.addInversedMask(E->ReorderIndices);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);

    E->VectorizedValue = V;

    // PHINodes may have multiple entries from the same block. We want to
    // visit every block once.
    SmallPtrSet<BasicBlock *, 4> VisitedBBs;

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      BasicBlock *IBB = PH->getIncomingBlock(i);

      if (!VisitedBBs.insert(IBB).second) {
        NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
        continue;
      }

      Builder.SetInsertPoint(IBB->getTerminator());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      Value *Vec = vectorizeTree(E->getOperand(i));
      NewPhi->addIncoming(Vec, IBB);
    }

    assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
           "Invalid number of incoming values");
    return V;
  }

  case Instruction::ExtractElement: {
    Value *V = E->getSingleOperand(0);
    Builder.SetInsertPoint(VL0);
    ShuffleBuilder.addInversedMask(E->ReorderIndices);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::ExtractValue: {
    auto *LI = cast<LoadInst>(E->getSingleOperand(0));
    Builder.SetInsertPoint(LI);
    auto *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
    Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
    LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign());
    Value *NewV = propagateMetadata(V, E->Scalars);
    ShuffleBuilder.addInversedMask(E->ReorderIndices);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    NewV = ShuffleBuilder.finalize(NewV);
    E->VectorizedValue = NewV;
    return NewV;
  }
  case Instruction::InsertElement: {
    assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique");
    Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back()));
    Value *V = vectorizeTree(E->getOperand(1));

    // Create InsertVector shuffle if necessary.
    auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) {
      return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0));
    }));
    const unsigned NumElts =
        cast<FixedVectorType>(FirstInsert->getType())->getNumElements();
    const unsigned NumScalars = E->Scalars.size();

    unsigned Offset = *getInsertIndex(VL0, 0);
    assert(Offset < NumElts && "Failed to find vector index offset");

    // Create a shuffle to resize the vector.
    SmallVector<int> Mask;
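    // Same mask construction as in the cost model above: identity over the
    // first NumScalars lanes unless the bundle was reordered.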
    if (!E->ReorderIndices.empty()) {
      inversePermutation(E->ReorderIndices, Mask);
      Mask.append(NumElts - NumScalars, UndefMaskElem);
    } else {
      Mask.assign(NumElts, UndefMaskElem);
      std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0);
    }
    // Create InsertVector shuffle if necessary.
    bool IsIdentity = true;
    SmallVector<int> PrevMask(NumElts, UndefMaskElem);
    Mask.swap(PrevMask);
    for (unsigned I = 0; I < NumScalars; ++I) {
      Value *Scalar = E->Scalars[PrevMask[I]];
      Optional<int> InsertIdx = getInsertIndex(Scalar, 0);
      if (!InsertIdx || *InsertIdx == UndefMaskElem)
        continue;
      IsIdentity &= *InsertIdx - Offset == I;
      Mask[*InsertIdx - Offset] = I;
    }
    if (!IsIdentity || NumElts != NumScalars)
      V = Builder.CreateShuffleVector(V, Mask);

    if ((!IsIdentity || Offset != 0 ||
         !isa<UndefValue>(FirstInsert->getOperand(0))) &&
        NumElts != NumScalars) {
      SmallVector<int> InsertMask(NumElts);
      std::iota(InsertMask.begin(), InsertMask.end(), 0);
      for (unsigned I = 0; I < NumElts; I++) {
        if (Mask[I] != UndefMaskElem)
          InsertMask[Offset + I] = NumElts + I;
      }

      V = Builder.CreateShuffleVector(
          FirstInsert->getOperand(0), V, InsertMask,
          cast<Instruction>(E->Scalars.back())->getName());
    }

    ++NumVectorInstructions;
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    setInsertPointAfterBundle(E);

    Value *InVec = vectorizeTree(E->getOperand(0));

    if (E->VectorizedValue) {
      LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
      return E->VectorizedValue;
    }

    auto *CI = cast<CastInst>(VL0);
    Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
    ShuffleBuilder.addInversedMask(E->ReorderIndices);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);

    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::FCmp:
  case Instruction::ICmp: {
    setInsertPointAfterBundle(E);

    Value *L = vectorizeTree(E->getOperand(0));
    Value *R = vectorizeTree(E->getOperand(1));

    if (E->VectorizedValue) {
      LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
      return E->VectorizedValue;
    }

    CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
    Value *V = Builder.CreateCmp(P0, L, R);
    propagateIRFlags(V, E->Scalars, VL0);
    ShuffleBuilder.addInversedMask(E->ReorderIndices);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);

    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::Select: {
    setInsertPointAfterBundle(E);

    Value *Cond = vectorizeTree(E->getOperand(0));
    Value *True = vectorizeTree(E->getOperand(1));
    Value *False = vectorizeTree(E->getOperand(2));

    if (E->VectorizedValue) {
      LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
      return E->VectorizedValue;
    }

    Value *V = Builder.CreateSelect(Cond,
                                    True, False);
    ShuffleBuilder.addInversedMask(E->ReorderIndices);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);

    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::FNeg: {
    setInsertPointAfterBundle(E);

    Value *Op = vectorizeTree(E->getOperand(0));

    if (E->VectorizedValue) {
      LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
      return E->VectorizedValue;
    }

    Value *V = Builder.CreateUnOp(
        static_cast<Instruction::UnaryOps>(E->getOpcode()), Op);
    propagateIRFlags(V, E->Scalars, VL0);
    if (auto *I = dyn_cast<Instruction>(V))
      V = propagateMetadata(I, E->Scalars);

    ShuffleBuilder.addInversedMask(E->ReorderIndices);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);

    E->VectorizedValue = V;
    ++NumVectorInstructions;

    return V;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    setInsertPointAfterBundle(E);

    Value *LHS = vectorizeTree(E->getOperand(0));
    Value *RHS = vectorizeTree(E->getOperand(1));

    if (E->VectorizedValue) {
      LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
      return E->VectorizedValue;
    }

    Value *V = Builder.CreateBinOp(
        static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
    propagateIRFlags(V, E->Scalars, VL0);
    if (auto *I = dyn_cast<Instruction>(V))
      V = propagateMetadata(I, E->Scalars);

    ShuffleBuilder.addInversedMask(E->ReorderIndices);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);

    E->VectorizedValue = V;
    ++NumVectorInstructions;

    return V;
  }
  case Instruction::Load: {
    // Loads are inserted at the head of the tree because we don't want to
    // sink them all the way down past store instructions.
    setInsertPointAfterBundle(E);

    LoadInst *LI = cast<LoadInst>(VL0);
    Instruction *NewLI;
    unsigned AS = LI->getPointerAddressSpace();
    Value *PO = LI->getPointerOperand();
    if (E->State == TreeEntry::Vectorize) {
      Value *VecPtr = Builder.CreateBitCast(PO, VecTy->getPointerTo(AS));

      // The pointer operand uses an in-tree scalar, so we add the new BitCast
      // to the ExternalUses list to make sure that an extract will be
      // generated in the future.
      if (TreeEntry *Entry = getTreeEntry(PO)) {
        // Find which lane we need to extract.
        unsigned FoundLane = Entry->findLaneForValue(PO);
        ExternalUses.emplace_back(PO, cast<User>(VecPtr), FoundLane);
      }

      NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign());
    } else {
      assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state");
      Value *VecPtr = vectorizeTree(E->getOperand(0));
      // Use the minimum alignment of the gathered loads.
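      // commonAlignment picks the smaller of the two alignments, e.g.
      // commonAlignment(Align(8), Align(4)) == Align(4), which is the only
      // safe choice for the masked gather.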
      Align CommonAlignment = LI->getAlign();
      for (Value *V : E->Scalars)
        CommonAlignment =
            commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign());
      NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment);
    }
    Value *V = propagateMetadata(NewLI, E->Scalars);

    ShuffleBuilder.addInversedMask(E->ReorderIndices);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);
    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::Store: {
    auto *SI = cast<StoreInst>(VL0);
    unsigned AS = SI->getPointerAddressSpace();

    setInsertPointAfterBundle(E);

    Value *VecValue = vectorizeTree(E->getOperand(0));
    ShuffleBuilder.addMask(E->ReorderIndices);
    VecValue = ShuffleBuilder.finalize(VecValue);

    Value *ScalarPtr = SI->getPointerOperand();
    Value *VecPtr = Builder.CreateBitCast(
        ScalarPtr, VecValue->getType()->getPointerTo(AS));
    StoreInst *ST =
        Builder.CreateAlignedStore(VecValue, VecPtr, SI->getAlign());

    // The pointer operand uses an in-tree scalar, so add the new BitCast to
    // ExternalUses to make sure that an extract will be generated in the
    // future.
    if (TreeEntry *Entry = getTreeEntry(ScalarPtr)) {
      // Find which lane we need to extract.
      unsigned FoundLane = Entry->findLaneForValue(ScalarPtr);
      ExternalUses.push_back(
          ExternalUser(ScalarPtr, cast<User>(VecPtr), FoundLane));
    }

    Value *V = propagateMetadata(ST, E->Scalars);

    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::GetElementPtr: {
    setInsertPointAfterBundle(E);

    Value *Op0 = vectorizeTree(E->getOperand(0));

    std::vector<Value *> OpVecs;
    for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
         ++j) {
      ValueList &VL = E->getOperand(j);
      // Need to cast all elements to the same type before vectorization to
      // avoid a crash.
      Type *VL0Ty = VL0->getOperand(j)->getType();
      Type *Ty = llvm::all_of(
                     VL, [VL0Ty](Value *V) { return VL0Ty == V->getType(); })
                     ? VL0Ty
                     : DL->getIndexType(cast<GetElementPtrInst>(VL0)
                                            ->getPointerOperandType()
                                            ->getScalarType());
      for (Value *&V : VL) {
        auto *CI = cast<ConstantInt>(V);
        V = ConstantExpr::getIntegerCast(CI, Ty,
                                         CI->getValue().isSignBitSet());
      }
      Value *OpVec = vectorizeTree(VL);
      OpVecs.push_back(OpVec);
    }

    Value *V = Builder.CreateGEP(
        cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs);
    if (Instruction *I = dyn_cast<Instruction>(V))
      V = propagateMetadata(I, E->Scalars);

    ShuffleBuilder.addInversedMask(E->ReorderIndices);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);

    E->VectorizedValue = V;
    ++NumVectorInstructions;

    return V;
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    setInsertPointAfterBundle(E);

    Intrinsic::ID IID = Intrinsic::not_intrinsic;
    if (Function *FI = CI->getCalledFunction())
      IID = FI->getIntrinsicID();

    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

    auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
    bool UseIntrinsic = ID != Intrinsic::not_intrinsic &&
                        VecCallCosts.first <= VecCallCosts.second;

    Value *ScalarArg = nullptr;
    std::vector<Value *> OpVecs;
    SmallVector<Type *, 2> TysForDecl = {
        FixedVectorType::get(CI->getType(), E->Scalars.size())};
    for (int j = 0, e = CI->arg_size(); j < e; ++j) {
      ValueList OpVL;
      // Some intrinsics have scalar arguments. Such arguments should not be
      // vectorized.
      if (UseIntrinsic && hasVectorInstrinsicScalarOpd(IID, j)) {
        CallInst *CEI = cast<CallInst>(VL0);
        ScalarArg = CEI->getArgOperand(j);
        OpVecs.push_back(CEI->getArgOperand(j));
        if (hasVectorInstrinsicOverloadedScalarOpd(IID, j))
          TysForDecl.push_back(ScalarArg->getType());
        continue;
      }

      Value *OpVec = vectorizeTree(E->getOperand(j));
      LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
      OpVecs.push_back(OpVec);
    }

    Function *CF;
    if (!UseIntrinsic) {
      VFShape Shape =
          VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
                                VecTy->getNumElements())),
                       false /*HasGlobalPred*/);
      CF = VFDatabase(*CI).getVectorizedFunction(Shape);
    } else {
      CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl);
    }

    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);

    // The scalar argument uses an in-tree scalar, so we add the new
    // vectorized call to the ExternalUses list to make sure that an extract
    // will be generated in the future.
    if (ScalarArg) {
      if (TreeEntry *Entry = getTreeEntry(ScalarArg)) {
        // Find which lane we need to extract.
        unsigned FoundLane = Entry->findLaneForValue(ScalarArg);
        ExternalUses.push_back(
            ExternalUser(ScalarArg, cast<User>(V), FoundLane));
      }
    }

    propagateIRFlags(V, E->Scalars, VL0);
    ShuffleBuilder.addInversedMask(E->ReorderIndices);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);

    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::ShuffleVector: {
    assert(E->isAltShuffle() &&
           ((Instruction::isBinaryOp(E->getOpcode()) &&
             Instruction::isBinaryOp(E->getAltOpcode())) ||
            (Instruction::isCast(E->getOpcode()) &&
             Instruction::isCast(E->getAltOpcode()))) &&
           "Invalid Shuffle Vector Operand");

    Value *LHS = nullptr, *RHS = nullptr;
    if (Instruction::isBinaryOp(E->getOpcode())) {
      setInsertPointAfterBundle(E);
      LHS = vectorizeTree(E->getOperand(0));
      RHS = vectorizeTree(E->getOperand(1));
    } else {
      setInsertPointAfterBundle(E);
      LHS = vectorizeTree(E->getOperand(0));
    }

    if (E->VectorizedValue) {
      LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
      return E->VectorizedValue;
    }

    Value *V0, *V1;
    if (Instruction::isBinaryOp(E->getOpcode())) {
      V0 = Builder.CreateBinOp(
          static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
      V1 = Builder.CreateBinOp(
          static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS);
    } else {
      V0 = Builder.CreateCast(
          static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy);
      V1 = Builder.CreateCast(
          static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy);
    }

    // Create a shuffle to take the alternate operations from the vector.
    // Also, gather up the main and alt scalar ops to propagate IR flags to
    // each vector operation.
    ValueList OpScalars, AltScalars;
    SmallVector<int> Mask;
    buildSuffleEntryMask(
        E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices,
        [E](Instruction *I) {
          assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
          return I->getOpcode() == E->getAltOpcode();
        },
        Mask, &OpScalars, &AltScalars);

    propagateIRFlags(V0, OpScalars);
    propagateIRFlags(V1, AltScalars);

    Value *V = Builder.CreateShuffleVector(V0, V1, Mask);
    if (Instruction *I = dyn_cast<Instruction>(V))
      V = propagateMetadata(I, E->Scalars);
    V = ShuffleBuilder.finalize(V);

    E->VectorizedValue = V;
    ++NumVectorInstructions;

    return V;
  }
  default:
    llvm_unreachable("unknown inst");
  }
  return nullptr;
}

Value *BoUpSLP::vectorizeTree() {
  ExtraValueToDebugLocsMap ExternallyUsedValues;
  return vectorizeTree(ExternallyUsedValues);
}

Value *
BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
  // All blocks must be scheduled before any instructions are inserted.
  for (auto &BSIter : BlocksSchedules) {
    scheduleBlock(BSIter.second.get());
  }

  Builder.SetInsertPoint(&F->getEntryBlock().front());
  auto *VectorRoot = vectorizeTree(VectorizableTree[0].get());

  // If the vectorized tree can be rewritten in a smaller type, we truncate the
  // vectorized root. InstCombine will then rewrite the entire expression. We
  // sign-extend the extracted values below.
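  // A hedged sketch (hypothetical widths): a tree computed in i32 whose
  // values were proven to need only 8 bits gets its root <4 x i32> value
  // truncated to <4 x i8> here; each externally used lane is then extended
  // back to i32 (sext or zext, per MinBWs) at its extract site.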
  auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
  if (MinBWs.count(ScalarRoot)) {
    if (auto *I = dyn_cast<Instruction>(VectorRoot)) {
      // If the current instruction is a PHI, insert the truncation after the
      // last PHI node in the block.
      if (isa<PHINode>(I))
        Builder.SetInsertPoint(&*I->getParent()->getFirstInsertionPt());
      else
        Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
    }
    auto BundleWidth = VectorizableTree[0]->Scalars.size();
    auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
    auto *VecTy = FixedVectorType::get(MinTy, BundleWidth);
    auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
    VectorizableTree[0]->VectorizedValue = Trunc;
  }

  LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
                    << " values.\n");

  // Extract all of the elements with the external uses.
  for (const auto &ExternalUse : ExternalUses) {
    Value *Scalar = ExternalUse.Scalar;
    llvm::User *User = ExternalUse.User;

    // Skip users that we already RAUW. This happens when one instruction
    // has multiple uses of the same value.
    if (User && !is_contained(Scalar->users(), User))
      continue;
    TreeEntry *E = getTreeEntry(Scalar);
    assert(E && "Invalid scalar");
    assert(E->State != TreeEntry::NeedToGather &&
           "Extracting from a gather list");

    Value *Vec = E->VectorizedValue;
    assert(Vec && "Can't find vectorizable value");

    Value *Lane = Builder.getInt32(ExternalUse.Lane);
    auto ExtractAndExtendIfNeeded = [&](Value *Vec) {
      if (Scalar->getType() != Vec->getType()) {
        Value *Ex;
        // "Reuse" the existing extract to improve final codegen.
        if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) {
          Ex = Builder.CreateExtractElement(ES->getOperand(0),
                                            ES->getOperand(1));
        } else {
          Ex = Builder.CreateExtractElement(Vec, Lane);
        }
        // If necessary, sign-extend or zero-extend ScalarRoot
        // to the larger type.
        if (!MinBWs.count(ScalarRoot))
          return Ex;
        if (MinBWs[ScalarRoot].second)
          return Builder.CreateSExt(Ex, Scalar->getType());
        return Builder.CreateZExt(Ex, Scalar->getType());
      }
      assert(isa<FixedVectorType>(Scalar->getType()) &&
             isa<InsertElementInst>(Scalar) &&
             "In-tree scalar of vector type is not insertelement?");
      return Vec;
    };
    // If User == nullptr, the Scalar is used as an extra arg. Generate an
    // ExtractElement instruction and update the record for this scalar in
    // ExternallyUsedValues.
    if (!User) {
      assert(ExternallyUsedValues.count(Scalar) &&
             "Scalar with nullptr as an external user must be registered in "
             "ExternallyUsedValues map");
      if (auto *VecI = dyn_cast<Instruction>(Vec)) {
        Builder.SetInsertPoint(VecI->getParent(),
                               std::next(VecI->getIterator()));
      } else {
        Builder.SetInsertPoint(&F->getEntryBlock().front());
      }
      Value *NewInst = ExtractAndExtendIfNeeded(Vec);
      CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
      auto &NewInstLocs = ExternallyUsedValues[NewInst];
      auto It = ExternallyUsedValues.find(Scalar);
      assert(It != ExternallyUsedValues.end() &&
             "Externally used scalar is not found in ExternallyUsedValues");
      NewInstLocs.append(It->second);
      ExternallyUsedValues.erase(Scalar);
      // Required to update internally referenced instructions.
      Scalar->replaceAllUsesWith(NewInst);
      continue;
    }

    // Generate extracts for out-of-tree users.
    // Find the insertion point for the extractelement lane.
    if (auto *VecI = dyn_cast<Instruction>(Vec)) {
      if (PHINode *PH = dyn_cast<PHINode>(User)) {
        for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
          if (PH->getIncomingValue(i) == Scalar) {
            Instruction *IncomingTerminator =
                PH->getIncomingBlock(i)->getTerminator();
            if (isa<CatchSwitchInst>(IncomingTerminator)) {
              Builder.SetInsertPoint(VecI->getParent(),
                                     std::next(VecI->getIterator()));
            } else {
              Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
            }
            Value *NewInst = ExtractAndExtendIfNeeded(Vec);
            CSEBlocks.insert(PH->getIncomingBlock(i));
            PH->setOperand(i, NewInst);
          }
        }
      } else {
        Builder.SetInsertPoint(cast<Instruction>(User));
        Value *NewInst = ExtractAndExtendIfNeeded(Vec);
        CSEBlocks.insert(cast<Instruction>(User)->getParent());
        User->replaceUsesOfWith(Scalar, NewInst);
      }
    } else {
      Builder.SetInsertPoint(&F->getEntryBlock().front());
      Value *NewInst = ExtractAndExtendIfNeeded(Vec);
      CSEBlocks.insert(&F->getEntryBlock());
      User->replaceUsesOfWith(Scalar, NewInst);
    }

    LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
  }

  // For each vectorized value:
  for (auto &TEPtr : VectorizableTree) {
    TreeEntry *Entry = TEPtr.get();

    // No need to handle users of gathered values.
    if (Entry->State == TreeEntry::NeedToGather)
      continue;

    assert(Entry->VectorizedValue && "Can't find vectorizable value");

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

#ifndef NDEBUG
      Type *Ty = Scalar->getType();
      if (!Ty->isVoidTy()) {
        for (User *U : Scalar->users()) {
          LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");

          // It is legal to delete users in the ignorelist.
          assert((getTreeEntry(U) || is_contained(UserIgnoreList, U) ||
                  (isa_and_nonnull<Instruction>(U) &&
                   isDeleted(cast<Instruction>(U)))) &&
                 "Deleting out-of-tree value");
        }
      }
#endif
      LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
      eraseInstruction(cast<Instruction>(Scalar));
    }
  }

  Builder.ClearInsertionPoint();
  InstrElementSize.clear();

  return VectorizableTree[0]->VectorizedValue;
}

void BoUpSLP::optimizeGatherSequence() {
  LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
                    << " gather sequence instructions.\n");
  // LICM InsertElementInst sequences.
  for (Instruction *I : GatherSeq) {
    if (isDeleted(I))
      continue;

    // Check if this block is inside a loop.
    Loop *L = LI->getLoopFor(I->getParent());
    if (!L)
      continue;

    // Check if it has a preheader.
    BasicBlock *PreHeader = L->getLoopPreheader();
    if (!PreHeader)
      continue;

    // If the vector or the element that we insert into it are instructions
    // that are defined in this basic block, then we can't hoist this
    // instruction.
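    // For illustration, a hedged sketch (hypothetical IR, not taken from any
    // real test case): inside a loop body,
    //   %v = insertelement <4 x float> poison, float %x, i32 0
    // can be moved to the preheader when %x is defined outside the loop, but
    // must stay put if %x, or the vector being inserted into, is computed in
    // this block.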
    auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
    auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
    if (Op0 && L->contains(Op0))
      continue;
    if (Op1 && L->contains(Op1))
      continue;

    // We can hoist this instruction. Move it to the pre-header.
    I->moveBefore(PreHeader->getTerminator());
  }

  // Make a list of all reachable blocks in our CSE queue.
  SmallVector<const DomTreeNode *, 8> CSEWorkList;
  CSEWorkList.reserve(CSEBlocks.size());
  for (BasicBlock *BB : CSEBlocks)
    if (DomTreeNode *N = DT->getNode(BB)) {
      assert(DT->isReachableFromEntry(N));
      CSEWorkList.push_back(N);
    }

  // Sort blocks by domination. This ensures we visit a block after all blocks
  // dominating it are visited.
  llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) {
    assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) &&
           "Different nodes should have different DFS numbers");
    return A->getDFSNumIn() < B->getDFSNumIn();
  });

  // Perform an O(N^2) search over the gather sequences and merge identical
  // instructions. TODO: We can further optimize this scan if we split the
  // instructions into different buckets based on the insert lane.
  SmallVector<Instruction *, 16> Visited;
  for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
    assert(*I &&
           (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
           "Worklist not sorted properly!");
    BasicBlock *BB = (*I)->getBlock();
    // For all instructions in blocks containing gather sequences:
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
      Instruction *In = &*it++;
      if (isDeleted(In))
        continue;
      if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In) &&
          !isa<ShuffleVectorInst>(In))
        continue;

      // Check if we can replace this instruction with any of the
      // visited instructions.
      for (Instruction *v : Visited) {
        if (In->isIdenticalTo(v) &&
            DT->dominates(v->getParent(), In->getParent())) {
          In->replaceAllUsesWith(v);
          eraseInstruction(In);
          In = nullptr;
          break;
        }
      }
      if (In) {
        assert(!is_contained(Visited, In));
        Visited.push_back(In);
      }
    }
  }
  CSEBlocks.clear();
  GatherSeq.clear();
}

// Groups the instructions to a bundle (which is then a single scheduling
// entity) and schedules instructions until the bundle gets ready.
Optional<BoUpSLP::ScheduleData *>
BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
                                            const InstructionsState &S) {
  // No need to schedule PHIs, insertelement, extractelement and extractvalue
  // instructions.
  if (isa<PHINode>(S.OpValue) || isVectorLikeInstWithConstOps(S.OpValue))
    return nullptr;

  // Initialize the instruction bundle.
  Instruction *OldScheduleEnd = ScheduleEnd;
  ScheduleData *PrevInBundle = nullptr;
  ScheduleData *Bundle = nullptr;
  bool ReSchedule = false;
  LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n");

  auto &&TryScheduleBundle = [this, OldScheduleEnd,
                              SLP](bool ReSchedule, ScheduleData *Bundle) {
    // The scheduling region got new instructions at the lower end (or it is a
    // new region for the first bundle). This makes it necessary to
    // recalculate all dependencies.
    // It is seldom that this needs to be done a second time after adding the
    // initial bundle to the region.
    if (ScheduleEnd != OldScheduleEnd) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode())
        doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); });
      ReSchedule = true;
    }
    if (ReSchedule) {
      resetSchedule();
      initialFillReadyList(ReadyInsts);
    }
    if (Bundle) {
      LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle
                        << " in block " << BB->getName() << "\n");
      calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP);
    }

    // Now try to schedule the new bundle or (if no bundle) just calculate
    // dependencies. As soon as the bundle is "ready" it means that there are
    // no cyclic dependencies and we can schedule it. Note that it's important
    // that we don't "schedule" the bundle yet (see cancelScheduling).
    while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) &&
           !ReadyInsts.empty()) {
      ScheduleData *Picked = ReadyInsts.pop_back_val();
      if (Picked->isSchedulingEntity() && Picked->isReady())
        schedule(Picked, ReadyInsts);
    }
  };

  // Make sure that the scheduling region contains all
  // instructions of the bundle.
  for (Value *V : VL) {
    if (!extendSchedulingRegion(V, S)) {
      // The scheduling region got new instructions at the lower end (or it
      // is a new region for the first bundle), which makes it necessary to
      // recalculate all dependencies. Otherwise the compiler may crash trying
      // to incorrectly calculate dependencies and emit instructions in the
      // wrong order at the actual scheduling.
      TryScheduleBundle(/*ReSchedule=*/false, nullptr);
      return None;
    }
  }

  for (Value *V : VL) {
    ScheduleData *BundleMember = getScheduleData(V);
    assert(BundleMember &&
           "no ScheduleData for bundle member (maybe not in same basic block)");
    if (BundleMember->IsScheduled) {
      // A bundle member was scheduled as a single instruction before and now
      // needs to be scheduled as part of the bundle. We just get rid of the
      // existing schedule.
      LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
                        << " was already scheduled\n");
      ReSchedule = true;
    }
    assert(BundleMember->isSchedulingEntity() &&
           "bundle member already part of other bundle");
    if (PrevInBundle) {
      PrevInBundle->NextInBundle = BundleMember;
    } else {
      Bundle = BundleMember;
    }
    BundleMember->UnscheduledDepsInBundle = 0;
    Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;

    // Group the instructions to a bundle.
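    // For illustration, a hedged sketch of the resulting links for a
    // hypothetical bundle [i0, i1, i2] (field names as used below):
    //   i0: FirstInBundle = i0, NextInBundle = i1
    //   i1: FirstInBundle = i0, NextInBundle = i2
    //   i2: FirstInBundle = i0, NextInBundle = null
    // so i0 acts as the scheduling entity for the whole bundle.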
    BundleMember->FirstInBundle = Bundle;
    PrevInBundle = BundleMember;
  }
  assert(Bundle && "Failed to find schedule bundle");
  TryScheduleBundle(ReSchedule, Bundle);
  if (!Bundle->isReady()) {
    cancelScheduling(VL, S.OpValue);
    return None;
  }
  return Bundle;
}

void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
                                                Value *OpValue) {
  if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue))
    return;

  ScheduleData *Bundle = getScheduleData(OpValue);
  LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
  assert(!Bundle->IsScheduled &&
         "Can't cancel bundle which is already scheduled");
  assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
         "tried to unbundle something which is not a bundle");

  // Un-bundle: make single instructions out of the bundle.
  ScheduleData *BundleMember = Bundle;
  while (BundleMember) {
    assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
    BundleMember->FirstInBundle = BundleMember;
    ScheduleData *Next = BundleMember->NextInBundle;
    BundleMember->NextInBundle = nullptr;
    BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
    if (BundleMember->UnscheduledDepsInBundle == 0) {
      ReadyInsts.insert(BundleMember);
    }
    BundleMember = Next;
  }
}

BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
  // Allocate a new ScheduleData for the instruction.
  if (ChunkPos >= ChunkSize) {
    ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize));
    ChunkPos = 0;
  }
  return &(ScheduleDataChunks.back()[ChunkPos++]);
}

bool BoUpSLP::BlockScheduling::extendSchedulingRegion(
    Value *V, const InstructionsState &S) {
  if (getScheduleData(V, isOneOf(S, V)))
    return true;
  Instruction *I = dyn_cast<Instruction>(V);
  assert(I && "bundle member must be an instruction");
  assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) &&
         "phi nodes/insertelements/extractelements/extractvalues don't need "
         "to be scheduled");
  auto &&CheckScheduleForI = [this, &S](Instruction *I) -> bool {
    ScheduleData *ISD = getScheduleData(I);
    if (!ISD)
      return false;
    assert(isInSchedulingRegion(ISD) &&
           "ScheduleData not in scheduling region");
    ScheduleData *SD = allocateScheduleDataChunks();
    SD->Inst = I;
    SD->init(SchedulingRegionID, S.OpValue);
    ExtraScheduleDataMap[I][S.OpValue] = SD;
    return true;
  };
  if (CheckScheduleForI(I))
    return true;
  if (!ScheduleStart) {
    // It's the first instruction in the new region.
    initScheduleData(I, I->getNextNode(), nullptr, nullptr);
    ScheduleStart = I;
    ScheduleEnd = I->getNextNode();
    if (isOneOf(S, I) != I)
      CheckScheduleForI(I);
    assert(ScheduleEnd && "tried to vectorize a terminator?");
    LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
    return true;
  }
  // Search up and down at the same time, because we don't know if the new
  // instruction is above or below the existing scheduling region.
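  // A hedged sketch of the walk (illustrative only): with the current region
  // [ScheduleStart, ScheduleEnd) and a new instruction I somewhere in BB,
  //
  //   BB:  ... I? ...  [ScheduleStart ... ScheduleEnd)  ... I? ...
  //            ^--- UpIter walks upwards | DownIter walks downwards ---^
  //
  // both iterators advance in lock-step until one of them reaches I, so the
  // region is grown by the minimal amount in the right direction.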
  BasicBlock::reverse_iterator UpIter =
      ++ScheduleStart->getIterator().getReverse();
  BasicBlock::reverse_iterator UpperEnd = BB->rend();
  BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
  BasicBlock::iterator LowerEnd = BB->end();
  while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I &&
         &*DownIter != I) {
    if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
      LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
      return false;
    }

    ++UpIter;
    ++DownIter;
  }
  if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) {
    assert(I->getParent() == ScheduleStart->getParent() &&
           "Instruction is in wrong basic block.");
    initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
    ScheduleStart = I;
    if (isOneOf(S, I) != I)
      CheckScheduleForI(I);
    LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I
                      << "\n");
    return true;
  }
  assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) &&
         "Expected to reach the top of the basic block or the instruction at "
         "the lower end.");
  assert(I->getParent() == ScheduleEnd->getParent() &&
         "Instruction is in wrong basic block.");
  initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
                   nullptr);
  ScheduleEnd = I->getNextNode();
  if (isOneOf(S, I) != I)
    CheckScheduleForI(I);
  assert(ScheduleEnd && "tried to vectorize a terminator?");
  LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n");
  return true;
}

void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
                                                Instruction *ToI,
                                                ScheduleData *PrevLoadStore,
                                                ScheduleData *NextLoadStore) {
  ScheduleData *CurrentLoadStore = PrevLoadStore;
  for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
    ScheduleData *SD = ScheduleDataMap[I];
    if (!SD) {
      SD = allocateScheduleDataChunks();
      ScheduleDataMap[I] = SD;
      SD->Inst = I;
    }
    assert(!isInSchedulingRegion(SD) &&
           "new ScheduleData already in scheduling region");
    SD->init(SchedulingRegionID, I);

    if (I->mayReadOrWriteMemory() &&
        (!isa<IntrinsicInst>(I) ||
         (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect &&
          cast<IntrinsicInst>(I)->getIntrinsicID() !=
              Intrinsic::pseudoprobe))) {
      // Update the linked list of memory accessing instructions.
      if (CurrentLoadStore) {
        CurrentLoadStore->NextLoadStore = SD;
      } else {
        FirstLoadStoreInRegion = SD;
      }
      CurrentLoadStore = SD;
    }
  }
  if (NextLoadStore) {
    if (CurrentLoadStore)
      CurrentLoadStore->NextLoadStore = NextLoadStore;
  } else {
    LastLoadStoreInRegion = CurrentLoadStore;
  }
}

void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
                                                     bool InsertInReadyList,
                                                     BoUpSLP *SLP) {
  assert(SD->isSchedulingEntity());

  SmallVector<ScheduleData *, 10> WorkList;
  WorkList.push_back(SD);

  while (!WorkList.empty()) {
    ScheduleData *SD = WorkList.pop_back_val();

    ScheduleData *BundleMember = SD;
    while (BundleMember) {
      assert(isInSchedulingRegion(BundleMember));
      if (!BundleMember->hasValidDependencies()) {
        LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember
                          << "\n");
        BundleMember->Dependencies = 0;
        BundleMember->resetUnscheduledDeps();

        // Handle def-use chain dependencies.
        if (BundleMember->OpValue != BundleMember->Inst) {
          ScheduleData *UseSD = getScheduleData(BundleMember->Inst);
          if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
            BundleMember->Dependencies++;
            ScheduleData *DestBundle = UseSD->FirstInBundle;
            if (!DestBundle->IsScheduled)
              BundleMember->incrementUnscheduledDeps(1);
            if (!DestBundle->hasValidDependencies())
              WorkList.push_back(DestBundle);
          }
        } else {
          for (User *U : BundleMember->Inst->users()) {
            if (isa<Instruction>(U)) {
              ScheduleData *UseSD = getScheduleData(U);
              if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
                BundleMember->Dependencies++;
                ScheduleData *DestBundle = UseSD->FirstInBundle;
                if (!DestBundle->IsScheduled)
                  BundleMember->incrementUnscheduledDeps(1);
                if (!DestBundle->hasValidDependencies())
                  WorkList.push_back(DestBundle);
              }
            } else {
              // We are not sure if this can ever happen, but we need to be
              // safe. This prevents the instruction/bundle from ever being
              // scheduled, which eventually disables vectorization.
              BundleMember->Dependencies++;
              BundleMember->incrementUnscheduledDeps(1);
            }
          }
        }

        // Handle the memory dependencies.
        ScheduleData *DepDest = BundleMember->NextLoadStore;
        if (DepDest) {
          Instruction *SrcInst = BundleMember->Inst;
          MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
          bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
          unsigned numAliased = 0;
          unsigned DistToSrc = 1;

          while (DepDest) {
            assert(isInSchedulingRegion(DepDest));

            // We have two limits to reduce the complexity:
            // 1) AliasedCheckLimit: It's a small limit to reduce calls to
            //    SLP->isAliased (which is the expensive part in this loop).
            // 2) MaxMemDepDistance: It's for very large blocks and it aborts
            //    the whole loop (even if the loop is fast, it's quadratic).
            //    It's important for the loop break condition (see below) to
            //    check this limit even between two read-only instructions.
            if (DistToSrc >= MaxMemDepDistance ||
                ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
                 (numAliased >= AliasedCheckLimit ||
                  SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {

              // We increment the counter only if the locations are aliased
              // (instead of counting all alias checks). This gives a better
              // balance between reduced runtime and accurate dependencies.
              numAliased++;

              DepDest->MemoryDependencies.push_back(BundleMember);
              BundleMember->Dependencies++;
              ScheduleData *DestBundle = DepDest->FirstInBundle;
              if (!DestBundle->IsScheduled) {
                BundleMember->incrementUnscheduledDeps(1);
              }
              if (!DestBundle->hasValidDependencies()) {
                WorkList.push_back(DestBundle);
              }
            }
            DepDest = DepDest->NextLoadStore;

            // Example, explaining the loop break condition: Let's assume our
            // starting instruction is i0 and MaxMemDepDistance = 3.
            //
            // +--------v--v--v
            // i0,i1,i2,i3,i4,i5,i6,i7,i8
            // +--------^--^--^
            //
            // MaxMemDepDistance lets us stop alias-checking at i3 and we add
            // dependencies from i0 to i3,i4,.. (even if they are not
            // aliased). Previously we already added dependencies from i3 to
            // i6,i7,i8 (because of MaxMemDepDistance). As we added a
            // dependency from i0 to i3, we have transitive dependencies from
            // i0 to i6,i7,i8 and we can abort this loop at i6.
            if (DistToSrc >= 2 * MaxMemDepDistance)
              break;
            DistToSrc++;
          }
        }
      }
      BundleMember = BundleMember->NextInBundle;
    }
    if (InsertInReadyList && SD->isReady()) {
      ReadyInsts.push_back(SD);
      LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst
                        << "\n");
    }
  }
}

void BoUpSLP::BlockScheduling::resetSchedule() {
  assert(ScheduleStart &&
         "tried to reset schedule on block which has not been scheduled");
  for (Instruction *I = ScheduleStart; I != ScheduleEnd;
       I = I->getNextNode()) {
    doForAllOpcodes(I, [&](ScheduleData *SD) {
      assert(isInSchedulingRegion(SD) &&
             "ScheduleData not in scheduling region");
      SD->IsScheduled = false;
      SD->resetUnscheduledDeps();
    });
  }
  ReadyInsts.clear();
}

void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
  if (!BS->ScheduleStart)
    return;

  LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");

  BS->resetSchedule();

  // For the real scheduling we use a more sophisticated ready-list: it is
  // sorted by the original instruction location. This lets the final schedule
  // be as close as possible to the original instruction order.
  struct ScheduleDataCompare {
    bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
      return SD2->SchedulingPriority < SD1->SchedulingPriority;
    }
  };
  std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;

  // Ensure that all dependency data is updated and fill the ready-list with
  // initial instructions.
  int Idx = 0;
  int NumToSchedule = 0;
  for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
       I = I->getNextNode()) {
    BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) {
      assert((isVectorLikeInstWithConstOps(SD->Inst) ||
              SD->isPartOfBundle() == (getTreeEntry(SD->Inst) != nullptr)) &&
             "scheduler and vectorizer bundle mismatch");
      SD->FirstInBundle->SchedulingPriority = Idx++;
      if (SD->isSchedulingEntity()) {
        BS->calculateDependencies(SD, false, this);
        NumToSchedule++;
      }
    });
  }
  BS->initialFillReadyList(ReadyInsts);

  Instruction *LastScheduledInst = BS->ScheduleEnd;

  // Do the "real" scheduling.
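  // A hedged illustration (hypothetical priorities): SchedulingPriority grows
  // with the original position in the block, and the comparator above yields
  // the highest value first. Since each picked bundle is re-inserted just
  // before the previously placed instruction, picking "latest first" rebuilds
  // the block bottom-up and keeps it close to the original source order.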
  while (!ReadyInsts.empty()) {
    ScheduleData *picked = *ReadyInsts.begin();
    ReadyInsts.erase(ReadyInsts.begin());

    // Move the scheduled instruction(s) to their dedicated places, if not
    // there yet.
    ScheduleData *BundleMember = picked;
    while (BundleMember) {
      Instruction *pickedInst = BundleMember->Inst;
      if (pickedInst->getNextNode() != LastScheduledInst) {
        BS->BB->getInstList().remove(pickedInst);
        BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
                                     pickedInst);
      }
      LastScheduledInst = pickedInst;
      BundleMember = BundleMember->NextInBundle;
    }

    BS->schedule(picked, ReadyInsts);
    NumToSchedule--;
  }
  assert(NumToSchedule == 0 && "could not schedule all instructions");

  // Avoid duplicate scheduling of the block.
  BS->ScheduleStart = nullptr;
}

unsigned BoUpSLP::getVectorElementSize(Value *V) {
  // If V is a store, just return the width of the stored value (or the value
  // truncated just before storing) without traversing the expression tree.
  // This is the common case.
  if (auto *Store = dyn_cast<StoreInst>(V)) {
    if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand()))
      return DL->getTypeSizeInBits(Trunc->getSrcTy());
    return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
  }

  if (auto *IEI = dyn_cast<InsertElementInst>(V))
    return getVectorElementSize(IEI->getOperand(1));

  auto E = InstrElementSize.find(V);
  if (E != InstrElementSize.end())
    return E->second;

  // If V is not a store, we can traverse the expression tree to find loads
  // that feed it. The type of the loaded value may indicate a more suitable
  // width than V's type. We want to base the vector element size on the width
  // of memory operations where possible.
  SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist;
  SmallPtrSet<Instruction *, 16> Visited;
  if (auto *I = dyn_cast<Instruction>(V)) {
    Worklist.emplace_back(I, I->getParent());
    Visited.insert(I);
  }

  // Traverse the expression tree in bottom-up order looking for loads. If we
  // encounter an instruction we don't yet handle, we give up.
  auto Width = 0u;
  while (!Worklist.empty()) {
    Instruction *I;
    BasicBlock *Parent;
    std::tie(I, Parent) = Worklist.pop_back_val();

    // We should only be looking at scalar instructions here. If the current
    // instruction has a vector type, skip.
    auto *Ty = I->getType();
    if (isa<VectorType>(Ty))
      continue;

    // If the current instruction is a load, extractelement or extractvalue,
    // update Width to reflect the width of the value read.
    if (isa<LoadInst>(I) || isa<ExtractElementInst>(I) ||
        isa<ExtractValueInst>(I))
      Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty));

    // Otherwise, we need to visit the operands of the instruction. We only
    // handle the interesting cases from buildTree here. If an operand is an
    // instruction we haven't yet visited and from the same basic block as the
    // user or the use is a PHI node, we add it to the worklist.
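    // A hedged example (made-up IR, everything in one block): for
    //   %a = load i8, i8* %p
    //   %b = zext i8 %a to i32
    //   %v = add i32 %b, 1      <- V
    // the walk steps through the zext to the i8 load, so the element size is
    // taken to be 8 bits rather than the 32 bits of V's own type.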
    else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
             isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I) ||
             isa<UnaryOperator>(I)) {
      for (Use &U : I->operands())
        if (auto *J = dyn_cast<Instruction>(U.get()))
          if (Visited.insert(J).second &&
              (isa<PHINode>(I) || J->getParent() == Parent))
            Worklist.emplace_back(J, J->getParent());
    } else {
      break;
    }
  }

  // If we didn't encounter a memory access in the expression tree, or if we
  // gave up for some reason, just return the width of V. Otherwise, return
  // the maximum width we found.
  if (!Width) {
    if (auto *CI = dyn_cast<CmpInst>(V))
      V = CI->getOperand(0);
    Width = DL->getTypeSizeInBits(V->getType());
  }

  for (Instruction *I : Visited)
    InstrElementSize[I] = Width;

  return Width;
}

// Determine if a value V in a vectorizable expression Expr can be demoted to a
// smaller type with a truncation. We collect the values that will be demoted
// in ToDemote and additional roots that require investigating in Roots.
static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
                                  SmallVectorImpl<Value *> &ToDemote,
                                  SmallVectorImpl<Value *> &Roots) {
  // We can always demote constants.
  if (isa<Constant>(V)) {
    ToDemote.push_back(V);
    return true;
  }

  // If the value is not an instruction in the expression with only one use, it
  // cannot be demoted.
  auto *I = dyn_cast<Instruction>(V);
  if (!I || !I->hasOneUse() || !Expr.count(I))
    return false;

  switch (I->getOpcode()) {

  // We can always demote truncations and extensions. Since truncations can
  // seed additional demotion, we save the truncated value.
  case Instruction::Trunc:
    Roots.push_back(I->getOperand(0));
    break;
  case Instruction::ZExt:
  case Instruction::SExt:
    if (isa<ExtractElementInst>(I->getOperand(0)) ||
        isa<InsertElementInst>(I->getOperand(0)))
      return false;
    break;

  // We can demote certain binary operations if we can demote both of their
  // operands.
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
        !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
      return false;
    break;

  // We can demote selects if we can demote their true and false values.
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
        !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
      return false;
    break;
  }

  // We can demote phis if we can demote all their incoming operands. Note
  // that we don't need to worry about cycles since we ensure single use
  // above.
  case Instruction::PHI: {
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
        return false;
    break;
  }

  // Otherwise, conservatively give up.
  default:
    return false;
  }

  // Record the value that we can demote.
  ToDemote.push_back(V);
  return true;
}

void BoUpSLP::computeMinimumValueSizes() {
  // If there are no external uses, the expression tree must be rooted by a
  // store. We can't demote in-memory values, so there is nothing to do here.
  if (ExternalUses.empty())
    return;

  // We only attempt to truncate integer expressions.
  auto &TreeRoot = VectorizableTree[0]->Scalars;
  auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType());
  if (!TreeRootIT)
    return;

  // If the expression is not rooted by a store, these roots should have
  // external uses. We will rely on InstCombine to rewrite the expression in
  // the narrower type. However, InstCombine only rewrites single-use values.
  // This means that if a tree entry other than a root is used externally, it
  // must have multiple uses and InstCombine will not rewrite it. The code
  // below ensures that only the roots are used externally.
  SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end());
  for (auto &EU : ExternalUses)
    if (!Expr.erase(EU.Scalar))
      return;
  if (!Expr.empty())
    return;

  // Collect the scalar values of the vectorizable expression. We will use
  // this context to determine which values can be demoted. If we see a
  // truncation, we mark it as seeding another demotion.
  for (auto &EntryPtr : VectorizableTree)
    Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end());

  // Ensure the roots of the vectorizable tree don't form a cycle. They must
  // have a single external user that is not in the vectorizable tree.
  for (auto *Root : TreeRoot)
    if (!Root->hasOneUse() || Expr.count(*Root->user_begin()))
      return;

  // Conservatively determine if we can actually truncate the roots of the
  // expression. Collect the values that can be demoted in ToDemote and
  // additional roots that require investigating in Roots.
  SmallVector<Value *, 32> ToDemote;
  SmallVector<Value *, 4> Roots;
  for (auto *Root : TreeRoot)
    if (!collectValuesToDemote(Root, Expr, ToDemote, Roots))
      return;

  // The maximum bit width required to represent all the values that can be
  // demoted without loss of precision. It would be safe to truncate the roots
  // of the expression to this width.
  auto MaxBitWidth = 8u;

  // We first check if all the bits of the roots are demanded. If they're not,
  // we can truncate the roots to this narrower type.
  for (auto *Root : TreeRoot) {
    auto Mask = DB->getDemandedBits(cast<Instruction>(Root));
    MaxBitWidth = std::max<unsigned>(
        Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
  }

  // True if the roots can be zero-extended back to their original type,
  // rather than sign-extended. We know that if the leading bits are not
  // demanded, we can safely zero-extend. So we initialize IsKnownPositive to
  // True.
  bool IsKnownPositive = true;

  // If all the bits of the roots are demanded, we can try a little harder to
  // compute a narrower type. This can happen, for example, if the roots are
  // getelementptr indices. InstCombine promotes these indices to the pointer
  // width. Thus, all their bits are technically demanded even though the
  // address computation might be vectorized in a smaller type.
  //
  // We start by looking at each entry that can be demoted.
  // We compute the maximum bit width required to store the scalar by using
  // ValueTracking to compute the number of high-order bits we can truncate.
  if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
      llvm::all_of(TreeRoot, [](Value *R) {
        assert(R->hasOneUse() && "Root should have only one use!");
        return isa<GetElementPtrInst>(R->user_back());
      })) {
    MaxBitWidth = 8u;

    // Determine if the sign bit of all the roots is known to be zero. If
    // not, IsKnownPositive is set to False.
    IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
      KnownBits Known = computeKnownBits(R, *DL);
      return Known.isNonNegative();
    });

    // Determine the maximum number of bits required to store the scalar
    // values.
    for (auto *Scalar : ToDemote) {
      auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
      auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
      MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
    }

    // If we can't prove that the sign bit is zero, we must add one to the
    // maximum bit width to account for the unknown sign bit. This preserves
    // the existing sign bit so we can safely sign-extend the root back to the
    // original type. Otherwise, if we know the sign bit is zero, we will
    // zero-extend the root instead.
    //
    // FIXME: This is somewhat suboptimal, as there will be cases where adding
    //        one to the maximum bit width will yield a larger-than-necessary
    //        type. In general, we need to add an extra bit only if we can't
    //        prove that the upper bit of the original type is equal to the
    //        upper bit of the proposed smaller type. If these two bits are
    //        the same (either zero or one) we know that sign-extending from
    //        the smaller type will result in the same value. Here, since we
    //        can't yet prove this, we are just making the proposed smaller
    //        type larger to ensure correctness.
    if (!IsKnownPositive)
      ++MaxBitWidth;
  }

  // Round MaxBitWidth up to the next power-of-two.
  if (!isPowerOf2_64(MaxBitWidth))
    MaxBitWidth = NextPowerOf2(MaxBitWidth);

  // If the maximum bit width we compute is less than the width of the roots'
  // type, we can proceed with the narrowing. Otherwise, do nothing.
  if (MaxBitWidth >= TreeRootIT->getBitWidth())
    return;

  // If we can truncate the root, we must collect additional values that
  // might be demoted as a result. That is, those seeded by truncations we
  // will modify.
  while (!Roots.empty())
    collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);

  // Finally, map the values we can demote to the maximum bit width we
  // computed.
  for (auto *Scalar : ToDemote)
    MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
}

namespace {

/// The SLPVectorizer Pass.
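/// This is the legacy pass-manager wrapper around SLPVectorizerPass. As a
/// hedged usage note (an assumption, not verified against any particular
/// driver build): with the legacy pass manager it would typically be
/// exercised via something like `opt -slp-vectorizer <input.ll>`, while the
/// new-PM entry point below is reached through the pass pipelines.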
struct SLPVectorizer : public FunctionPass {
  SLPVectorizerPass Impl;

  /// Pass identification, replacement for typeid.
  static char ID;

  explicit SLPVectorizer() : FunctionPass(ID) {
    initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override { return false; }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

    return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    FunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addRequired<InjectTLIMappingsLegacy>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace

PreservedAnalyses SLPVectorizerPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
  auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
  auto *AA = &AM.getResult<AAManager>(F);
  auto *LI = &AM.getResult<LoopAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
  auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}

bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
                                TargetTransformInfo *TTI_,
                                TargetLibraryInfo *TLI_, AAResults *AA_,
                                LoopInfo *LI_, DominatorTree *DT_,
                                AssumptionCache *AC_, DemandedBits *DB_,
                                OptimizationRemarkEmitter *ORE_) {
  if (!RunSLPVectorization)
    return false;
  SE = SE_;
  TTI = TTI_;
  TLI = TLI_;
  AA = AA_;
  LI = LI_;
  DT = DT_;
  AC = AC_;
  DB = DB_;
  DL = &F.getParent()->getDataLayout();

  Stores.clear();
  GEPs.clear();
  bool Changed = false;

  // If the target claims to have no vector registers, don't attempt
  // vectorization.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)))
    return false;

  // Don't vectorize when the attribute NoImplicitFloat is used.
  if (F.hasFnAttribute(Attribute::NoImplicitFloat))
    return false;

  LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");

  // Use the bottom-up SLP vectorizer to construct chains that start with
  // store instructions.
  BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);

  // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
  // delete instructions.

  // Update DFS numbers now so that we can use them for ordering.
  DT->updateDFSNumbers();

  // Scan the blocks in the function in post order.
  for (auto BB : post_order(&F.getEntryBlock())) {
    collectSeedInstructions(BB);

    // Vectorize trees that end at stores.
    if (!Stores.empty()) {
      LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
                        << " underlying objects.\n");
      Changed |= vectorizeStoreChains(R);
    }

    // Vectorize trees that end at reductions.
    Changed |= vectorizeChainsInBlock(BB, R);

    // Vectorize the index computations of getelementptr instructions. This
    // is primarily intended to catch gather-like idioms ending at
    // non-consecutive loads.
    if (!GEPs.empty()) {
      LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
                        << " underlying objects.\n");
      Changed |= vectorizeGEPIndices(BB, R);
    }
  }

  if (Changed) {
    R.optimizeGatherSequence();
    LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
                                            unsigned Idx) {
  LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size()
                    << "\n");
  const unsigned Sz = R.getVectorElementSize(Chain[0]);
  const unsigned MinVF = R.getMinVecRegSize() / Sz;
  unsigned VF = Chain.size();

  if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF)
    return false;

  LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx
                    << "\n");

  R.buildTree(Chain);
  if (R.isTreeTinyAndNotFullyVectorizable())
    return false;
  if (R.isLoadCombineCandidate())
    return false;
  R.reorderTopToBottom();
  R.reorderBottomToTop();
  R.buildExternalUses();

  R.computeMinimumValueSizes();

  InstructionCost Cost = R.getTreeCost();

  LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF = " << VF
                    << "\n");
  if (Cost < -SLPCostThreshold) {
    LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n");

    using namespace ore;

    R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
                                        cast<StoreInst>(Chain[0]))
                     << "Stores SLP vectorized with cost " << NV("Cost", Cost)
                     << " and with tree size "
                     << NV("TreeSize", R.getTreeSize()));

    R.vectorizeTree();
    return true;
  }

  return false;
}

bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
                                        BoUpSLP &R) {
  // We may run into multiple chains that merge into a single chain. We mark
  // the stores that we vectorized so that we don't visit the same store
  // twice.
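  // A hedged example (hypothetical layout): seed stores to p[3], p[1], p[0],
  // p[2] may be discovered in that order, but the pair search below links
  // them into the single chain p[0] -> p[1] -> p[2] -> p[3]; the set makes
  // sure a store that already ended up in a vectorized chain is never reused
  // as the head of another one.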
  BoUpSLP::ValueSet VectorizedStores;
  bool Changed = false;

  int E = Stores.size();
  SmallBitVector Tails(E, false);
  int MaxIter = MaxStoreLookup.getValue();
  SmallVector<std::pair<int, int>, 16> ConsecutiveChain(
      E, std::make_pair(E, INT_MAX));
  SmallVector<SmallBitVector, 4> CheckedPairs(E, SmallBitVector(E, false));
  int IterCnt;
  auto &&FindConsecutiveAccess = [this, &Stores, &Tails, &IterCnt, MaxIter,
                                  &CheckedPairs,
                                  &ConsecutiveChain](int K, int Idx) {
    if (IterCnt >= MaxIter)
      return true;
    if (CheckedPairs[Idx].test(K))
      return ConsecutiveChain[K].second == 1 &&
             ConsecutiveChain[K].first == Idx;
    ++IterCnt;
    CheckedPairs[Idx].set(K);
    CheckedPairs[K].set(Idx);
    Optional<int> Diff = getPointersDiff(
        Stores[K]->getValueOperand()->getType(), Stores[K]->getPointerOperand(),
        Stores[Idx]->getValueOperand()->getType(),
        Stores[Idx]->getPointerOperand(), *DL, *SE, /*StrictCheck=*/true);
    if (!Diff || *Diff == 0)
      return false;
    int Val = *Diff;
    if (Val < 0) {
      if (ConsecutiveChain[Idx].second > -Val) {
        Tails.set(K);
        ConsecutiveChain[Idx] = std::make_pair(K, -Val);
      }
      return false;
    }
    if (ConsecutiveChain[K].second <= Val)
      return false;

    Tails.set(Idx);
    ConsecutiveChain[K] = std::make_pair(Idx, Val);
    return Val == 1;
  };
  // Do a quadratic search on all of the given stores in reverse order and
  // find all of the pairs of stores that follow each other.
  for (int Idx = E - 1; Idx >= 0; --Idx) {
    // If a store has multiple consecutive store candidates, search according
    // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ...
    // This is because pairing with an immediately succeeding or preceding
    // candidate usually creates the best chance to find an SLP vectorization
    // opportunity.
    const int MaxLookDepth = std::max(E - Idx, Idx + 1);
    IterCnt = 0;
    for (int Offset = 1, F = MaxLookDepth; Offset < F; ++Offset)
      if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) ||
          (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx)))
        break;
  }

  // Tracks if we tried to vectorize stores starting from the given tail
  // already.
  SmallBitVector TriedTails(E, false);
  // For stores that start but don't end a link in the chain:
  for (int Cnt = E; Cnt > 0; --Cnt) {
    int I = Cnt - 1;
    if (ConsecutiveChain[I].first == E || Tails.test(I))
      continue;
    // We found a store instr that starts a chain. Now follow the chain and
    // try to vectorize it.
    BoUpSLP::ValueList Operands;
    // Collect the chain into a list.
    while (I != E && !VectorizedStores.count(Stores[I])) {
      Operands.push_back(Stores[I]);
      Tails.set(I);
      if (ConsecutiveChain[I].second != 1) {
        // Mark the new end in the chain and go back, if required. It might
        // be required if the original stores come in reversed order, for
        // example.
        if (ConsecutiveChain[I].first != E &&
            Tails.test(ConsecutiveChain[I].first) && !TriedTails.test(I) &&
            !VectorizedStores.count(Stores[ConsecutiveChain[I].first])) {
          TriedTails.set(I);
          Tails.reset(ConsecutiveChain[I].first);
          if (Cnt < ConsecutiveChain[I].first + 2)
            Cnt = ConsecutiveChain[I].first + 2;
        }
        break;
      }
      // Move to the next value in the chain.
7624 I = ConsecutiveChain[I].first; 7625 } 7626 assert(!Operands.empty() && "Expected non-empty list of stores."); 7627 7628 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 7629 unsigned EltSize = R.getVectorElementSize(Operands[0]); 7630 unsigned MaxElts = llvm::PowerOf2Floor(MaxVecRegSize / EltSize); 7631 7632 unsigned MinVF = R.getMinVF(EltSize); 7633 unsigned MaxVF = std::min(R.getMaximumVF(EltSize, Instruction::Store), 7634 MaxElts); 7635 7636 // FIXME: Is division-by-2 the correct step? Should we assert that the 7637 // register size is a power-of-2? 7638 unsigned StartIdx = 0; 7639 for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) { 7640 for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) { 7641 ArrayRef<Value *> Slice = makeArrayRef(Operands).slice(Cnt, Size); 7642 if (!VectorizedStores.count(Slice.front()) && 7643 !VectorizedStores.count(Slice.back()) && 7644 vectorizeStoreChain(Slice, R, Cnt)) { 7645 // Mark the vectorized stores so that we don't vectorize them again. 7646 VectorizedStores.insert(Slice.begin(), Slice.end()); 7647 Changed = true; 7648 // If we vectorized initial block, no need to try to vectorize it 7649 // again. 7650 if (Cnt == StartIdx) 7651 StartIdx += Size; 7652 Cnt += Size; 7653 continue; 7654 } 7655 ++Cnt; 7656 } 7657 // Check if the whole array was vectorized already - exit. 7658 if (StartIdx >= Operands.size()) 7659 break; 7660 } 7661 } 7662 7663 return Changed; 7664 } 7665 7666 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) { 7667 // Initialize the collections. We will make a single pass over the block. 7668 Stores.clear(); 7669 GEPs.clear(); 7670 7671 // Visit the store and getelementptr instructions in BB and organize them in 7672 // Stores and GEPs according to the underlying objects of their pointer 7673 // operands. 7674 for (Instruction &I : *BB) { 7675 // Ignore store instructions that are volatile or have a pointer operand 7676 // that doesn't point to a scalar type. 7677 if (auto *SI = dyn_cast<StoreInst>(&I)) { 7678 if (!SI->isSimple()) 7679 continue; 7680 if (!isValidElementType(SI->getValueOperand()->getType())) 7681 continue; 7682 Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI); 7683 } 7684 7685 // Ignore getelementptr instructions that have more than one index, a 7686 // constant index, or a pointer operand that doesn't point to a scalar 7687 // type. 7688 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 7689 auto Idx = GEP->idx_begin()->get(); 7690 if (GEP->getNumIndices() > 1 || isa<Constant>(Idx)) 7691 continue; 7692 if (!isValidElementType(Idx->getType())) 7693 continue; 7694 if (GEP->getType()->isVectorTy()) 7695 continue; 7696 GEPs[GEP->getPointerOperand()].push_back(GEP); 7697 } 7698 } 7699 } 7700 7701 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) { 7702 if (!A || !B) 7703 return false; 7704 Value *VL[] = {A, B}; 7705 return tryToVectorizeList(VL, R); 7706 } 7707 7708 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 7709 bool LimitForRegisterSize) { 7710 if (VL.size() < 2) 7711 return false; 7712 7713 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " 7714 << VL.size() << ".\n"); 7715 7716 // Check that all of the parts are instructions of the same type, 7717 // we permit an alternate opcode via InstructionsState. 
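  // For example, a mixed add/sub bundle such as (a hypothetical sketch):
  //   %x = add i32 %a, %b
  //   %y = sub i32 %c, %d
  // is still treated as a single bundle with an alternate opcode; the two
  // vectorized results are later blended with a shufflevector.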
7718 InstructionsState S = getSameOpcode(VL); 7719 if (!S.getOpcode()) 7720 return false; 7721 7722 Instruction *I0 = cast<Instruction>(S.OpValue); 7723 // Make sure invalid types (including vector type) are rejected before 7724 // determining vectorization factor for scalar instructions. 7725 for (Value *V : VL) { 7726 Type *Ty = V->getType(); 7727 if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) { 7728 // NOTE: the following will give user internal llvm type name, which may 7729 // not be useful. 7730 R.getORE()->emit([&]() { 7731 std::string type_str; 7732 llvm::raw_string_ostream rso(type_str); 7733 Ty->print(rso); 7734 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0) 7735 << "Cannot SLP vectorize list: type " 7736 << rso.str() + " is unsupported by vectorizer"; 7737 }); 7738 return false; 7739 } 7740 } 7741 7742 unsigned Sz = R.getVectorElementSize(I0); 7743 unsigned MinVF = R.getMinVF(Sz); 7744 unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF); 7745 MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF); 7746 if (MaxVF < 2) { 7747 R.getORE()->emit([&]() { 7748 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0) 7749 << "Cannot SLP vectorize list: vectorization factor " 7750 << "less than 2 is not supported"; 7751 }); 7752 return false; 7753 } 7754 7755 bool Changed = false; 7756 bool CandidateFound = false; 7757 InstructionCost MinCost = SLPCostThreshold.getValue(); 7758 Type *ScalarTy = VL[0]->getType(); 7759 if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 7760 ScalarTy = IE->getOperand(1)->getType(); 7761 7762 unsigned NextInst = 0, MaxInst = VL.size(); 7763 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) { 7764 // No actual vectorization should happen, if number of parts is the same as 7765 // provided vectorization factor (i.e. the scalar type is used for vector 7766 // code during codegen). 7767 auto *VecTy = FixedVectorType::get(ScalarTy, VF); 7768 if (TTI->getNumberOfParts(VecTy) == VF) 7769 continue; 7770 for (unsigned I = NextInst; I < MaxInst; ++I) { 7771 unsigned OpsWidth = 0; 7772 7773 if (I + VF > MaxInst) 7774 OpsWidth = MaxInst - I; 7775 else 7776 OpsWidth = VF; 7777 7778 if (!isPowerOf2_32(OpsWidth)) 7779 continue; 7780 7781 if ((LimitForRegisterSize && OpsWidth < MaxVF) || 7782 (VF > MinVF && OpsWidth <= VF / 2) || (VF == MinVF && OpsWidth < 2)) 7783 break; 7784 7785 ArrayRef<Value *> Ops = VL.slice(I, OpsWidth); 7786 // Check that a previous iteration of this loop did not delete the Value. 7787 if (llvm::any_of(Ops, [&R](Value *V) { 7788 auto *I = dyn_cast<Instruction>(V); 7789 return I && R.isDeleted(I); 7790 })) 7791 continue; 7792 7793 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations " 7794 << "\n"); 7795 7796 R.buildTree(Ops); 7797 if (R.isTreeTinyAndNotFullyVectorizable()) 7798 continue; 7799 R.reorderTopToBottom(); 7800 R.reorderBottomToTop(); 7801 R.buildExternalUses(); 7802 7803 R.computeMinimumValueSizes(); 7804 InstructionCost Cost = R.getTreeCost(); 7805 CandidateFound = true; 7806 MinCost = std::min(MinCost, Cost); 7807 7808 if (Cost < -SLPCostThreshold) { 7809 LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); 7810 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList", 7811 cast<Instruction>(Ops[0])) 7812 << "SLP vectorized with cost " << ore::NV("Cost", Cost) 7813 << " and with tree size " 7814 << ore::NV("TreeSize", R.getTreeSize())); 7815 7816 R.vectorizeTree(); 7817 // Move to the next bundle. 
7818 I += VF - 1; 7819 NextInst = I + 1; 7820 Changed = true; 7821 } 7822 } 7823 } 7824 7825 if (!Changed && CandidateFound) { 7826 R.getORE()->emit([&]() { 7827 return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0) 7828 << "List vectorization was possible but not beneficial with cost " 7829 << ore::NV("Cost", MinCost) << " >= " 7830 << ore::NV("Treshold", -SLPCostThreshold); 7831 }); 7832 } else if (!Changed) { 7833 R.getORE()->emit([&]() { 7834 return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0) 7835 << "Cannot SLP vectorize list: vectorization was impossible" 7836 << " with available vectorization factors"; 7837 }); 7838 } 7839 return Changed; 7840 } 7841 7842 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) { 7843 if (!I) 7844 return false; 7845 7846 if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I)) 7847 return false; 7848 7849 Value *P = I->getParent(); 7850 7851 // Vectorize in current basic block only. 7852 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); 7853 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); 7854 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P) 7855 return false; 7856 7857 // Try to vectorize V. 7858 if (tryToVectorizePair(Op0, Op1, R)) 7859 return true; 7860 7861 auto *A = dyn_cast<BinaryOperator>(Op0); 7862 auto *B = dyn_cast<BinaryOperator>(Op1); 7863 // Try to skip B. 7864 if (B && B->hasOneUse()) { 7865 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); 7866 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); 7867 if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R)) 7868 return true; 7869 if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R)) 7870 return true; 7871 } 7872 7873 // Try to skip A. 7874 if (A && A->hasOneUse()) { 7875 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 7876 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 7877 if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R)) 7878 return true; 7879 if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R)) 7880 return true; 7881 } 7882 return false; 7883 } 7884 7885 namespace { 7886 7887 /// Model horizontal reductions. 7888 /// 7889 /// A horizontal reduction is a tree of reduction instructions that has values 7890 /// that can be put into a vector as its leaves. For example: 7891 /// 7892 /// mul mul mul mul 7893 /// \ / \ / 7894 /// + + 7895 /// \ / 7896 /// + 7897 /// This tree has "mul" as its leaf values and "+" as its reduction 7898 /// instructions. A reduction can feed into a store or a binary operation 7899 /// feeding a phi. 7900 /// ... 7901 /// \ / 7902 /// + 7903 /// | 7904 /// phi += 7905 /// 7906 /// Or: 7907 /// ... 7908 /// \ / 7909 /// + 7910 /// | 7911 /// *p = 7912 /// 7913 class HorizontalReduction { 7914 using ReductionOpsType = SmallVector<Value *, 16>; 7915 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>; 7916 ReductionOpsListType ReductionOps; 7917 SmallVector<Value *, 32> ReducedVals; 7918 // Use map vector to make stable output. 7919 MapVector<Instruction *, Value *> ExtraArgs; 7920 WeakTrackingVH ReductionRoot; 7921 /// The type of reduction operation. 
7922 RecurKind RdxKind; 7923 7924 const unsigned INVALID_OPERAND_INDEX = std::numeric_limits<unsigned>::max(); 7925 7926 static bool isCmpSelMinMax(Instruction *I) { 7927 return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) && 7928 RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I)); 7929 } 7930 7931 // And/or are potentially poison-safe logical patterns like: 7932 // select x, y, false 7933 // select x, true, y 7934 static bool isBoolLogicOp(Instruction *I) { 7935 return match(I, m_LogicalAnd(m_Value(), m_Value())) || 7936 match(I, m_LogicalOr(m_Value(), m_Value())); 7937 } 7938 7939 /// Checks if instruction is associative and can be vectorized. 7940 static bool isVectorizable(RecurKind Kind, Instruction *I) { 7941 if (Kind == RecurKind::None) 7942 return false; 7943 7944 // Integer ops that map to select instructions or intrinsics are fine. 7945 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) || 7946 isBoolLogicOp(I)) 7947 return true; 7948 7949 if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) { 7950 // FP min/max are associative except for NaN and -0.0. We do not 7951 // have to rule out -0.0 here because the intrinsic semantics do not 7952 // specify a fixed result for it. 7953 return I->getFastMathFlags().noNaNs(); 7954 } 7955 7956 return I->isAssociative(); 7957 } 7958 7959 static Value *getRdxOperand(Instruction *I, unsigned Index) { 7960 // Poison-safe 'or' takes the form: select X, true, Y 7961 // To make that work with the normal operand processing, we skip the 7962 // true value operand. 7963 // TODO: Change the code and data structures to handle this without a hack. 7964 if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1) 7965 return I->getOperand(2); 7966 return I->getOperand(Index); 7967 } 7968 7969 /// Checks if the ParentStackElem.first should be marked as a reduction 7970 /// operation with an extra argument or as extra argument itself. 7971 void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem, 7972 Value *ExtraArg) { 7973 if (ExtraArgs.count(ParentStackElem.first)) { 7974 ExtraArgs[ParentStackElem.first] = nullptr; 7975 // We ran into something like: 7976 // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg. 7977 // The whole ParentStackElem.first should be considered as an extra value 7978 // in this case. 7979 // Do not perform analysis of remaining operands of ParentStackElem.first 7980 // instruction, this whole instruction is an extra argument. 7981 ParentStackElem.second = INVALID_OPERAND_INDEX; 7982 } else { 7983 // We ran into something like: 7984 // ParentStackElem.first += ... + ExtraArg + ... 7985 ExtraArgs[ParentStackElem.first] = ExtraArg; 7986 } 7987 } 7988 7989 /// Creates reduction operation with the current opcode. 
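  /// For integer min/max kinds the operation is emitted either as the
  /// corresponding intrinsic or, when \p UseSelect is set, as the equivalent
  /// cmp + select pair, e.g. for SMax (a sketch):
  ///   %cmp = icmp sgt i32 %lhs, %rhs
  ///   %op  = select i1 %cmp, i32 %lhs, i32 %rhs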
7990 static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS, 7991 Value *RHS, const Twine &Name, bool UseSelect) { 7992 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind); 7993 switch (Kind) { 7994 case RecurKind::Add: 7995 case RecurKind::Mul: 7996 case RecurKind::Or: 7997 case RecurKind::And: 7998 case RecurKind::Xor: 7999 case RecurKind::FAdd: 8000 case RecurKind::FMul: 8001 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 8002 Name); 8003 case RecurKind::FMax: 8004 return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS); 8005 case RecurKind::FMin: 8006 return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS); 8007 case RecurKind::SMax: 8008 if (UseSelect) { 8009 Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name); 8010 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 8011 } 8012 return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS); 8013 case RecurKind::SMin: 8014 if (UseSelect) { 8015 Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name); 8016 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 8017 } 8018 return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS); 8019 case RecurKind::UMax: 8020 if (UseSelect) { 8021 Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name); 8022 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 8023 } 8024 return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS); 8025 case RecurKind::UMin: 8026 if (UseSelect) { 8027 Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name); 8028 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 8029 } 8030 return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS); 8031 default: 8032 llvm_unreachable("Unknown reduction operation."); 8033 } 8034 } 8035 8036 /// Creates reduction operation with the current opcode with the IR flags 8037 /// from \p ReductionOps. 8038 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 8039 Value *RHS, const Twine &Name, 8040 const ReductionOpsListType &ReductionOps) { 8041 bool UseSelect = ReductionOps.size() == 2; 8042 assert((!UseSelect || isa<SelectInst>(ReductionOps[1][0])) && 8043 "Expected cmp + select pairs for reduction"); 8044 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect); 8045 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 8046 if (auto *Sel = dyn_cast<SelectInst>(Op)) { 8047 propagateIRFlags(Sel->getCondition(), ReductionOps[0]); 8048 propagateIRFlags(Op, ReductionOps[1]); 8049 return Op; 8050 } 8051 } 8052 propagateIRFlags(Op, ReductionOps[0]); 8053 return Op; 8054 } 8055 8056 /// Creates reduction operation with the current opcode with the IR flags 8057 /// from \p I. 
8058 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 8059 Value *RHS, const Twine &Name, Instruction *I) { 8060 auto *SelI = dyn_cast<SelectInst>(I); 8061 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, SelI != nullptr); 8062 if (SelI && RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 8063 if (auto *Sel = dyn_cast<SelectInst>(Op)) 8064 propagateIRFlags(Sel->getCondition(), SelI->getCondition()); 8065 } 8066 propagateIRFlags(Op, I); 8067 return Op; 8068 } 8069 8070 static RecurKind getRdxKind(Instruction *I) { 8071 assert(I && "Expected instruction for reduction matching"); 8072 TargetTransformInfo::ReductionFlags RdxFlags; 8073 if (match(I, m_Add(m_Value(), m_Value()))) 8074 return RecurKind::Add; 8075 if (match(I, m_Mul(m_Value(), m_Value()))) 8076 return RecurKind::Mul; 8077 if (match(I, m_And(m_Value(), m_Value())) || 8078 match(I, m_LogicalAnd(m_Value(), m_Value()))) 8079 return RecurKind::And; 8080 if (match(I, m_Or(m_Value(), m_Value())) || 8081 match(I, m_LogicalOr(m_Value(), m_Value()))) 8082 return RecurKind::Or; 8083 if (match(I, m_Xor(m_Value(), m_Value()))) 8084 return RecurKind::Xor; 8085 if (match(I, m_FAdd(m_Value(), m_Value()))) 8086 return RecurKind::FAdd; 8087 if (match(I, m_FMul(m_Value(), m_Value()))) 8088 return RecurKind::FMul; 8089 8090 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value()))) 8091 return RecurKind::FMax; 8092 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value()))) 8093 return RecurKind::FMin; 8094 8095 // This matches either cmp+select or intrinsics. SLP is expected to handle 8096 // either form. 8097 // TODO: If we are canonicalizing to intrinsics, we can remove several 8098 // special-case paths that deal with selects. 8099 if (match(I, m_SMax(m_Value(), m_Value()))) 8100 return RecurKind::SMax; 8101 if (match(I, m_SMin(m_Value(), m_Value()))) 8102 return RecurKind::SMin; 8103 if (match(I, m_UMax(m_Value(), m_Value()))) 8104 return RecurKind::UMax; 8105 if (match(I, m_UMin(m_Value(), m_Value()))) 8106 return RecurKind::UMin; 8107 8108 if (auto *Select = dyn_cast<SelectInst>(I)) { 8109 // Try harder: look for min/max pattern based on instructions producing 8110 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2). 8111 // During the intermediate stages of SLP, it's very common to have 8112 // pattern like this (since optimizeGatherSequence is run only once 8113 // at the end): 8114 // %1 = extractelement <2 x i32> %a, i32 0 8115 // %2 = extractelement <2 x i32> %a, i32 1 8116 // %cond = icmp sgt i32 %1, %2 8117 // %3 = extractelement <2 x i32> %a, i32 0 8118 // %4 = extractelement <2 x i32> %a, i32 1 8119 // %select = select i1 %cond, i32 %3, i32 %4 8120 CmpInst::Predicate Pred; 8121 Instruction *L1; 8122 Instruction *L2; 8123 8124 Value *LHS = Select->getTrueValue(); 8125 Value *RHS = Select->getFalseValue(); 8126 Value *Cond = Select->getCondition(); 8127 8128 // TODO: Support inverse predicates. 
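      // An inverse-predicate form would swap the select arms, e.g. (a sketch
      // of a case that is not matched yet):
      //   %cond = icmp slt i32 %1, %2
      //   %select = select i1 %cond, i32 %2, i32 %1 ; still a signed max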
8129 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) { 8130 if (!isa<ExtractElementInst>(RHS) || 8131 !L2->isIdenticalTo(cast<Instruction>(RHS))) 8132 return RecurKind::None; 8133 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) { 8134 if (!isa<ExtractElementInst>(LHS) || 8135 !L1->isIdenticalTo(cast<Instruction>(LHS))) 8136 return RecurKind::None; 8137 } else { 8138 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS)) 8139 return RecurKind::None; 8140 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) || 8141 !L1->isIdenticalTo(cast<Instruction>(LHS)) || 8142 !L2->isIdenticalTo(cast<Instruction>(RHS))) 8143 return RecurKind::None; 8144 } 8145 8146 TargetTransformInfo::ReductionFlags RdxFlags; 8147 switch (Pred) { 8148 default: 8149 return RecurKind::None; 8150 case CmpInst::ICMP_SGT: 8151 case CmpInst::ICMP_SGE: 8152 return RecurKind::SMax; 8153 case CmpInst::ICMP_SLT: 8154 case CmpInst::ICMP_SLE: 8155 return RecurKind::SMin; 8156 case CmpInst::ICMP_UGT: 8157 case CmpInst::ICMP_UGE: 8158 return RecurKind::UMax; 8159 case CmpInst::ICMP_ULT: 8160 case CmpInst::ICMP_ULE: 8161 return RecurKind::UMin; 8162 } 8163 } 8164 return RecurKind::None; 8165 } 8166 8167 /// Get the index of the first operand. 8168 static unsigned getFirstOperandIndex(Instruction *I) { 8169 return isCmpSelMinMax(I) ? 1 : 0; 8170 } 8171 8172 /// Total number of operands in the reduction operation. 8173 static unsigned getNumberOfOperands(Instruction *I) { 8174 return isCmpSelMinMax(I) ? 3 : 2; 8175 } 8176 8177 /// Checks if the instruction is in basic block \p BB. 8178 /// For a cmp+sel min/max reduction check that both ops are in \p BB. 8179 static bool hasSameParent(Instruction *I, BasicBlock *BB) { 8180 if (isCmpSelMinMax(I)) { 8181 auto *Sel = cast<SelectInst>(I); 8182 auto *Cmp = cast<Instruction>(Sel->getCondition()); 8183 return Sel->getParent() == BB && Cmp->getParent() == BB; 8184 } 8185 return I->getParent() == BB; 8186 } 8187 8188 /// Expected number of uses for reduction operations/reduced values. 8189 static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) { 8190 if (IsCmpSelMinMax) { 8191 // SelectInst must be used twice while the condition op must have single 8192 // use only. 8193 if (auto *Sel = dyn_cast<SelectInst>(I)) 8194 return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse(); 8195 return I->hasNUses(2); 8196 } 8197 8198 // Arithmetic reduction operation must be used once only. 8199 return I->hasOneUse(); 8200 } 8201 8202 /// Initializes the list of reduction operations. 8203 void initReductionOps(Instruction *I) { 8204 if (isCmpSelMinMax(I)) 8205 ReductionOps.assign(2, ReductionOpsType()); 8206 else 8207 ReductionOps.assign(1, ReductionOpsType()); 8208 } 8209 8210 /// Add all reduction operations for the reduction instruction \p I. 
8211 void addReductionOps(Instruction *I) { 8212 if (isCmpSelMinMax(I)) { 8213 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition()); 8214 ReductionOps[1].emplace_back(I); 8215 } else { 8216 ReductionOps[0].emplace_back(I); 8217 } 8218 } 8219 8220 static Value *getLHS(RecurKind Kind, Instruction *I) { 8221 if (Kind == RecurKind::None) 8222 return nullptr; 8223 return I->getOperand(getFirstOperandIndex(I)); 8224 } 8225 static Value *getRHS(RecurKind Kind, Instruction *I) { 8226 if (Kind == RecurKind::None) 8227 return nullptr; 8228 return I->getOperand(getFirstOperandIndex(I) + 1); 8229 } 8230 8231 public: 8232 HorizontalReduction() = default; 8233 8234 /// Try to find a reduction tree. 8235 bool matchAssociativeReduction(PHINode *Phi, Instruction *Inst) { 8236 assert((!Phi || is_contained(Phi->operands(), Inst)) && 8237 "Phi needs to use the binary operator"); 8238 assert((isa<BinaryOperator>(Inst) || isa<SelectInst>(Inst) || 8239 isa<IntrinsicInst>(Inst)) && 8240 "Expected binop, select, or intrinsic for reduction matching"); 8241 RdxKind = getRdxKind(Inst); 8242 8243 // We could have a initial reductions that is not an add. 8244 // r *= v1 + v2 + v3 + v4 8245 // In such a case start looking for a tree rooted in the first '+'. 8246 if (Phi) { 8247 if (getLHS(RdxKind, Inst) == Phi) { 8248 Phi = nullptr; 8249 Inst = dyn_cast<Instruction>(getRHS(RdxKind, Inst)); 8250 if (!Inst) 8251 return false; 8252 RdxKind = getRdxKind(Inst); 8253 } else if (getRHS(RdxKind, Inst) == Phi) { 8254 Phi = nullptr; 8255 Inst = dyn_cast<Instruction>(getLHS(RdxKind, Inst)); 8256 if (!Inst) 8257 return false; 8258 RdxKind = getRdxKind(Inst); 8259 } 8260 } 8261 8262 if (!isVectorizable(RdxKind, Inst)) 8263 return false; 8264 8265 // Analyze "regular" integer/FP types for reductions - no target-specific 8266 // types or pointers. 8267 Type *Ty = Inst->getType(); 8268 if (!isValidElementType(Ty) || Ty->isPointerTy()) 8269 return false; 8270 8271 // Though the ultimate reduction may have multiple uses, its condition must 8272 // have only single use. 8273 if (auto *Sel = dyn_cast<SelectInst>(Inst)) 8274 if (!Sel->getCondition()->hasOneUse()) 8275 return false; 8276 8277 ReductionRoot = Inst; 8278 8279 // The opcode for leaf values that we perform a reduction on. 8280 // For example: load(x) + load(y) + load(z) + fptoui(w) 8281 // The leaf opcode for 'w' does not match, so we don't include it as a 8282 // potential candidate for the reduction. 8283 unsigned LeafOpcode = 0; 8284 8285 // Post-order traverse the reduction tree starting at Inst. We only handle 8286 // true trees containing binary operators or selects. 8287 SmallVector<std::pair<Instruction *, unsigned>, 32> Stack; 8288 Stack.push_back(std::make_pair(Inst, getFirstOperandIndex(Inst))); 8289 initReductionOps(Inst); 8290 while (!Stack.empty()) { 8291 Instruction *TreeN = Stack.back().first; 8292 unsigned EdgeToVisit = Stack.back().second++; 8293 const RecurKind TreeRdxKind = getRdxKind(TreeN); 8294 bool IsReducedValue = TreeRdxKind != RdxKind; 8295 8296 // Postorder visit. 8297 if (IsReducedValue || EdgeToVisit >= getNumberOfOperands(TreeN)) { 8298 if (IsReducedValue) 8299 ReducedVals.push_back(TreeN); 8300 else { 8301 auto ExtraArgsIter = ExtraArgs.find(TreeN); 8302 if (ExtraArgsIter != ExtraArgs.end() && !ExtraArgsIter->second) { 8303 // Check if TreeN is an extra argument of its parent operation. 8304 if (Stack.size() <= 1) { 8305 // TreeN can't be an extra argument as it is a root reduction 8306 // operation. 
8307 return false; 8308 } 8309 // Yes, TreeN is an extra argument, do not add it to a list of 8310 // reduction operations. 8311 // Stack[Stack.size() - 2] always points to the parent operation. 8312 markExtraArg(Stack[Stack.size() - 2], TreeN); 8313 ExtraArgs.erase(TreeN); 8314 } else 8315 addReductionOps(TreeN); 8316 } 8317 // Retract. 8318 Stack.pop_back(); 8319 continue; 8320 } 8321 8322 // Visit operands. 8323 Value *EdgeVal = getRdxOperand(TreeN, EdgeToVisit); 8324 auto *EdgeInst = dyn_cast<Instruction>(EdgeVal); 8325 if (!EdgeInst) { 8326 // Edge value is not a reduction instruction or a leaf instruction. 8327 // (It may be a constant, function argument, or something else.) 8328 markExtraArg(Stack.back(), EdgeVal); 8329 continue; 8330 } 8331 RecurKind EdgeRdxKind = getRdxKind(EdgeInst); 8332 // Continue analysis if the next operand is a reduction operation or 8333 // (possibly) a leaf value. If the leaf value opcode is not set, 8334 // the first met operation != reduction operation is considered as the 8335 // leaf opcode. 8336 // Only handle trees in the current basic block. 8337 // Each tree node needs to have minimal number of users except for the 8338 // ultimate reduction. 8339 const bool IsRdxInst = EdgeRdxKind == RdxKind; 8340 if (EdgeInst != Phi && EdgeInst != Inst && 8341 hasSameParent(EdgeInst, Inst->getParent()) && 8342 hasRequiredNumberOfUses(isCmpSelMinMax(Inst), EdgeInst) && 8343 (!LeafOpcode || LeafOpcode == EdgeInst->getOpcode() || IsRdxInst)) { 8344 if (IsRdxInst) { 8345 // We need to be able to reassociate the reduction operations. 8346 if (!isVectorizable(EdgeRdxKind, EdgeInst)) { 8347 // I is an extra argument for TreeN (its parent operation). 8348 markExtraArg(Stack.back(), EdgeInst); 8349 continue; 8350 } 8351 } else if (!LeafOpcode) { 8352 LeafOpcode = EdgeInst->getOpcode(); 8353 } 8354 Stack.push_back( 8355 std::make_pair(EdgeInst, getFirstOperandIndex(EdgeInst))); 8356 continue; 8357 } 8358 // I is an extra argument for TreeN (its parent operation). 8359 markExtraArg(Stack.back(), EdgeInst); 8360 } 8361 return true; 8362 } 8363 8364 /// Attempt to vectorize the tree found by matchAssociativeReduction. 8365 bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) { 8366 // If there are a sufficient number of reduction values, reduce 8367 // to a nearby power-of-2. We can safely generate oversized 8368 // vectors and rely on the backend to split them to legal sizes. 8369 unsigned NumReducedVals = ReducedVals.size(); 8370 if (NumReducedVals < 4) 8371 return false; 8372 8373 // Intersect the fast-math-flags from all reduction operations. 8374 FastMathFlags RdxFMF; 8375 RdxFMF.set(); 8376 for (ReductionOpsType &RdxOp : ReductionOps) { 8377 for (Value *RdxVal : RdxOp) { 8378 if (auto *FPMO = dyn_cast<FPMathOperator>(RdxVal)) 8379 RdxFMF &= FPMO->getFastMathFlags(); 8380 } 8381 } 8382 8383 IRBuilder<> Builder(cast<Instruction>(ReductionRoot)); 8384 Builder.setFastMathFlags(RdxFMF); 8385 8386 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues; 8387 // The same extra argument may be used several times, so log each attempt 8388 // to use it. 8389 for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) { 8390 assert(Pair.first && "DebugLoc must be set."); 8391 ExternallyUsedValues[Pair.second].push_back(Pair.first); 8392 } 8393 8394 // The compare instruction of a min/max is the insertion point for new 8395 // instructions and may be replaced with a new compare instruction. 
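    // E.g. for a min/max idiom such as (a sketch):
    //   %cmp = icmp sgt i32 %a, %b
    //   %max = select i1 %cmp, i32 %a, i32 %b
    // new instructions are inserted before %cmp rather than before %max; the
    // lambda below retrieves that compare from the select's condition.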
8396 auto getCmpForMinMaxReduction = [](Instruction *RdxRootInst) { 8397 assert(isa<SelectInst>(RdxRootInst) && 8398 "Expected min/max reduction to have select root instruction"); 8399 Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition(); 8400 assert(isa<Instruction>(ScalarCond) && 8401 "Expected min/max reduction to have compare condition"); 8402 return cast<Instruction>(ScalarCond); 8403 }; 8404 8405 // The reduction root is used as the insertion point for new instructions, 8406 // so set it as externally used to prevent it from being deleted. 8407 ExternallyUsedValues[ReductionRoot]; 8408 SmallVector<Value *, 16> IgnoreList; 8409 for (ReductionOpsType &RdxOp : ReductionOps) 8410 IgnoreList.append(RdxOp.begin(), RdxOp.end()); 8411 8412 unsigned ReduxWidth = PowerOf2Floor(NumReducedVals); 8413 if (NumReducedVals > ReduxWidth) { 8414 // In the loop below, we are building a tree based on a window of 8415 // 'ReduxWidth' values. 8416 // If the operands of those values have common traits (compare predicate, 8417 // constant operand, etc), then we want to group those together to 8418 // minimize the cost of the reduction. 8419 8420 // TODO: This should be extended to count common operands for 8421 // compares and binops. 8422 8423 // Step 1: Count the number of times each compare predicate occurs. 8424 SmallDenseMap<unsigned, unsigned> PredCountMap; 8425 for (Value *RdxVal : ReducedVals) { 8426 CmpInst::Predicate Pred; 8427 if (match(RdxVal, m_Cmp(Pred, m_Value(), m_Value()))) 8428 ++PredCountMap[Pred]; 8429 } 8430 // Step 2: Sort the values so the most common predicates come first. 8431 stable_sort(ReducedVals, [&PredCountMap](Value *A, Value *B) { 8432 CmpInst::Predicate PredA, PredB; 8433 if (match(A, m_Cmp(PredA, m_Value(), m_Value())) && 8434 match(B, m_Cmp(PredB, m_Value(), m_Value()))) { 8435 return PredCountMap[PredA] > PredCountMap[PredB]; 8436 } 8437 return false; 8438 }); 8439 } 8440 8441 Value *VectorizedTree = nullptr; 8442 unsigned i = 0; 8443 while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) { 8444 ArrayRef<Value *> VL(&ReducedVals[i], ReduxWidth); 8445 V.buildTree(VL, IgnoreList); 8446 if (V.isTreeTinyAndNotFullyVectorizable()) 8447 break; 8448 if (V.isLoadCombineReductionCandidate(RdxKind)) 8449 break; 8450 V.reorderTopToBottom(); 8451 V.reorderBottomToTop(); 8452 V.buildExternalUses(ExternallyUsedValues); 8453 8454 // For a poison-safe boolean logic reduction, do not replace select 8455 // instructions with logic ops. All reduced values will be frozen (see 8456 // below) to prevent leaking poison. 8457 if (isa<SelectInst>(ReductionRoot) && 8458 isBoolLogicOp(cast<Instruction>(ReductionRoot)) && 8459 NumReducedVals != ReduxWidth) 8460 break; 8461 8462 V.computeMinimumValueSizes(); 8463 8464 // Estimate cost. 
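      // The overall estimate combines the cost of the vectorized tree with
      // the cost of the reduction itself; vectorization proceeds only when
      //   TreeCost + ReductionCost < -SLPCostThreshold
      // i.e. when the projected saving beats the user-configurable threshold.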
8465 InstructionCost TreeCost = 8466 V.getTreeCost(makeArrayRef(&ReducedVals[i], ReduxWidth)); 8467 InstructionCost ReductionCost = 8468 getReductionCost(TTI, ReducedVals[i], ReduxWidth, RdxFMF); 8469 InstructionCost Cost = TreeCost + ReductionCost; 8470 if (!Cost.isValid()) { 8471 LLVM_DEBUG(dbgs() << "Encountered invalid baseline cost.\n"); 8472 return false; 8473 } 8474 if (Cost >= -SLPCostThreshold) { 8475 V.getORE()->emit([&]() { 8476 return OptimizationRemarkMissed(SV_NAME, "HorSLPNotBeneficial", 8477 cast<Instruction>(VL[0])) 8478 << "Vectorizing horizontal reduction is possible" 8479 << "but not beneficial with cost " << ore::NV("Cost", Cost) 8480 << " and threshold " 8481 << ore::NV("Threshold", -SLPCostThreshold); 8482 }); 8483 break; 8484 } 8485 8486 LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" 8487 << Cost << ". (HorRdx)\n"); 8488 V.getORE()->emit([&]() { 8489 return OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction", 8490 cast<Instruction>(VL[0])) 8491 << "Vectorized horizontal reduction with cost " 8492 << ore::NV("Cost", Cost) << " and with tree size " 8493 << ore::NV("TreeSize", V.getTreeSize()); 8494 }); 8495 8496 // Vectorize a tree. 8497 DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc(); 8498 Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues); 8499 8500 // Emit a reduction. If the root is a select (min/max idiom), the insert 8501 // point is the compare condition of that select. 8502 Instruction *RdxRootInst = cast<Instruction>(ReductionRoot); 8503 if (isCmpSelMinMax(RdxRootInst)) 8504 Builder.SetInsertPoint(getCmpForMinMaxReduction(RdxRootInst)); 8505 else 8506 Builder.SetInsertPoint(RdxRootInst); 8507 8508 // To prevent poison from leaking across what used to be sequential, safe, 8509 // scalar boolean logic operations, the reduction operand must be frozen. 8510 if (isa<SelectInst>(RdxRootInst) && isBoolLogicOp(RdxRootInst)) 8511 VectorizedRoot = Builder.CreateFreeze(VectorizedRoot); 8512 8513 Value *ReducedSubTree = 8514 emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI); 8515 8516 if (!VectorizedTree) { 8517 // Initialize the final value in the reduction. 8518 VectorizedTree = ReducedSubTree; 8519 } else { 8520 // Update the final value in the reduction. 8521 Builder.SetCurrentDebugLocation(Loc); 8522 VectorizedTree = createOp(Builder, RdxKind, VectorizedTree, 8523 ReducedSubTree, "op.rdx", ReductionOps); 8524 } 8525 i += ReduxWidth; 8526 ReduxWidth = PowerOf2Floor(NumReducedVals - i); 8527 } 8528 8529 if (VectorizedTree) { 8530 // Finish the reduction. 8531 for (; i < NumReducedVals; ++i) { 8532 auto *I = cast<Instruction>(ReducedVals[i]); 8533 Builder.SetCurrentDebugLocation(I->getDebugLoc()); 8534 VectorizedTree = 8535 createOp(Builder, RdxKind, VectorizedTree, I, "", ReductionOps); 8536 } 8537 for (auto &Pair : ExternallyUsedValues) { 8538 // Add each externally used value to the final reduction. 8539 for (auto *I : Pair.second) { 8540 Builder.SetCurrentDebugLocation(I->getDebugLoc()); 8541 VectorizedTree = createOp(Builder, RdxKind, VectorizedTree, 8542 Pair.first, "op.extra", I); 8543 } 8544 } 8545 8546 ReductionRoot->replaceAllUsesWith(VectorizedTree); 8547 8548 // Mark all scalar reduction ops for deletion, they are replaced by the 8549 // vector reductions. 8550 V.eraseInstructions(IgnoreList); 8551 } 8552 return VectorizedTree != nullptr; 8553 } 8554 8555 unsigned numReductionValues() const { return ReducedVals.size(); } 8556 8557 private: 8558 /// Calculate the cost of a reduction. 
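  /// The result is VectorCost - ScalarCost * (ReduxWidth - 1), so a negative
  /// value means the vector reduction is cheaper than the scalar chain it
  /// replaces. As a sketch with unit costs, an 8-wide add reduction replaces
  /// 7 scalar adds, so it is profitable whenever the vector reduction costs
  /// less than 7.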
8559 InstructionCost getReductionCost(TargetTransformInfo *TTI, 8560 Value *FirstReducedVal, unsigned ReduxWidth, 8561 FastMathFlags FMF) { 8562 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 8563 Type *ScalarTy = FirstReducedVal->getType(); 8564 FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth); 8565 InstructionCost VectorCost, ScalarCost; 8566 switch (RdxKind) { 8567 case RecurKind::Add: 8568 case RecurKind::Mul: 8569 case RecurKind::Or: 8570 case RecurKind::And: 8571 case RecurKind::Xor: 8572 case RecurKind::FAdd: 8573 case RecurKind::FMul: { 8574 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind); 8575 VectorCost = 8576 TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF, CostKind); 8577 ScalarCost = TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy, CostKind); 8578 break; 8579 } 8580 case RecurKind::FMax: 8581 case RecurKind::FMin: { 8582 auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy); 8583 auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy)); 8584 VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy, 8585 /*unsigned=*/false, CostKind); 8586 CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind); 8587 ScalarCost = TTI->getCmpSelInstrCost(Instruction::FCmp, ScalarTy, 8588 SclCondTy, RdxPred, CostKind) + 8589 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 8590 SclCondTy, RdxPred, CostKind); 8591 break; 8592 } 8593 case RecurKind::SMax: 8594 case RecurKind::SMin: 8595 case RecurKind::UMax: 8596 case RecurKind::UMin: { 8597 auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy); 8598 auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy)); 8599 bool IsUnsigned = 8600 RdxKind == RecurKind::UMax || RdxKind == RecurKind::UMin; 8601 VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy, IsUnsigned, 8602 CostKind); 8603 CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind); 8604 ScalarCost = TTI->getCmpSelInstrCost(Instruction::ICmp, ScalarTy, 8605 SclCondTy, RdxPred, CostKind) + 8606 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 8607 SclCondTy, RdxPred, CostKind); 8608 break; 8609 } 8610 default: 8611 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 8612 } 8613 8614 // Scalar cost is repeated for N-1 elements. 8615 ScalarCost *= (ReduxWidth - 1); 8616 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost 8617 << " for reduction that starts with " << *FirstReducedVal 8618 << " (It is a splitting reduction)\n"); 8619 return VectorCost - ScalarCost; 8620 } 8621 8622 /// Emit a horizontal reduction of the vectorized value. 
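  /// This lowers to a target reduction intrinsic; e.g. a 4-wide integer add
  /// reduction would emit something like (a sketch):
  ///   %rdx = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %vec)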
8623 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 8624 unsigned ReduxWidth, const TargetTransformInfo *TTI) { 8625 assert(VectorizedValue && "Need to have a vectorized tree node"); 8626 assert(isPowerOf2_32(ReduxWidth) && 8627 "We only handle power-of-two reductions for now"); 8628 8629 return createSimpleTargetReduction(Builder, TTI, VectorizedValue, RdxKind, 8630 ReductionOps.back()); 8631 } 8632 }; 8633 8634 } // end anonymous namespace 8635 8636 static Optional<unsigned> getAggregateSize(Instruction *InsertInst) { 8637 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) 8638 return cast<FixedVectorType>(IE->getType())->getNumElements(); 8639 8640 unsigned AggregateSize = 1; 8641 auto *IV = cast<InsertValueInst>(InsertInst); 8642 Type *CurrentType = IV->getType(); 8643 do { 8644 if (auto *ST = dyn_cast<StructType>(CurrentType)) { 8645 for (auto *Elt : ST->elements()) 8646 if (Elt != ST->getElementType(0)) // check homogeneity 8647 return None; 8648 AggregateSize *= ST->getNumElements(); 8649 CurrentType = ST->getElementType(0); 8650 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) { 8651 AggregateSize *= AT->getNumElements(); 8652 CurrentType = AT->getElementType(); 8653 } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) { 8654 AggregateSize *= VT->getNumElements(); 8655 return AggregateSize; 8656 } else if (CurrentType->isSingleValueType()) { 8657 return AggregateSize; 8658 } else { 8659 return None; 8660 } 8661 } while (true); 8662 } 8663 8664 static bool findBuildAggregate_rec(Instruction *LastInsertInst, 8665 TargetTransformInfo *TTI, 8666 SmallVectorImpl<Value *> &BuildVectorOpds, 8667 SmallVectorImpl<Value *> &InsertElts, 8668 unsigned OperandOffset) { 8669 do { 8670 Value *InsertedOperand = LastInsertInst->getOperand(1); 8671 Optional<int> OperandIndex = getInsertIndex(LastInsertInst, OperandOffset); 8672 if (!OperandIndex) 8673 return false; 8674 if (isa<InsertElementInst>(InsertedOperand) || 8675 isa<InsertValueInst>(InsertedOperand)) { 8676 if (!findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI, 8677 BuildVectorOpds, InsertElts, *OperandIndex)) 8678 return false; 8679 } else { 8680 BuildVectorOpds[*OperandIndex] = InsertedOperand; 8681 InsertElts[*OperandIndex] = LastInsertInst; 8682 } 8683 LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0)); 8684 } while (LastInsertInst != nullptr && 8685 (isa<InsertValueInst>(LastInsertInst) || 8686 isa<InsertElementInst>(LastInsertInst)) && 8687 LastInsertInst->hasOneUse()); 8688 return true; 8689 } 8690 8691 /// Recognize construction of vectors like 8692 /// %ra = insertelement <4 x float> poison, float %s0, i32 0 8693 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 8694 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 8695 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 8696 /// starting from the last insertelement or insertvalue instruction. 8697 /// 8698 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>}, 8699 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on. 8700 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples. 8701 /// 8702 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type. 8703 /// 8704 /// \return true if it matches. 
8705 static bool findBuildAggregate(Instruction *LastInsertInst, 8706 TargetTransformInfo *TTI, 8707 SmallVectorImpl<Value *> &BuildVectorOpds, 8708 SmallVectorImpl<Value *> &InsertElts) { 8709 8710 assert((isa<InsertElementInst>(LastInsertInst) || 8711 isa<InsertValueInst>(LastInsertInst)) && 8712 "Expected insertelement or insertvalue instruction!"); 8713 8714 assert((BuildVectorOpds.empty() && InsertElts.empty()) && 8715 "Expected empty result vectors!"); 8716 8717 Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst); 8718 if (!AggregateSize) 8719 return false; 8720 BuildVectorOpds.resize(*AggregateSize); 8721 InsertElts.resize(*AggregateSize); 8722 8723 if (findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 8724 0)) { 8725 llvm::erase_value(BuildVectorOpds, nullptr); 8726 llvm::erase_value(InsertElts, nullptr); 8727 if (BuildVectorOpds.size() >= 2) 8728 return true; 8729 } 8730 8731 return false; 8732 } 8733 8734 /// Try and get a reduction value from a phi node. 8735 /// 8736 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 8737 /// if they come from either \p ParentBB or a containing loop latch. 8738 /// 8739 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 8740 /// if not possible. 8741 static Value *getReductionValue(const DominatorTree *DT, PHINode *P, 8742 BasicBlock *ParentBB, LoopInfo *LI) { 8743 // There are situations where the reduction value is not dominated by the 8744 // reduction phi. Vectorizing such cases has been reported to cause 8745 // miscompiles. See PR25787. 8746 auto DominatedReduxValue = [&](Value *R) { 8747 return isa<Instruction>(R) && 8748 DT->dominates(P->getParent(), cast<Instruction>(R)->getParent()); 8749 }; 8750 8751 Value *Rdx = nullptr; 8752 8753 // Return the incoming value if it comes from the same BB as the phi node. 8754 if (P->getIncomingBlock(0) == ParentBB) { 8755 Rdx = P->getIncomingValue(0); 8756 } else if (P->getIncomingBlock(1) == ParentBB) { 8757 Rdx = P->getIncomingValue(1); 8758 } 8759 8760 if (Rdx && DominatedReduxValue(Rdx)) 8761 return Rdx; 8762 8763 // Otherwise, check whether we have a loop latch to look at. 8764 Loop *BBL = LI->getLoopFor(ParentBB); 8765 if (!BBL) 8766 return nullptr; 8767 BasicBlock *BBLatch = BBL->getLoopLatch(); 8768 if (!BBLatch) 8769 return nullptr; 8770 8771 // There is a loop latch, return the incoming value if it comes from 8772 // that. This reduction pattern occasionally turns up. 
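  // A typical source-level shape for this (a sketch) is a loop-carried
  // reduction such as:
  //   for (...) sum += a[i];
  // where the phi's incoming value from the loop latch is the candidate
  // reduction root.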
  if (P->getIncomingBlock(0) == BBLatch) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == BBLatch) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  return nullptr;
}

static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) {
  if (match(I, m_BinOp(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1))))
    return true;
  return false;
}

/// Attempt to reduce a horizontal reduction.
/// If it is legal to match a horizontal reduction feeding the phi node \a P
/// with reduction operators \a Root (or one of its operands) in a basic block
/// \a BB, then check if it can be done. If a horizontal reduction is not
/// found and the root instruction is a binary operation, vectorization of its
/// operands is attempted.
/// \returns true if a horizontal reduction was matched and reduced, or the
/// operands of one of the binary instructions were vectorized.
/// \returns false if a horizontal reduction was not matched (or not possible)
/// and no vectorization of any binary operation feeding \a Root was
/// performed.
static bool tryToVectorizeHorReductionOrInstOperands(
    PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
    TargetTransformInfo *TTI,
    const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
  if (!ShouldVectorizeHor)
    return false;

  if (!Root)
    return false;

  if (Root->getParent() != BB || isa<PHINode>(Root))
    return false;
  // Start the analysis from the Root instruction. If a horizontal reduction
  // is found, try to vectorize it. If it is not a horizontal reduction, or
  // vectorization is not possible or not effective, and the currently
  // analyzed instruction is a binary operation, try to vectorize the
  // operands, using pre-order DFS traversal order. If the operands were not
  // vectorized, repeat the same procedure considering each operand as a
  // possible root of a horizontal reduction.
  // Interrupt the process if the Root instruction itself was vectorized or
  // all sub-trees not higher than RecursionMaxDepth were analyzed/vectorized.
  // Skip the analysis of CmpInsts; the compiler implements a post-analysis of
  // CmpInsts so we can skip extra attempts in
  // tryToVectorizeHorReductionOrInstOperands and save compile time.
  SmallVector<std::pair<Instruction *, unsigned>, 8> Stack(1, {Root, 0});
  SmallPtrSet<Value *, 8> VisitedInstrs;
  bool Res = false;
  while (!Stack.empty()) {
    Instruction *Inst;
    unsigned Level;
    std::tie(Inst, Level) = Stack.pop_back_val();
    // Do not try to analyze an instruction that has already been vectorized.
    // This may happen when we vectorize instruction operands on a previous
    // iteration while the stack was populated before that happened.
8848 if (R.isDeleted(Inst)) 8849 continue; 8850 Value *B0, *B1; 8851 bool IsBinop = matchRdxBop(Inst, B0, B1); 8852 bool IsSelect = match(Inst, m_Select(m_Value(), m_Value(), m_Value())); 8853 if (IsBinop || IsSelect) { 8854 HorizontalReduction HorRdx; 8855 if (HorRdx.matchAssociativeReduction(P, Inst)) { 8856 if (HorRdx.tryToReduce(R, TTI)) { 8857 Res = true; 8858 // Set P to nullptr to avoid re-analysis of phi node in 8859 // matchAssociativeReduction function unless this is the root node. 8860 P = nullptr; 8861 continue; 8862 } 8863 } 8864 if (P && IsBinop) { 8865 Inst = dyn_cast<Instruction>(B0); 8866 if (Inst == P) 8867 Inst = dyn_cast<Instruction>(B1); 8868 if (!Inst) { 8869 // Set P to nullptr to avoid re-analysis of phi node in 8870 // matchAssociativeReduction function unless this is the root node. 8871 P = nullptr; 8872 continue; 8873 } 8874 } 8875 } 8876 // Set P to nullptr to avoid re-analysis of phi node in 8877 // matchAssociativeReduction function unless this is the root node. 8878 P = nullptr; 8879 // Do not try to vectorize CmpInst operands, this is done separately. 8880 if (!isa<CmpInst>(Inst) && Vectorize(Inst, R)) { 8881 Res = true; 8882 continue; 8883 } 8884 8885 // Try to vectorize operands. 8886 // Continue analysis for the instruction from the same basic block only to 8887 // save compile time. 8888 if (++Level < RecursionMaxDepth) 8889 for (auto *Op : Inst->operand_values()) 8890 if (VisitedInstrs.insert(Op).second) 8891 if (auto *I = dyn_cast<Instruction>(Op)) 8892 // Do not try to vectorize CmpInst operands, this is done 8893 // separately. 8894 if (!isa<PHINode>(I) && !isa<CmpInst>(I) && !R.isDeleted(I) && 8895 I->getParent() == BB) 8896 Stack.emplace_back(I, Level); 8897 } 8898 return Res; 8899 } 8900 8901 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V, 8902 BasicBlock *BB, BoUpSLP &R, 8903 TargetTransformInfo *TTI) { 8904 auto *I = dyn_cast_or_null<Instruction>(V); 8905 if (!I) 8906 return false; 8907 8908 if (!isa<BinaryOperator>(I)) 8909 P = nullptr; 8910 // Try to match and vectorize a horizontal reduction. 8911 auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool { 8912 return tryToVectorize(I, R); 8913 }; 8914 return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI, 8915 ExtraVectorization); 8916 } 8917 8918 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI, 8919 BasicBlock *BB, BoUpSLP &R) { 8920 const DataLayout &DL = BB->getModule()->getDataLayout(); 8921 if (!R.canMapToVector(IVI->getType(), DL)) 8922 return false; 8923 8924 SmallVector<Value *, 16> BuildVectorOpds; 8925 SmallVector<Value *, 16> BuildVectorInsts; 8926 if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts)) 8927 return false; 8928 8929 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n"); 8930 // Aggregate value is unlikely to be processed in vector register, we need to 8931 // extract scalars into scalar registers, so NeedExtraction is set true. 
  return tryToVectorizeList(BuildVectorOpds, R);
}

bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
                                                   BasicBlock *BB, BoUpSLP &R) {
  SmallVector<Value *, 16> BuildVectorInsts;
  SmallVector<Value *, 16> BuildVectorOpds;
  SmallVector<int> Mask;
  if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) ||
      (llvm::all_of(BuildVectorOpds,
                    [](Value *V) { return isa<ExtractElementInst>(V); }) &&
       isFixedVectorShuffle(BuildVectorOpds, Mask)))
    return false;

  LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
  return tryToVectorizeList(BuildVectorInsts, R);
}

bool SLPVectorizerPass::vectorizeSimpleInstructions(
    SmallVectorImpl<Instruction *> &Instructions, BasicBlock *BB, BoUpSLP &R,
    bool AtTerminator) {
  bool OpsChanged = false;
  SmallVector<Instruction *, 4> PostponedCmps;
  for (auto *I : reverse(Instructions)) {
    if (R.isDeleted(I))
      continue;
    if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I))
      OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
    else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I))
      OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
    else if (isa<CmpInst>(I))
      PostponedCmps.push_back(I);
  }
  if (AtTerminator) {
    // Try to find reductions first.
    for (Instruction *I : PostponedCmps) {
      if (R.isDeleted(I))
        continue;
      for (Value *Op : I->operands())
        OpsChanged |= vectorizeRootInstruction(nullptr, Op, BB, R, TTI);
    }
    // Try to vectorize operands as vector bundles.
    for (Instruction *I : PostponedCmps) {
      if (R.isDeleted(I))
        continue;
      OpsChanged |= tryToVectorize(I, R);
    }
    Instructions.clear();
  } else {
    // Insert in reverse order since the PostponedCmps vector was filled in
    // reverse order.
    Instructions.assign(PostponedCmps.rbegin(), PostponedCmps.rend());
  }
  return OpsChanged;
}

bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallPtrSet<Value *, 16> VisitedInstrs;
  // Maps phi nodes to the non-phi nodes found in the use tree for each phi
  // node. This helps to better identify the chains that can be vectorized.
  DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes;

  bool HaveVectorizedPhiNodes = true;
  while (HaveVectorizedPhiNodes) {
    HaveVectorizedPhiNodes = false;

    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (Instruction &I : *BB) {
      PHINode *P = dyn_cast<PHINode>(&I);
      if (!P)
        break;

      // No need to analyze deleted, vectorized, or non-vectorizable
      // instructions.
      if (!VisitedInstrs.count(P) && !R.isDeleted(P) &&
          isValidElementType(P->getType()))
        Incoming.push_back(P);
    }

    // Find the corresponding non-phi nodes for better matching when trying
    // to build the tree.
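    // The walk below follows phi chains transitively, so for a sketch like:
    //   %p = phi [ %q, ... ], [ %x, ... ]
    //   %q = phi [ %y, ... ], [ %z, ... ]
    // the operands recorded for %p are the non-phi values %x, %y, and %z.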
9017 for (Value *V : Incoming) { 9018 SmallVectorImpl<Value *> &Opcodes = 9019 PHIToOpcodes.try_emplace(V).first->getSecond(); 9020 if (!Opcodes.empty()) 9021 continue; 9022 SmallVector<Value *, 4> Nodes(1, V); 9023 SmallPtrSet<Value *, 4> Visited; 9024 while (!Nodes.empty()) { 9025 auto *PHI = cast<PHINode>(Nodes.pop_back_val()); 9026 if (!Visited.insert(PHI).second) 9027 continue; 9028 for (Value *V : PHI->incoming_values()) { 9029 if (auto *PHI1 = dyn_cast<PHINode>((V))) { 9030 Nodes.push_back(PHI1); 9031 continue; 9032 } 9033 Opcodes.emplace_back(V); 9034 } 9035 } 9036 } 9037 9038 // Sort by type, parent, operands. 9039 stable_sort(Incoming, [this, &PHIToOpcodes](Value *V1, Value *V2) { 9040 assert(isValidElementType(V1->getType()) && 9041 isValidElementType(V2->getType()) && 9042 "Expected vectorizable types only."); 9043 // It is fine to compare type IDs here, since we expect only vectorizable 9044 // types, like ints, floats and pointers, we don't care about other type. 9045 if (V1->getType()->getTypeID() < V2->getType()->getTypeID()) 9046 return true; 9047 if (V1->getType()->getTypeID() > V2->getType()->getTypeID()) 9048 return false; 9049 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; 9050 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; 9051 if (Opcodes1.size() < Opcodes2.size()) 9052 return true; 9053 if (Opcodes1.size() > Opcodes2.size()) 9054 return false; 9055 for (int I = 0, E = Opcodes1.size(); I < E; ++I) { 9056 // Undefs are compatible with any other value. 9057 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) 9058 continue; 9059 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I])) 9060 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) { 9061 DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent()); 9062 DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent()); 9063 if (!NodeI1) 9064 return NodeI2 != nullptr; 9065 if (!NodeI2) 9066 return false; 9067 assert((NodeI1 == NodeI2) == 9068 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) && 9069 "Different nodes should have different DFS numbers"); 9070 if (NodeI1 != NodeI2) 9071 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn(); 9072 InstructionsState S = getSameOpcode({I1, I2}); 9073 if (S.getOpcode()) 9074 continue; 9075 return I1->getOpcode() < I2->getOpcode(); 9076 } 9077 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) 9078 continue; 9079 if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID()) 9080 return true; 9081 if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID()) 9082 return false; 9083 } 9084 return false; 9085 }); 9086 9087 auto &&AreCompatiblePHIs = [&PHIToOpcodes](Value *V1, Value *V2) { 9088 if (V1 == V2) 9089 return true; 9090 if (V1->getType() != V2->getType()) 9091 return false; 9092 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; 9093 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; 9094 if (Opcodes1.size() != Opcodes2.size()) 9095 return false; 9096 for (int I = 0, E = Opcodes1.size(); I < E; ++I) { 9097 // Undefs are compatible with any other value. 
      if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I]))
        continue;
      if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
        if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
          if (I1->getParent() != I2->getParent())
            return false;
          InstructionsState S = getSameOpcode({I1, I2});
          if (S.getOpcode())
            continue;
          return false;
        }
      if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I]))
        continue;
      if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID())
        return false;
    }
    return true;
  };

    // Try to vectorize elements based on their type.
    SmallVector<Value *, 4> Candidates;
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
                                           E = Incoming.end();
         IncIt != E;) {

      // Look for the next elements with the same type, parent and operand
      // kinds.
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E && AreCompatiblePHIs(*SameTypeIt, *IncIt)) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs ("
                        << NumElts << ")\n");
      // The order in which the phi nodes appear in the program does not
      // matter, so allow tryToVectorizeList to reorder them if it is
      // beneficial. This is done when there are exactly two elements, since
      // tryToVectorizeList asserts that there are only two values when
      // AllowReorder is true.
      // The vectorization is a three-stage attempt:
      // 1. Try to vectorize PHIs with the same/alternate opcodes, limited to
      //    the maximal register size at first.
      // 2. Try to vectorize the remaining PHIs with the same type, if
      //    possible. This may produce better results than vectorizing only
      //    PHIs with the same/alternate opcodes.
      // 3. Finally, try to vectorize all PHIs with the same/alternate ops
      //    only; this may yield some extra final vectorization.
      if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R,
                                            /*LimitForRegisterSize=*/true)) {
        // Success; start over because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
      } else if (NumElts * R.getVectorElementSize(*IncIt) <
                     R.getMaxVecRegSize() &&
                 (Candidates.empty() ||
                  Candidates.front()->getType() == (*IncIt)->getType())) {
        Candidates.append(IncIt, std::next(IncIt, NumElts));
      }
      // Final attempt to vectorize phis with the same types.
      if (Candidates.size() > 1 &&
          (SameTypeIt == E ||
           (*SameTypeIt)->getType() != (*IncIt)->getType())) {
        if (tryToVectorizeList(Candidates, R)) {
          // Success; start over because instructions might have been changed.
          HaveVectorizedPhiNodes = true;
          Changed = true;
        } else {
          // Try to vectorize using small vectors.
  VisitedInstrs.clear();

  SmallVector<Instruction *, 8> PostProcessInstructions;
  SmallDenseSet<Instruction *, 4> KeyNodes;
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // Skip instructions with scalable types. The number of elements is
    // unknown at compile time for scalable types.
    if (isa<ScalableVectorType>(it->getType()))
      continue;

    // Skip instructions marked for deletion.
    if (R.isDeleted(&*it))
      continue;
    // We may go through BB multiple times, so skip the ones we have already
    // checked.
    if (!VisitedInstrs.insert(&*it).second) {
      if (it->use_empty() && KeyNodes.contains(&*it) &&
          vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
                                      it->isTerminator())) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() == 2) {
        // Try to match and vectorize a horizontal reduction.
        if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB,
                                     R, TTI)) {
          Changed = true;
          it = BB->begin();
          e = BB->end();
          continue;
        }
      }
      // Try to vectorize the incoming values of the PHI, to catch reductions
      // that feed into PHIs.
      for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) {
        // Skip if the incoming block is the current BB for now. Also, bypass
        // unreachable IR for efficiency and to avoid crashing.
        // TODO: Collect the skipped incoming values and try to vectorize them
        // after processing BB.
        if (BB == P->getIncomingBlock(I) ||
            !DT->isReachableFromEntry(P->getIncomingBlock(I)))
          continue;

        Changed |= vectorizeRootInstruction(nullptr, P->getIncomingValue(I),
                                            P->getIncomingBlock(I), R, TTI);
      }
      continue;
    }

    // Ran into an instruction without users, such as a terminator, a store,
    // or a function call with an ignored return value. Handle such unused
    // instructions based on the instruction type: any void-typed instruction,
    // plus CallInst/InvokeInst whose results are unused.
    if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
                            isa<InvokeInst>(it))) {
      KeyNodes.insert(&*it);
      bool OpsChanged = false;
      if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
        for (auto *V : it->operand_values()) {
          // Try to match and vectorize a horizontal reduction.
          OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
        }
      }
      // Start vectorization of the post-process list of instructions from the
      // top-tree instructions, to try to vectorize as many instructions as
      // possible.
      OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
                                                it->isTerminator());
      if (OpsChanged) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
    }

    if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
        isa<InsertValueInst>(it))
      PostProcessInstructions.push_back(&*it);
  }

  return Changed;
}
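
// Sketch of the pattern vectorizeGEPIndices targets (hypothetical IR): a
// group of getelementptrs over the same base whose single, non-constant
// indices may be combined into one vector computation, e.g.
//
//   %i0 = sub i64 %a0, %b0
//   %i1 = sub i64 %a1, %b1
//   %g0 = getelementptr inbounds i32, i32* %g, i64 %i0
//   %g1 = getelementptr inbounds i32, i32* %g, i64 %i1
//
// Here the two subtractions (and the loads feeding them) are candidates for
// a two-lane vector operation if deemed profitable.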
bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
  auto Changed = false;
  for (auto &Entry : GEPs) {
    // If the getelementptr list has fewer than two elements, there's nothing
    // to do.
    if (Entry.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
                      << Entry.second.size() << ".\n");

    // Process the GEP list in chunks suitable for the target's supported
    // vector size. If a vector register can't hold 1 element, we are done. We
    // are trying to vectorize the index computations, so the maximum number
    // of elements is based on the size of the index expression, rather than
    // the size of the GEP itself (the target's pointer size).
    unsigned MaxVecRegSize = R.getMaxVecRegSize();
    unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin());
    if (MaxVecRegSize < EltSize)
      continue;

    unsigned MaxElts = MaxVecRegSize / EltSize;
    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
      auto Len = std::min<unsigned>(BE - BI, MaxElts);
      ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);

      // Initialize a set of candidate getelementptrs. Note that we use a
      // SetVector here to preserve program order. If the index computations
      // are vectorizable and begin with loads, we want to minimize the chance
      // of having to reorder them later.
      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

      // Some of the candidates may have already been vectorized after we
      // initially collected them. If so, they are marked as deleted, so
      // remove them from the set of candidates.
      Candidates.remove_if(
          [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); });

      // Remove from the set of candidates all pairs of getelementptrs with
      // constant differences. Such getelementptrs are likely not good
      // candidates for vectorization in a bottom-up phase since one can be
      // computed from the other. We also ensure all candidate getelementptr
      // indices are unique.
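      // For example (hypothetical IR), getelementptrs with indices %i and
      // %j = add i64 %i, 1 over the same base have SCEVs differing by a
      // constant, so both are removed here: computing one pointer from the
      // other is cheaper than vectorizing the index math.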
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1;
           ++I) {
        auto *GEPI = GEPList[I];
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = GEPList[J];
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPI);
            Candidates.remove(GEPJ);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPJ);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;

      // Add the single, non-constant index of each candidate to the bundle.
      // We ensured the indices met these constraints when we originally
      // collected the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try to vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      //   ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Sort by type, base pointer and value operand. Value operands must be
  // compatible (have the same opcode, same parent), otherwise it is
  // definitely not profitable to try to vectorize them.
  auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) {
    if (V->getPointerOperandType()->getTypeID() <
        V2->getPointerOperandType()->getTypeID())
      return true;
    if (V->getPointerOperandType()->getTypeID() >
        V2->getPointerOperandType()->getTypeID())
      return false;
    // UndefValues are compatible with all other values.
    if (isa<UndefValue>(V->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return false;
    if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        DomTreeNodeBase<llvm::BasicBlock> *NodeI1 =
            DT->getNode(I1->getParent());
        DomTreeNodeBase<llvm::BasicBlock> *NodeI2 =
            DT->getNode(I2->getParent());
        assert(NodeI1 && "Should only process reachable instructions");
        assert(NodeI2 && "Should only process reachable instructions");
        assert((NodeI1 == NodeI2) ==
                   (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
               "Different nodes should have different DFS numbers");
        if (NodeI1 != NodeI2)
          return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
        InstructionsState S = getSameOpcode({I1, I2});
        if (S.getOpcode())
          return false;
        return I1->getOpcode() < I2->getOpcode();
      }
    if (isa<Constant>(V->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return false;
    return V->getValueOperand()->getValueID() <
           V2->getValueOperand()->getValueID();
  };

  auto &&AreCompatibleStores = [](StoreInst *V1, StoreInst *V2) {
    if (V1 == V2)
      return true;
    if (V1->getPointerOperandType() != V2->getPointerOperandType())
      return false;
    // Undefs are compatible with any other value.
    if (isa<UndefValue>(V1->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return true;
    if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        if (I1->getParent() != I2->getParent())
          return false;
        InstructionsState S = getSameOpcode({I1, I2});
        return S.getOpcode() > 0;
      }
    if (isa<Constant>(V1->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return true;
    return V1->getValueOperand()->getValueID() ==
           V2->getValueOperand()->getValueID();
  };
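
  // For illustration (hypothetical IR): the stores
  //   store i32 %add0, i32* %p0
  //   store i32 %add1, i32* %p1
  // are compatible when %add0 and %add1 are same-opcode instructions in the
  // same block; stores whose pointer operand types differ are sorted into
  // separate groups.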

  // Attempt to sort and vectorize each of the store-groups.
  for (auto &Pair : Stores) {
    if (Pair.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                      << Pair.second.size() << ".\n");

    stable_sort(Pair.second, StoreSorter);

    // Try to vectorize elements based on their compatibility.
    for (ArrayRef<StoreInst *>::iterator IncIt = Pair.second.begin(),
                                         E = Pair.second.end();
         IncIt != E;) {

      // Look for the next elements with the same type.
      ArrayRef<StoreInst *>::iterator SameTypeIt = IncIt;
      Type *EltTy = (*IncIt)->getPointerOperand()->getType();

      while (SameTypeIt != E && AreCompatibleStores(*SameTypeIt, *IncIt))
        ++SameTypeIt;

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at stores ("
                        << NumElts << ")\n");
      if (NumElts > 1 && !EltTy->getPointerElementType()->isVectorTy() &&
          vectorizeStores(makeArrayRef(IncIt, NumElts), R)) {
        // Success: start over because instructions might have been changed.
        Changed = true;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }
  return Changed;
}

char SLPVectorizer::ID = 0;

static const char lv_name[] = "SLP Vectorizer";

INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); }
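
// Note: a minimal way to exercise this pass in isolation (assuming a build of
// 'opt'; the new-PM pass name matches SV_NAME above) is:
//
//   opt -passes=slp-vectorizer -S input.ll
//
// For test reduction, lowering the profitability threshold via the
// -slp-threshold option declared at the top of this file (e.g. a negative
// value) makes the pass vectorize more aggressively.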