1 //===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass implements the Bottom Up SLP vectorizer. It detects consecutive 10 // stores that can be put together into vector-stores. Next, it attempts to 11 // construct vectorizable tree using the use-def chains. If a profitable tree 12 // was found, the SLP vectorizer performs vectorization on the tree. 13 // 14 // The pass is inspired by the work described in the paper: 15 // "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks. 16 // 17 //===----------------------------------------------------------------------===// 18 19 #include "llvm/Transforms/Vectorize/SLPVectorizer.h" 20 #include "llvm/ADT/DenseMap.h" 21 #include "llvm/ADT/DenseSet.h" 22 #include "llvm/ADT/Optional.h" 23 #include "llvm/ADT/PostOrderIterator.h" 24 #include "llvm/ADT/PriorityQueue.h" 25 #include "llvm/ADT/STLExtras.h" 26 #include "llvm/ADT/SetOperations.h" 27 #include "llvm/ADT/SetVector.h" 28 #include "llvm/ADT/SmallBitVector.h" 29 #include "llvm/ADT/SmallPtrSet.h" 30 #include "llvm/ADT/SmallSet.h" 31 #include "llvm/ADT/SmallString.h" 32 #include "llvm/ADT/Statistic.h" 33 #include "llvm/ADT/iterator.h" 34 #include "llvm/ADT/iterator_range.h" 35 #include "llvm/Analysis/AliasAnalysis.h" 36 #include "llvm/Analysis/AssumptionCache.h" 37 #include "llvm/Analysis/CodeMetrics.h" 38 #include "llvm/Analysis/DemandedBits.h" 39 #include "llvm/Analysis/GlobalsModRef.h" 40 #include "llvm/Analysis/IVDescriptors.h" 41 #include "llvm/Analysis/LoopAccessAnalysis.h" 42 #include "llvm/Analysis/LoopInfo.h" 43 #include "llvm/Analysis/MemoryLocation.h" 44 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 45 #include "llvm/Analysis/ScalarEvolution.h" 46 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 47 #include "llvm/Analysis/TargetLibraryInfo.h" 48 #include "llvm/Analysis/TargetTransformInfo.h" 49 #include "llvm/Analysis/ValueTracking.h" 50 #include "llvm/Analysis/VectorUtils.h" 51 #include "llvm/IR/Attributes.h" 52 #include "llvm/IR/BasicBlock.h" 53 #include "llvm/IR/Constant.h" 54 #include "llvm/IR/Constants.h" 55 #include "llvm/IR/DataLayout.h" 56 #include "llvm/IR/DebugLoc.h" 57 #include "llvm/IR/DerivedTypes.h" 58 #include "llvm/IR/Dominators.h" 59 #include "llvm/IR/Function.h" 60 #include "llvm/IR/IRBuilder.h" 61 #include "llvm/IR/InstrTypes.h" 62 #include "llvm/IR/Instruction.h" 63 #include "llvm/IR/Instructions.h" 64 #include "llvm/IR/IntrinsicInst.h" 65 #include "llvm/IR/Intrinsics.h" 66 #include "llvm/IR/Module.h" 67 #include "llvm/IR/NoFolder.h" 68 #include "llvm/IR/Operator.h" 69 #include "llvm/IR/PatternMatch.h" 70 #include "llvm/IR/Type.h" 71 #include "llvm/IR/Use.h" 72 #include "llvm/IR/User.h" 73 #include "llvm/IR/Value.h" 74 #include "llvm/IR/ValueHandle.h" 75 #include "llvm/IR/Verifier.h" 76 #include "llvm/InitializePasses.h" 77 #include "llvm/Pass.h" 78 #include "llvm/Support/Casting.h" 79 #include "llvm/Support/CommandLine.h" 80 #include "llvm/Support/Compiler.h" 81 #include "llvm/Support/DOTGraphTraits.h" 82 #include "llvm/Support/Debug.h" 83 #include "llvm/Support/ErrorHandling.h" 84 #include "llvm/Support/GraphWriter.h" 85 #include "llvm/Support/InstructionCost.h" 86 #include "llvm/Support/KnownBits.h" 87 
#include "llvm/Support/MathExtras.h" 88 #include "llvm/Support/raw_ostream.h" 89 #include "llvm/Transforms/Utils/InjectTLIMappings.h" 90 #include "llvm/Transforms/Utils/LoopUtils.h" 91 #include "llvm/Transforms/Vectorize.h" 92 #include <algorithm> 93 #include <cassert> 94 #include <cstdint> 95 #include <iterator> 96 #include <memory> 97 #include <set> 98 #include <string> 99 #include <tuple> 100 #include <utility> 101 #include <vector> 102 103 using namespace llvm; 104 using namespace llvm::PatternMatch; 105 using namespace slpvectorizer; 106 107 #define SV_NAME "slp-vectorizer" 108 #define DEBUG_TYPE "SLP" 109 110 STATISTIC(NumVectorInstructions, "Number of vector instructions generated"); 111 112 cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden, 113 cl::desc("Run the SLP vectorization passes")); 114 115 static cl::opt<int> 116 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden, 117 cl::desc("Only vectorize if you gain more than this " 118 "number ")); 119 120 static cl::opt<bool> 121 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden, 122 cl::desc("Attempt to vectorize horizontal reductions")); 123 124 static cl::opt<bool> ShouldStartVectorizeHorAtStore( 125 "slp-vectorize-hor-store", cl::init(false), cl::Hidden, 126 cl::desc( 127 "Attempt to vectorize horizontal reductions feeding into a store")); 128 129 static cl::opt<int> 130 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden, 131 cl::desc("Attempt to vectorize for this register size in bits")); 132 133 static cl::opt<unsigned> 134 MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden, 135 cl::desc("Maximum SLP vectorization factor (0=unlimited)")); 136 137 static cl::opt<int> 138 MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden, 139 cl::desc("Maximum depth of the lookup for consecutive stores.")); 140 141 /// Limits the size of scheduling regions in a block. 142 /// It avoid long compile times for _very_ large blocks where vector 143 /// instructions are spread over a wide range. 144 /// This limit is way higher than needed by real-world functions. 145 static cl::opt<int> 146 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden, 147 cl::desc("Limit the size of the SLP scheduling region per block")); 148 149 static cl::opt<int> MinVectorRegSizeOption( 150 "slp-min-reg-size", cl::init(128), cl::Hidden, 151 cl::desc("Attempt to vectorize for this register size in bits")); 152 153 static cl::opt<unsigned> RecursionMaxDepth( 154 "slp-recursion-max-depth", cl::init(12), cl::Hidden, 155 cl::desc("Limit the recursion depth when building a vectorizable tree")); 156 157 static cl::opt<unsigned> MinTreeSize( 158 "slp-min-tree-size", cl::init(3), cl::Hidden, 159 cl::desc("Only vectorize small trees if they are fully vectorizable")); 160 161 // The maximum depth that the look-ahead score heuristic will explore. 162 // The higher this value, the higher the compilation time overhead. 163 static cl::opt<int> LookAheadMaxDepth( 164 "slp-max-look-ahead-depth", cl::init(2), cl::Hidden, 165 cl::desc("The maximum look-ahead depth for operand reordering scores")); 166 167 // The Look-ahead heuristic goes through the users of the bundle to calculate 168 // the users cost in getExternalUsesCost(). To avoid compilation time increase 169 // we limit the number of users visited to this value. 
170 static cl::opt<unsigned> LookAheadUsersBudget( 171 "slp-look-ahead-users-budget", cl::init(2), cl::Hidden, 172 cl::desc("The maximum number of users to visit while visiting the " 173 "predecessors. This prevents compilation time increase.")); 174 175 static cl::opt<bool> 176 ViewSLPTree("view-slp-tree", cl::Hidden, 177 cl::desc("Display the SLP trees with Graphviz")); 178 179 // Limit the number of alias checks. The limit is chosen so that 180 // it has no negative effect on the llvm benchmarks. 181 static const unsigned AliasedCheckLimit = 10; 182 183 // Another limit for the alias checks: The maximum distance between load/store 184 // instructions where alias checks are done. 185 // This limit is useful for very large basic blocks. 186 static const unsigned MaxMemDepDistance = 160; 187 188 /// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling 189 /// regions to be handled. 190 static const int MinScheduleRegionSize = 16; 191 192 /// Predicate for the element types that the SLP vectorizer supports. 193 /// 194 /// The most important thing to filter here are types which are invalid in LLVM 195 /// vectors. We also filter target specific types which have absolutely no 196 /// meaningful vectorization path such as x86_fp80 and ppc_f128. This just 197 /// avoids spending time checking the cost model and realizing that they will 198 /// be inevitably scalarized. 199 static bool isValidElementType(Type *Ty) { 200 return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() && 201 !Ty->isPPC_FP128Ty(); 202 } 203 204 /// \returns True if the value is a constant (but not globals/constant 205 /// expressions). 206 static bool isConstant(Value *V) { 207 return isa<Constant>(V) && !isa<ConstantExpr>(V) && !isa<GlobalValue>(V); 208 } 209 210 /// Checks if \p V is one of vector-like instructions, i.e. undef, 211 /// insertelement/extractelement with constant indices for fixed vector type or 212 /// extractvalue instruction. 213 static bool isVectorLikeInstWithConstOps(Value *V) { 214 if (!isa<InsertElementInst, ExtractElementInst>(V) && 215 !isa<ExtractValueInst, UndefValue>(V)) 216 return false; 217 auto *I = dyn_cast<Instruction>(V); 218 if (!I || isa<ExtractValueInst>(I)) 219 return true; 220 if (!isa<FixedVectorType>(I->getOperand(0)->getType())) 221 return false; 222 if (isa<ExtractElementInst>(I)) 223 return isConstant(I->getOperand(1)); 224 assert(isa<InsertElementInst>(V) && "Expected only insertelement."); 225 return isConstant(I->getOperand(2)); 226 } 227 228 /// \returns true if all of the instructions in \p VL are in the same block or 229 /// false otherwise. 230 static bool allSameBlock(ArrayRef<Value *> VL) { 231 Instruction *I0 = dyn_cast<Instruction>(VL[0]); 232 if (!I0) 233 return false; 234 if (all_of(VL, isVectorLikeInstWithConstOps)) 235 return true; 236 237 BasicBlock *BB = I0->getParent(); 238 for (int I = 1, E = VL.size(); I < E; I++) { 239 auto *II = dyn_cast<Instruction>(VL[I]); 240 if (!II) 241 return false; 242 243 if (BB != II->getParent()) 244 return false; 245 } 246 return true; 247 } 248 249 /// \returns True if all of the values in \p VL are constants (but not 250 /// globals/constant expressions). 251 static bool allConstant(ArrayRef<Value *> VL) { 252 // Constant expressions and globals can't be vectorized like normal integer/FP 253 // constants. 254 return all_of(VL, isConstant); 255 } 256 257 /// \returns True if all of the values in \p VL are identical or some of them 258 /// are UndefValue. 
259 static bool isSplat(ArrayRef<Value *> VL) { 260 Value *FirstNonUndef = nullptr; 261 for (Value *V : VL) { 262 if (isa<UndefValue>(V)) 263 continue; 264 if (!FirstNonUndef) { 265 FirstNonUndef = V; 266 continue; 267 } 268 if (V != FirstNonUndef) 269 return false; 270 } 271 return FirstNonUndef != nullptr; 272 } 273 274 /// \returns True if \p I is commutative, handles CmpInst and BinaryOperator. 275 static bool isCommutative(Instruction *I) { 276 if (auto *Cmp = dyn_cast<CmpInst>(I)) 277 return Cmp->isCommutative(); 278 if (auto *BO = dyn_cast<BinaryOperator>(I)) 279 return BO->isCommutative(); 280 // TODO: This should check for generic Instruction::isCommutative(), but 281 // we need to confirm that the caller code correctly handles Intrinsics 282 // for example (does not have 2 operands). 283 return false; 284 } 285 286 /// Checks if the given value is actually an undefined constant vector. 287 static bool isUndefVector(const Value *V) { 288 if (isa<UndefValue>(V)) 289 return true; 290 auto *C = dyn_cast<Constant>(V); 291 if (!C) 292 return false; 293 if (!C->containsUndefOrPoisonElement()) 294 return false; 295 auto *VecTy = dyn_cast<FixedVectorType>(C->getType()); 296 if (!VecTy) 297 return false; 298 for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I) { 299 if (Constant *Elem = C->getAggregateElement(I)) 300 if (!isa<UndefValue>(Elem)) 301 return false; 302 } 303 return true; 304 } 305 306 /// Checks if the vector of instructions can be represented as a shuffle, like: 307 /// %x0 = extractelement <4 x i8> %x, i32 0 308 /// %x3 = extractelement <4 x i8> %x, i32 3 309 /// %y1 = extractelement <4 x i8> %y, i32 1 310 /// %y2 = extractelement <4 x i8> %y, i32 2 311 /// %x0x0 = mul i8 %x0, %x0 312 /// %x3x3 = mul i8 %x3, %x3 313 /// %y1y1 = mul i8 %y1, %y1 314 /// %y2y2 = mul i8 %y2, %y2 315 /// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0 316 /// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1 317 /// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2 318 /// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3 319 /// ret <4 x i8> %ins4 320 /// can be transformed into: 321 /// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5, 322 /// i32 6> 323 /// %2 = mul <4 x i8> %1, %1 324 /// ret <4 x i8> %2 325 /// We convert this initially to something like: 326 /// %x0 = extractelement <4 x i8> %x, i32 0 327 /// %x3 = extractelement <4 x i8> %x, i32 3 328 /// %y1 = extractelement <4 x i8> %y, i32 1 329 /// %y2 = extractelement <4 x i8> %y, i32 2 330 /// %1 = insertelement <4 x i8> poison, i8 %x0, i32 0 331 /// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1 332 /// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2 333 /// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3 334 /// %5 = mul <4 x i8> %4, %4 335 /// %6 = extractelement <4 x i8> %5, i32 0 336 /// %ins1 = insertelement <4 x i8> poison, i8 %6, i32 0 337 /// %7 = extractelement <4 x i8> %5, i32 1 338 /// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1 339 /// %8 = extractelement <4 x i8> %5, i32 2 340 /// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2 341 /// %9 = extractelement <4 x i8> %5, i32 3 342 /// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3 343 /// ret <4 x i8> %ins4 344 /// InstCombiner transforms this into a shuffle and vector mul 345 /// Mask will return the Shuffle Mask equivalent to the extracted elements. 346 /// TODO: Can we split off and reuse the shuffle mask detection from 347 /// TargetTransformInfo::getInstructionThroughput? 
348 static Optional<TargetTransformInfo::ShuffleKind> 349 isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) { 350 const auto *It = 351 find_if(VL, [](Value *V) { return isa<ExtractElementInst>(V); }); 352 if (It == VL.end()) 353 return None; 354 auto *EI0 = cast<ExtractElementInst>(*It); 355 if (isa<ScalableVectorType>(EI0->getVectorOperandType())) 356 return None; 357 unsigned Size = 358 cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements(); 359 Value *Vec1 = nullptr; 360 Value *Vec2 = nullptr; 361 enum ShuffleMode { Unknown, Select, Permute }; 362 ShuffleMode CommonShuffleMode = Unknown; 363 Mask.assign(VL.size(), UndefMaskElem); 364 for (unsigned I = 0, E = VL.size(); I < E; ++I) { 365 // Undef can be represented as an undef element in a vector. 366 if (isa<UndefValue>(VL[I])) 367 continue; 368 auto *EI = cast<ExtractElementInst>(VL[I]); 369 if (isa<ScalableVectorType>(EI->getVectorOperandType())) 370 return None; 371 auto *Vec = EI->getVectorOperand(); 372 // We can extractelement from undef or poison vector. 373 if (isUndefVector(Vec)) 374 continue; 375 // All vector operands must have the same number of vector elements. 376 if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size) 377 return None; 378 if (isa<UndefValue>(EI->getIndexOperand())) 379 continue; 380 auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand()); 381 if (!Idx) 382 return None; 383 // Undefined behavior if Idx is negative or >= Size. 384 if (Idx->getValue().uge(Size)) 385 continue; 386 unsigned IntIdx = Idx->getValue().getZExtValue(); 387 Mask[I] = IntIdx; 388 // For correct shuffling we have to have at most 2 different vector operands 389 // in all extractelement instructions. 390 if (!Vec1 || Vec1 == Vec) { 391 Vec1 = Vec; 392 } else if (!Vec2 || Vec2 == Vec) { 393 Vec2 = Vec; 394 Mask[I] += Size; 395 } else { 396 return None; 397 } 398 if (CommonShuffleMode == Permute) 399 continue; 400 // If the extract index is not the same as the operation number, it is a 401 // permutation. 402 if (IntIdx != I) { 403 CommonShuffleMode = Permute; 404 continue; 405 } 406 CommonShuffleMode = Select; 407 } 408 // If we're not crossing lanes in different vectors, consider it as blending. 409 if (CommonShuffleMode == Select && Vec2) 410 return TargetTransformInfo::SK_Select; 411 // If Vec2 was never used, we have a permutation of a single vector, otherwise 412 // we have permutation of 2 vectors. 413 return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc 414 : TargetTransformInfo::SK_PermuteSingleSrc; 415 } 416 417 namespace { 418 419 /// Main data required for vectorization of instructions. 420 struct InstructionsState { 421 /// The very first instruction in the list with the main opcode. 422 Value *OpValue = nullptr; 423 424 /// The main/alternate instruction. 425 Instruction *MainOp = nullptr; 426 Instruction *AltOp = nullptr; 427 428 /// The main/alternate opcodes for the list of instructions. 429 unsigned getOpcode() const { 430 return MainOp ? MainOp->getOpcode() : 0; 431 } 432 433 unsigned getAltOpcode() const { 434 return AltOp ? AltOp->getOpcode() : 0; 435 } 436 437 /// Some of the instructions in the list have alternate opcodes. 
438 bool isAltShuffle() const { return getOpcode() != getAltOpcode(); } 439 440 bool isOpcodeOrAlt(Instruction *I) const { 441 unsigned CheckedOpcode = I->getOpcode(); 442 return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode; 443 } 444 445 InstructionsState() = delete; 446 InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp) 447 : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {} 448 }; 449 450 } // end anonymous namespace 451 452 /// Chooses the correct key for scheduling data. If \p Op has the same (or 453 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p 454 /// OpValue. 455 static Value *isOneOf(const InstructionsState &S, Value *Op) { 456 auto *I = dyn_cast<Instruction>(Op); 457 if (I && S.isOpcodeOrAlt(I)) 458 return Op; 459 return S.OpValue; 460 } 461 462 /// \returns true if \p Opcode is allowed as part of of the main/alternate 463 /// instruction for SLP vectorization. 464 /// 465 /// Example of unsupported opcode is SDIV that can potentially cause UB if the 466 /// "shuffled out" lane would result in division by zero. 467 static bool isValidForAlternation(unsigned Opcode) { 468 if (Instruction::isIntDivRem(Opcode)) 469 return false; 470 471 return true; 472 } 473 474 /// \returns analysis of the Instructions in \p VL described in 475 /// InstructionsState, the Opcode that we suppose the whole list 476 /// could be vectorized even if its structure is diverse. 477 static InstructionsState getSameOpcode(ArrayRef<Value *> VL, 478 unsigned BaseIndex = 0) { 479 // Make sure these are all Instructions. 480 if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); })) 481 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 482 483 bool IsCastOp = isa<CastInst>(VL[BaseIndex]); 484 bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]); 485 unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode(); 486 unsigned AltOpcode = Opcode; 487 unsigned AltIndex = BaseIndex; 488 489 // Check for one alternate opcode from another BinaryOperator. 490 // TODO - generalize to support all operators (types, calls etc.). 491 for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) { 492 unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode(); 493 if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) { 494 if (InstOpcode == Opcode || InstOpcode == AltOpcode) 495 continue; 496 if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) && 497 isValidForAlternation(Opcode)) { 498 AltOpcode = InstOpcode; 499 AltIndex = Cnt; 500 continue; 501 } 502 } else if (IsCastOp && isa<CastInst>(VL[Cnt])) { 503 Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType(); 504 Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType(); 505 if (Ty0 == Ty1) { 506 if (InstOpcode == Opcode || InstOpcode == AltOpcode) 507 continue; 508 if (Opcode == AltOpcode) { 509 assert(isValidForAlternation(Opcode) && 510 isValidForAlternation(InstOpcode) && 511 "Cast isn't safe for alternation, logic needs to be updated!"); 512 AltOpcode = InstOpcode; 513 AltIndex = Cnt; 514 continue; 515 } 516 } 517 } else if (InstOpcode == Opcode || InstOpcode == AltOpcode) 518 continue; 519 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 520 } 521 522 return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]), 523 cast<Instruction>(VL[AltIndex])); 524 } 525 526 /// \returns true if all of the values in \p VL have the same type or false 527 /// otherwise. 
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static Optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return None;
    return CI->getZExtValue();
  }
  ExtractValueInst *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return None;
  return *EI->idx_begin();
}

/// \returns True if in-tree use also needs extract. This refers to
/// possible scalar operand in vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
      if (hasVectorInstrinsicScalarOpd(ID, i))
        return (CI->getArgOperand(i) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AAResults *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

/// Shuffles \p Mask in accordance with the given \p SubMask.
static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask) {
  if (SubMask.empty())
    return;
  if (Mask.empty()) {
    Mask.append(SubMask.begin(), SubMask.end());
    return;
  }
  SmallVector<int> NewMask(SubMask.size(), UndefMaskElem);
  int TermValue = std::min(Mask.size(), SubMask.size());
  for (int I = 0, E = SubMask.size(); I < E; ++I) {
    if (SubMask[I] >= TermValue || SubMask[I] == UndefMaskElem ||
        Mask[SubMask[I]] >= TermValue)
      continue;
    NewMask[I] = Mask[SubMask[I]];
  }
  Mask.swap(NewMask);
}
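
// Illustrative example of mask composition (not part of the original source):
//   SmallVector<int> Mask = {1, 0, 3, 2};
//   addMask(Mask, {2, 3, 0, 1}); // Mask is now {3, 2, 1, 0}.
// Each result element is Mask[SubMask[I]]; positions whose SubMask entry is
// UndefMaskElem (or out of range of the smaller mask) are left undefined.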
/// Order may have elements assigned special value (size) which is out of
/// bounds. Such indices only appear in places which correspond to undef values
/// (see canReuseExtract for details) and are used to prevent the undef values
/// from affecting the ordering of the operands.
/// The first loop below simply finds all unused indices and then the next loop
/// nest assigns these indices to the positions of the undef values.
/// As an example, the Order below has two undef positions, which are assigned
/// the values 3 and 7 respectively:
/// before:  6 9 5 4 9 2 1 0
/// after:   6 3 5 4 7 2 1 0
static void fixupOrderingIndices(SmallVectorImpl<unsigned> &Order) {
  const unsigned Sz = Order.size();
  SmallBitVector UnusedIndices(Sz, /*t=*/true);
  SmallBitVector MaskedIndices(Sz);
  for (unsigned I = 0; I < Sz; ++I) {
    if (Order[I] < Sz)
      UnusedIndices.reset(Order[I]);
    else
      MaskedIndices.set(I);
  }
  if (MaskedIndices.none())
    return;
  assert(UnusedIndices.count() == MaskedIndices.count() &&
         "Non-synced masked/available indices.");
  int Idx = UnusedIndices.find_first();
  int MIdx = MaskedIndices.find_first();
  while (MIdx >= 0) {
    assert(Idx >= 0 && "Indices must be synced.");
    Order[MIdx] = Idx;
    Idx = UnusedIndices.find_next(Idx);
    MIdx = MaskedIndices.find_next(MIdx);
  }
}

namespace llvm {

static void inversePermutation(ArrayRef<unsigned> Indices,
                               SmallVectorImpl<int> &Mask) {
  Mask.clear();
  const unsigned E = Indices.size();
  Mask.resize(E, UndefMaskElem);
  for (unsigned I = 0; I < E; ++I)
    Mask[Indices[I]] = I;
}

/// \returns the inserting index of an InsertElement or InsertValue
/// instruction, using \p Offset as the base offset for the index.
static Optional<int> getInsertIndex(Value *InsertInst, unsigned Offset) {
  int Index = Offset;
  if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
    if (auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2))) {
      auto *VT = cast<FixedVectorType>(IE->getType());
      if (CI->getValue().uge(VT->getNumElements()))
        return UndefMaskElem;
      Index *= VT->getNumElements();
      Index += CI->getZExtValue();
      return Index;
    }
    if (isa<UndefValue>(IE->getOperand(2)))
      return UndefMaskElem;
    return None;
  }

  auto *IV = cast<InsertValueInst>(InsertInst);
  Type *CurrentType = IV->getType();
  for (unsigned I : IV->indices()) {
    if (auto *ST = dyn_cast<StructType>(CurrentType)) {
      Index *= ST->getNumElements();
      CurrentType = ST->getElementType(I);
    } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
      Index *= AT->getNumElements();
      CurrentType = AT->getElementType();
    } else {
      return None;
    }
    Index += I;
  }
  return Index;
}

/// Reorders the list of scalars in accordance with the given \p Order and then
/// the \p Mask. \p Order is the original order of the scalars; the scalars are
/// first brought back to an unordered state according to that order, and the
/// resulting scalars are then shuffled once again in accordance with the
/// provided mask.
static void reorderScalars(SmallVectorImpl<Value *> &Scalars,
                           ArrayRef<int> Mask) {
  assert(!Mask.empty() && "Expected non-empty mask.");
  SmallVector<Value *> Prev(Scalars.size(),
                            UndefValue::get(Scalars.front()->getType()));
  Prev.swap(Scalars);
  for (unsigned I = 0, E = Prev.size(); I < E; ++I)
    if (Mask[I] != UndefMaskElem)
      Scalars[Mask[I]] = Prev[I];
}
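
// Illustrative example (not part of the original source): with
//   Scalars = {a, b, c, d} and Mask = {3, 0, 1, 2},
// reorderScalars() writes Prev[I] into Scalars[Mask[I]], so the result is
//   Scalars = {b, c, d, a}.
// Positions whose mask value is UndefMaskElem keep the undef placeholder.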
namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  using OrdersType = SmallVector<unsigned, 4>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC),
        DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize =
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize();

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractvalue instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  InstructionCost getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Builds external uses of the vectorized scalars, i.e. the list of
  /// vectorized scalars to be extracted, their lanes and their scalar users.
  /// \p ExternallyUsedValues contains an additional list of external uses to
  /// handle vectorization of reductions.
  void
  buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {});
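
  // Illustrative usage sketch (not part of the original source): a driver is
  // expected to exercise this interface roughly as follows; the exact call
  // sites live in the pass implementation further down in this file.
  //
  //   R.buildTree(Chain);            // build the vectorizable tree
  //   R.reorderTopToBottom();        // optional graph reordering
  //   R.reorderBottomToTop();
  //   R.buildExternalUses();         // record scalars that need extracts
  //   R.computeMinimumValueSizes();
  //   InstructionCost Cost = R.getTreeCost();
  //   if (Cost < -SLPCostThreshold)  // profitable after the threshold
  //     R.vectorizeTree();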
  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
    InstrElementSize.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// Checks if the specified gather tree entry \p TE can be represented as a
  /// shuffled vector entry + (possibly) permutation with other gathers. It
  /// implements the checks only for possibly ordered scalars (Loads,
  /// ExtractElement, ExtractValue), which can be part of the graph.
  Optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE);

  /// Gets reordering data for the given tree entry. If the entry is vectorized
  /// - just return ReorderIndices, otherwise check if the scalars can be
  /// reordered and return the most optimal order.
  /// \param TopToBottom If true, include the order of vectorized stores and
  /// insertelement nodes, otherwise skip them.
  Optional<OrdersType> getReorderingData(const TreeEntry &TE, bool TopToBottom);

  /// Reorders the current graph to the most profitable order starting from the
  /// root node to the leaf nodes. The best order is chosen only from the nodes
  /// of the same size (vectorization factor). Smaller nodes are considered
  /// parts of a subgraph with a smaller VF and they are reordered
  /// independently. We can do this because we still need to extend smaller
  /// nodes to the wider VF and we can merge reordering shuffles with the
  /// widening shuffles.
  void reorderTopToBottom();

  /// Reorders the current graph to the most profitable order starting from
  /// the leaves to the root. This allows rotating small subgraphs and reduces
  /// the number of reshuffles if the leaf nodes use the same order. In this
  /// case we can merge the orders and just shuffle the user node instead of
  /// shuffling its operands. Plus, even if the leaf nodes have different
  /// orders, this allows sinking the reordering in the graph closer to the
  /// root node and merging it later during analysis.
  void reorderBottomToTop(bool IgnoreReorder = false);

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  unsigned getMinVF(unsigned Sz) const {
    return std::max(2U, getMinVecRegSize() / Sz);
  }

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
    unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
      MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
    return MaxVF ?
MaxVF : UINT_MAX; 867 } 868 869 /// Check if homogeneous aggregate is isomorphic to some VectorType. 870 /// Accepts homogeneous multidimensional aggregate of scalars/vectors like 871 /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> }, 872 /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on. 873 /// 874 /// \returns number of elements in vector if isomorphism exists, 0 otherwise. 875 unsigned canMapToVector(Type *T, const DataLayout &DL) const; 876 877 /// \returns True if the VectorizableTree is both tiny and not fully 878 /// vectorizable. We do not vectorize such trees. 879 bool isTreeTinyAndNotFullyVectorizable(bool ForReduction = false) const; 880 881 /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values 882 /// can be load combined in the backend. Load combining may not be allowed in 883 /// the IR optimizer, so we do not want to alter the pattern. For example, 884 /// partially transforming a scalar bswap() pattern into vector code is 885 /// effectively impossible for the backend to undo. 886 /// TODO: If load combining is allowed in the IR optimizer, this analysis 887 /// may not be necessary. 888 bool isLoadCombineReductionCandidate(RecurKind RdxKind) const; 889 890 /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values 891 /// can be load combined in the backend. Load combining may not be allowed in 892 /// the IR optimizer, so we do not want to alter the pattern. For example, 893 /// partially transforming a scalar bswap() pattern into vector code is 894 /// effectively impossible for the backend to undo. 895 /// TODO: If load combining is allowed in the IR optimizer, this analysis 896 /// may not be necessary. 897 bool isLoadCombineCandidate() const; 898 899 OptimizationRemarkEmitter *getORE() { return ORE; } 900 901 /// This structure holds any data we need about the edges being traversed 902 /// during buildTree_rec(). We keep track of: 903 /// (i) the user TreeEntry index, and 904 /// (ii) the index of the edge. 905 struct EdgeInfo { 906 EdgeInfo() = default; 907 EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx) 908 : UserTE(UserTE), EdgeIdx(EdgeIdx) {} 909 /// The user TreeEntry. 910 TreeEntry *UserTE = nullptr; 911 /// The operand index of the use. 912 unsigned EdgeIdx = UINT_MAX; 913 #ifndef NDEBUG 914 friend inline raw_ostream &operator<<(raw_ostream &OS, 915 const BoUpSLP::EdgeInfo &EI) { 916 EI.dump(OS); 917 return OS; 918 } 919 /// Debug print. 920 void dump(raw_ostream &OS) const { 921 OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null") 922 << " EdgeIdx:" << EdgeIdx << "}"; 923 } 924 LLVM_DUMP_METHOD void dump() const { dump(dbgs()); } 925 #endif 926 }; 927 928 /// A helper data structure to hold the operands of a vector of instructions. 929 /// This supports a fixed vector length for all operand vectors. 930 class VLOperands { 931 /// For each operand we need (i) the value, and (ii) the opcode that it 932 /// would be attached to if the expression was in a left-linearized form. 933 /// This is required to avoid illegal operand reordering. 934 /// For example: 935 /// \verbatim 936 /// 0 Op1 937 /// |/ 938 /// Op1 Op2 Linearized + Op2 939 /// \ / ----------> |/ 940 /// - - 941 /// 942 /// Op1 - Op2 (0 + Op1) - Op2 943 /// \endverbatim 944 /// 945 /// Value Op1 is attached to a '+' operation, and Op2 to a '-'. 
946 /// 947 /// Another way to think of this is to track all the operations across the 948 /// path from the operand all the way to the root of the tree and to 949 /// calculate the operation that corresponds to this path. For example, the 950 /// path from Op2 to the root crosses the RHS of the '-', therefore the 951 /// corresponding operation is a '-' (which matches the one in the 952 /// linearized tree, as shown above). 953 /// 954 /// For lack of a better term, we refer to this operation as Accumulated 955 /// Path Operation (APO). 956 struct OperandData { 957 OperandData() = default; 958 OperandData(Value *V, bool APO, bool IsUsed) 959 : V(V), APO(APO), IsUsed(IsUsed) {} 960 /// The operand value. 961 Value *V = nullptr; 962 /// TreeEntries only allow a single opcode, or an alternate sequence of 963 /// them (e.g, +, -). Therefore, we can safely use a boolean value for the 964 /// APO. It is set to 'true' if 'V' is attached to an inverse operation 965 /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise 966 /// (e.g., Add/Mul) 967 bool APO = false; 968 /// Helper data for the reordering function. 969 bool IsUsed = false; 970 }; 971 972 /// During operand reordering, we are trying to select the operand at lane 973 /// that matches best with the operand at the neighboring lane. Our 974 /// selection is based on the type of value we are looking for. For example, 975 /// if the neighboring lane has a load, we need to look for a load that is 976 /// accessing a consecutive address. These strategies are summarized in the 977 /// 'ReorderingMode' enumerator. 978 enum class ReorderingMode { 979 Load, ///< Matching loads to consecutive memory addresses 980 Opcode, ///< Matching instructions based on opcode (same or alternate) 981 Constant, ///< Matching constants 982 Splat, ///< Matching the same instruction multiple times (broadcast) 983 Failed, ///< We failed to create a vectorizable group 984 }; 985 986 using OperandDataVec = SmallVector<OperandData, 2>; 987 988 /// A vector of operand vectors. 989 SmallVector<OperandDataVec, 4> OpsVec; 990 991 const DataLayout &DL; 992 ScalarEvolution &SE; 993 const BoUpSLP &R; 994 995 /// \returns the operand data at \p OpIdx and \p Lane. 996 OperandData &getData(unsigned OpIdx, unsigned Lane) { 997 return OpsVec[OpIdx][Lane]; 998 } 999 1000 /// \returns the operand data at \p OpIdx and \p Lane. Const version. 1001 const OperandData &getData(unsigned OpIdx, unsigned Lane) const { 1002 return OpsVec[OpIdx][Lane]; 1003 } 1004 1005 /// Clears the used flag for all entries. 1006 void clearUsed() { 1007 for (unsigned OpIdx = 0, NumOperands = getNumOperands(); 1008 OpIdx != NumOperands; ++OpIdx) 1009 for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes; 1010 ++Lane) 1011 OpsVec[OpIdx][Lane].IsUsed = false; 1012 } 1013 1014 /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2. 1015 void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) { 1016 std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]); 1017 } 1018 1019 // The hard-coded scores listed here are not very important. When computing 1020 // the scores of matching one sub-tree with another, we are basically 1021 // counting the number of values that are matching. So even if all scores 1022 // are set to 1, we would still get a decent matching result. 1023 // However, sometimes we have to break ties. For example we may have to 1024 // choose between matching loads vs matching opcodes. 
    // This is what these scores are helping us with: they provide the order
    // of preference.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 3;
    /// ExtractElementInst from same vector and consecutive indexes.
    static const int ScoreConsecutiveExtracts = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g., add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// User external to the vectorized code.
    static const int ExternalUseCost = 1;
    /// The user is internal but in a different lane.
    static const int UserInDiffLaneCost = ExternalUseCost;

    /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
    static int getShallowScore(Value *V1, Value *V2, const DataLayout &DL,
                               ScalarEvolution &SE) {
      auto *LI1 = dyn_cast<LoadInst>(V1);
      auto *LI2 = dyn_cast<LoadInst>(V2);
      if (LI1 && LI2) {
        if (LI1->getParent() != LI2->getParent())
          return VLOperands::ScoreFail;

        Optional<int> Dist = getPointersDiff(
            LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
            LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
        return (Dist && *Dist == 1) ? VLOperands::ScoreConsecutiveLoads
                                    : VLOperands::ScoreFail;
      }

      auto *C1 = dyn_cast<Constant>(V1);
      auto *C2 = dyn_cast<Constant>(V2);
      if (C1 && C2)
        return VLOperands::ScoreConstants;

      // Extracts from consecutive indexes of the same vector score better, as
      // the extracts could be optimized away.
      Value *EV;
      ConstantInt *Ex1Idx, *Ex2Idx;
      if (match(V1, m_ExtractElt(m_Value(EV), m_ConstantInt(Ex1Idx))) &&
          match(V2, m_ExtractElt(m_Deferred(EV), m_ConstantInt(Ex2Idx))) &&
          Ex1Idx->getZExtValue() + 1 == Ex2Idx->getZExtValue())
        return VLOperands::ScoreConsecutiveExtracts;

      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (I1 && I2) {
        if (I1 == I2)
          return VLOperands::ScoreSplat;
        InstructionsState S = getSameOpcode({I1, I2});
        // Note: Only consider instructions with <= 2 operands to avoid
        // complexity explosion.
        if (S.getOpcode() && S.MainOp->getNumOperands() <= 2)
          return S.isAltShuffle() ? VLOperands::ScoreAltOpcodes
                                  : VLOperands::ScoreSameOpcode;
      }

      if (isa<UndefValue>(V2))
        return VLOperands::ScoreUndef;

      return VLOperands::ScoreFail;
    }
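
    // For example (illustrative only, not part of the original code):
    //   getShallowScore(load A[i], load A[i+1], ...) == ScoreConsecutiveLoads
    //   getShallowScore(C1, C2, ...)                 == ScoreConstants
    //   getShallowScore(add, sub, ...)               == ScoreAltOpcodes
    //   getShallowScore(V, V, ...)                   == ScoreSplat
    // Higher scores are preferred when deciding which operands to place in
    // the same lane.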
    /// Holds the values and their lanes that are taking part in the look-ahead
    /// score calculation. This is used in the external uses cost calculation.
    SmallDenseMap<Value *, int> InLookAheadValues;

    /// \Returns the additional cost due to uses of \p LHS and \p RHS that are
    /// either external to the vectorized code, or require shuffling.
    int getExternalUsesCost(const std::pair<Value *, int> &LHS,
                            const std::pair<Value *, int> &RHS) {
      int Cost = 0;
      std::array<std::pair<Value *, int>, 2> Values = {{LHS, RHS}};
      for (int Idx = 0, IdxE = Values.size(); Idx != IdxE; ++Idx) {
        Value *V = Values[Idx].first;
        if (isa<Constant>(V)) {
          // Since this is a function pass, it doesn't make semantic sense to
          // walk the users of a subclass of Constant. The users could be in
          // another function, or even another module that happens to be in
          // the same LLVMContext.
          continue;
        }

        // Calculate the absolute lane, using the minimum relative lane of LHS
        // and RHS as base and Idx as the offset.
        int Ln = std::min(LHS.second, RHS.second) + Idx;
        assert(Ln >= 0 && "Bad lane calculation");
        unsigned UsersBudget = LookAheadUsersBudget;
        for (User *U : V->users()) {
          if (const TreeEntry *UserTE = R.getTreeEntry(U)) {
            // The user is in the VectorizableTree. Check if we need to insert.
            auto It = llvm::find(UserTE->Scalars, U);
            assert(It != UserTE->Scalars.end() && "U is in UserTE");
            int UserLn = std::distance(UserTE->Scalars.begin(), It);
            assert(UserLn >= 0 && "Bad lane");
            if (UserLn != Ln)
              Cost += UserInDiffLaneCost;
          } else {
            // Check if the user is in the look-ahead code.
            auto It2 = InLookAheadValues.find(U);
            if (It2 != InLookAheadValues.end()) {
              // The user is in the look-ahead code. Check the lane.
              if (It2->second != Ln)
                Cost += UserInDiffLaneCost;
            } else {
              // The user is neither in the SLP tree nor in the look-ahead
              // code.
              Cost += ExternalUseCost;
            }
          }
          // Limit the number of visited uses to cap compilation time.
          if (--UsersBudget == 0)
            break;
        }
      }
      return Cost;
    }

    /// Go through the operands of \p LHS and \p RHS recursively until \p
    /// MaxLevel, and return the cumulative score. For example:
    /// \verbatim
    ///  A[0]  B[0]  A[1]  B[1]  C[0] D[0]  B[1] A[1]
    ///     \ /        \ /        \ /        \ /
    ///      +          +          +          +
    ///     G1         G2         G3         G4
    /// \endverbatim
    /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
    /// each level recursively, accumulating the score. It starts from matching
    /// the additions at level 0, then moves on to the loads (level 1). The
    /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
    /// {B[0],B[1]} match with VLOperands::ScoreConsecutiveLoads, while
    /// {A[0],C[0]} has a score of VLOperands::ScoreFail.
    /// Please note that the order of the operands does not matter, as we
    /// evaluate the score of all profitable combinations of operands. In
    /// other words the score of G1 and G4 is the same as G1 and G2. This
    /// heuristic is based on ideas described in:
    ///   Look-ahead SLP: Auto-vectorization in the presence of commutative
    ///   operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
    ///   Luís F. W. Góes
    int getScoreAtLevelRec(const std::pair<Value *, int> &LHS,
                           const std::pair<Value *, int> &RHS, int CurrLevel,
                           int MaxLevel) {

      Value *V1 = LHS.first;
      Value *V2 = RHS.first;
      // Get the shallow score of V1 and V2.
      int ShallowScoreAtThisLevel =
          std::max((int)ScoreFail, getShallowScore(V1, V2, DL, SE) -
                                       getExternalUsesCost(LHS, RHS));
      int Lane1 = LHS.second;
      int Lane2 = RHS.second;

      // If reached MaxLevel,
      //  or if V1 and V2 are not instructions,
      //  or if they are SPLAT,
      //  or if they are not consecutive, early return the current cost.
      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
          ShallowScoreAtThisLevel == VLOperands::ScoreFail ||
          (isa<LoadInst>(I1) && isa<LoadInst>(I2) && ShallowScoreAtThisLevel))
        return ShallowScoreAtThisLevel;
      assert(I1 && I2 && "Should have early exited.");

      // Keep track of in-tree values for determining the external-use cost.
      InLookAheadValues[V1] = Lane1;
      InLookAheadValues[V2] = Lane2;

      // Contains the I2 operand indexes that got matched with I1 operands.
      SmallSet<unsigned, 4> Op2Used;

      // Recursion towards the operands of I1 and I2. We are trying all
      // possible operand pairs, and keeping track of the best score.
      for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
           OpIdx1 != NumOperands1; ++OpIdx1) {
        // Try to pair op1I with the best operand of I2.
        int MaxTmpScore = 0;
        unsigned MaxOpIdx2 = 0;
        bool FoundBest = false;
        // If I2 is commutative try all combinations.
        unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
        unsigned ToIdx = isCommutative(I2)
                             ? I2->getNumOperands()
                             : std::min(I2->getNumOperands(), OpIdx1 + 1);
        assert(FromIdx <= ToIdx && "Bad index");
        for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
          // Skip operands already paired with OpIdx1.
          if (Op2Used.count(OpIdx2))
            continue;
          // Recursively calculate the cost at each level.
          int TmpScore = getScoreAtLevelRec({I1->getOperand(OpIdx1), Lane1},
                                            {I2->getOperand(OpIdx2), Lane2},
                                            CurrLevel + 1, MaxLevel);
          // Look for the best score.
          if (TmpScore > VLOperands::ScoreFail && TmpScore > MaxTmpScore) {
            MaxTmpScore = TmpScore;
            MaxOpIdx2 = OpIdx2;
            FoundBest = true;
          }
        }
        if (FoundBest) {
          // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
          Op2Used.insert(MaxOpIdx2);
          ShallowScoreAtThisLevel += MaxTmpScore;
        }
      }
      return ShallowScoreAtThisLevel;
    }

    /// \Returns the look-ahead score, which tells us how much the sub-trees
    /// rooted at \p LHS and \p RHS match: the more they match, the higher the
    /// score. This helps break ties in an informed way when we cannot decide
    /// on the order of the operands by just considering the immediate
    /// predecessors.
    int getLookAheadScore(const std::pair<Value *, int> &LHS,
                          const std::pair<Value *, int> &RHS) {
      InLookAheadValues.clear();
      return getScoreAtLevelRec(LHS, RHS, 1, LookAheadMaxDepth);
    }

    // Search all operands in Ops[*][Lane] for the one that matches best
    // Ops[OpIdx][LastLane] and return its operand index.
    // If no good match can be found, return None.
    Optional<unsigned>
    getBestOperand(unsigned OpIdx, int Lane, int LastLane,
                   ArrayRef<ReorderingMode> ReorderingModes) {
      unsigned NumOperands = getNumOperands();

      // The operand of the previous lane at OpIdx.
      Value *OpLastLane = getData(OpIdx, LastLane).V;

      // Our strategy mode for OpIdx.
1265 ReorderingMode RMode = ReorderingModes[OpIdx]; 1266 1267 // The linearized opcode of the operand at OpIdx, Lane. 1268 bool OpIdxAPO = getData(OpIdx, Lane).APO; 1269 1270 // The best operand index and its score. 1271 // Sometimes we have more than one option (e.g., Opcode and Undefs), so we 1272 // are using the score to differentiate between the two. 1273 struct BestOpData { 1274 Optional<unsigned> Idx = None; 1275 unsigned Score = 0; 1276 } BestOp; 1277 1278 // Iterate through all unused operands and look for the best. 1279 for (unsigned Idx = 0; Idx != NumOperands; ++Idx) { 1280 // Get the operand at Idx and Lane. 1281 OperandData &OpData = getData(Idx, Lane); 1282 Value *Op = OpData.V; 1283 bool OpAPO = OpData.APO; 1284 1285 // Skip already selected operands. 1286 if (OpData.IsUsed) 1287 continue; 1288 1289 // Skip if we are trying to move the operand to a position with a 1290 // different opcode in the linearized tree form. This would break the 1291 // semantics. 1292 if (OpAPO != OpIdxAPO) 1293 continue; 1294 1295 // Look for an operand that matches the current mode. 1296 switch (RMode) { 1297 case ReorderingMode::Load: 1298 case ReorderingMode::Constant: 1299 case ReorderingMode::Opcode: { 1300 bool LeftToRight = Lane > LastLane; 1301 Value *OpLeft = (LeftToRight) ? OpLastLane : Op; 1302 Value *OpRight = (LeftToRight) ? Op : OpLastLane; 1303 unsigned Score = 1304 getLookAheadScore({OpLeft, LastLane}, {OpRight, Lane}); 1305 if (Score > BestOp.Score) { 1306 BestOp.Idx = Idx; 1307 BestOp.Score = Score; 1308 } 1309 break; 1310 } 1311 case ReorderingMode::Splat: 1312 if (Op == OpLastLane) 1313 BestOp.Idx = Idx; 1314 break; 1315 case ReorderingMode::Failed: 1316 return None; 1317 } 1318 } 1319 1320 if (BestOp.Idx) { 1321 getData(BestOp.Idx.getValue(), Lane).IsUsed = true; 1322 return BestOp.Idx; 1323 } 1324 // If we could not find a good match return None. 1325 return None; 1326 } 1327 1328 /// Helper for reorderOperandVecs. \Returns the lane that we should start 1329 /// reordering from. This is the one which has the least number of operands 1330 /// that can freely move about. 1331 unsigned getBestLaneToStartReordering() const { 1332 unsigned BestLane = 0; 1333 unsigned Min = UINT_MAX; 1334 for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes; 1335 ++Lane) { 1336 unsigned NumFreeOps = getMaxNumOperandsThatCanBeReordered(Lane); 1337 if (NumFreeOps < Min) { 1338 Min = NumFreeOps; 1339 BestLane = Lane; 1340 } 1341 } 1342 return BestLane; 1343 } 1344 1345 /// \Returns the maximum number of operands that are allowed to be reordered 1346 /// for \p Lane. This is used as a heuristic for selecting the first lane to 1347 /// start operand reordering. 1348 unsigned getMaxNumOperandsThatCanBeReordered(unsigned Lane) const { 1349 unsigned CntTrue = 0; 1350 unsigned NumOperands = getNumOperands(); 1351 // Operands with the same APO can be reordered. We therefore need to count 1352 // how many of them we have for each APO, like this: Cnt[APO] = x. 1353 // Since we only have two APOs, namely true and false, we can avoid using 1354 // a map. Instead we can simply count the number of operands that 1355 // correspond to one of them (in this case the 'true' APO), and calculate 1356 // the other by subtracting it from the total number of operands. 
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx)
        if (getData(OpIdx, Lane).APO)
          ++CntTrue;
      unsigned CntFalse = NumOperands - CntTrue;
      return std::max(CntTrue, CntFalse);
    }

    /// Go through the instructions in VL and append their operands.
    void appendOperandsOfVL(ArrayRef<Value *> VL) {
      assert(!VL.empty() && "Bad VL");
      assert((empty() || VL.size() == getNumLanes()) &&
             "Expected same number of lanes");
      assert(isa<Instruction>(VL[0]) && "Expected instruction");
      unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
      OpsVec.resize(NumOperands);
      unsigned NumLanes = VL.size();
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
        OpsVec[OpIdx].resize(NumLanes);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
          // Our tree has just 3 nodes: the root and two operands.
          // It is therefore trivial to get the APO. We only need to check the
          // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
          // RHS operand. The LHS operand of both add and sub is never attached
          // to an inverse operation in the linearized form, therefore its APO
          // is false. The RHS is true only if VL[Lane] is an inverse operation.

          // Since operand reordering is performed on groups of commutative
          // operations or alternating sequences (e.g., +, -), we can safely
          // tell the inverse operations by checking commutativity.
          bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
          bool APO = (OpIdx == 0) ? false : IsInverseOperation;
          OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
                                 APO, false};
        }
      }
    }
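
    // For example (illustrative only, not part of the original code): for
    // VL = {add a, b; sub c, d} the recorded APO values are
    //   operand 0: {a: false, c: false}   // the LHS is never inverted
    //   operand 1: {b: false, d: true}    // RHS of the non-commutative sub
    // so 'b' may only be swapped with operands that also have APO == false.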
1444 appendOperandsOfVL(RootVL); 1445 } 1446 1447 /// \Returns a value vector with the operands across all lanes for the 1448 /// operand at \p OpIdx. 1449 ValueList getVL(unsigned OpIdx) const { 1450 ValueList OpVL(OpsVec[OpIdx].size()); 1451 assert(OpsVec[OpIdx].size() == getNumLanes() && 1452 "Expected same num of lanes across all operands"); 1453 for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane) 1454 OpVL[Lane] = OpsVec[OpIdx][Lane].V; 1455 return OpVL; 1456 } 1457 1458 // Performs operand reordering for 2 or more operands. 1459 // The original operands are in OrigOps[OpIdx][Lane]. 1460 // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'. 1461 void reorder() { 1462 unsigned NumOperands = getNumOperands(); 1463 unsigned NumLanes = getNumLanes(); 1464 // Each operand has its own mode. We are using this mode to help us select 1465 // the instructions for each lane, so that they match best with the ones 1466 // we have selected so far. 1467 SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands); 1468 1469 // This is a greedy single-pass algorithm. We are going over each lane 1470 // once and deciding on the best order right away with no back-tracking. 1471 // However, in order to increase its effectiveness, we start with the lane 1472 // that has operands that can move the least. For example, given the 1473 // following lanes: 1474 // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd 1475 // Lane 1 : A[1] = C[1] - B[1] // Visited 1st 1476 // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd 1477 // Lane 3 : A[3] = C[3] - B[3] // Visited 4th 1478 // we will start at Lane 1, since the operands of the subtraction cannot 1479 // be reordered. Then we will visit the rest of the lanes in a circular 1480 // fashion. That is, Lane 2, then Lane 0, and finally Lane 3. 1481 1482 // Find the first lane that we will start our search from. 1483 unsigned FirstLane = getBestLaneToStartReordering(); 1484 1485 // Initialize the modes. 1486 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1487 Value *OpLane0 = getValue(OpIdx, FirstLane); 1488 // Keep track if we have instructions with all the same opcode on one 1489 // side. 1490 if (isa<LoadInst>(OpLane0)) 1491 ReorderingModes[OpIdx] = ReorderingMode::Load; 1492 else if (isa<Instruction>(OpLane0)) { 1493 // Check if OpLane0 should be broadcast. 1494 if (shouldBroadcast(OpLane0, OpIdx, FirstLane)) 1495 ReorderingModes[OpIdx] = ReorderingMode::Splat; 1496 else 1497 ReorderingModes[OpIdx] = ReorderingMode::Opcode; 1498 } 1499 else if (isa<Constant>(OpLane0)) 1500 ReorderingModes[OpIdx] = ReorderingMode::Constant; 1501 else if (isa<Argument>(OpLane0)) 1502 // Our best hope is a Splat. It may save some cost in some cases. 1503 ReorderingModes[OpIdx] = ReorderingMode::Splat; 1504 else 1505 // NOTE: This should be unreachable. 1506 ReorderingModes[OpIdx] = ReorderingMode::Failed; 1507 } 1508 1509 // If the initial strategy fails for any of the operand indexes, then we 1510 // perform reordering again in a second pass. This helps avoid assigning 1511 // high priority to the failed strategy, and should improve reordering for 1512 // the non-failed operand indexes. 1513 for (int Pass = 0; Pass != 2; ++Pass) { 1514 // Skip the second pass if the first pass did not fail. 1515 bool StrategyFailed = false; 1516 // Mark all operand data as free to use. 1517 clearUsed(); 1518 // We keep the original operand order for the FirstLane, so reorder the 1519 // rest of the lanes.
We are visiting the nodes in a circular fashion, 1520 // using FirstLane as the center point and increasing the radius 1521 // distance. 1522 for (unsigned Distance = 1; Distance != NumLanes; ++Distance) { 1523 // Visit the lane on the right and then the lane on the left. 1524 for (int Direction : {+1, -1}) { 1525 int Lane = FirstLane + Direction * Distance; 1526 if (Lane < 0 || Lane >= (int)NumLanes) 1527 continue; 1528 int LastLane = Lane - Direction; 1529 assert(LastLane >= 0 && LastLane < (int)NumLanes && 1530 "Out of bounds"); 1531 // Look for a good match for each operand. 1532 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1533 // Search for the operand that matches SortedOps[OpIdx][Lane-1]. 1534 Optional<unsigned> BestIdx = 1535 getBestOperand(OpIdx, Lane, LastLane, ReorderingModes); 1536 // By not selecting a value, we allow the operands that follow to 1537 // select a better matching value. We will get a non-null value in 1538 // the next run of getBestOperand(). 1539 if (BestIdx) { 1540 // Swap the current operand with the one returned by 1541 // getBestOperand(). 1542 swap(OpIdx, BestIdx.getValue(), Lane); 1543 } else { 1544 // We failed to find a best operand, set mode to 'Failed'. 1545 ReorderingModes[OpIdx] = ReorderingMode::Failed; 1546 // Enable the second pass. 1547 StrategyFailed = true; 1548 } 1549 } 1550 } 1551 } 1552 // Skip second pass if the strategy did not fail. 1553 if (!StrategyFailed) 1554 break; 1555 } 1556 } 1557 1558 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1559 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) { 1560 switch (RMode) { 1561 case ReorderingMode::Load: 1562 return "Load"; 1563 case ReorderingMode::Opcode: 1564 return "Opcode"; 1565 case ReorderingMode::Constant: 1566 return "Constant"; 1567 case ReorderingMode::Splat: 1568 return "Splat"; 1569 case ReorderingMode::Failed: 1570 return "Failed"; 1571 } 1572 llvm_unreachable("Unimplemented Reordering Type"); 1573 } 1574 1575 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode, 1576 raw_ostream &OS) { 1577 return OS << getModeStr(RMode); 1578 } 1579 1580 /// Debug print. 1581 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) { 1582 printMode(RMode, dbgs()); 1583 } 1584 1585 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) { 1586 return printMode(RMode, OS); 1587 } 1588 1589 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const { 1590 const unsigned Indent = 2; 1591 unsigned Cnt = 0; 1592 for (const OperandDataVec &OpDataVec : OpsVec) { 1593 OS << "Operand " << Cnt++ << "\n"; 1594 for (const OperandData &OpData : OpDataVec) { 1595 OS.indent(Indent) << "{"; 1596 if (Value *V = OpData.V) 1597 OS << *V; 1598 else 1599 OS << "null"; 1600 OS << ", APO:" << OpData.APO << "}\n"; 1601 } 1602 OS << "\n"; 1603 } 1604 return OS; 1605 } 1606 1607 /// Debug print. 1608 LLVM_DUMP_METHOD void dump() const { print(dbgs()); } 1609 #endif 1610 }; 1611 1612 /// Checks if the instruction is marked for deletion. 1613 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); } 1614 1615 /// Marks values operands for later deletion by replacing them with Undefs. 1616 void eraseInstructions(ArrayRef<Value *> AV); 1617 1618 ~BoUpSLP(); 1619 1620 private: 1621 /// Checks if all users of \p I are the part of the vectorization tree. 1622 bool areAllUsersVectorized(Instruction *I, 1623 ArrayRef<Value *> VectorizedVals) const; 1624 1625 /// \returns the cost of the vectorizable entry. 
1626 InstructionCost getEntryCost(const TreeEntry *E, 1627 ArrayRef<Value *> VectorizedVals); 1628 1629 /// This is the recursive part of buildTree. 1630 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, 1631 const EdgeInfo &EI); 1632 1633 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can 1634 /// be vectorized to use the original vector (or aggregate "bitcast" to a 1635 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise 1636 /// returns false, setting \p CurrentOrder to either an empty vector or a 1637 /// non-identity permutation that allows reusing extract instructions. 1638 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 1639 SmallVectorImpl<unsigned> &CurrentOrder) const; 1640 1641 /// Vectorize a single entry in the tree. 1642 Value *vectorizeTree(TreeEntry *E); 1643 1644 /// Vectorize a single entry in the tree, starting in \p VL. 1645 Value *vectorizeTree(ArrayRef<Value *> VL); 1646 1647 /// \returns the scalarization cost for this type. Scalarization in this 1648 /// context means the creation of vectors from a group of scalars. If \p 1649 /// NeedToShuffle is true, need to add a cost of reshuffling some of the 1650 /// vector elements. 1651 InstructionCost getGatherCost(FixedVectorType *Ty, 1652 const DenseSet<unsigned> &ShuffledIndices, 1653 bool NeedToShuffle) const; 1654 1655 /// Checks if the gathered \p VL can be represented as shuffle(s) of previous 1656 /// tree entries. 1657 /// \returns ShuffleKind, if gathered values can be represented as shuffles of 1658 /// previous tree entries. \p Mask is filled with the shuffle mask. 1659 Optional<TargetTransformInfo::ShuffleKind> 1660 isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask, 1661 SmallVectorImpl<const TreeEntry *> &Entries); 1662 1663 /// \returns the scalarization cost for this list of values. Assuming that 1664 /// this subtree gets vectorized, we may need to extract the values from the 1665 /// roots. This method calculates the cost of extracting the values. 1666 InstructionCost getGatherCost(ArrayRef<Value *> VL) const; 1667 1668 /// Set the Builder insert point to one after the last instruction in 1669 /// the bundle. 1670 void setInsertPointAfterBundle(const TreeEntry *E); 1671 1672 /// \returns a vector from a collection of scalars in \p VL. 1673 Value *gather(ArrayRef<Value *> VL); 1674 1675 /// \returns whether the VectorizableTree is fully vectorizable and will 1676 /// be beneficial even if the tree height is tiny. 1677 bool isFullyVectorizableTinyTree(bool ForReduction) const; 1678 1679 /// Reorder commutative or alt operands to get better probability of 1680 /// generating vectorized code. 1681 static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, 1682 SmallVectorImpl<Value *> &Left, 1683 SmallVectorImpl<Value *> &Right, 1684 const DataLayout &DL, 1685 ScalarEvolution &SE, 1686 const BoUpSLP &R); 1687 struct TreeEntry { 1688 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>; 1689 TreeEntry(VecTreeTy &Container) : Container(Container) {} 1690 1691 /// \returns true if the scalars in VL are equal to this entry.
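/// For example (illustrative): an entry with Scalars == {a, b} and
/// ReuseShuffleIndices == {0, 1, 0, 1} is considered the same as
/// VL == {a, b, a, b}.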
1692 bool isSame(ArrayRef<Value *> VL) const { 1693 auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) { 1694 if (Mask.size() != VL.size() && VL.size() == Scalars.size()) 1695 return std::equal(VL.begin(), VL.end(), Scalars.begin()); 1696 return VL.size() == Mask.size() && 1697 std::equal(VL.begin(), VL.end(), Mask.begin(), 1698 [Scalars](Value *V, int Idx) { 1699 return (isa<UndefValue>(V) && 1700 Idx == UndefMaskElem) || 1701 (Idx != UndefMaskElem && V == Scalars[Idx]); 1702 }); 1703 }; 1704 if (!ReorderIndices.empty()) { 1705 // TODO: implement matching if the nodes are just reordered, still can 1706 // treat the vector as the same if the list of scalars matches VL 1707 // directly, without reordering. 1708 SmallVector<int> Mask; 1709 inversePermutation(ReorderIndices, Mask); 1710 if (VL.size() == Scalars.size()) 1711 return IsSame(Scalars, Mask); 1712 if (VL.size() == ReuseShuffleIndices.size()) { 1713 ::addMask(Mask, ReuseShuffleIndices); 1714 return IsSame(Scalars, Mask); 1715 } 1716 return false; 1717 } 1718 return IsSame(Scalars, ReuseShuffleIndices); 1719 } 1720 1721 /// \returns true if current entry has same operands as \p TE. 1722 bool hasEqualOperands(const TreeEntry &TE) const { 1723 if (TE.getNumOperands() != getNumOperands()) 1724 return false; 1725 SmallBitVector Used(getNumOperands()); 1726 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) { 1727 unsigned PrevCount = Used.count(); 1728 for (unsigned K = 0; K < E; ++K) { 1729 if (Used.test(K)) 1730 continue; 1731 if (getOperand(K) == TE.getOperand(I)) { 1732 Used.set(K); 1733 break; 1734 } 1735 } 1736 // Check if we actually found the matching operand. 1737 if (PrevCount == Used.count()) 1738 return false; 1739 } 1740 return true; 1741 } 1742 1743 /// \return Final vectorization factor for the node. Defined by the total 1744 /// number of vectorized scalars, including those, used several times in the 1745 /// entry and counted in the \a ReuseShuffleIndices, if any. 1746 unsigned getVectorFactor() const { 1747 if (!ReuseShuffleIndices.empty()) 1748 return ReuseShuffleIndices.size(); 1749 return Scalars.size(); 1750 }; 1751 1752 /// A vector of scalars. 1753 ValueList Scalars; 1754 1755 /// The Scalars are vectorized into this value. It is initialized to Null. 1756 Value *VectorizedValue = nullptr; 1757 1758 /// Do we need to gather this sequence or vectorize it 1759 /// (either with vector instruction or with scatter/gather 1760 /// intrinsics for store/load)? 1761 enum EntryState { Vectorize, ScatterVectorize, NeedToGather }; 1762 EntryState State; 1763 1764 /// Does this sequence require some shuffling? 1765 SmallVector<int, 4> ReuseShuffleIndices; 1766 1767 /// Does this entry require reordering? 1768 SmallVector<unsigned, 4> ReorderIndices; 1769 1770 /// Points back to the VectorizableTree. 1771 /// 1772 /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has 1773 /// to be a pointer and needs to be able to initialize the child iterator. 1774 /// Thus we need a reference back to the container to translate the indices 1775 /// to entries. 1776 VecTreeTy &Container; 1777 1778 /// The TreeEntry index containing the user of this entry. We can actually 1779 /// have multiple users so the data structure is not truly a tree. 1780 SmallVector<EdgeInfo, 1> UserTreeIndices; 1781 1782 /// The index of this treeEntry in VectorizableTree. 1783 int Idx = -1; 1784 1785 private: 1786 /// The operands of each instruction in each lane Operands[op_index][lane]. 
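/// E.g. (illustrative): for the bundle {a0 + b0, a1 + b1}, Operands[0] holds
/// {a0, a1} and Operands[1] holds {b0, b1}.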
1787 /// Note: This helps avoid the replication of the code that performs the 1788 /// reordering of operands during buildTree_rec() and vectorizeTree(). 1789 SmallVector<ValueList, 2> Operands; 1790 1791 /// The main/alternate instruction. 1792 Instruction *MainOp = nullptr; 1793 Instruction *AltOp = nullptr; 1794 1795 public: 1796 /// Set this bundle's \p OpIdx'th operand to \p OpVL. 1797 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) { 1798 if (Operands.size() < OpIdx + 1) 1799 Operands.resize(OpIdx + 1); 1800 assert(Operands[OpIdx].empty() && "Already resized?"); 1801 Operands[OpIdx].resize(Scalars.size()); 1802 for (unsigned Lane = 0, E = Scalars.size(); Lane != E; ++Lane) 1803 Operands[OpIdx][Lane] = OpVL[Lane]; 1804 } 1805 1806 /// Set the operands of this bundle in their original order. 1807 void setOperandsInOrder() { 1808 assert(Operands.empty() && "Already initialized?"); 1809 auto *I0 = cast<Instruction>(Scalars[0]); 1810 Operands.resize(I0->getNumOperands()); 1811 unsigned NumLanes = Scalars.size(); 1812 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands(); 1813 OpIdx != NumOperands; ++OpIdx) { 1814 Operands[OpIdx].resize(NumLanes); 1815 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { 1816 auto *I = cast<Instruction>(Scalars[Lane]); 1817 assert(I->getNumOperands() == NumOperands && 1818 "Expected same number of operands"); 1819 Operands[OpIdx][Lane] = I->getOperand(OpIdx); 1820 } 1821 } 1822 } 1823 1824 /// Reorders operands of the node to the given mask \p Mask. 1825 void reorderOperands(ArrayRef<int> Mask) { 1826 for (ValueList &Operand : Operands) 1827 reorderScalars(Operand, Mask); 1828 } 1829 1830 /// \returns the \p OpIdx operand of this TreeEntry. 1831 ValueList &getOperand(unsigned OpIdx) { 1832 assert(OpIdx < Operands.size() && "Off bounds"); 1833 return Operands[OpIdx]; 1834 } 1835 1836 /// \returns the \p OpIdx operand of this TreeEntry. 1837 ArrayRef<Value *> getOperand(unsigned OpIdx) const { 1838 assert(OpIdx < Operands.size() && "Off bounds"); 1839 return Operands[OpIdx]; 1840 } 1841 1842 /// \returns the number of operands. 1843 unsigned getNumOperands() const { return Operands.size(); } 1844 1845 /// \return the single \p OpIdx operand. 1846 Value *getSingleOperand(unsigned OpIdx) const { 1847 assert(OpIdx < Operands.size() && "Off bounds"); 1848 assert(!Operands[OpIdx].empty() && "No operand available"); 1849 return Operands[OpIdx][0]; 1850 } 1851 1852 /// Some of the instructions in the list have alternate opcodes. 1853 bool isAltShuffle() const { 1854 return getOpcode() != getAltOpcode(); 1855 } 1856 1857 bool isOpcodeOrAlt(Instruction *I) const { 1858 unsigned CheckedOpcode = I->getOpcode(); 1859 return (getOpcode() == CheckedOpcode || 1860 getAltOpcode() == CheckedOpcode); 1861 } 1862 1863 /// Chooses the correct key for scheduling data. If \p Op has the same (or 1864 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is 1865 /// \p OpValue. 1866 Value *isOneOf(Value *Op) const { 1867 auto *I = dyn_cast<Instruction>(Op); 1868 if (I && isOpcodeOrAlt(I)) 1869 return Op; 1870 return MainOp; 1871 } 1872 1873 void setOperations(const InstructionsState &S) { 1874 MainOp = S.MainOp; 1875 AltOp = S.AltOp; 1876 } 1877 1878 Instruction *getMainOp() const { 1879 return MainOp; 1880 } 1881 1882 Instruction *getAltOp() const { 1883 return AltOp; 1884 } 1885 1886 /// The main/alternate opcodes for the list of instructions. 1887 unsigned getOpcode() const { 1888 return MainOp ? 
MainOp->getOpcode() : 0; 1889 } 1890 1891 unsigned getAltOpcode() const { 1892 return AltOp ? AltOp->getOpcode() : 0; 1893 } 1894 1895 /// When ReuseReorderShuffleIndices is empty it just returns position of \p 1896 /// V within vector of Scalars. Otherwise, try to remap on its reuse index. 1897 int findLaneForValue(Value *V) const { 1898 unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V)); 1899 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 1900 if (!ReorderIndices.empty()) 1901 FoundLane = ReorderIndices[FoundLane]; 1902 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 1903 if (!ReuseShuffleIndices.empty()) { 1904 FoundLane = std::distance(ReuseShuffleIndices.begin(), 1905 find(ReuseShuffleIndices, FoundLane)); 1906 } 1907 return FoundLane; 1908 } 1909 1910 #ifndef NDEBUG 1911 /// Debug printer. 1912 LLVM_DUMP_METHOD void dump() const { 1913 dbgs() << Idx << ".\n"; 1914 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) { 1915 dbgs() << "Operand " << OpI << ":\n"; 1916 for (const Value *V : Operands[OpI]) 1917 dbgs().indent(2) << *V << "\n"; 1918 } 1919 dbgs() << "Scalars: \n"; 1920 for (Value *V : Scalars) 1921 dbgs().indent(2) << *V << "\n"; 1922 dbgs() << "State: "; 1923 switch (State) { 1924 case Vectorize: 1925 dbgs() << "Vectorize\n"; 1926 break; 1927 case ScatterVectorize: 1928 dbgs() << "ScatterVectorize\n"; 1929 break; 1930 case NeedToGather: 1931 dbgs() << "NeedToGather\n"; 1932 break; 1933 } 1934 dbgs() << "MainOp: "; 1935 if (MainOp) 1936 dbgs() << *MainOp << "\n"; 1937 else 1938 dbgs() << "NULL\n"; 1939 dbgs() << "AltOp: "; 1940 if (AltOp) 1941 dbgs() << *AltOp << "\n"; 1942 else 1943 dbgs() << "NULL\n"; 1944 dbgs() << "VectorizedValue: "; 1945 if (VectorizedValue) 1946 dbgs() << *VectorizedValue << "\n"; 1947 else 1948 dbgs() << "NULL\n"; 1949 dbgs() << "ReuseShuffleIndices: "; 1950 if (ReuseShuffleIndices.empty()) 1951 dbgs() << "Empty"; 1952 else 1953 for (unsigned ReuseIdx : ReuseShuffleIndices) 1954 dbgs() << ReuseIdx << ", "; 1955 dbgs() << "\n"; 1956 dbgs() << "ReorderIndices: "; 1957 for (unsigned ReorderIdx : ReorderIndices) 1958 dbgs() << ReorderIdx << ", "; 1959 dbgs() << "\n"; 1960 dbgs() << "UserTreeIndices: "; 1961 for (const auto &EInfo : UserTreeIndices) 1962 dbgs() << EInfo << ", "; 1963 dbgs() << "\n"; 1964 } 1965 #endif 1966 }; 1967 1968 #ifndef NDEBUG 1969 void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost, 1970 InstructionCost VecCost, 1971 InstructionCost ScalarCost) const { 1972 dbgs() << "SLP: Calculated costs for Tree:\n"; E->dump(); 1973 dbgs() << "SLP: Costs:\n"; 1974 dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n"; 1975 dbgs() << "SLP: VectorCost = " << VecCost << "\n"; 1976 dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n"; 1977 dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = " << 1978 ReuseShuffleCost + VecCost - ScalarCost << "\n"; 1979 } 1980 #endif 1981 1982 /// Create a new VectorizableTree entry. 1983 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle, 1984 const InstructionsState &S, 1985 const EdgeInfo &UserTreeIdx, 1986 ArrayRef<int> ReuseShuffleIndices = None, 1987 ArrayRef<unsigned> ReorderIndices = None) { 1988 TreeEntry::EntryState EntryState = 1989 Bundle ? 
TreeEntry::Vectorize : TreeEntry::NeedToGather; 1990 return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx, 1991 ReuseShuffleIndices, ReorderIndices); 1992 } 1993 1994 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, 1995 TreeEntry::EntryState EntryState, 1996 Optional<ScheduleData *> Bundle, 1997 const InstructionsState &S, 1998 const EdgeInfo &UserTreeIdx, 1999 ArrayRef<int> ReuseShuffleIndices = None, 2000 ArrayRef<unsigned> ReorderIndices = None) { 2001 assert(((!Bundle && EntryState == TreeEntry::NeedToGather) || 2002 (Bundle && EntryState != TreeEntry::NeedToGather)) && 2003 "Need to vectorize gather entry?"); 2004 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree)); 2005 TreeEntry *Last = VectorizableTree.back().get(); 2006 Last->Idx = VectorizableTree.size() - 1; 2007 Last->State = EntryState; 2008 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(), 2009 ReuseShuffleIndices.end()); 2010 if (ReorderIndices.empty()) { 2011 Last->Scalars.assign(VL.begin(), VL.end()); 2012 Last->setOperations(S); 2013 } else { 2014 // Reorder scalars and build final mask. 2015 Last->Scalars.assign(VL.size(), nullptr); 2016 transform(ReorderIndices, Last->Scalars.begin(), 2017 [VL](unsigned Idx) -> Value * { 2018 if (Idx >= VL.size()) 2019 return UndefValue::get(VL.front()->getType()); 2020 return VL[Idx]; 2021 }); 2022 InstructionsState S = getSameOpcode(Last->Scalars); 2023 Last->setOperations(S); 2024 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end()); 2025 } 2026 if (Last->State != TreeEntry::NeedToGather) { 2027 for (Value *V : VL) { 2028 assert(!getTreeEntry(V) && "Scalar already in tree!"); 2029 ScalarToTreeEntry[V] = Last; 2030 } 2031 // Update the scheduler bundle to point to this TreeEntry. 2032 unsigned Lane = 0; 2033 for (ScheduleData *BundleMember = Bundle.getValue(); BundleMember; 2034 BundleMember = BundleMember->NextInBundle) { 2035 BundleMember->TE = Last; 2036 BundleMember->Lane = Lane; 2037 ++Lane; 2038 } 2039 assert((!Bundle.getValue() || Lane == VL.size()) && 2040 "Bundle and VL out of sync"); 2041 } else { 2042 MustGather.insert(VL.begin(), VL.end()); 2043 } 2044 2045 if (UserTreeIdx.UserTE) 2046 Last->UserTreeIndices.push_back(UserTreeIdx); 2047 2048 return Last; 2049 } 2050 2051 /// -- Vectorization State -- 2052 /// Holds all of the tree entries. 2053 TreeEntry::VecTreeTy VectorizableTree; 2054 2055 #ifndef NDEBUG 2056 /// Debug printer. 2057 LLVM_DUMP_METHOD void dumpVectorizableTree() const { 2058 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) { 2059 VectorizableTree[Id]->dump(); 2060 dbgs() << "\n"; 2061 } 2062 } 2063 #endif 2064 2065 TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); } 2066 2067 const TreeEntry *getTreeEntry(Value *V) const { 2068 return ScalarToTreeEntry.lookup(V); 2069 } 2070 2071 /// Maps a specific scalar to its tree entry. 2072 SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry; 2073 2074 /// Maps a value to the proposed vectorizable size. 2075 SmallDenseMap<Value *, unsigned> InstrElementSize; 2076 2077 /// A list of scalars that we found that we need to keep as scalars. 2078 ValueSet MustGather; 2079 2080 /// This POD struct describes one external user in the vectorized tree. 2081 struct ExternalUser { 2082 ExternalUser(Value *S, llvm::User *U, int L) 2083 : Scalar(S), User(U), Lane(L) {} 2084 2085 // Which scalar in our function. 2086 Value *Scalar; 2087 2088 // Which user that uses the scalar. 
2089 llvm::User *User; 2090 2091 // Which lane does the scalar belong to. 2092 int Lane; 2093 }; 2094 using UserList = SmallVector<ExternalUser, 16>; 2095 2096 /// Checks if two instructions may access the same memory. 2097 /// 2098 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it 2099 /// is invariant in the calling loop. 2100 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1, 2101 Instruction *Inst2) { 2102 // First check if the result is already in the cache. 2103 AliasCacheKey key = std::make_pair(Inst1, Inst2); 2104 Optional<bool> &result = AliasCache[key]; 2105 if (result.hasValue()) { 2106 return result.getValue(); 2107 } 2108 bool aliased = true; 2109 if (Loc1.Ptr && isSimple(Inst1)) 2110 aliased = isModOrRefSet(AA->getModRefInfo(Inst2, Loc1)); 2111 // Store the result in the cache. 2112 result = aliased; 2113 return aliased; 2114 } 2115 2116 using AliasCacheKey = std::pair<Instruction *, Instruction *>; 2117 2118 /// Cache for alias results. 2119 /// TODO: consider moving this to the AliasAnalysis itself. 2120 DenseMap<AliasCacheKey, Optional<bool>> AliasCache; 2121 2122 /// Removes an instruction from its block and eventually deletes it. 2123 /// It's like Instruction::eraseFromParent() except that the actual deletion 2124 /// is delayed until BoUpSLP is destructed. 2125 /// This is required to ensure that there are no incorrect collisions in the 2126 /// AliasCache, which can happen if a new instruction is allocated at the 2127 /// same address as a previously deleted instruction. 2128 void eraseInstruction(Instruction *I, bool ReplaceOpsWithUndef = false) { 2129 auto It = DeletedInstructions.try_emplace(I, ReplaceOpsWithUndef).first; 2130 It->getSecond() = It->getSecond() && ReplaceOpsWithUndef; 2131 } 2132 2133 /// Temporary store for deleted instructions. Instructions will be deleted 2134 /// eventually when the BoUpSLP is destructed. 2135 DenseMap<Instruction *, bool> DeletedInstructions; 2136 2137 /// A list of values that need to be extracted out of the tree. 2138 /// This list holds pairs of (Internal Scalar : External User). External User 2139 /// can be nullptr, which means that this Internal Scalar will be used later, 2140 /// after vectorization. 2141 UserList ExternalUses; 2142 2143 /// Values used only by @llvm.assume calls. 2144 SmallPtrSet<const Value *, 32> EphValues; 2145 2146 /// Holds all of the instructions that we gathered. 2147 SetVector<Instruction *> GatherShuffleSeq; 2148 2149 /// A list of blocks that we are going to CSE. 2150 SetVector<BasicBlock *> CSEBlocks; 2151 2152 /// Contains all scheduling relevant data for an instruction. 2153 /// A ScheduleData either represents a single instruction or a member of an 2154 /// instruction bundle (= a group of instructions which is combined into a 2155 /// vector instruction). 2156 struct ScheduleData { 2157 // The initial value for the dependency counters. It means that the 2158 // dependencies are not calculated yet. 2159 enum { InvalidDeps = -1 }; 2160 2161 ScheduleData() = default; 2162 2163 void init(int BlockSchedulingRegionID, Value *OpVal) { 2164 FirstInBundle = this; 2165 NextInBundle = nullptr; 2166 NextLoadStore = nullptr; 2167 IsScheduled = false; 2168 SchedulingRegionID = BlockSchedulingRegionID; 2169 UnscheduledDepsInBundle = UnscheduledDeps; 2170 clearDependencies(); 2171 OpValue = OpVal; 2172 TE = nullptr; 2173 Lane = -1; 2174 } 2175 2176 /// Returns true if the dependency information has been calculated.
2177 bool hasValidDependencies() const { return Dependencies != InvalidDeps; } 2178 2179 /// Returns true for single instructions and for bundle representatives 2180 /// (= the head of a bundle). 2181 bool isSchedulingEntity() const { return FirstInBundle == this; } 2182 2183 /// Returns true if it represents an instruction bundle and not only a 2184 /// single instruction. 2185 bool isPartOfBundle() const { 2186 return NextInBundle != nullptr || FirstInBundle != this; 2187 } 2188 2189 /// Returns true if it is ready for scheduling, i.e. it has no more 2190 /// unscheduled depending instructions/bundles. 2191 bool isReady() const { 2192 assert(isSchedulingEntity() && 2193 "can't consider non-scheduling entity for ready list"); 2194 return UnscheduledDepsInBundle == 0 && !IsScheduled; 2195 } 2196 2197 /// Modifies the number of unscheduled dependencies, also updating it for 2198 /// the whole bundle. 2199 int incrementUnscheduledDeps(int Incr) { 2200 UnscheduledDeps += Incr; 2201 return FirstInBundle->UnscheduledDepsInBundle += Incr; 2202 } 2203 2204 /// Sets the number of unscheduled dependencies to the number of 2205 /// dependencies. 2206 void resetUnscheduledDeps() { 2207 incrementUnscheduledDeps(Dependencies - UnscheduledDeps); 2208 } 2209 2210 /// Clears all dependency information. 2211 void clearDependencies() { 2212 Dependencies = InvalidDeps; 2213 resetUnscheduledDeps(); 2214 MemoryDependencies.clear(); 2215 } 2216 2217 void dump(raw_ostream &os) const { 2218 if (!isSchedulingEntity()) { 2219 os << "/ " << *Inst; 2220 } else if (NextInBundle) { 2221 os << '[' << *Inst; 2222 ScheduleData *SD = NextInBundle; 2223 while (SD) { 2224 os << ';' << *SD->Inst; 2225 SD = SD->NextInBundle; 2226 } 2227 os << ']'; 2228 } else { 2229 os << *Inst; 2230 } 2231 } 2232 2233 Instruction *Inst = nullptr; 2234 2235 /// Points to the head in an instruction bundle (and always to this for 2236 /// single instructions). 2237 ScheduleData *FirstInBundle = nullptr; 2238 2239 /// Singly linked list of all instructions in a bundle. Null if it is a 2240 /// single instruction. 2241 ScheduleData *NextInBundle = nullptr; 2242 2243 /// Singly linked list of all memory instructions (e.g. load, store, call) 2244 /// in the block - until the end of the scheduling region. 2245 ScheduleData *NextLoadStore = nullptr; 2246 2247 /// The dependent memory instructions. 2248 /// This list is derived on demand in calculateDependencies(). 2249 SmallVector<ScheduleData *, 4> MemoryDependencies; 2250 2251 /// This ScheduleData is in the current scheduling region if this matches 2252 /// the current SchedulingRegionID of BlockScheduling. 2253 int SchedulingRegionID = 0; 2254 2255 /// Used for getting a "good" final ordering of instructions. 2256 int SchedulingPriority = 0; 2257 2258 /// The number of dependencies. Consists of the number of users of the 2259 /// instruction plus the number of dependent memory instructions (if any). 2260 /// This value is calculated on demand. 2261 /// If InvalidDeps, the number of dependencies is not calculated yet. 2262 int Dependencies = InvalidDeps; 2263 2264 /// The number of dependencies minus the number of dependencies of scheduled 2265 /// instructions. As soon as this is zero, the instruction/bundle gets ready 2266 /// for scheduling. 2267 /// Note that this is negative as long as Dependencies is not calculated. 2268 int UnscheduledDeps = InvalidDeps; 2269 2270 /// The sum of UnscheduledDeps in a bundle. Equals UnscheduledDeps for 2271 /// single instructions.
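/// For example (illustrative): a three-member bundle whose members have
/// UnscheduledDeps of 2, 0 and 1 has UnscheduledDepsInBundle == 3; once this
/// drops to 0 (and the bundle is not yet scheduled), isReady() reports the
/// bundle as ready.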
2272 int UnscheduledDepsInBundle = InvalidDeps; 2273 2274 /// True if this instruction is scheduled (or considered as scheduled in the 2275 /// dry-run). 2276 bool IsScheduled = false; 2277 2278 /// Opcode of the current instruction in the schedule data. 2279 Value *OpValue = nullptr; 2280 2281 /// The TreeEntry that this instruction corresponds to. 2282 TreeEntry *TE = nullptr; 2283 2284 /// The lane of this node in the TreeEntry. 2285 int Lane = -1; 2286 }; 2287 2288 #ifndef NDEBUG 2289 friend inline raw_ostream &operator<<(raw_ostream &os, 2290 const BoUpSLP::ScheduleData &SD) { 2291 SD.dump(os); 2292 return os; 2293 } 2294 #endif 2295 2296 friend struct GraphTraits<BoUpSLP *>; 2297 friend struct DOTGraphTraits<BoUpSLP *>; 2298 2299 /// Contains all scheduling data for a basic block. 2300 struct BlockScheduling { 2301 BlockScheduling(BasicBlock *BB) 2302 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {} 2303 2304 void clear() { 2305 ReadyInsts.clear(); 2306 ScheduleStart = nullptr; 2307 ScheduleEnd = nullptr; 2308 FirstLoadStoreInRegion = nullptr; 2309 LastLoadStoreInRegion = nullptr; 2310 2311 // Reduce the maximum schedule region size by the size of the 2312 // previous scheduling run. 2313 ScheduleRegionSizeLimit -= ScheduleRegionSize; 2314 if (ScheduleRegionSizeLimit < MinScheduleRegionSize) 2315 ScheduleRegionSizeLimit = MinScheduleRegionSize; 2316 ScheduleRegionSize = 0; 2317 2318 // Make a new scheduling region, i.e. all existing ScheduleData is not 2319 // in the new region yet. 2320 ++SchedulingRegionID; 2321 } 2322 2323 ScheduleData *getScheduleData(Value *V) { 2324 ScheduleData *SD = ScheduleDataMap[V]; 2325 if (SD && SD->SchedulingRegionID == SchedulingRegionID) 2326 return SD; 2327 return nullptr; 2328 } 2329 2330 ScheduleData *getScheduleData(Value *V, Value *Key) { 2331 if (V == Key) 2332 return getScheduleData(V); 2333 auto I = ExtraScheduleDataMap.find(V); 2334 if (I != ExtraScheduleDataMap.end()) { 2335 ScheduleData *SD = I->second[Key]; 2336 if (SD && SD->SchedulingRegionID == SchedulingRegionID) 2337 return SD; 2338 } 2339 return nullptr; 2340 } 2341 2342 bool isInSchedulingRegion(ScheduleData *SD) const { 2343 return SD->SchedulingRegionID == SchedulingRegionID; 2344 } 2345 2346 /// Marks an instruction as scheduled and puts all dependent ready 2347 /// instructions into the ready-list. 2348 template <typename ReadyListType> 2349 void schedule(ScheduleData *SD, ReadyListType &ReadyList) { 2350 SD->IsScheduled = true; 2351 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); 2352 2353 ScheduleData *BundleMember = SD; 2354 while (BundleMember) { 2355 if (BundleMember->Inst != BundleMember->OpValue) { 2356 BundleMember = BundleMember->NextInBundle; 2357 continue; 2358 } 2359 // Handle the def-use chain dependencies. 2360 2361 // Decrement the unscheduled counter and insert to ready list if ready. 2362 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) { 2363 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) { 2364 if (OpDef && OpDef->hasValidDependencies() && 2365 OpDef->incrementUnscheduledDeps(-1) == 0) { 2366 // There are no more unscheduled dependencies after 2367 // decrementing, so we can put the dependent instruction 2368 // into the ready list. 
2369 ScheduleData *DepBundle = OpDef->FirstInBundle; 2370 assert(!DepBundle->IsScheduled && 2371 "already scheduled bundle gets ready"); 2372 ReadyList.insert(DepBundle); 2373 LLVM_DEBUG(dbgs() 2374 << "SLP: gets ready (def): " << *DepBundle << "\n"); 2375 } 2376 }); 2377 }; 2378 2379 // If BundleMember is a vector bundle, its operands may have been 2380 // reordered during buildTree(). We therefore need to get its operands 2381 // through the TreeEntry. 2382 if (TreeEntry *TE = BundleMember->TE) { 2383 int Lane = BundleMember->Lane; 2384 assert(Lane >= 0 && "Lane not set"); 2385 2386 // Since the vectorization tree is being built recursively, this 2387 // assertion ensures that the tree entry has all operands set before 2388 // reaching this code. A couple of exceptions known at the moment are 2389 // extracts where their second (immediate) operand is not added. Since 2390 // immediates do not affect scheduler behavior this is considered 2391 // okay. 2392 auto *In = TE->getMainOp(); 2393 assert(In && 2394 (isa<ExtractValueInst>(In) || isa<ExtractElementInst>(In) || 2395 In->getNumOperands() == TE->getNumOperands()) && 2396 "Missed TreeEntry operands?"); 2397 (void)In; // fake use to avoid build failure when assertions disabled 2398 2399 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands(); 2400 OpIdx != NumOperands; ++OpIdx) 2401 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane])) 2402 DecrUnsched(I); 2403 } else { 2404 // If BundleMember is a stand-alone instruction, no operand reordering 2405 // has taken place, so we directly access its operands. 2406 for (Use &U : BundleMember->Inst->operands()) 2407 if (auto *I = dyn_cast<Instruction>(U.get())) 2408 DecrUnsched(I); 2409 } 2410 // Handle the memory dependencies. 2411 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) { 2412 if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) { 2413 // There are no more unscheduled dependencies after decrementing, 2414 // so we can put the dependent instruction into the ready list. 2415 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle; 2416 assert(!DepBundle->IsScheduled && 2417 "already scheduled bundle gets ready"); 2418 ReadyList.insert(DepBundle); 2419 LLVM_DEBUG(dbgs() 2420 << "SLP: gets ready (mem): " << *DepBundle << "\n"); 2421 } 2422 } 2423 BundleMember = BundleMember->NextInBundle; 2424 } 2425 } 2426 2427 void doForAllOpcodes(Value *V, 2428 function_ref<void(ScheduleData *SD)> Action) { 2429 if (ScheduleData *SD = getScheduleData(V)) 2430 Action(SD); 2431 auto I = ExtraScheduleDataMap.find(V); 2432 if (I != ExtraScheduleDataMap.end()) 2433 for (auto &P : I->second) 2434 if (P.second->SchedulingRegionID == SchedulingRegionID) 2435 Action(P.second); 2436 } 2437 2438 /// Put all instructions into the ReadyList which are ready for scheduling. 2439 template <typename ReadyListType> 2440 void initialFillReadyList(ReadyListType &ReadyList) { 2441 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 2442 doForAllOpcodes(I, [&](ScheduleData *SD) { 2443 if (SD->isSchedulingEntity() && SD->isReady()) { 2444 ReadyList.insert(SD); 2445 LLVM_DEBUG(dbgs() 2446 << "SLP: initially in ready list: " << *I << "\n"); 2447 } 2448 }); 2449 } 2450 } 2451 2452 /// Checks if a bundle of instructions can be scheduled, i.e. has no 2453 /// cyclic dependencies. This is only a dry-run; no instructions are 2454 /// actually moved at this stage. 2455 /// \returns the scheduling bundle. The returned Optional value is non-None 2456 /// if \p VL is allowed to be scheduled.
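/// A typical caller does roughly the following (illustrative sketch; see the
/// uses in buildTree_rec() for the actual call sites):
///   Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S);
///   if (!Bundle) {
///     // Scheduling failed; gather the scalars instead of vectorizing them.
///     newTreeEntry(VL, None, S, UserTreeIdx);
///     return;
///   }
///   newTreeEntry(VL, Bundle, S, UserTreeIdx);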
2457 Optional<ScheduleData *> 2458 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 2459 const InstructionsState &S); 2460 2461 /// Un-bundles a group of instructions. 2462 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue); 2463 2464 /// Allocates a schedule data chunk. 2465 ScheduleData *allocateScheduleDataChunks(); 2466 2467 /// Extends the scheduling region so that V is inside the region. 2468 /// \returns true if the region size is within the limit. 2469 bool extendSchedulingRegion(Value *V, const InstructionsState &S); 2470 2471 /// Initialize the ScheduleData structures for new instructions in the 2472 /// scheduling region. 2473 void initScheduleData(Instruction *FromI, Instruction *ToI, 2474 ScheduleData *PrevLoadStore, 2475 ScheduleData *NextLoadStore); 2476 2477 /// Updates the dependency information of a bundle and of all instructions/ 2478 /// bundles which depend on the original bundle. 2479 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList, 2480 BoUpSLP *SLP); 2481 2482 /// Sets all instructions in the scheduling region to un-scheduled. 2483 void resetSchedule(); 2484 2485 BasicBlock *BB; 2486 2487 /// Simple memory allocation for ScheduleData. 2488 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks; 2489 2490 /// The size of a ScheduleData array in ScheduleDataChunks. 2491 int ChunkSize; 2492 2493 /// The allocator position in the current chunk, which is the last entry 2494 /// of ScheduleDataChunks. 2495 int ChunkPos; 2496 2497 /// Attaches ScheduleData to Instruction. 2498 /// Note that the mapping survives during all vectorization iterations, i.e. 2499 /// ScheduleData structures are recycled. 2500 DenseMap<Value *, ScheduleData *> ScheduleDataMap; 2501 2502 /// Attaches ScheduleData to Instruction with the leading key. 2503 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>> 2504 ExtraScheduleDataMap; 2505 2506 struct ReadyList : SmallVector<ScheduleData *, 8> { 2507 void insert(ScheduleData *SD) { push_back(SD); } 2508 }; 2509 2510 /// The ready-list for scheduling (only used for the dry-run). 2511 ReadyList ReadyInsts; 2512 2513 /// The first instruction of the scheduling region. 2514 Instruction *ScheduleStart = nullptr; 2515 2516 /// The first instruction _after_ the scheduling region. 2517 Instruction *ScheduleEnd = nullptr; 2518 2519 /// The first memory accessing instruction in the scheduling region 2520 /// (can be null). 2521 ScheduleData *FirstLoadStoreInRegion = nullptr; 2522 2523 /// The last memory accessing instruction in the scheduling region 2524 /// (can be null). 2525 ScheduleData *LastLoadStoreInRegion = nullptr; 2526 2527 /// The current size of the scheduling region. 2528 int ScheduleRegionSize = 0; 2529 2530 /// The maximum size allowed for the scheduling region. 2531 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget; 2532 2533 /// The ID of the scheduling region. For a new vectorization iteration this 2534 /// is incremented, which "removes" all ScheduleData from the region. 2535 // Make sure that the initial SchedulingRegionID is greater than the 2536 // initial SchedulingRegionID in ScheduleData (which is 0). 2537 int SchedulingRegionID = 1; 2538 }; 2539 2540 /// Attaches the BlockScheduling structures to basic blocks. 2541 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules; 2542 2543 /// Performs the "real" scheduling. Done before vectorization is actually 2544 /// performed in a basic block.
2545 void scheduleBlock(BlockScheduling *BS); 2546 2547 /// List of users to ignore during scheduling and that don't need extracting. 2548 ArrayRef<Value *> UserIgnoreList; 2549 2550 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of 2551 /// sorted SmallVectors of unsigned. 2552 struct OrdersTypeDenseMapInfo { 2553 static OrdersType getEmptyKey() { 2554 OrdersType V; 2555 V.push_back(~1U); 2556 return V; 2557 } 2558 2559 static OrdersType getTombstoneKey() { 2560 OrdersType V; 2561 V.push_back(~2U); 2562 return V; 2563 } 2564 2565 static unsigned getHashValue(const OrdersType &V) { 2566 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); 2567 } 2568 2569 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) { 2570 return LHS == RHS; 2571 } 2572 }; 2573 2574 // Analysis and block reference. 2575 Function *F; 2576 ScalarEvolution *SE; 2577 TargetTransformInfo *TTI; 2578 TargetLibraryInfo *TLI; 2579 AAResults *AA; 2580 LoopInfo *LI; 2581 DominatorTree *DT; 2582 AssumptionCache *AC; 2583 DemandedBits *DB; 2584 const DataLayout *DL; 2585 OptimizationRemarkEmitter *ORE; 2586 2587 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. 2588 unsigned MinVecRegSize; // Set by cl::opt (default: 128). 2589 2590 /// Instruction builder to construct the vectorized tree. 2591 IRBuilder<> Builder; 2592 2593 /// A map of scalar integer values to the smallest bit width with which they 2594 /// can legally be represented. The values map to (width, signed) pairs, 2595 /// where "width" indicates the minimum bit width and "signed" is True if the 2596 /// value must be signed-extended, rather than zero-extended, back to its 2597 /// original width. 2598 MapVector<Value *, std::pair<uint64_t, bool>> MinBWs; 2599 }; 2600 2601 } // end namespace slpvectorizer 2602 2603 template <> struct GraphTraits<BoUpSLP *> { 2604 using TreeEntry = BoUpSLP::TreeEntry; 2605 2606 /// NodeRef has to be a pointer per the GraphWriter. 2607 using NodeRef = TreeEntry *; 2608 2609 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy; 2610 2611 /// Add the VectorizableTree to the index iterator to be able to return 2612 /// TreeEntry pointers. 2613 struct ChildIteratorType 2614 : public iterator_adaptor_base< 2615 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> { 2616 ContainerTy &VectorizableTree; 2617 2618 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W, 2619 ContainerTy &VT) 2620 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {} 2621 2622 NodeRef operator*() { return I->UserTE; } 2623 }; 2624 2625 static NodeRef getEntryNode(BoUpSLP &R) { 2626 return R.VectorizableTree[0].get(); 2627 } 2628 2629 static ChildIteratorType child_begin(NodeRef N) { 2630 return {N->UserTreeIndices.begin(), N->Container}; 2631 } 2632 2633 static ChildIteratorType child_end(NodeRef N) { 2634 return {N->UserTreeIndices.end(), N->Container}; 2635 } 2636 2637 /// For the node iterator we just need to turn the TreeEntry iterator into a 2638 /// TreeEntry* iterator so that it dereferences to NodeRef. 
2639 class nodes_iterator { 2640 using ItTy = ContainerTy::iterator; 2641 ItTy It; 2642 2643 public: 2644 nodes_iterator(const ItTy &It2) : It(It2) {} 2645 NodeRef operator*() { return It->get(); } 2646 nodes_iterator operator++() { 2647 ++It; 2648 return *this; 2649 } 2650 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; } 2651 }; 2652 2653 static nodes_iterator nodes_begin(BoUpSLP *R) { 2654 return nodes_iterator(R->VectorizableTree.begin()); 2655 } 2656 2657 static nodes_iterator nodes_end(BoUpSLP *R) { 2658 return nodes_iterator(R->VectorizableTree.end()); 2659 } 2660 2661 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); } 2662 }; 2663 2664 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits { 2665 using TreeEntry = BoUpSLP::TreeEntry; 2666 2667 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {} 2668 2669 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) { 2670 std::string Str; 2671 raw_string_ostream OS(Str); 2672 if (isSplat(Entry->Scalars)) 2673 OS << "<splat> "; 2674 for (auto V : Entry->Scalars) { 2675 OS << *V; 2676 if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) { 2677 return EU.Scalar == V; 2678 })) 2679 OS << " <extract>"; 2680 OS << "\n"; 2681 } 2682 return Str; 2683 } 2684 2685 static std::string getNodeAttributes(const TreeEntry *Entry, 2686 const BoUpSLP *) { 2687 if (Entry->State == TreeEntry::NeedToGather) 2688 return "color=red"; 2689 return ""; 2690 } 2691 }; 2692 2693 } // end namespace llvm 2694 2695 BoUpSLP::~BoUpSLP() { 2696 for (const auto &Pair : DeletedInstructions) { 2697 // Replace operands of ignored instructions with Undefs if they were 2698 // marked for deletion. 2699 if (Pair.getSecond()) { 2700 Value *Undef = UndefValue::get(Pair.getFirst()->getType()); 2701 Pair.getFirst()->replaceAllUsesWith(Undef); 2702 } 2703 Pair.getFirst()->dropAllReferences(); 2704 } 2705 for (const auto &Pair : DeletedInstructions) { 2706 assert(Pair.getFirst()->use_empty() && 2707 "trying to erase instruction with users."); 2708 Pair.getFirst()->eraseFromParent(); 2709 } 2710 #ifdef EXPENSIVE_CHECKS 2711 // If we could guarantee that this call is not extremely slow, we could 2712 // remove the ifdef limitation (see PR47712). 2713 assert(!verifyFunction(*F, &dbgs())); 2714 #endif 2715 } 2716 2717 void BoUpSLP::eraseInstructions(ArrayRef<Value *> AV) { 2718 for (auto *V : AV) { 2719 if (auto *I = dyn_cast<Instruction>(V)) 2720 eraseInstruction(I, /*ReplaceOpsWithUndef=*/true); 2721 }; 2722 } 2723 2724 /// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses 2725 /// contains the original mask for the scalars reused in the node. The 2726 /// procedure transforms this mask in accordance with the given \p Mask. 2727 static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) { 2728 assert(!Mask.empty() && Reuses.size() == Mask.size() && 2729 "Expected non-empty mask."); 2730 SmallVector<int> Prev(Reuses.begin(), Reuses.end()); 2731 Prev.swap(Reuses); 2732 for (unsigned I = 0, E = Prev.size(); I < E; ++I) 2733 if (Mask[I] != UndefMaskElem) 2734 Reuses[Mask[I]] = Prev[I]; 2735 } 2736 2737 /// Reorders the given \p Order according to the given \p Mask. \p Order is 2738 /// the original order of the scalars. The procedure transforms the provided 2739 /// order in accordance with the given \p Mask. If the resulting \p Order is 2740 /// just an identity order, \p Order is cleared.
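/// For example (illustrative): an empty \p Order (the identity) with
/// \p Mask == {1, 0, 3, 2} produces \p Order == {1, 0, 3, 2}, while
/// \p Order == {1, 0, 3, 2} with the same \p Mask becomes the identity again
/// and is therefore cleared.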
2741 static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) { 2742 assert(!Mask.empty() && "Expected non-empty mask."); 2743 SmallVector<int> MaskOrder; 2744 if (Order.empty()) { 2745 MaskOrder.resize(Mask.size()); 2746 std::iota(MaskOrder.begin(), MaskOrder.end(), 0); 2747 } else { 2748 inversePermutation(Order, MaskOrder); 2749 } 2750 reorderReuses(MaskOrder, Mask); 2751 if (ShuffleVectorInst::isIdentityMask(MaskOrder)) { 2752 Order.clear(); 2753 return; 2754 } 2755 Order.assign(Mask.size(), Mask.size()); 2756 for (unsigned I = 0, E = Mask.size(); I < E; ++I) 2757 if (MaskOrder[I] != UndefMaskElem) 2758 Order[MaskOrder[I]] = I; 2759 fixupOrderingIndices(Order); 2760 } 2761 2762 Optional<BoUpSLP::OrdersType> 2763 BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) { 2764 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only."); 2765 unsigned NumScalars = TE.Scalars.size(); 2766 OrdersType CurrentOrder(NumScalars, NumScalars); 2767 SmallVector<int> Positions; 2768 SmallBitVector UsedPositions(NumScalars); 2769 const TreeEntry *STE = nullptr; 2770 // Try to find all gathered scalars that get vectorized in another 2771 // vectorized node. Here we can have only a single vectorized tree node to 2772 // correctly identify the order of the gathered scalars. 2773 for (unsigned I = 0; I < NumScalars; ++I) { 2774 Value *V = TE.Scalars[I]; 2775 if (!isa<LoadInst, ExtractElementInst, ExtractValueInst>(V)) 2776 continue; 2777 if (const auto *LocalSTE = getTreeEntry(V)) { 2778 if (!STE) 2779 STE = LocalSTE; 2780 else if (STE != LocalSTE) 2781 // Take the order only from the single vector node. 2782 return None; 2783 unsigned Lane = 2784 std::distance(STE->Scalars.begin(), find(STE->Scalars, V)); 2785 if (Lane >= NumScalars) 2786 return None; 2787 if (CurrentOrder[Lane] != NumScalars) { 2788 if (Lane != I) 2789 continue; 2790 UsedPositions.reset(CurrentOrder[Lane]); 2791 } 2792 // The partial identity (where only some elements of the gather node are 2793 // in the identity order) is good. 2794 CurrentOrder[Lane] = I; 2795 UsedPositions.set(I); 2796 } 2797 } 2798 // Need to keep the order if we have a vector entry and at least 2 scalars or 2799 // the vectorized entry has just 2 scalars. 2800 if (STE && (UsedPositions.count() > 1 || STE->Scalars.size() == 2)) { 2801 auto &&IsIdentityOrder = [NumScalars](ArrayRef<unsigned> CurrentOrder) { 2802 for (unsigned I = 0; I < NumScalars; ++I) 2803 if (CurrentOrder[I] != I && CurrentOrder[I] != NumScalars) 2804 return false; 2805 return true; 2806 }; 2807 if (IsIdentityOrder(CurrentOrder)) { 2808 CurrentOrder.clear(); 2809 return CurrentOrder; 2810 } 2811 auto *It = CurrentOrder.begin(); 2812 for (unsigned I = 0; I < NumScalars;) { 2813 if (UsedPositions.test(I)) { 2814 ++I; 2815 continue; 2816 } 2817 if (*It == NumScalars) { 2818 *It = I; 2819 ++I; 2820 } 2821 ++It; 2822 } 2823 return CurrentOrder; 2824 } 2825 return None; 2826 } 2827 2828 Optional<BoUpSLP::OrdersType> BoUpSLP::getReorderingData(const TreeEntry &TE, 2829 bool TopToBottom) { 2830 // No need to reorder if we need to shuffle reuses; the node still needs to 2831 // be shuffled.
2832 if (!TE.ReuseShuffleIndices.empty()) 2833 return None; 2834 if (TE.State == TreeEntry::Vectorize && 2835 (isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE.getMainOp()) || 2836 (TopToBottom && isa<StoreInst, InsertElementInst>(TE.getMainOp()))) && 2837 !TE.isAltShuffle()) 2838 return TE.ReorderIndices; 2839 if (TE.State == TreeEntry::NeedToGather) { 2840 // TODO: add analysis of other gather nodes with extractelement 2841 // instructions and other values/instructions, not only undefs. 2842 if (((TE.getOpcode() == Instruction::ExtractElement && 2843 !TE.isAltShuffle()) || 2844 (all_of(TE.Scalars, 2845 [](Value *V) { 2846 return isa<UndefValue, ExtractElementInst>(V); 2847 }) && 2848 any_of(TE.Scalars, 2849 [](Value *V) { return isa<ExtractElementInst>(V); }))) && 2850 all_of(TE.Scalars, 2851 [](Value *V) { 2852 auto *EE = dyn_cast<ExtractElementInst>(V); 2853 return !EE || isa<FixedVectorType>(EE->getVectorOperandType()); 2854 }) && 2855 allSameType(TE.Scalars)) { 2856 // Check that gather of extractelements can be represented as 2857 // just a shuffle of a single vector. 2858 OrdersType CurrentOrder; 2859 bool Reuse = canReuseExtract(TE.Scalars, TE.getMainOp(), CurrentOrder); 2860 if (Reuse || !CurrentOrder.empty()) { 2861 if (!CurrentOrder.empty()) 2862 fixupOrderingIndices(CurrentOrder); 2863 return CurrentOrder; 2864 } 2865 } 2866 if (Optional<OrdersType> CurrentOrder = findReusedOrderedScalars(TE)) 2867 return CurrentOrder; 2868 } 2869 return None; 2870 } 2871 2872 void BoUpSLP::reorderTopToBottom() { 2873 // Maps VF to the graph nodes. 2874 DenseMap<unsigned, SmallPtrSet<TreeEntry *, 4>> VFToOrderedEntries; 2875 // ExtractElement gather nodes which can be vectorized and need to handle 2876 // their ordering. 2877 DenseMap<const TreeEntry *, OrdersType> GathersToOrders; 2878 // Find all reorderable nodes with the given VF. 2879 // Currently these are vectorized loads, extracts + some gathering of extracts. 2880 for_each(VectorizableTree, [this, &VFToOrderedEntries, &GathersToOrders]( 2881 const std::unique_ptr<TreeEntry> &TE) { 2882 if (Optional<OrdersType> CurrentOrder = 2883 getReorderingData(*TE.get(), /*TopToBottom=*/true)) { 2884 VFToOrderedEntries[TE->Scalars.size()].insert(TE.get()); 2885 if (TE->State != TreeEntry::Vectorize) 2886 GathersToOrders.try_emplace(TE.get(), *CurrentOrder); 2887 } 2888 }); 2889 2890 // Reorder the graph nodes according to their vectorization factor. 2891 for (unsigned VF = VectorizableTree.front()->Scalars.size(); VF > 1; 2892 VF /= 2) { 2893 auto It = VFToOrderedEntries.find(VF); 2894 if (It == VFToOrderedEntries.end()) 2895 continue; 2896 // Try to find the most profitable order. We are just looking for the most 2897 // used order and reorder scalar elements in the nodes according to this 2898 // most used order. 2899 const SmallPtrSetImpl<TreeEntry *> &OrderedEntries = It->getSecond(); 2900 // All operands are reordered and used only in this node - propagate the 2901 // most used order to the user node. 2902 MapVector<OrdersType, unsigned, 2903 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>> 2904 OrdersUses; 2905 SmallPtrSet<const TreeEntry *, 4> VisitedOps; 2906 for (const TreeEntry *OpTE : OrderedEntries) { 2907 // No need to reorder these nodes, still need to extend and to use shuffle, 2908 // just need to merge the reordering shuffle and the reuse shuffle. 2909 if (!OpTE->ReuseShuffleIndices.empty()) 2910 continue; 2911 // Count the number of order uses.
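// E.g. (illustrative): if two entries of this VF come with order {1, 0} and
// one entry comes with the empty (natural) order, {1, 0} collects the most
// uses and is chosen as BestOrder below.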
2912 const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & { 2913 if (OpTE->State == TreeEntry::NeedToGather) 2914 return GathersToOrders.find(OpTE)->second; 2915 return OpTE->ReorderIndices; 2916 }(); 2917 // Stores actually store the mask, not the order, need to invert. 2918 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && 2919 OpTE->getOpcode() == Instruction::Store && !Order.empty()) { 2920 SmallVector<int> Mask; 2921 inversePermutation(Order, Mask); 2922 unsigned E = Order.size(); 2923 OrdersType CurrentOrder(E, E); 2924 transform(Mask, CurrentOrder.begin(), [E](int Idx) { 2925 return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx); 2926 }); 2927 fixupOrderingIndices(CurrentOrder); 2928 ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second; 2929 } else { 2930 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second; 2931 } 2932 } 2933 // Set order of the user node. 2934 if (OrdersUses.empty()) 2935 continue; 2936 // Choose the most used order. 2937 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 2938 unsigned Cnt = OrdersUses.front().second; 2939 for (const auto &Pair : drop_begin(OrdersUses)) { 2940 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 2941 BestOrder = Pair.first; 2942 Cnt = Pair.second; 2943 } 2944 } 2945 // Set order of the user node. 2946 if (BestOrder.empty()) 2947 continue; 2948 SmallVector<int> Mask; 2949 inversePermutation(BestOrder, Mask); 2950 SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem); 2951 unsigned E = BestOrder.size(); 2952 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 2953 return I < E ? static_cast<int>(I) : UndefMaskElem; 2954 }); 2955 // Do an actual reordering, if profitable. 2956 for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 2957 // Just do the reordering for the nodes with the given VF. 2958 if (TE->Scalars.size() != VF) { 2959 if (TE->ReuseShuffleIndices.size() == VF) { 2960 // Need to reorder the reuses masks of the operands with smaller VF to 2961 // be able to find the match between the graph nodes and scalar 2962 // operands of the given node during vectorization/cost estimation. 2963 assert(all_of(TE->UserTreeIndices, 2964 [VF, &TE](const EdgeInfo &EI) { 2965 return EI.UserTE->Scalars.size() == VF || 2966 EI.UserTE->Scalars.size() == 2967 TE->Scalars.size(); 2968 }) && 2969 "All users must be of VF size."); 2970 // Update ordering of the operands with the smaller VF than the given 2971 // one. 2972 reorderReuses(TE->ReuseShuffleIndices, Mask); 2973 } 2974 continue; 2975 } 2976 if (TE->State == TreeEntry::Vectorize && 2977 isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst, 2978 InsertElementInst>(TE->getMainOp()) && 2979 !TE->isAltShuffle()) { 2980 // Build correct orders for extract{element,value}, loads and 2981 // stores. 2982 reorderOrder(TE->ReorderIndices, Mask); 2983 if (isa<InsertElementInst, StoreInst>(TE->getMainOp())) 2984 TE->reorderOperands(Mask); 2985 } else { 2986 // Reorder the node and its operands. 2987 TE->reorderOperands(Mask); 2988 assert(TE->ReorderIndices.empty() && 2989 "Expected empty reorder sequence."); 2990 reorderScalars(TE->Scalars, Mask); 2991 } 2992 if (!TE->ReuseShuffleIndices.empty()) { 2993 // Apply reversed order to keep the original ordering of the reused 2994 // elements to avoid extra reorder indices shuffling. 
        OrdersType CurrentOrder;
        reorderOrder(CurrentOrder, MaskOrder);
        SmallVector<int> NewReuses;
        inversePermutation(CurrentOrder, NewReuses);
        addMask(NewReuses, TE->ReuseShuffleIndices);
        TE->ReuseShuffleIndices.swap(NewReuses);
      }
    }
  }
}

void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
  SetVector<TreeEntry *> OrderedEntries;
  DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
  // Find all reorderable leaf nodes with the given VF.
  // Currently these are vectorized loads, extracts without alternate operands
  // + some gathering of extracts.
  SmallVector<TreeEntry *> NonVectorized;
  for_each(VectorizableTree, [this, &OrderedEntries, &GathersToOrders,
                              &NonVectorized](
                                 const std::unique_ptr<TreeEntry> &TE) {
    if (TE->State != TreeEntry::Vectorize)
      NonVectorized.push_back(TE.get());
    if (Optional<OrdersType> CurrentOrder =
            getReorderingData(*TE.get(), /*TopToBottom=*/false)) {
      OrderedEntries.insert(TE.get());
      if (TE->State != TreeEntry::Vectorize)
        GathersToOrders.try_emplace(TE.get(), *CurrentOrder);
    }
  });

  // Checks if the operands of the users are reorderable and have only a
  // single use.
  auto &&CheckOperands =
      [this, &NonVectorized](const auto &Data,
                             SmallVectorImpl<TreeEntry *> &GatherOps) {
        for (unsigned I = 0, E = Data.first->getNumOperands(); I < E; ++I) {
          if (any_of(Data.second,
                     [I](const std::pair<unsigned, TreeEntry *> &OpData) {
                       return OpData.first == I &&
                              OpData.second->State == TreeEntry::Vectorize;
                     }))
            continue;
          ArrayRef<Value *> VL = Data.first->getOperand(I);
          const TreeEntry *TE = nullptr;
          const auto *It = find_if(VL, [this, &TE](Value *V) {
            TE = getTreeEntry(V);
            return TE;
          });
          if (It != VL.end() && TE->isSame(VL))
            return false;
          TreeEntry *Gather = nullptr;
          if (count_if(NonVectorized, [VL, &Gather](TreeEntry *TE) {
                assert(TE->State != TreeEntry::Vectorize &&
                       "Only non-vectorized nodes are expected.");
                if (TE->isSame(VL)) {
                  Gather = TE;
                  return true;
                }
                return false;
              }) > 1)
            return false;
          if (Gather)
            GatherOps.push_back(Gather);
        }
        return true;
      };
  // 1. Propagate order to the graph nodes, which use only reordered nodes.
  // I.e., if the node has operands that are reordered, try to keep at least
  // one operand in the natural order and reorder the others + reorder the
  // user node itself.
  SmallPtrSet<const TreeEntry *, 4> Visited;
  while (!OrderedEntries.empty()) {
    // 1. Filter out only reordered nodes.
    // 2. If the entry has multiple uses - skip it and jump to the next node.
    MapVector<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users;
    SmallVector<TreeEntry *> Filtered;
    for (TreeEntry *TE : OrderedEntries) {
      if (!(TE->State == TreeEntry::Vectorize ||
            (TE->State == TreeEntry::NeedToGather &&
             GathersToOrders.count(TE))) ||
          TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
          !all_of(drop_begin(TE->UserTreeIndices),
                  [TE](const EdgeInfo &EI) {
                    return EI.UserTE == TE->UserTreeIndices.front().UserTE;
                  }) ||
          !Visited.insert(TE).second) {
        Filtered.push_back(TE);
        continue;
      }
      // Build a map between user nodes and their operand order to speed up
      // the search. The graph currently does not provide this dependency
      // directly.
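      // Users maps each user entry to the list of (operand index, operand
      // entry) pairs pointing back at the reorderable operands collected
      // here, so the propagation step below can look at all operands of one
      // user at once.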
3087 for (EdgeInfo &EI : TE->UserTreeIndices) { 3088 TreeEntry *UserTE = EI.UserTE; 3089 auto It = Users.find(UserTE); 3090 if (It == Users.end()) 3091 It = Users.insert({UserTE, {}}).first; 3092 It->second.emplace_back(EI.EdgeIdx, TE); 3093 } 3094 } 3095 // Erase filtered entries. 3096 for_each(Filtered, 3097 [&OrderedEntries](TreeEntry *TE) { OrderedEntries.remove(TE); }); 3098 for (const auto &Data : Users) { 3099 // Check that operands are used only in the User node. 3100 SmallVector<TreeEntry *> GatherOps; 3101 if (!CheckOperands(Data, GatherOps)) { 3102 for_each(Data.second, 3103 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { 3104 OrderedEntries.remove(Op.second); 3105 }); 3106 continue; 3107 } 3108 // All operands are reordered and used only in this node - propagate the 3109 // most used order to the user node. 3110 MapVector<OrdersType, unsigned, 3111 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>> 3112 OrdersUses; 3113 SmallPtrSet<const TreeEntry *, 4> VisitedOps; 3114 for (const auto &Op : Data.second) { 3115 TreeEntry *OpTE = Op.second; 3116 if (!OpTE->ReuseShuffleIndices.empty() || 3117 (IgnoreReorder && OpTE == VectorizableTree.front().get())) 3118 continue; 3119 const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & { 3120 if (OpTE->State == TreeEntry::NeedToGather) 3121 return GathersToOrders.find(OpTE)->second; 3122 return OpTE->ReorderIndices; 3123 }(); 3124 // Stores actually store the mask, not the order, need to invert. 3125 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && 3126 OpTE->getOpcode() == Instruction::Store && !Order.empty()) { 3127 SmallVector<int> Mask; 3128 inversePermutation(Order, Mask); 3129 unsigned E = Order.size(); 3130 OrdersType CurrentOrder(E, E); 3131 transform(Mask, CurrentOrder.begin(), [E](int Idx) { 3132 return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx); 3133 }); 3134 fixupOrderingIndices(CurrentOrder); 3135 ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second; 3136 } else { 3137 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second; 3138 } 3139 if (VisitedOps.insert(OpTE).second) 3140 OrdersUses.insert(std::make_pair(OrdersType(), 0)).first->second += 3141 OpTE->UserTreeIndices.size(); 3142 assert(OrdersUses[{}] > 0 && "Counter cannot be less than 0."); 3143 --OrdersUses[{}]; 3144 } 3145 // If no orders - skip current nodes and jump to the next one, if any. 3146 if (OrdersUses.empty()) { 3147 for_each(Data.second, 3148 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { 3149 OrderedEntries.remove(Op.second); 3150 }); 3151 continue; 3152 } 3153 // Choose the best order. 3154 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 3155 unsigned Cnt = OrdersUses.front().second; 3156 for (const auto &Pair : drop_begin(OrdersUses)) { 3157 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 3158 BestOrder = Pair.first; 3159 Cnt = Pair.second; 3160 } 3161 } 3162 // Set order of the user node (reordering of operands and user nodes). 3163 if (BestOrder.empty()) { 3164 for_each(Data.second, 3165 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { 3166 OrderedEntries.remove(Op.second); 3167 }); 3168 continue; 3169 } 3170 // Erase operands from OrderedEntries list and adjust their orders. 
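      // BestOrder is used in two forms below: Mask is its inverse permutation
      // and is applied to the operands' scalars and reuse masks, while
      // MaskOrder keeps BestOrder itself as a mask for the user node's
      // ReorderIndices.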
3171 VisitedOps.clear(); 3172 SmallVector<int> Mask; 3173 inversePermutation(BestOrder, Mask); 3174 SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem); 3175 unsigned E = BestOrder.size(); 3176 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 3177 return I < E ? static_cast<int>(I) : UndefMaskElem; 3178 }); 3179 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) { 3180 TreeEntry *TE = Op.second; 3181 OrderedEntries.remove(TE); 3182 if (!VisitedOps.insert(TE).second) 3183 continue; 3184 if (!TE->ReuseShuffleIndices.empty() && TE->ReorderIndices.empty()) { 3185 // Just reorder reuses indices. 3186 reorderReuses(TE->ReuseShuffleIndices, Mask); 3187 continue; 3188 } 3189 // Gathers are processed separately. 3190 if (TE->State != TreeEntry::Vectorize) 3191 continue; 3192 assert((BestOrder.size() == TE->ReorderIndices.size() || 3193 TE->ReorderIndices.empty()) && 3194 "Non-matching sizes of user/operand entries."); 3195 reorderOrder(TE->ReorderIndices, Mask); 3196 } 3197 // For gathers just need to reorder its scalars. 3198 for (TreeEntry *Gather : GatherOps) { 3199 assert(Gather->ReorderIndices.empty() && 3200 "Unexpected reordering of gathers."); 3201 if (!Gather->ReuseShuffleIndices.empty()) { 3202 // Just reorder reuses indices. 3203 reorderReuses(Gather->ReuseShuffleIndices, Mask); 3204 continue; 3205 } 3206 reorderScalars(Gather->Scalars, Mask); 3207 OrderedEntries.remove(Gather); 3208 } 3209 // Reorder operands of the user node and set the ordering for the user 3210 // node itself. 3211 if (Data.first->State != TreeEntry::Vectorize || 3212 !isa<ExtractElementInst, ExtractValueInst, LoadInst>( 3213 Data.first->getMainOp()) || 3214 Data.first->isAltShuffle()) 3215 Data.first->reorderOperands(Mask); 3216 if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) || 3217 Data.first->isAltShuffle()) { 3218 reorderScalars(Data.first->Scalars, Mask); 3219 reorderOrder(Data.first->ReorderIndices, MaskOrder); 3220 if (Data.first->ReuseShuffleIndices.empty() && 3221 !Data.first->ReorderIndices.empty() && 3222 !Data.first->isAltShuffle()) { 3223 // Insert user node to the list to try to sink reordering deeper in 3224 // the graph. 3225 OrderedEntries.insert(Data.first); 3226 } 3227 } else { 3228 reorderOrder(Data.first->ReorderIndices, Mask); 3229 } 3230 } 3231 } 3232 // If the reordering is unnecessary, just remove the reorder. 3233 if (IgnoreReorder && !VectorizableTree.front()->ReorderIndices.empty() && 3234 VectorizableTree.front()->ReuseShuffleIndices.empty()) 3235 VectorizableTree.front()->ReorderIndices.clear(); 3236 } 3237 3238 void BoUpSLP::buildExternalUses( 3239 const ExtraValueToDebugLocsMap &ExternallyUsedValues) { 3240 // Collect the values that we need to extract from the tree. 3241 for (auto &TEPtr : VectorizableTree) { 3242 TreeEntry *Entry = TEPtr.get(); 3243 3244 // No need to handle users of gathered values. 3245 if (Entry->State == TreeEntry::NeedToGather) 3246 continue; 3247 3248 // For each lane: 3249 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 3250 Value *Scalar = Entry->Scalars[Lane]; 3251 int FoundLane = Entry->findLaneForValue(Scalar); 3252 3253 // Check if the scalar is externally used as an extra arg. 
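      // Extra args have no concrete user instruction, so they are recorded
      // below with a null user and are always extracted.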
3254 auto ExtI = ExternallyUsedValues.find(Scalar); 3255 if (ExtI != ExternallyUsedValues.end()) { 3256 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " 3257 << Lane << " from " << *Scalar << ".\n"); 3258 ExternalUses.emplace_back(Scalar, nullptr, FoundLane); 3259 } 3260 for (User *U : Scalar->users()) { 3261 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); 3262 3263 Instruction *UserInst = dyn_cast<Instruction>(U); 3264 if (!UserInst) 3265 continue; 3266 3267 if (isDeleted(UserInst)) 3268 continue; 3269 3270 // Skip in-tree scalars that become vectors 3271 if (TreeEntry *UseEntry = getTreeEntry(U)) { 3272 Value *UseScalar = UseEntry->Scalars[0]; 3273 // Some in-tree scalars will remain as scalar in vectorized 3274 // instructions. If that is the case, the one in Lane 0 will 3275 // be used. 3276 if (UseScalar != U || 3277 UseEntry->State == TreeEntry::ScatterVectorize || 3278 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) { 3279 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U 3280 << ".\n"); 3281 assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state"); 3282 continue; 3283 } 3284 } 3285 3286 // Ignore users in the user ignore list. 3287 if (is_contained(UserIgnoreList, UserInst)) 3288 continue; 3289 3290 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " 3291 << Lane << " from " << *Scalar << ".\n"); 3292 ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane)); 3293 } 3294 } 3295 } 3296 } 3297 3298 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 3299 ArrayRef<Value *> UserIgnoreLst) { 3300 deleteTree(); 3301 UserIgnoreList = UserIgnoreLst; 3302 if (!allSameType(Roots)) 3303 return; 3304 buildTree_rec(Roots, 0, EdgeInfo()); 3305 } 3306 3307 namespace { 3308 /// Tracks the state we can represent the loads in the given sequence. 3309 enum class LoadsState { Gather, Vectorize, ScatterVectorize }; 3310 } // anonymous namespace 3311 3312 /// Checks if the given array of loads can be represented as a vectorized, 3313 /// scatter or just simple gather. 3314 static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0, 3315 const TargetTransformInfo &TTI, 3316 const DataLayout &DL, ScalarEvolution &SE, 3317 SmallVectorImpl<unsigned> &Order, 3318 SmallVectorImpl<Value *> &PointerOps) { 3319 // Check that a vectorized load would load the same memory as a scalar 3320 // load. For example, we don't want to vectorize loads that are smaller 3321 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 3322 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 3323 // from such a struct, we read/write packed bits disagreeing with the 3324 // unvectorized version. 3325 Type *ScalarTy = VL0->getType(); 3326 3327 if (DL.getTypeSizeInBits(ScalarTy) != DL.getTypeAllocSizeInBits(ScalarTy)) 3328 return LoadsState::Gather; 3329 3330 // Make sure all loads in the bundle are simple - we can't vectorize 3331 // atomic or volatile loads. 3332 PointerOps.clear(); 3333 PointerOps.resize(VL.size()); 3334 auto *POIter = PointerOps.begin(); 3335 for (Value *V : VL) { 3336 auto *L = cast<LoadInst>(V); 3337 if (!L->isSimple()) 3338 return LoadsState::Gather; 3339 *POIter = L->getPointerOperand(); 3340 ++POIter; 3341 } 3342 3343 Order.clear(); 3344 // Check the order of pointer operands. 
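  // sortPtrAccesses() succeeds when the pointers can be ordered by constant
  // offset from a common base. E.g. loads from p+0, p+1, p+2, p+3 give
  // Diff == 3 == VL.size() - 1 below and become one wide load; an ordered but
  // non-consecutive sequence may still be vectorized as a masked gather.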
3345 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, DL, SE, Order)) { 3346 Value *Ptr0; 3347 Value *PtrN; 3348 if (Order.empty()) { 3349 Ptr0 = PointerOps.front(); 3350 PtrN = PointerOps.back(); 3351 } else { 3352 Ptr0 = PointerOps[Order.front()]; 3353 PtrN = PointerOps[Order.back()]; 3354 } 3355 Optional<int> Diff = 3356 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE); 3357 // Check that the sorted loads are consecutive. 3358 if (static_cast<unsigned>(*Diff) == VL.size() - 1) 3359 return LoadsState::Vectorize; 3360 Align CommonAlignment = cast<LoadInst>(VL0)->getAlign(); 3361 for (Value *V : VL) 3362 CommonAlignment = 3363 commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign()); 3364 if (TTI.isLegalMaskedGather(FixedVectorType::get(ScalarTy, VL.size()), 3365 CommonAlignment)) 3366 return LoadsState::ScatterVectorize; 3367 } 3368 3369 return LoadsState::Gather; 3370 } 3371 3372 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, 3373 const EdgeInfo &UserTreeIdx) { 3374 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!"); 3375 3376 SmallVector<int> ReuseShuffleIndicies; 3377 SmallVector<Value *> UniqueValues; 3378 auto &&TryToFindDuplicates = [&VL, &ReuseShuffleIndicies, &UniqueValues, 3379 &UserTreeIdx, 3380 this](const InstructionsState &S) { 3381 // Check that every instruction appears once in this bundle. 3382 DenseMap<Value *, unsigned> UniquePositions; 3383 for (Value *V : VL) { 3384 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 3385 ReuseShuffleIndicies.emplace_back(isa<UndefValue>(V) ? -1 3386 : Res.first->second); 3387 if (Res.second) 3388 UniqueValues.emplace_back(V); 3389 } 3390 size_t NumUniqueScalarValues = UniqueValues.size(); 3391 if (NumUniqueScalarValues == VL.size()) { 3392 ReuseShuffleIndicies.clear(); 3393 } else { 3394 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); 3395 if (NumUniqueScalarValues <= 1 || 3396 !llvm::isPowerOf2_32(NumUniqueScalarValues)) { 3397 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 3398 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3399 return false; 3400 } 3401 VL = UniqueValues; 3402 } 3403 return true; 3404 }; 3405 3406 InstructionsState S = getSameOpcode(VL); 3407 if (Depth == RecursionMaxDepth) { 3408 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); 3409 if (TryToFindDuplicates(S)) 3410 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3411 ReuseShuffleIndicies); 3412 return; 3413 } 3414 3415 // Don't handle scalable vectors 3416 if (S.getOpcode() == Instruction::ExtractElement && 3417 isa<ScalableVectorType>( 3418 cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) { 3419 LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n"); 3420 if (TryToFindDuplicates(S)) 3421 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3422 ReuseShuffleIndicies); 3423 return; 3424 } 3425 3426 // Don't handle vectors. 
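  // Vector-typed bundles are gathered; insertelement is the one exception,
  // since a chain of insertelements is handled as a buildvector below.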
3427 if (S.OpValue->getType()->isVectorTy() && 3428 !isa<InsertElementInst>(S.OpValue)) { 3429 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); 3430 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3431 return; 3432 } 3433 3434 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 3435 if (SI->getValueOperand()->getType()->isVectorTy()) { 3436 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); 3437 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3438 return; 3439 } 3440 3441 // If all of the operands are identical or constant we have a simple solution. 3442 // If we deal with insert/extract instructions, they all must have constant 3443 // indices, otherwise we should gather them, not try to vectorize. 3444 if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode() || 3445 (isa<InsertElementInst, ExtractValueInst, ExtractElementInst>(S.MainOp) && 3446 !all_of(VL, isVectorLikeInstWithConstOps))) { 3447 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n"); 3448 if (TryToFindDuplicates(S)) 3449 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3450 ReuseShuffleIndicies); 3451 return; 3452 } 3453 3454 // We now know that this is a vector of instructions of the same type from 3455 // the same block. 3456 3457 // Don't vectorize ephemeral values. 3458 for (Value *V : VL) { 3459 if (EphValues.count(V)) { 3460 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 3461 << ") is ephemeral.\n"); 3462 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3463 return; 3464 } 3465 } 3466 3467 // Check if this is a duplicate of another entry. 3468 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 3469 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); 3470 if (!E->isSame(VL)) { 3471 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); 3472 if (TryToFindDuplicates(S)) 3473 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3474 ReuseShuffleIndicies); 3475 return; 3476 } 3477 // Record the reuse of the tree node. FIXME, currently this is only used to 3478 // properly draw the graph rather than for the actual vectorization. 3479 E->UserTreeIndices.push_back(UserTreeIdx); 3480 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue 3481 << ".\n"); 3482 return; 3483 } 3484 3485 // Check that none of the instructions in the bundle are already in the tree. 3486 for (Value *V : VL) { 3487 auto *I = dyn_cast<Instruction>(V); 3488 if (!I) 3489 continue; 3490 if (getTreeEntry(I)) { 3491 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 3492 << ") is already in tree.\n"); 3493 if (TryToFindDuplicates(S)) 3494 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3495 ReuseShuffleIndicies); 3496 return; 3497 } 3498 } 3499 3500 // If any of the scalars is marked as a value that needs to stay scalar, then 3501 // we need to gather the scalars. 3502 // The reduction nodes (stored in UserIgnoreList) also should stay scalar. 3503 for (Value *V : VL) { 3504 if (MustGather.count(V) || is_contained(UserIgnoreList, V)) { 3505 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); 3506 if (TryToFindDuplicates(S)) 3507 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3508 ReuseShuffleIndicies); 3509 return; 3510 } 3511 } 3512 3513 // Check that all of the users of the scalars that we want to vectorize are 3514 // schedulable. 
3515 auto *VL0 = cast<Instruction>(S.OpValue); 3516 BasicBlock *BB = VL0->getParent(); 3517 3518 if (!DT->isReachableFromEntry(BB)) { 3519 // Don't go into unreachable blocks. They may contain instructions with 3520 // dependency cycles which confuse the final scheduling. 3521 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); 3522 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3523 return; 3524 } 3525 3526 // Check that every instruction appears once in this bundle. 3527 if (!TryToFindDuplicates(S)) 3528 return; 3529 3530 auto &BSRef = BlocksSchedules[BB]; 3531 if (!BSRef) 3532 BSRef = std::make_unique<BlockScheduling>(BB); 3533 3534 BlockScheduling &BS = *BSRef.get(); 3535 3536 Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S); 3537 if (!Bundle) { 3538 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); 3539 assert((!BS.getScheduleData(VL0) || 3540 !BS.getScheduleData(VL0)->isPartOfBundle()) && 3541 "tryScheduleBundle should cancelScheduling on failure"); 3542 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3543 ReuseShuffleIndicies); 3544 return; 3545 } 3546 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 3547 3548 unsigned ShuffleOrOp = S.isAltShuffle() ? 3549 (unsigned) Instruction::ShuffleVector : S.getOpcode(); 3550 switch (ShuffleOrOp) { 3551 case Instruction::PHI: { 3552 auto *PH = cast<PHINode>(VL0); 3553 3554 // Check for terminator values (e.g. invoke). 3555 for (Value *V : VL) 3556 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { 3557 Instruction *Term = dyn_cast<Instruction>( 3558 cast<PHINode>(V)->getIncomingValueForBlock( 3559 PH->getIncomingBlock(I))); 3560 if (Term && Term->isTerminator()) { 3561 LLVM_DEBUG(dbgs() 3562 << "SLP: Need to swizzle PHINodes (terminator use).\n"); 3563 BS.cancelScheduling(VL, VL0); 3564 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3565 ReuseShuffleIndicies); 3566 return; 3567 } 3568 } 3569 3570 TreeEntry *TE = 3571 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies); 3572 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 3573 3574 // Keeps the reordered operands to avoid code duplication. 3575 SmallVector<ValueList, 2> OperandsVec; 3576 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { 3577 if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) { 3578 ValueList Operands(VL.size(), PoisonValue::get(PH->getType())); 3579 TE->setOperand(I, Operands); 3580 OperandsVec.push_back(Operands); 3581 continue; 3582 } 3583 ValueList Operands; 3584 // Prepare the operand vector. 3585 for (Value *V : VL) 3586 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock( 3587 PH->getIncomingBlock(I))); 3588 TE->setOperand(I, Operands); 3589 OperandsVec.push_back(Operands); 3590 } 3591 for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx) 3592 buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx}); 3593 return; 3594 } 3595 case Instruction::ExtractValue: 3596 case Instruction::ExtractElement: { 3597 OrdersType CurrentOrder; 3598 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder); 3599 if (Reuse) { 3600 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n"); 3601 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3602 ReuseShuffleIndicies); 3603 // This is a special case, as it does not gather, but at the same time 3604 // we are not extending buildTree_rec() towards the operands. 
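        // All scalars extract from the same source vector (checked by
        // canReuseExtract), so operand 0 is simply that vector repeated for
        // every lane.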
        ValueList Op0;
        Op0.assign(VL.size(), VL0->getOperand(0));
        VectorizableTree.back()->setOperand(0, Op0);
        return;
      }
      if (!CurrentOrder.empty()) {
        LLVM_DEBUG({
          dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
                    "with order";
          for (unsigned Idx : CurrentOrder)
            dbgs() << " " << Idx;
          dbgs() << "\n";
        });
        fixupOrderingIndices(CurrentOrder);
        // Record the fixed-up order in the new tree entry.
        newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                     ReuseShuffleIndicies, CurrentOrder);
        // This is a special case, as it does not gather, but at the same time
        // we are not extending buildTree_rec() towards the operands.
        ValueList Op0;
        Op0.assign(VL.size(), VL0->getOperand(0));
        VectorizableTree.back()->setOperand(0, Op0);
        return;
      }
      LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
      newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
                   ReuseShuffleIndicies);
      BS.cancelScheduling(VL, VL0);
      return;
    }
    case Instruction::InsertElement: {
      assert(ReuseShuffleIndicies.empty() && "All inserts should be unique");

      // Check that we have a buildvector and not a shuffle of 2 or more
      // different vectors.
      ValueSet SourceVectors;
      int MinIdx = std::numeric_limits<int>::max();
      for (Value *V : VL) {
        SourceVectors.insert(cast<Instruction>(V)->getOperand(0));
        Optional<int> Idx = getInsertIndex(V, 0);
        if (!Idx || *Idx == UndefMaskElem)
          continue;
        MinIdx = std::min(MinIdx, *Idx);
      }

      if (count_if(VL, [&SourceVectors](Value *V) {
            return !SourceVectors.contains(V);
          }) >= 2) {
        // Found 2nd source vector - cancel.
        LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with "
                             "different source vectors.\n");
        newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
        BS.cancelScheduling(VL, VL0);
        return;
      }

      auto OrdCompare = [](const std::pair<int, int> &P1,
                           const std::pair<int, int> &P2) {
        return P1.first > P2.first;
      };
      PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>,
                    decltype(OrdCompare)>
          Indices(OrdCompare);
      for (int I = 0, E = VL.size(); I < E; ++I) {
        Optional<int> Idx = getInsertIndex(VL[I], 0);
        if (!Idx || *Idx == UndefMaskElem)
          continue;
        Indices.emplace(*Idx, I);
      }
      OrdersType CurrentOrder(VL.size(), VL.size());
      bool IsIdentity = true;
      for (int I = 0, E = VL.size(); I < E; ++I) {
        CurrentOrder[Indices.top().second] = I;
        IsIdentity &= Indices.top().second == I;
        Indices.pop();
      }
      if (IsIdentity)
        CurrentOrder.clear();
      TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                                   None, CurrentOrder);
      LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n");

      constexpr int NumOps = 2;
      ValueList VectorOperands[NumOps];
      for (int I = 0; I < NumOps; ++I) {
        for (Value *V : VL)
          VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I));

        TE->setOperand(I, VectorOperands[I]);
      }
      buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1});
      return;
    }
    case Instruction::Load: {
      // Check that a vectorized load would load the same memory as a scalar
      // load. For example, we don't want to vectorize loads that are smaller
      // than 8-bit.
Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 3703 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 3704 // from such a struct, we read/write packed bits disagreeing with the 3705 // unvectorized version. 3706 SmallVector<Value *> PointerOps; 3707 OrdersType CurrentOrder; 3708 TreeEntry *TE = nullptr; 3709 switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, CurrentOrder, 3710 PointerOps)) { 3711 case LoadsState::Vectorize: 3712 if (CurrentOrder.empty()) { 3713 // Original loads are consecutive and does not require reordering. 3714 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3715 ReuseShuffleIndicies); 3716 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n"); 3717 } else { 3718 fixupOrderingIndices(CurrentOrder); 3719 // Need to reorder. 3720 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3721 ReuseShuffleIndicies, CurrentOrder); 3722 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n"); 3723 } 3724 TE->setOperandsInOrder(); 3725 break; 3726 case LoadsState::ScatterVectorize: 3727 // Vectorizing non-consecutive loads with `llvm.masked.gather`. 3728 TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S, 3729 UserTreeIdx, ReuseShuffleIndicies); 3730 TE->setOperandsInOrder(); 3731 buildTree_rec(PointerOps, Depth + 1, {TE, 0}); 3732 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); 3733 break; 3734 case LoadsState::Gather: 3735 BS.cancelScheduling(VL, VL0); 3736 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3737 ReuseShuffleIndicies); 3738 #ifndef NDEBUG 3739 Type *ScalarTy = VL0->getType(); 3740 if (DL->getTypeSizeInBits(ScalarTy) != 3741 DL->getTypeAllocSizeInBits(ScalarTy)) 3742 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); 3743 else if (any_of(VL, [](Value *V) { 3744 return !cast<LoadInst>(V)->isSimple(); 3745 })) 3746 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); 3747 else 3748 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); 3749 #endif // NDEBUG 3750 break; 3751 } 3752 return; 3753 } 3754 case Instruction::ZExt: 3755 case Instruction::SExt: 3756 case Instruction::FPToUI: 3757 case Instruction::FPToSI: 3758 case Instruction::FPExt: 3759 case Instruction::PtrToInt: 3760 case Instruction::IntToPtr: 3761 case Instruction::SIToFP: 3762 case Instruction::UIToFP: 3763 case Instruction::Trunc: 3764 case Instruction::FPTrunc: 3765 case Instruction::BitCast: { 3766 Type *SrcTy = VL0->getOperand(0)->getType(); 3767 for (Value *V : VL) { 3768 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType(); 3769 if (Ty != SrcTy || !isValidElementType(Ty)) { 3770 BS.cancelScheduling(VL, VL0); 3771 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3772 ReuseShuffleIndicies); 3773 LLVM_DEBUG(dbgs() 3774 << "SLP: Gathering casts with different src types.\n"); 3775 return; 3776 } 3777 } 3778 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3779 ReuseShuffleIndicies); 3780 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); 3781 3782 TE->setOperandsInOrder(); 3783 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 3784 ValueList Operands; 3785 // Prepare the operand vector. 3786 for (Value *V : VL) 3787 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 3788 3789 buildTree_rec(Operands, Depth + 1, {TE, i}); 3790 } 3791 return; 3792 } 3793 case Instruction::ICmp: 3794 case Instruction::FCmp: { 3795 // Check that all of the compares have the same predicate. 
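      // A compare using the swapped predicate (e.g. sgt vs. slt) is still
      // accepted; its operands are commuted when the operand lists are built
      // below.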
3796 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 3797 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0); 3798 Type *ComparedTy = VL0->getOperand(0)->getType(); 3799 for (Value *V : VL) { 3800 CmpInst *Cmp = cast<CmpInst>(V); 3801 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) || 3802 Cmp->getOperand(0)->getType() != ComparedTy) { 3803 BS.cancelScheduling(VL, VL0); 3804 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3805 ReuseShuffleIndicies); 3806 LLVM_DEBUG(dbgs() 3807 << "SLP: Gathering cmp with different predicate.\n"); 3808 return; 3809 } 3810 } 3811 3812 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3813 ReuseShuffleIndicies); 3814 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 3815 3816 ValueList Left, Right; 3817 if (cast<CmpInst>(VL0)->isCommutative()) { 3818 // Commutative predicate - collect + sort operands of the instructions 3819 // so that each side is more likely to have the same opcode. 3820 assert(P0 == SwapP0 && "Commutative Predicate mismatch"); 3821 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 3822 } else { 3823 // Collect operands - commute if it uses the swapped predicate. 3824 for (Value *V : VL) { 3825 auto *Cmp = cast<CmpInst>(V); 3826 Value *LHS = Cmp->getOperand(0); 3827 Value *RHS = Cmp->getOperand(1); 3828 if (Cmp->getPredicate() != P0) 3829 std::swap(LHS, RHS); 3830 Left.push_back(LHS); 3831 Right.push_back(RHS); 3832 } 3833 } 3834 TE->setOperand(0, Left); 3835 TE->setOperand(1, Right); 3836 buildTree_rec(Left, Depth + 1, {TE, 0}); 3837 buildTree_rec(Right, Depth + 1, {TE, 1}); 3838 return; 3839 } 3840 case Instruction::Select: 3841 case Instruction::FNeg: 3842 case Instruction::Add: 3843 case Instruction::FAdd: 3844 case Instruction::Sub: 3845 case Instruction::FSub: 3846 case Instruction::Mul: 3847 case Instruction::FMul: 3848 case Instruction::UDiv: 3849 case Instruction::SDiv: 3850 case Instruction::FDiv: 3851 case Instruction::URem: 3852 case Instruction::SRem: 3853 case Instruction::FRem: 3854 case Instruction::Shl: 3855 case Instruction::LShr: 3856 case Instruction::AShr: 3857 case Instruction::And: 3858 case Instruction::Or: 3859 case Instruction::Xor: { 3860 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3861 ReuseShuffleIndicies); 3862 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n"); 3863 3864 // Sort operands of the instructions so that each side is more likely to 3865 // have the same opcode. 3866 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 3867 ValueList Left, Right; 3868 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 3869 TE->setOperand(0, Left); 3870 TE->setOperand(1, Right); 3871 buildTree_rec(Left, Depth + 1, {TE, 0}); 3872 buildTree_rec(Right, Depth + 1, {TE, 1}); 3873 return; 3874 } 3875 3876 TE->setOperandsInOrder(); 3877 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 3878 ValueList Operands; 3879 // Prepare the operand vector. 3880 for (Value *V : VL) 3881 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 3882 3883 buildTree_rec(Operands, Depth + 1, {TE, i}); 3884 } 3885 return; 3886 } 3887 case Instruction::GetElementPtr: { 3888 // We don't combine GEPs with complicated (nested) indexing. 
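      // "Complicated" here simply means more than one index operand: only
      // GEPs of the form gep %base, %idx are handled.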
3889 for (Value *V : VL) { 3890 if (cast<Instruction>(V)->getNumOperands() != 2) { 3891 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 3892 BS.cancelScheduling(VL, VL0); 3893 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3894 ReuseShuffleIndicies); 3895 return; 3896 } 3897 } 3898 3899 // We can't combine several GEPs into one vector if they operate on 3900 // different types. 3901 Type *Ty0 = VL0->getOperand(0)->getType(); 3902 for (Value *V : VL) { 3903 Type *CurTy = cast<Instruction>(V)->getOperand(0)->getType(); 3904 if (Ty0 != CurTy) { 3905 LLVM_DEBUG(dbgs() 3906 << "SLP: not-vectorizable GEP (different types).\n"); 3907 BS.cancelScheduling(VL, VL0); 3908 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3909 ReuseShuffleIndicies); 3910 return; 3911 } 3912 } 3913 3914 // We don't combine GEPs with non-constant indexes. 3915 Type *Ty1 = VL0->getOperand(1)->getType(); 3916 for (Value *V : VL) { 3917 auto Op = cast<Instruction>(V)->getOperand(1); 3918 if (!isa<ConstantInt>(Op) || 3919 (Op->getType() != Ty1 && 3920 Op->getType()->getScalarSizeInBits() > 3921 DL->getIndexSizeInBits( 3922 V->getType()->getPointerAddressSpace()))) { 3923 LLVM_DEBUG(dbgs() 3924 << "SLP: not-vectorizable GEP (non-constant indexes).\n"); 3925 BS.cancelScheduling(VL, VL0); 3926 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3927 ReuseShuffleIndicies); 3928 return; 3929 } 3930 } 3931 3932 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3933 ReuseShuffleIndicies); 3934 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n"); 3935 SmallVector<ValueList, 2> Operands(2); 3936 // Prepare the operand vector for pointer operands. 3937 for (Value *V : VL) 3938 Operands.front().push_back( 3939 cast<GetElementPtrInst>(V)->getPointerOperand()); 3940 TE->setOperand(0, Operands.front()); 3941 // Need to cast all indices to the same type before vectorization to 3942 // avoid crash. 3943 // Required to be able to find correct matches between different gather 3944 // nodes and reuse the vectorized values rather than trying to gather them 3945 // again. 3946 int IndexIdx = 1; 3947 Type *VL0Ty = VL0->getOperand(IndexIdx)->getType(); 3948 Type *Ty = all_of(VL, 3949 [VL0Ty, IndexIdx](Value *V) { 3950 return VL0Ty == cast<GetElementPtrInst>(V) 3951 ->getOperand(IndexIdx) 3952 ->getType(); 3953 }) 3954 ? VL0Ty 3955 : DL->getIndexType(cast<GetElementPtrInst>(VL0) 3956 ->getPointerOperandType() 3957 ->getScalarType()); 3958 // Prepare the operand vector. 3959 for (Value *V : VL) { 3960 auto *Op = cast<Instruction>(V)->getOperand(IndexIdx); 3961 auto *CI = cast<ConstantInt>(Op); 3962 Operands.back().push_back(ConstantExpr::getIntegerCast( 3963 CI, Ty, CI->getValue().isSignBitSet())); 3964 } 3965 TE->setOperand(IndexIdx, Operands.back()); 3966 3967 for (unsigned I = 0, Ops = Operands.size(); I < Ops; ++I) 3968 buildTree_rec(Operands[I], Depth + 1, {TE, I}); 3969 return; 3970 } 3971 case Instruction::Store: { 3972 // Check if the stores are consecutive or if we need to swizzle them. 3973 llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType(); 3974 // Avoid types that are padded when being allocated as scalars, while 3975 // being packed together in a vector (such as i1). 
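      // E.g. for i1 the type size is 1 bit but the alloc size is 8 bits, so
      // the check below rejects it.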
3976 if (DL->getTypeSizeInBits(ScalarTy) != 3977 DL->getTypeAllocSizeInBits(ScalarTy)) { 3978 BS.cancelScheduling(VL, VL0); 3979 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3980 ReuseShuffleIndicies); 3981 LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n"); 3982 return; 3983 } 3984 // Make sure all stores in the bundle are simple - we can't vectorize 3985 // atomic or volatile stores. 3986 SmallVector<Value *, 4> PointerOps(VL.size()); 3987 ValueList Operands(VL.size()); 3988 auto POIter = PointerOps.begin(); 3989 auto OIter = Operands.begin(); 3990 for (Value *V : VL) { 3991 auto *SI = cast<StoreInst>(V); 3992 if (!SI->isSimple()) { 3993 BS.cancelScheduling(VL, VL0); 3994 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3995 ReuseShuffleIndicies); 3996 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n"); 3997 return; 3998 } 3999 *POIter = SI->getPointerOperand(); 4000 *OIter = SI->getValueOperand(); 4001 ++POIter; 4002 ++OIter; 4003 } 4004 4005 OrdersType CurrentOrder; 4006 // Check the order of pointer operands. 4007 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) { 4008 Value *Ptr0; 4009 Value *PtrN; 4010 if (CurrentOrder.empty()) { 4011 Ptr0 = PointerOps.front(); 4012 PtrN = PointerOps.back(); 4013 } else { 4014 Ptr0 = PointerOps[CurrentOrder.front()]; 4015 PtrN = PointerOps[CurrentOrder.back()]; 4016 } 4017 Optional<int> Dist = 4018 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE); 4019 // Check that the sorted pointer operands are consecutive. 4020 if (static_cast<unsigned>(*Dist) == VL.size() - 1) { 4021 if (CurrentOrder.empty()) { 4022 // Original stores are consecutive and does not require reordering. 4023 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, 4024 UserTreeIdx, ReuseShuffleIndicies); 4025 TE->setOperandsInOrder(); 4026 buildTree_rec(Operands, Depth + 1, {TE, 0}); 4027 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n"); 4028 } else { 4029 fixupOrderingIndices(CurrentOrder); 4030 TreeEntry *TE = 4031 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 4032 ReuseShuffleIndicies, CurrentOrder); 4033 TE->setOperandsInOrder(); 4034 buildTree_rec(Operands, Depth + 1, {TE, 0}); 4035 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n"); 4036 } 4037 return; 4038 } 4039 } 4040 4041 BS.cancelScheduling(VL, VL0); 4042 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4043 ReuseShuffleIndicies); 4044 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); 4045 return; 4046 } 4047 case Instruction::Call: { 4048 // Check if the calls are all to the same vectorizable intrinsic or 4049 // library function. 
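      // A call is vectorizable here if it maps to a trivially vectorizable
      // intrinsic or if VFDatabase knows a vector library variant for this
      // vectorization factor.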
4050 CallInst *CI = cast<CallInst>(VL0); 4051 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4052 4053 VFShape Shape = VFShape::get( 4054 *CI, ElementCount::getFixed(static_cast<unsigned int>(VL.size())), 4055 false /*HasGlobalPred*/); 4056 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 4057 4058 if (!VecFunc && !isTriviallyVectorizable(ID)) { 4059 BS.cancelScheduling(VL, VL0); 4060 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4061 ReuseShuffleIndicies); 4062 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n"); 4063 return; 4064 } 4065 Function *F = CI->getCalledFunction(); 4066 unsigned NumArgs = CI->arg_size(); 4067 SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr); 4068 for (unsigned j = 0; j != NumArgs; ++j) 4069 if (hasVectorInstrinsicScalarOpd(ID, j)) 4070 ScalarArgs[j] = CI->getArgOperand(j); 4071 for (Value *V : VL) { 4072 CallInst *CI2 = dyn_cast<CallInst>(V); 4073 if (!CI2 || CI2->getCalledFunction() != F || 4074 getVectorIntrinsicIDForCall(CI2, TLI) != ID || 4075 (VecFunc && 4076 VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) || 4077 !CI->hasIdenticalOperandBundleSchema(*CI2)) { 4078 BS.cancelScheduling(VL, VL0); 4079 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4080 ReuseShuffleIndicies); 4081 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V 4082 << "\n"); 4083 return; 4084 } 4085 // Some intrinsics have scalar arguments and should be same in order for 4086 // them to be vectorized. 4087 for (unsigned j = 0; j != NumArgs; ++j) { 4088 if (hasVectorInstrinsicScalarOpd(ID, j)) { 4089 Value *A1J = CI2->getArgOperand(j); 4090 if (ScalarArgs[j] != A1J) { 4091 BS.cancelScheduling(VL, VL0); 4092 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4093 ReuseShuffleIndicies); 4094 LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI 4095 << " argument " << ScalarArgs[j] << "!=" << A1J 4096 << "\n"); 4097 return; 4098 } 4099 } 4100 } 4101 // Verify that the bundle operands are identical between the two calls. 4102 if (CI->hasOperandBundles() && 4103 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(), 4104 CI->op_begin() + CI->getBundleOperandsEndIndex(), 4105 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) { 4106 BS.cancelScheduling(VL, VL0); 4107 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4108 ReuseShuffleIndicies); 4109 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" 4110 << *CI << "!=" << *V << '\n'); 4111 return; 4112 } 4113 } 4114 4115 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 4116 ReuseShuffleIndicies); 4117 TE->setOperandsInOrder(); 4118 for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) { 4119 // For scalar operands no need to to create an entry since no need to 4120 // vectorize it. 4121 if (hasVectorInstrinsicScalarOpd(ID, i)) 4122 continue; 4123 ValueList Operands; 4124 // Prepare the operand vector. 4125 for (Value *V : VL) { 4126 auto *CI2 = cast<CallInst>(V); 4127 Operands.push_back(CI2->getArgOperand(i)); 4128 } 4129 buildTree_rec(Operands, Depth + 1, {TE, i}); 4130 } 4131 return; 4132 } 4133 case Instruction::ShuffleVector: { 4134 // If this is not an alternate sequence of opcode like add-sub 4135 // then do not vectorize this instruction. 
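      // E.g. a bundle {add, sub, add, sub} is an alternate sequence: it is
      // emitted later as one vector add and one vector sub blended by a
      // shufflevector.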
4136 if (!S.isAltShuffle()) { 4137 BS.cancelScheduling(VL, VL0); 4138 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4139 ReuseShuffleIndicies); 4140 LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n"); 4141 return; 4142 } 4143 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 4144 ReuseShuffleIndicies); 4145 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n"); 4146 4147 // Reorder operands if reordering would enable vectorization. 4148 if (isa<BinaryOperator>(VL0)) { 4149 ValueList Left, Right; 4150 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 4151 TE->setOperand(0, Left); 4152 TE->setOperand(1, Right); 4153 buildTree_rec(Left, Depth + 1, {TE, 0}); 4154 buildTree_rec(Right, Depth + 1, {TE, 1}); 4155 return; 4156 } 4157 4158 TE->setOperandsInOrder(); 4159 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 4160 ValueList Operands; 4161 // Prepare the operand vector. 4162 for (Value *V : VL) 4163 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 4164 4165 buildTree_rec(Operands, Depth + 1, {TE, i}); 4166 } 4167 return; 4168 } 4169 default: 4170 BS.cancelScheduling(VL, VL0); 4171 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4172 ReuseShuffleIndicies); 4173 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); 4174 return; 4175 } 4176 } 4177 4178 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const { 4179 unsigned N = 1; 4180 Type *EltTy = T; 4181 4182 while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) || 4183 isa<VectorType>(EltTy)) { 4184 if (auto *ST = dyn_cast<StructType>(EltTy)) { 4185 // Check that struct is homogeneous. 4186 for (const auto *Ty : ST->elements()) 4187 if (Ty != *ST->element_begin()) 4188 return 0; 4189 N *= ST->getNumElements(); 4190 EltTy = *ST->element_begin(); 4191 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) { 4192 N *= AT->getNumElements(); 4193 EltTy = AT->getElementType(); 4194 } else { 4195 auto *VT = cast<FixedVectorType>(EltTy); 4196 N *= VT->getNumElements(); 4197 EltTy = VT->getElementType(); 4198 } 4199 } 4200 4201 if (!isValidElementType(EltTy)) 4202 return 0; 4203 uint64_t VTSize = DL.getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N)); 4204 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T)) 4205 return 0; 4206 return N; 4207 } 4208 4209 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 4210 SmallVectorImpl<unsigned> &CurrentOrder) const { 4211 const auto *It = find_if(VL, [](Value *V) { 4212 return isa<ExtractElementInst, ExtractValueInst>(V); 4213 }); 4214 assert(It != VL.end() && "Expected at least one extract instruction."); 4215 auto *E0 = cast<Instruction>(*It); 4216 assert(all_of(VL, 4217 [](Value *V) { 4218 return isa<UndefValue, ExtractElementInst, ExtractValueInst>( 4219 V); 4220 }) && 4221 "Invalid opcode"); 4222 // Check if all of the extracts come from the same vector and from the 4223 // correct offset. 4224 Value *Vec = E0->getOperand(0); 4225 4226 CurrentOrder.clear(); 4227 4228 // We have to extract from a vector/aggregate with the same number of elements. 4229 unsigned NElts; 4230 if (E0->getOpcode() == Instruction::ExtractValue) { 4231 const DataLayout &DL = E0->getModule()->getDataLayout(); 4232 NElts = canMapToVector(Vec->getType(), DL); 4233 if (!NElts) 4234 return false; 4235 // Check if load can be rewritten as load of vector. 
4236 LoadInst *LI = dyn_cast<LoadInst>(Vec); 4237 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 4238 return false; 4239 } else { 4240 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); 4241 } 4242 4243 if (NElts != VL.size()) 4244 return false; 4245 4246 // Check that all of the indices extract from the correct offset. 4247 bool ShouldKeepOrder = true; 4248 unsigned E = VL.size(); 4249 // Assign to all items the initial value E + 1 so we can check if the extract 4250 // instruction index was used already. 4251 // Also, later we can check that all the indices are used and we have a 4252 // consecutive access in the extract instructions, by checking that no 4253 // element of CurrentOrder still has value E + 1. 4254 CurrentOrder.assign(E, E); 4255 unsigned I = 0; 4256 for (; I < E; ++I) { 4257 auto *Inst = dyn_cast<Instruction>(VL[I]); 4258 if (!Inst) 4259 continue; 4260 if (Inst->getOperand(0) != Vec) 4261 break; 4262 if (auto *EE = dyn_cast<ExtractElementInst>(Inst)) 4263 if (isa<UndefValue>(EE->getIndexOperand())) 4264 continue; 4265 Optional<unsigned> Idx = getExtractIndex(Inst); 4266 if (!Idx) 4267 break; 4268 const unsigned ExtIdx = *Idx; 4269 if (ExtIdx != I) { 4270 if (ExtIdx >= E || CurrentOrder[ExtIdx] != E) 4271 break; 4272 ShouldKeepOrder = false; 4273 CurrentOrder[ExtIdx] = I; 4274 } else { 4275 if (CurrentOrder[I] != E) 4276 break; 4277 CurrentOrder[I] = I; 4278 } 4279 } 4280 if (I < E) { 4281 CurrentOrder.clear(); 4282 return false; 4283 } 4284 4285 return ShouldKeepOrder; 4286 } 4287 4288 bool BoUpSLP::areAllUsersVectorized(Instruction *I, 4289 ArrayRef<Value *> VectorizedVals) const { 4290 return (I->hasOneUse() && is_contained(VectorizedVals, I)) || 4291 all_of(I->users(), [this](User *U) { 4292 return ScalarToTreeEntry.count(U) > 0 || MustGather.contains(U); 4293 }); 4294 } 4295 4296 static std::pair<InstructionCost, InstructionCost> 4297 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy, 4298 TargetTransformInfo *TTI, TargetLibraryInfo *TLI) { 4299 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4300 4301 // Calculate the cost of the scalar and vector calls. 4302 SmallVector<Type *, 4> VecTys; 4303 for (Use &Arg : CI->args()) 4304 VecTys.push_back( 4305 FixedVectorType::get(Arg->getType(), VecTy->getNumElements())); 4306 FastMathFlags FMF; 4307 if (auto *FPCI = dyn_cast<FPMathOperator>(CI)) 4308 FMF = FPCI->getFastMathFlags(); 4309 SmallVector<const Value *> Arguments(CI->args()); 4310 IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF, 4311 dyn_cast<IntrinsicInst>(CI)); 4312 auto IntrinsicCost = 4313 TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput); 4314 4315 auto Shape = VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>( 4316 VecTy->getNumElements())), 4317 false /*HasGlobalPred*/); 4318 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 4319 auto LibCost = IntrinsicCost; 4320 if (!CI->isNoBuiltin() && VecFunc) { 4321 // Calculate the cost of the vector library call. 4322 // If the corresponding vector call is cheaper, return its cost. 4323 LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys, 4324 TTI::TCK_RecipThroughput); 4325 } 4326 return {IntrinsicCost, LibCost}; 4327 } 4328 4329 /// Compute the cost of creating a vector of type \p VecTy containing the 4330 /// extracted values from \p VL. 
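/// When the extracts span several target registers, the cost is estimated per
/// block of EltsPerVector elements: a block whose elements are extracted
/// consecutively can reuse the source register directly, otherwise a
/// single-source permute of that block is added.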
static InstructionCost
computeExtractCost(ArrayRef<Value *> VL, FixedVectorType *VecTy,
                   TargetTransformInfo::ShuffleKind ShuffleKind,
                   ArrayRef<int> Mask, TargetTransformInfo &TTI) {
  unsigned NumOfParts = TTI.getNumberOfParts(VecTy);

  if (ShuffleKind != TargetTransformInfo::SK_PermuteSingleSrc || !NumOfParts ||
      VecTy->getNumElements() < NumOfParts)
    return TTI.getShuffleCost(ShuffleKind, VecTy, Mask);

  bool AllConsecutive = true;
  unsigned EltsPerVector = VecTy->getNumElements() / NumOfParts;
  unsigned Idx = -1;
  InstructionCost Cost = 0;

  // Process extracts in blocks of EltsPerVector to check if the source vector
  // operand can be re-used directly. If not, add the cost of creating a
  // shuffle to extract the values into a vector register.
  for (auto *V : VL) {
    ++Idx;

    // Need to exclude undefs from analysis.
    if (isa<UndefValue>(V) || Mask[Idx] == UndefMaskElem)
      continue;

    // Reached the start of a new vector register.
    if (Idx % EltsPerVector == 0) {
      AllConsecutive = true;
      continue;
    }

    // Check that the extracts for this vector register directly extract the
    // values in order.
    unsigned CurrentIdx = *getExtractIndex(cast<Instruction>(V));
    if (!isa<UndefValue>(VL[Idx - 1]) && Mask[Idx - 1] != UndefMaskElem) {
      unsigned PrevIdx = *getExtractIndex(cast<Instruction>(VL[Idx - 1]));
      AllConsecutive &= PrevIdx + 1 == CurrentIdx &&
                        CurrentIdx % EltsPerVector == Idx % EltsPerVector;
    }

    if (AllConsecutive)
      continue;

    // Skip all indices, except for the last index per vector block.
    if ((Idx + 1) % EltsPerVector != 0 && Idx + 1 != VL.size())
      continue;

    // If we have a series of extracts which are not consecutive and hence
    // cannot re-use the source vector register directly, compute the shuffle
    // cost to extract a vector with EltsPerVector elements.
    Cost += TTI.getShuffleCost(
        TargetTransformInfo::SK_PermuteSingleSrc,
        FixedVectorType::get(VecTy->getElementType(), EltsPerVector));
  }
  return Cost;
}

/// Build a shuffle mask for a shuffle graph entry and the lists of main and
/// alternate operations' operands.
static void
buildSuffleEntryMask(ArrayRef<Value *> VL, ArrayRef<unsigned> ReorderIndices,
                     ArrayRef<int> ReusesIndices,
                     const function_ref<bool(Instruction *)> IsAltOp,
                     SmallVectorImpl<int> &Mask,
                     SmallVectorImpl<Value *> *OpScalars = nullptr,
                     SmallVectorImpl<Value *> *AltScalars = nullptr) {
  unsigned Sz = VL.size();
  Mask.assign(Sz, UndefMaskElem);
  SmallVector<int> OrderMask;
  if (!ReorderIndices.empty())
    inversePermutation(ReorderIndices, OrderMask);
  for (unsigned I = 0; I < Sz; ++I) {
    unsigned Idx = I;
    if (!ReorderIndices.empty())
      Idx = OrderMask[I];
    auto *OpInst = cast<Instruction>(VL[Idx]);
    if (IsAltOp(OpInst)) {
      Mask[I] = Sz + Idx;
      if (AltScalars)
        AltScalars->push_back(OpInst);
    } else {
      Mask[I] = Idx;
      if (OpScalars)
        OpScalars->push_back(OpInst);
    }
  }
  if (!ReusesIndices.empty()) {
    SmallVector<int> NewMask(ReusesIndices.size(), UndefMaskElem);
    transform(ReusesIndices, NewMask.begin(), [&Mask](int Idx) {
      return Idx != UndefMaskElem ?
Mask[Idx] : UndefMaskElem; 4421 }); 4422 Mask.swap(NewMask); 4423 } 4424 } 4425 4426 InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E, 4427 ArrayRef<Value *> VectorizedVals) { 4428 ArrayRef<Value*> VL = E->Scalars; 4429 4430 Type *ScalarTy = VL[0]->getType(); 4431 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 4432 ScalarTy = SI->getValueOperand()->getType(); 4433 else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0])) 4434 ScalarTy = CI->getOperand(0)->getType(); 4435 else if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 4436 ScalarTy = IE->getOperand(1)->getType(); 4437 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 4438 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 4439 4440 // If we have computed a smaller type for the expression, update VecTy so 4441 // that the costs will be accurate. 4442 if (MinBWs.count(VL[0])) 4443 VecTy = FixedVectorType::get( 4444 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size()); 4445 unsigned EntryVF = E->getVectorFactor(); 4446 auto *FinalVecTy = FixedVectorType::get(VecTy->getElementType(), EntryVF); 4447 4448 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 4449 // FIXME: it tries to fix a problem with MSVC buildbots. 4450 TargetTransformInfo &TTIRef = *TTI; 4451 auto &&AdjustExtractsCost = [this, &TTIRef, CostKind, VL, VecTy, 4452 VectorizedVals, E](InstructionCost &Cost) { 4453 DenseMap<Value *, int> ExtractVectorsTys; 4454 SmallPtrSet<Value *, 4> CheckedExtracts; 4455 for (auto *V : VL) { 4456 if (isa<UndefValue>(V)) 4457 continue; 4458 // If all users of instruction are going to be vectorized and this 4459 // instruction itself is not going to be vectorized, consider this 4460 // instruction as dead and remove its cost from the final cost of the 4461 // vectorized tree. 4462 // Also, avoid adjusting the cost for extractelements with multiple uses 4463 // in different graph entries. 4464 const TreeEntry *VE = getTreeEntry(V); 4465 if (!CheckedExtracts.insert(V).second || 4466 !areAllUsersVectorized(cast<Instruction>(V), VectorizedVals) || 4467 (VE && VE != E)) 4468 continue; 4469 auto *EE = cast<ExtractElementInst>(V); 4470 Optional<unsigned> EEIdx = getExtractIndex(EE); 4471 if (!EEIdx) 4472 continue; 4473 unsigned Idx = *EEIdx; 4474 if (TTIRef.getNumberOfParts(VecTy) != 4475 TTIRef.getNumberOfParts(EE->getVectorOperandType())) { 4476 auto It = 4477 ExtractVectorsTys.try_emplace(EE->getVectorOperand(), Idx).first; 4478 It->getSecond() = std::min<int>(It->second, Idx); 4479 } 4480 // Take credit for instruction that will become dead. 4481 if (EE->hasOneUse()) { 4482 Instruction *Ext = EE->user_back(); 4483 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 4484 all_of(Ext->users(), 4485 [](User *U) { return isa<GetElementPtrInst>(U); })) { 4486 // Use getExtractWithExtendCost() to calculate the cost of 4487 // extractelement/ext pair. 4488 Cost -= 4489 TTIRef.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(), 4490 EE->getVectorOperandType(), Idx); 4491 // Add back the cost of s|zext which is subtracted separately. 4492 Cost += TTIRef.getCastInstrCost( 4493 Ext->getOpcode(), Ext->getType(), EE->getType(), 4494 TTI::getCastContextHint(Ext), CostKind, Ext); 4495 continue; 4496 } 4497 } 4498 Cost -= TTIRef.getVectorInstrCost(Instruction::ExtractElement, 4499 EE->getVectorOperandType(), Idx); 4500 } 4501 // Add a cost for subvector extracts/inserts if required. 
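    // This is required when a source vector of the extracts is split into a
    // different number of target registers than VecTy; a subvector extract
    // (or an insert of VecTy into the wider type) is then costed explicitly.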
4502 for (const auto &Data : ExtractVectorsTys) { 4503 auto *EEVTy = cast<FixedVectorType>(Data.first->getType()); 4504 unsigned NumElts = VecTy->getNumElements(); 4505 if (Data.second % NumElts == 0) 4506 continue; 4507 if (TTIRef.getNumberOfParts(EEVTy) > TTIRef.getNumberOfParts(VecTy)) { 4508 unsigned Idx = (Data.second / NumElts) * NumElts; 4509 unsigned EENumElts = EEVTy->getNumElements(); 4510 if (Idx + NumElts <= EENumElts) { 4511 Cost += 4512 TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 4513 EEVTy, None, Idx, VecTy); 4514 } else { 4515 // Need to round up the subvector type vectorization factor to avoid a 4516 // crash in cost model functions. Make SubVT so that Idx + VF of SubVT 4517 // <= EENumElts. 4518 auto *SubVT = 4519 FixedVectorType::get(VecTy->getElementType(), EENumElts - Idx); 4520 Cost += 4521 TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 4522 EEVTy, None, Idx, SubVT); 4523 } 4524 } else { 4525 Cost += TTIRef.getShuffleCost(TargetTransformInfo::SK_InsertSubvector, 4526 VecTy, None, 0, EEVTy); 4527 } 4528 } 4529 }; 4530 if (E->State == TreeEntry::NeedToGather) { 4531 if (allConstant(VL)) 4532 return 0; 4533 if (isa<InsertElementInst>(VL[0])) 4534 return InstructionCost::getInvalid(); 4535 SmallVector<int> Mask; 4536 SmallVector<const TreeEntry *> Entries; 4537 Optional<TargetTransformInfo::ShuffleKind> Shuffle = 4538 isGatherShuffledEntry(E, Mask, Entries); 4539 if (Shuffle.hasValue()) { 4540 InstructionCost GatherCost = 0; 4541 if (ShuffleVectorInst::isIdentityMask(Mask)) { 4542 // Perfect match in the graph, will reuse the previously vectorized 4543 // node. Cost is 0. 4544 LLVM_DEBUG( 4545 dbgs() 4546 << "SLP: perfect diamond match for gather bundle that starts with " 4547 << *VL.front() << ".\n"); 4548 if (NeedToShuffleReuses) 4549 GatherCost = 4550 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, 4551 FinalVecTy, E->ReuseShuffleIndices); 4552 } else { 4553 LLVM_DEBUG(dbgs() << "SLP: shuffled " << Entries.size() 4554 << " entries for bundle that starts with " 4555 << *VL.front() << ".\n"); 4556 // Detected that instead of gather we can emit a shuffle of single/two 4557 // previously vectorized nodes. Add the cost of the permutation rather 4558 // than gather. 4559 ::addMask(Mask, E->ReuseShuffleIndices); 4560 GatherCost = TTI->getShuffleCost(*Shuffle, FinalVecTy, Mask); 4561 } 4562 return GatherCost; 4563 } 4564 if ((E->getOpcode() == Instruction::ExtractElement || 4565 all_of(E->Scalars, 4566 [](Value *V) { 4567 return isa<ExtractElementInst, UndefValue>(V); 4568 })) && 4569 allSameType(VL)) { 4570 // Check that gather of extractelements can be represented as just a 4571 // shuffle of a single/two vectors the scalars are extracted from. 4572 SmallVector<int> Mask; 4573 Optional<TargetTransformInfo::ShuffleKind> ShuffleKind = 4574 isFixedVectorShuffle(VL, Mask); 4575 if (ShuffleKind.hasValue()) { 4576 // Found the bunch of extractelement instructions that must be gathered 4577 // into a vector and can be represented as a permutation elements in a 4578 // single input vector or of 2 input vectors. 4579 InstructionCost Cost = 4580 computeExtractCost(VL, VecTy, *ShuffleKind, Mask, *TTI); 4581 AdjustExtractsCost(Cost); 4582 if (NeedToShuffleReuses) 4583 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, 4584 FinalVecTy, E->ReuseShuffleIndices); 4585 return Cost; 4586 } 4587 } 4588 if (isSplat(VL)) { 4589 // Found the broadcasting of the single scalar, calculate the cost as the 4590 // broadcast. 
4591 return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy); 4592 } 4593 InstructionCost ReuseShuffleCost = 0; 4594 if (NeedToShuffleReuses) 4595 ReuseShuffleCost = TTI->getShuffleCost( 4596 TTI::SK_PermuteSingleSrc, FinalVecTy, E->ReuseShuffleIndices); 4597 // Improve gather cost for gather of loads, if we can group some of the 4598 // loads into vector loads. 4599 if (VL.size() > 2 && E->getOpcode() == Instruction::Load && 4600 !E->isAltShuffle()) { 4601 BoUpSLP::ValueSet VectorizedLoads; 4602 unsigned StartIdx = 0; 4603 unsigned VF = VL.size() / 2; 4604 unsigned VectorizedCnt = 0; 4605 unsigned ScatterVectorizeCnt = 0; 4606 const unsigned Sz = DL->getTypeSizeInBits(E->getMainOp()->getType()); 4607 for (unsigned MinVF = getMinVF(2 * Sz); VF >= MinVF; VF /= 2) { 4608 for (unsigned Cnt = StartIdx, End = VL.size(); Cnt + VF <= End; 4609 Cnt += VF) { 4610 ArrayRef<Value *> Slice = VL.slice(Cnt, VF); 4611 if (!VectorizedLoads.count(Slice.front()) && 4612 !VectorizedLoads.count(Slice.back()) && allSameBlock(Slice)) { 4613 SmallVector<Value *> PointerOps; 4614 OrdersType CurrentOrder; 4615 LoadsState LS = canVectorizeLoads(Slice, Slice.front(), *TTI, *DL, 4616 *SE, CurrentOrder, PointerOps); 4617 switch (LS) { 4618 case LoadsState::Vectorize: 4619 case LoadsState::ScatterVectorize: 4620 // Mark the vectorized loads so that we don't vectorize them 4621 // again. 4622 if (LS == LoadsState::Vectorize) 4623 ++VectorizedCnt; 4624 else 4625 ++ScatterVectorizeCnt; 4626 VectorizedLoads.insert(Slice.begin(), Slice.end()); 4627 // If we vectorized initial block, no need to try to vectorize it 4628 // again. 4629 if (Cnt == StartIdx) 4630 StartIdx += VF; 4631 break; 4632 case LoadsState::Gather: 4633 break; 4634 } 4635 } 4636 } 4637 // Check if the whole array was vectorized already - exit. 4638 if (StartIdx >= VL.size()) 4639 break; 4640 // Found vectorizable parts - exit. 4641 if (!VectorizedLoads.empty()) 4642 break; 4643 } 4644 if (!VectorizedLoads.empty()) { 4645 InstructionCost GatherCost = 0; 4646 unsigned NumParts = TTI->getNumberOfParts(VecTy); 4647 bool NeedInsertSubvectorAnalysis = 4648 !NumParts || (VL.size() / VF) > NumParts; 4649 // Get the cost for gathered loads. 4650 for (unsigned I = 0, End = VL.size(); I < End; I += VF) { 4651 if (VectorizedLoads.contains(VL[I])) 4652 continue; 4653 GatherCost += getGatherCost(VL.slice(I, VF)); 4654 } 4655 // The cost for vectorized loads. 4656 InstructionCost ScalarsCost = 0; 4657 for (Value *V : VectorizedLoads) { 4658 auto *LI = cast<LoadInst>(V); 4659 ScalarsCost += TTI->getMemoryOpCost( 4660 Instruction::Load, LI->getType(), LI->getAlign(), 4661 LI->getPointerAddressSpace(), CostKind, LI); 4662 } 4663 auto *LI = cast<LoadInst>(E->getMainOp()); 4664 auto *LoadTy = FixedVectorType::get(LI->getType(), VF); 4665 Align Alignment = LI->getAlign(); 4666 GatherCost += 4667 VectorizedCnt * 4668 TTI->getMemoryOpCost(Instruction::Load, LoadTy, Alignment, 4669 LI->getPointerAddressSpace(), CostKind, LI); 4670 GatherCost += ScatterVectorizeCnt * 4671 TTI->getGatherScatterOpCost( 4672 Instruction::Load, LoadTy, LI->getPointerOperand(), 4673 /*VariableMask=*/false, Alignment, CostKind, LI); 4674 if (NeedInsertSubvectorAnalysis) { 4675 // Add the cost for the subvectors insert. 
4676 for (int I = VF, E = VL.size(); I < E; I += VF) 4677 GatherCost += TTI->getShuffleCost(TTI::SK_InsertSubvector, VecTy, 4678 None, I, LoadTy); 4679 } 4680 return ReuseShuffleCost + GatherCost - ScalarsCost; 4681 } 4682 } 4683 return ReuseShuffleCost + getGatherCost(VL); 4684 } 4685 InstructionCost CommonCost = 0; 4686 SmallVector<int> Mask; 4687 if (!E->ReorderIndices.empty()) { 4688 SmallVector<int> NewMask; 4689 if (E->getOpcode() == Instruction::Store) { 4690 // For stores the order is actually a mask. 4691 NewMask.resize(E->ReorderIndices.size()); 4692 copy(E->ReorderIndices, NewMask.begin()); 4693 } else { 4694 inversePermutation(E->ReorderIndices, NewMask); 4695 } 4696 ::addMask(Mask, NewMask); 4697 } 4698 if (NeedToShuffleReuses) 4699 ::addMask(Mask, E->ReuseShuffleIndices); 4700 if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask)) 4701 CommonCost = 4702 TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask); 4703 assert((E->State == TreeEntry::Vectorize || 4704 E->State == TreeEntry::ScatterVectorize) && 4705 "Unhandled state"); 4706 assert(E->getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL"); 4707 Instruction *VL0 = E->getMainOp(); 4708 unsigned ShuffleOrOp = 4709 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode(); 4710 switch (ShuffleOrOp) { 4711 case Instruction::PHI: 4712 return 0; 4713 4714 case Instruction::ExtractValue: 4715 case Instruction::ExtractElement: { 4716 // The common cost of removal ExtractElement/ExtractValue instructions + 4717 // the cost of shuffles, if required to resuffle the original vector. 4718 if (NeedToShuffleReuses) { 4719 unsigned Idx = 0; 4720 for (unsigned I : E->ReuseShuffleIndices) { 4721 if (ShuffleOrOp == Instruction::ExtractElement) { 4722 auto *EE = cast<ExtractElementInst>(VL[I]); 4723 CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement, 4724 EE->getVectorOperandType(), 4725 *getExtractIndex(EE)); 4726 } else { 4727 CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement, 4728 VecTy, Idx); 4729 ++Idx; 4730 } 4731 } 4732 Idx = EntryVF; 4733 for (Value *V : VL) { 4734 if (ShuffleOrOp == Instruction::ExtractElement) { 4735 auto *EE = cast<ExtractElementInst>(V); 4736 CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement, 4737 EE->getVectorOperandType(), 4738 *getExtractIndex(EE)); 4739 } else { 4740 --Idx; 4741 CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement, 4742 VecTy, Idx); 4743 } 4744 } 4745 } 4746 if (ShuffleOrOp == Instruction::ExtractValue) { 4747 for (unsigned I = 0, E = VL.size(); I < E; ++I) { 4748 auto *EI = cast<Instruction>(VL[I]); 4749 // Take credit for instruction that will become dead. 4750 if (EI->hasOneUse()) { 4751 Instruction *Ext = EI->user_back(); 4752 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 4753 all_of(Ext->users(), 4754 [](User *U) { return isa<GetElementPtrInst>(U); })) { 4755 // Use getExtractWithExtendCost() to calculate the cost of 4756 // extractelement/ext pair. 4757 CommonCost -= TTI->getExtractWithExtendCost( 4758 Ext->getOpcode(), Ext->getType(), VecTy, I); 4759 // Add back the cost of s|zext which is subtracted separately. 
4760 CommonCost += TTI->getCastInstrCost( 4761 Ext->getOpcode(), Ext->getType(), EI->getType(), 4762 TTI::getCastContextHint(Ext), CostKind, Ext); 4763 continue; 4764 } 4765 } 4766 CommonCost -= 4767 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, I); 4768 } 4769 } else { 4770 AdjustExtractsCost(CommonCost); 4771 } 4772 return CommonCost; 4773 } 4774 case Instruction::InsertElement: { 4775 assert(E->ReuseShuffleIndices.empty() && 4776 "Unique insertelements only are expected."); 4777 auto *SrcVecTy = cast<FixedVectorType>(VL0->getType()); 4778 4779 unsigned const NumElts = SrcVecTy->getNumElements(); 4780 unsigned const NumScalars = VL.size(); 4781 APInt DemandedElts = APInt::getZero(NumElts); 4782 // TODO: Add support for Instruction::InsertValue. 4783 SmallVector<int> Mask; 4784 if (!E->ReorderIndices.empty()) { 4785 inversePermutation(E->ReorderIndices, Mask); 4786 Mask.append(NumElts - NumScalars, UndefMaskElem); 4787 } else { 4788 Mask.assign(NumElts, UndefMaskElem); 4789 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0); 4790 } 4791 unsigned Offset = *getInsertIndex(VL0, 0); 4792 bool IsIdentity = true; 4793 SmallVector<int> PrevMask(NumElts, UndefMaskElem); 4794 Mask.swap(PrevMask); 4795 for (unsigned I = 0; I < NumScalars; ++I) { 4796 Optional<int> InsertIdx = getInsertIndex(VL[PrevMask[I]], 0); 4797 if (!InsertIdx || *InsertIdx == UndefMaskElem) 4798 continue; 4799 DemandedElts.setBit(*InsertIdx); 4800 IsIdentity &= *InsertIdx - Offset == I; 4801 Mask[*InsertIdx - Offset] = I; 4802 } 4803 assert(Offset < NumElts && "Failed to find vector index offset"); 4804 4805 InstructionCost Cost = 0; 4806 Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts, 4807 /*Insert*/ true, /*Extract*/ false); 4808 4809 if (IsIdentity && NumElts != NumScalars && Offset % NumScalars != 0) { 4810 // FIXME: Replace with SK_InsertSubvector once it is properly supported. 4811 unsigned Sz = PowerOf2Ceil(Offset + NumScalars); 4812 Cost += TTI->getShuffleCost( 4813 TargetTransformInfo::SK_PermuteSingleSrc, 4814 FixedVectorType::get(SrcVecTy->getElementType(), Sz)); 4815 } else if (!IsIdentity) { 4816 auto *FirstInsert = 4817 cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 4818 return !is_contained(E->Scalars, 4819 cast<Instruction>(V)->getOperand(0)); 4820 })); 4821 if (isUndefVector(FirstInsert->getOperand(0))) { 4822 Cost += TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, SrcVecTy, Mask); 4823 } else { 4824 SmallVector<int> InsertMask(NumElts); 4825 std::iota(InsertMask.begin(), InsertMask.end(), 0); 4826 for (unsigned I = 0; I < NumElts; I++) { 4827 if (Mask[I] != UndefMaskElem) 4828 InsertMask[Offset + I] = NumElts + I; 4829 } 4830 Cost += 4831 TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVecTy, InsertMask); 4832 } 4833 } 4834 4835 return Cost; 4836 } 4837 case Instruction::ZExt: 4838 case Instruction::SExt: 4839 case Instruction::FPToUI: 4840 case Instruction::FPToSI: 4841 case Instruction::FPExt: 4842 case Instruction::PtrToInt: 4843 case Instruction::IntToPtr: 4844 case Instruction::SIToFP: 4845 case Instruction::UIToFP: 4846 case Instruction::Trunc: 4847 case Instruction::FPTrunc: 4848 case Instruction::BitCast: { 4849 Type *SrcTy = VL0->getOperand(0)->getType(); 4850 InstructionCost ScalarEltCost = 4851 TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy, 4852 TTI::getCastContextHint(VL0), CostKind, VL0); 4853 if (NeedToShuffleReuses) { 4854 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 4855 } 4856 4857 // Calculate the cost of this instruction. 
4858 InstructionCost ScalarCost = VL.size() * ScalarEltCost; 4859 4860 auto *SrcVecTy = FixedVectorType::get(SrcTy, VL.size()); 4861 InstructionCost VecCost = 0; 4862 // Check if the values are candidates to demote. 4863 if (!MinBWs.count(VL0) || VecTy != SrcVecTy) { 4864 VecCost = CommonCost + TTI->getCastInstrCost( 4865 E->getOpcode(), VecTy, SrcVecTy, 4866 TTI::getCastContextHint(VL0), CostKind, VL0); 4867 } 4868 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 4869 return VecCost - ScalarCost; 4870 } 4871 case Instruction::FCmp: 4872 case Instruction::ICmp: 4873 case Instruction::Select: { 4874 // Calculate the cost of this instruction. 4875 InstructionCost ScalarEltCost = 4876 TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, Builder.getInt1Ty(), 4877 CmpInst::BAD_ICMP_PREDICATE, CostKind, VL0); 4878 if (NeedToShuffleReuses) { 4879 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 4880 } 4881 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size()); 4882 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; 4883 4884 // Check if all entries in VL are either compares or selects with compares 4885 // as condition that have the same predicates. 4886 CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE; 4887 bool First = true; 4888 for (auto *V : VL) { 4889 CmpInst::Predicate CurrentPred; 4890 auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value()); 4891 if ((!match(V, m_Select(MatchCmp, m_Value(), m_Value())) && 4892 !match(V, MatchCmp)) || 4893 (!First && VecPred != CurrentPred)) { 4894 VecPred = CmpInst::BAD_ICMP_PREDICATE; 4895 break; 4896 } 4897 First = false; 4898 VecPred = CurrentPred; 4899 } 4900 4901 InstructionCost VecCost = TTI->getCmpSelInstrCost( 4902 E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0); 4903 // Check if it is possible and profitable to use min/max for selects in 4904 // VL. 4905 // 4906 auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL); 4907 if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) { 4908 IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy, 4909 {VecTy, VecTy}); 4910 InstructionCost IntrinsicCost = 4911 TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 4912 // If the selects are the only uses of the compares, they will be dead 4913 // and we can adjust the cost by removing their cost. 4914 if (IntrinsicAndUse.second) 4915 IntrinsicCost -= 4916 TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy, MaskTy, 4917 CmpInst::BAD_ICMP_PREDICATE, CostKind); 4918 VecCost = std::min(VecCost, IntrinsicCost); 4919 } 4920 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 4921 return CommonCost + VecCost - ScalarCost; 4922 } 4923 case Instruction::FNeg: 4924 case Instruction::Add: 4925 case Instruction::FAdd: 4926 case Instruction::Sub: 4927 case Instruction::FSub: 4928 case Instruction::Mul: 4929 case Instruction::FMul: 4930 case Instruction::UDiv: 4931 case Instruction::SDiv: 4932 case Instruction::FDiv: 4933 case Instruction::URem: 4934 case Instruction::SRem: 4935 case Instruction::FRem: 4936 case Instruction::Shl: 4937 case Instruction::LShr: 4938 case Instruction::AShr: 4939 case Instruction::And: 4940 case Instruction::Or: 4941 case Instruction::Xor: { 4942 // Certain instructions can be cheaper to vectorize if they have a 4943 // constant second vector operand. 
4944 TargetTransformInfo::OperandValueKind Op1VK = 4945 TargetTransformInfo::OK_AnyValue; 4946 TargetTransformInfo::OperandValueKind Op2VK = 4947 TargetTransformInfo::OK_UniformConstantValue; 4948 TargetTransformInfo::OperandValueProperties Op1VP = 4949 TargetTransformInfo::OP_None; 4950 TargetTransformInfo::OperandValueProperties Op2VP = 4951 TargetTransformInfo::OP_PowerOf2; 4952 4953 // If all operands are exactly the same ConstantInt then set the 4954 // operand kind to OK_UniformConstantValue. 4955 // If instead not all operands are constants, then set the operand kind 4956 // to OK_AnyValue. If all operands are constants but not the same, 4957 // then set the operand kind to OK_NonUniformConstantValue. 4958 ConstantInt *CInt0 = nullptr; 4959 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 4960 const Instruction *I = cast<Instruction>(VL[i]); 4961 unsigned OpIdx = isa<BinaryOperator>(I) ? 1 : 0; 4962 ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx)); 4963 if (!CInt) { 4964 Op2VK = TargetTransformInfo::OK_AnyValue; 4965 Op2VP = TargetTransformInfo::OP_None; 4966 break; 4967 } 4968 if (Op2VP == TargetTransformInfo::OP_PowerOf2 && 4969 !CInt->getValue().isPowerOf2()) 4970 Op2VP = TargetTransformInfo::OP_None; 4971 if (i == 0) { 4972 CInt0 = CInt; 4973 continue; 4974 } 4975 if (CInt0 != CInt) 4976 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 4977 } 4978 4979 SmallVector<const Value *, 4> Operands(VL0->operand_values()); 4980 InstructionCost ScalarEltCost = 4981 TTI->getArithmeticInstrCost(E->getOpcode(), ScalarTy, CostKind, Op1VK, 4982 Op2VK, Op1VP, Op2VP, Operands, VL0); 4983 if (NeedToShuffleReuses) { 4984 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 4985 } 4986 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; 4987 InstructionCost VecCost = 4988 TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind, Op1VK, 4989 Op2VK, Op1VP, Op2VP, Operands, VL0); 4990 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 4991 return CommonCost + VecCost - ScalarCost; 4992 } 4993 case Instruction::GetElementPtr: { 4994 TargetTransformInfo::OperandValueKind Op1VK = 4995 TargetTransformInfo::OK_AnyValue; 4996 TargetTransformInfo::OperandValueKind Op2VK = 4997 TargetTransformInfo::OK_UniformConstantValue; 4998 4999 InstructionCost ScalarEltCost = TTI->getArithmeticInstrCost( 5000 Instruction::Add, ScalarTy, CostKind, Op1VK, Op2VK); 5001 if (NeedToShuffleReuses) { 5002 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 5003 } 5004 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; 5005 InstructionCost VecCost = TTI->getArithmeticInstrCost( 5006 Instruction::Add, VecTy, CostKind, Op1VK, Op2VK); 5007 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 5008 return CommonCost + VecCost - ScalarCost; 5009 } 5010 case Instruction::Load: { 5011 // Cost of wide load - cost of scalar loads. 
5012 Align Alignment = cast<LoadInst>(VL0)->getAlign(); 5013 InstructionCost ScalarEltCost = TTI->getMemoryOpCost( 5014 Instruction::Load, ScalarTy, Alignment, 0, CostKind, VL0); 5015 if (NeedToShuffleReuses) { 5016 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 5017 } 5018 InstructionCost ScalarLdCost = VecTy->getNumElements() * ScalarEltCost; 5019 InstructionCost VecLdCost; 5020 if (E->State == TreeEntry::Vectorize) { 5021 VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, Alignment, 0, 5022 CostKind, VL0); 5023 } else { 5024 assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState"); 5025 Align CommonAlignment = Alignment; 5026 for (Value *V : VL) 5027 CommonAlignment = 5028 commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign()); 5029 VecLdCost = TTI->getGatherScatterOpCost( 5030 Instruction::Load, VecTy, cast<LoadInst>(VL0)->getPointerOperand(), 5031 /*VariableMask=*/false, CommonAlignment, CostKind, VL0); 5032 } 5033 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecLdCost, ScalarLdCost)); 5034 return CommonCost + VecLdCost - ScalarLdCost; 5035 } 5036 case Instruction::Store: { 5037 // We know that we can merge the stores. Calculate the cost. 5038 bool IsReorder = !E->ReorderIndices.empty(); 5039 auto *SI = 5040 cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0); 5041 Align Alignment = SI->getAlign(); 5042 InstructionCost ScalarEltCost = TTI->getMemoryOpCost( 5043 Instruction::Store, ScalarTy, Alignment, 0, CostKind, VL0); 5044 InstructionCost ScalarStCost = VecTy->getNumElements() * ScalarEltCost; 5045 InstructionCost VecStCost = TTI->getMemoryOpCost( 5046 Instruction::Store, VecTy, Alignment, 0, CostKind, VL0); 5047 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecStCost, ScalarStCost)); 5048 return CommonCost + VecStCost - ScalarStCost; 5049 } 5050 case Instruction::Call: { 5051 CallInst *CI = cast<CallInst>(VL0); 5052 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 5053 5054 // Calculate the cost of the scalar and vector calls. 
5055 IntrinsicCostAttributes CostAttrs(ID, *CI, 1); 5056 InstructionCost ScalarEltCost = 5057 TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 5058 if (NeedToShuffleReuses) { 5059 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 5060 } 5061 InstructionCost ScalarCallCost = VecTy->getNumElements() * ScalarEltCost; 5062 5063 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 5064 InstructionCost VecCallCost = 5065 std::min(VecCallCosts.first, VecCallCosts.second); 5066 5067 LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost 5068 << " (" << VecCallCost << "-" << ScalarCallCost << ")" 5069 << " for " << *CI << "\n"); 5070 5071 return CommonCost + VecCallCost - ScalarCallCost; 5072 } 5073 case Instruction::ShuffleVector: { 5074 assert(E->isAltShuffle() && 5075 ((Instruction::isBinaryOp(E->getOpcode()) && 5076 Instruction::isBinaryOp(E->getAltOpcode())) || 5077 (Instruction::isCast(E->getOpcode()) && 5078 Instruction::isCast(E->getAltOpcode()))) && 5079 "Invalid Shuffle Vector Operand"); 5080 InstructionCost ScalarCost = 0; 5081 if (NeedToShuffleReuses) { 5082 for (unsigned Idx : E->ReuseShuffleIndices) { 5083 Instruction *I = cast<Instruction>(VL[Idx]); 5084 CommonCost -= TTI->getInstructionCost(I, CostKind); 5085 } 5086 for (Value *V : VL) { 5087 Instruction *I = cast<Instruction>(V); 5088 CommonCost += TTI->getInstructionCost(I, CostKind); 5089 } 5090 } 5091 for (Value *V : VL) { 5092 Instruction *I = cast<Instruction>(V); 5093 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 5094 ScalarCost += TTI->getInstructionCost(I, CostKind); 5095 } 5096 // VecCost is equal to sum of the cost of creating 2 vectors 5097 // and the cost of creating shuffle. 5098 InstructionCost VecCost = 0; 5099 // Try to find the previous shuffle node with the same operands and same 5100 // main/alternate ops. 5101 auto &&TryFindNodeWithEqualOperands = [this, E]() { 5102 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 5103 if (TE.get() == E) 5104 break; 5105 if (TE->isAltShuffle() && 5106 ((TE->getOpcode() == E->getOpcode() && 5107 TE->getAltOpcode() == E->getAltOpcode()) || 5108 (TE->getOpcode() == E->getAltOpcode() && 5109 TE->getAltOpcode() == E->getOpcode())) && 5110 TE->hasEqualOperands(*E)) 5111 return true; 5112 } 5113 return false; 5114 }; 5115 if (TryFindNodeWithEqualOperands()) { 5116 LLVM_DEBUG({ 5117 dbgs() << "SLP: diamond match for alternate node found.\n"; 5118 E->dump(); 5119 }); 5120 // No need to add new vector costs here since we're going to reuse 5121 // same main/alternate vector ops, just do different shuffling. 
5122 } else if (Instruction::isBinaryOp(E->getOpcode())) { 5123 VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind); 5124 VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy, 5125 CostKind); 5126 } else { 5127 Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType(); 5128 Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType(); 5129 auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size()); 5130 auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size()); 5131 VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty, 5132 TTI::CastContextHint::None, CostKind); 5133 VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty, 5134 TTI::CastContextHint::None, CostKind); 5135 } 5136 5137 SmallVector<int> Mask; 5138 buildSuffleEntryMask( 5139 E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices, 5140 [E](Instruction *I) { 5141 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 5142 return I->getOpcode() == E->getAltOpcode(); 5143 }, 5144 Mask); 5145 CommonCost = 5146 TTI->getShuffleCost(TargetTransformInfo::SK_Select, FinalVecTy, Mask); 5147 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 5148 return CommonCost + VecCost - ScalarCost; 5149 } 5150 default: 5151 llvm_unreachable("Unknown instruction"); 5152 } 5153 } 5154 5155 bool BoUpSLP::isFullyVectorizableTinyTree(bool ForReduction) const { 5156 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height " 5157 << VectorizableTree.size() << " is fully vectorizable .\n"); 5158 5159 auto &&AreVectorizableGathers = [this](const TreeEntry *TE, unsigned Limit) { 5160 SmallVector<int> Mask; 5161 return TE->State == TreeEntry::NeedToGather && 5162 !any_of(TE->Scalars, 5163 [this](Value *V) { return EphValues.contains(V); }) && 5164 (allConstant(TE->Scalars) || isSplat(TE->Scalars) || 5165 TE->Scalars.size() < Limit || 5166 ((TE->getOpcode() == Instruction::ExtractElement || 5167 all_of(TE->Scalars, 5168 [](Value *V) { 5169 return isa<ExtractElementInst, UndefValue>(V); 5170 })) && 5171 isFixedVectorShuffle(TE->Scalars, Mask)) || 5172 (TE->State == TreeEntry::NeedToGather && 5173 TE->getOpcode() == Instruction::Load && !TE->isAltShuffle())); 5174 }; 5175 5176 // We only handle trees of heights 1 and 2. 5177 if (VectorizableTree.size() == 1 && 5178 (VectorizableTree[0]->State == TreeEntry::Vectorize || 5179 (ForReduction && 5180 AreVectorizableGathers(VectorizableTree[0].get(), 5181 VectorizableTree[0]->Scalars.size()) && 5182 VectorizableTree[0]->getVectorFactor() > 2))) 5183 return true; 5184 5185 if (VectorizableTree.size() != 2) 5186 return false; 5187 5188 // Handle splat and all-constants stores. Also try to vectorize tiny trees 5189 // with the second gather nodes if they have less scalar operands rather than 5190 // the initial tree element (may be profitable to shuffle the second gather) 5191 // or they are extractelements, which form shuffle. 5192 SmallVector<int> Mask; 5193 if (VectorizableTree[0]->State == TreeEntry::Vectorize && 5194 AreVectorizableGathers(VectorizableTree[1].get(), 5195 VectorizableTree[0]->Scalars.size())) 5196 return true; 5197 5198 // Gathering cost would be too much for tiny trees. 
5199 if (VectorizableTree[0]->State == TreeEntry::NeedToGather || 5200 (VectorizableTree[1]->State == TreeEntry::NeedToGather && 5201 VectorizableTree[0]->State != TreeEntry::ScatterVectorize)) 5202 return false; 5203 5204 return true; 5205 } 5206 5207 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts, 5208 TargetTransformInfo *TTI, 5209 bool MustMatchOrInst) { 5210 // Look past the root to find a source value. Arbitrarily follow the 5211 // path through operand 0 of any 'or'. Also, peek through optional 5212 // shift-left-by-multiple-of-8-bits. 5213 Value *ZextLoad = Root; 5214 const APInt *ShAmtC; 5215 bool FoundOr = false; 5216 while (!isa<ConstantExpr>(ZextLoad) && 5217 (match(ZextLoad, m_Or(m_Value(), m_Value())) || 5218 (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) && 5219 ShAmtC->urem(8) == 0))) { 5220 auto *BinOp = cast<BinaryOperator>(ZextLoad); 5221 ZextLoad = BinOp->getOperand(0); 5222 if (BinOp->getOpcode() == Instruction::Or) 5223 FoundOr = true; 5224 } 5225 // Check if the input is an extended load of the required or/shift expression. 5226 Value *Load; 5227 if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root || 5228 !match(ZextLoad, m_ZExt(m_Value(Load))) || !isa<LoadInst>(Load)) 5229 return false; 5230 5231 // Require that the total load bit width is a legal integer type. 5232 // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target. 5233 // But <16 x i8> --> i128 is not, so the backend probably can't reduce it. 5234 Type *SrcTy = Load->getType(); 5235 unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts; 5236 if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth))) 5237 return false; 5238 5239 // Everything matched - assume that we can fold the whole sequence using 5240 // load combining. 5241 LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at " 5242 << *(cast<Instruction>(Root)) << "\n"); 5243 5244 return true; 5245 } 5246 5247 bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const { 5248 if (RdxKind != RecurKind::Or) 5249 return false; 5250 5251 unsigned NumElts = VectorizableTree[0]->Scalars.size(); 5252 Value *FirstReduced = VectorizableTree[0]->Scalars[0]; 5253 return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI, 5254 /* MatchOr */ false); 5255 } 5256 5257 bool BoUpSLP::isLoadCombineCandidate() const { 5258 // Peek through a final sequence of stores and check if all operations are 5259 // likely to be load-combined. 5260 unsigned NumElts = VectorizableTree[0]->Scalars.size(); 5261 for (Value *Scalar : VectorizableTree[0]->Scalars) { 5262 Value *X; 5263 if (!match(Scalar, m_Store(m_Value(X), m_Value())) || 5264 !isLoadCombineCandidateImpl(X, NumElts, TTI, /* MatchOr */ true)) 5265 return false; 5266 } 5267 return true; 5268 } 5269 5270 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const { 5271 // No need to vectorize inserts of gathered values. 5272 if (VectorizableTree.size() == 2 && 5273 isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) && 5274 VectorizableTree[1]->State == TreeEntry::NeedToGather) 5275 return true; 5276 5277 // We can vectorize the tree if its size is greater than or equal to the 5278 // minimum size specified by the MinTreeSize command line option. 5279 if (VectorizableTree.size() >= MinTreeSize) 5280 return false; 5281 5282 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we 5283 // can vectorize it if we can prove it fully vectorizable. 
5284 if (isFullyVectorizableTinyTree(ForReduction)) 5285 return false; 5286 5287 assert(VectorizableTree.empty() 5288 ? ExternalUses.empty() 5289 : true && "We shouldn't have any external users"); 5290 5291 // Otherwise, we can't vectorize the tree. It is both tiny and not fully 5292 // vectorizable. 5293 return true; 5294 } 5295 5296 InstructionCost BoUpSLP::getSpillCost() const { 5297 // Walk from the bottom of the tree to the top, tracking which values are 5298 // live. When we see a call instruction that is not part of our tree, 5299 // query TTI to see if there is a cost to keeping values live over it 5300 // (for example, if spills and fills are required). 5301 unsigned BundleWidth = VectorizableTree.front()->Scalars.size(); 5302 InstructionCost Cost = 0; 5303 5304 SmallPtrSet<Instruction*, 4> LiveValues; 5305 Instruction *PrevInst = nullptr; 5306 5307 // The entries in VectorizableTree are not necessarily ordered by their 5308 // position in basic blocks. Collect them and order them by dominance so later 5309 // instructions are guaranteed to be visited first. For instructions in 5310 // different basic blocks, we only scan to the beginning of the block, so 5311 // their order does not matter, as long as all instructions in a basic block 5312 // are grouped together. Using dominance ensures a deterministic order. 5313 SmallVector<Instruction *, 16> OrderedScalars; 5314 for (const auto &TEPtr : VectorizableTree) { 5315 Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]); 5316 if (!Inst) 5317 continue; 5318 OrderedScalars.push_back(Inst); 5319 } 5320 llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) { 5321 auto *NodeA = DT->getNode(A->getParent()); 5322 auto *NodeB = DT->getNode(B->getParent()); 5323 assert(NodeA && "Should only process reachable instructions"); 5324 assert(NodeB && "Should only process reachable instructions"); 5325 assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 5326 "Different nodes should have different DFS numbers"); 5327 if (NodeA != NodeB) 5328 return NodeA->getDFSNumIn() < NodeB->getDFSNumIn(); 5329 return B->comesBefore(A); 5330 }); 5331 5332 for (Instruction *Inst : OrderedScalars) { 5333 if (!PrevInst) { 5334 PrevInst = Inst; 5335 continue; 5336 } 5337 5338 // Update LiveValues. 5339 LiveValues.erase(PrevInst); 5340 for (auto &J : PrevInst->operands()) { 5341 if (isa<Instruction>(&*J) && getTreeEntry(&*J)) 5342 LiveValues.insert(cast<Instruction>(&*J)); 5343 } 5344 5345 LLVM_DEBUG({ 5346 dbgs() << "SLP: #LV: " << LiveValues.size(); 5347 for (auto *X : LiveValues) 5348 dbgs() << " " << X->getName(); 5349 dbgs() << ", Looking at "; 5350 Inst->dump(); 5351 }); 5352 5353 // Now find the sequence of instructions between PrevInst and Inst. 5354 unsigned NumCalls = 0; 5355 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(), 5356 PrevInstIt = 5357 PrevInst->getIterator().getReverse(); 5358 while (InstIt != PrevInstIt) { 5359 if (PrevInstIt == PrevInst->getParent()->rend()) { 5360 PrevInstIt = Inst->getParent()->rbegin(); 5361 continue; 5362 } 5363 5364 // Debug information does not impact spill cost. 
5365 if ((isa<CallInst>(&*PrevInstIt) && 5366 !isa<DbgInfoIntrinsic>(&*PrevInstIt)) && 5367 &*PrevInstIt != PrevInst) 5368 NumCalls++; 5369 5370 ++PrevInstIt; 5371 } 5372 5373 if (NumCalls) { 5374 SmallVector<Type*, 4> V; 5375 for (auto *II : LiveValues) { 5376 auto *ScalarTy = II->getType(); 5377 if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy)) 5378 ScalarTy = VectorTy->getElementType(); 5379 V.push_back(FixedVectorType::get(ScalarTy, BundleWidth)); 5380 } 5381 Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V); 5382 } 5383 5384 PrevInst = Inst; 5385 } 5386 5387 return Cost; 5388 } 5389 5390 /// Check if two insertelement instructions are from the same buildvector. 5391 static bool areTwoInsertFromSameBuildVector(InsertElementInst *VU, 5392 InsertElementInst *V) { 5393 // Instructions must be from the same basic blocks. 5394 if (VU->getParent() != V->getParent()) 5395 return false; 5396 // Checks if 2 insertelements are from the same buildvector. 5397 if (VU->getType() != V->getType()) 5398 return false; 5399 // Multiple used inserts are separate nodes. 5400 if (!VU->hasOneUse() && !V->hasOneUse()) 5401 return false; 5402 auto *IE1 = VU; 5403 auto *IE2 = V; 5404 // Go through the vector operand of insertelement instructions trying to find 5405 // either VU as the original vector for IE2 or V as the original vector for 5406 // IE1. 5407 do { 5408 if (IE2 == VU || IE1 == V) 5409 return true; 5410 if (IE1) { 5411 if (IE1 != VU && !IE1->hasOneUse()) 5412 IE1 = nullptr; 5413 else 5414 IE1 = dyn_cast<InsertElementInst>(IE1->getOperand(0)); 5415 } 5416 if (IE2) { 5417 if (IE2 != V && !IE2->hasOneUse()) 5418 IE2 = nullptr; 5419 else 5420 IE2 = dyn_cast<InsertElementInst>(IE2->getOperand(0)); 5421 } 5422 } while (IE1 || IE2); 5423 return false; 5424 } 5425 5426 InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) { 5427 InstructionCost Cost = 0; 5428 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size " 5429 << VectorizableTree.size() << ".\n"); 5430 5431 unsigned BundleWidth = VectorizableTree[0]->Scalars.size(); 5432 5433 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) { 5434 TreeEntry &TE = *VectorizableTree[I].get(); 5435 5436 InstructionCost C = getEntryCost(&TE, VectorizedVals); 5437 Cost += C; 5438 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 5439 << " for bundle that starts with " << *TE.Scalars[0] 5440 << ".\n" 5441 << "SLP: Current total cost = " << Cost << "\n"); 5442 } 5443 5444 SmallPtrSet<Value *, 16> ExtractCostCalculated; 5445 InstructionCost ExtractCost = 0; 5446 SmallVector<unsigned> VF; 5447 SmallVector<SmallVector<int>> ShuffleMask; 5448 SmallVector<Value *> FirstUsers; 5449 SmallVector<APInt> DemandedElts; 5450 for (ExternalUser &EU : ExternalUses) { 5451 // We only add extract cost once for the same scalar. 5452 if (!isa_and_nonnull<InsertElementInst>(EU.User) && 5453 !ExtractCostCalculated.insert(EU.Scalar).second) 5454 continue; 5455 5456 // Uses by ephemeral values are free (because the ephemeral value will be 5457 // removed prior to code generation, and so the extraction will be 5458 // removed as well). 5459 if (EphValues.count(EU.User)) 5460 continue; 5461 5462 // No extract cost for vector "scalar" 5463 if (isa<FixedVectorType>(EU.Scalar->getType())) 5464 continue; 5465 5466 // Already counted the cost for external uses when tried to adjust the cost 5467 // for extractelements, no need to add it again. 
5468 if (isa<ExtractElementInst>(EU.Scalar)) 5469 continue; 5470 5471 // If found user is an insertelement, do not calculate extract cost but try 5472 // to detect it as a final shuffled/identity match. 5473 if (auto *VU = dyn_cast_or_null<InsertElementInst>(EU.User)) { 5474 if (auto *FTy = dyn_cast<FixedVectorType>(VU->getType())) { 5475 Optional<int> InsertIdx = getInsertIndex(VU, 0); 5476 if (!InsertIdx || *InsertIdx == UndefMaskElem) 5477 continue; 5478 auto *It = find_if(FirstUsers, [VU](Value *V) { 5479 return areTwoInsertFromSameBuildVector(VU, 5480 cast<InsertElementInst>(V)); 5481 }); 5482 int VecId = -1; 5483 if (It == FirstUsers.end()) { 5484 VF.push_back(FTy->getNumElements()); 5485 ShuffleMask.emplace_back(VF.back(), UndefMaskElem); 5486 // Find the insertvector, vectorized in tree, if any. 5487 Value *Base = VU; 5488 while (isa<InsertElementInst>(Base)) { 5489 // Build the mask for the vectorized insertelement instructions. 5490 if (const TreeEntry *E = getTreeEntry(Base)) { 5491 VU = cast<InsertElementInst>(Base); 5492 do { 5493 int Idx = E->findLaneForValue(Base); 5494 ShuffleMask.back()[Idx] = Idx; 5495 Base = cast<InsertElementInst>(Base)->getOperand(0); 5496 } while (E == getTreeEntry(Base)); 5497 break; 5498 } 5499 Base = cast<InsertElementInst>(Base)->getOperand(0); 5500 } 5501 FirstUsers.push_back(VU); 5502 DemandedElts.push_back(APInt::getZero(VF.back())); 5503 VecId = FirstUsers.size() - 1; 5504 } else { 5505 VecId = std::distance(FirstUsers.begin(), It); 5506 } 5507 int Idx = *InsertIdx; 5508 ShuffleMask[VecId][Idx] = EU.Lane; 5509 DemandedElts[VecId].setBit(Idx); 5510 continue; 5511 } 5512 } 5513 5514 // If we plan to rewrite the tree in a smaller type, we will need to sign 5515 // extend the extracted value back to the original type. Here, we account 5516 // for the extract and the added cost of the sign extend if needed. 5517 auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth); 5518 auto *ScalarRoot = VectorizableTree[0]->Scalars[0]; 5519 if (MinBWs.count(ScalarRoot)) { 5520 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 5521 auto Extend = 5522 MinBWs[ScalarRoot].second ? 
Instruction::SExt : Instruction::ZExt; 5523 VecTy = FixedVectorType::get(MinTy, BundleWidth); 5524 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), 5525 VecTy, EU.Lane); 5526 } else { 5527 ExtractCost += 5528 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane); 5529 } 5530 } 5531 5532 InstructionCost SpillCost = getSpillCost(); 5533 Cost += SpillCost + ExtractCost; 5534 if (FirstUsers.size() == 1) { 5535 int Limit = ShuffleMask.front().size() * 2; 5536 if (all_of(ShuffleMask.front(), [Limit](int Idx) { return Idx < Limit; }) && 5537 !ShuffleVectorInst::isIdentityMask(ShuffleMask.front())) { 5538 InstructionCost C = TTI->getShuffleCost( 5539 TTI::SK_PermuteSingleSrc, 5540 cast<FixedVectorType>(FirstUsers.front()->getType()), 5541 ShuffleMask.front()); 5542 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 5543 << " for final shuffle of insertelement external users " 5544 << *VectorizableTree.front()->Scalars.front() << ".\n" 5545 << "SLP: Current total cost = " << Cost << "\n"); 5546 Cost += C; 5547 } 5548 InstructionCost InsertCost = TTI->getScalarizationOverhead( 5549 cast<FixedVectorType>(FirstUsers.front()->getType()), 5550 DemandedElts.front(), /*Insert*/ true, /*Extract*/ false); 5551 LLVM_DEBUG(dbgs() << "SLP: subtracting the cost " << InsertCost 5552 << " for insertelements gather.\n" 5553 << "SLP: Current total cost = " << Cost << "\n"); 5554 Cost -= InsertCost; 5555 } else if (FirstUsers.size() >= 2) { 5556 unsigned MaxVF = *std::max_element(VF.begin(), VF.end()); 5557 // Combined masks of the first 2 vectors. 5558 SmallVector<int> CombinedMask(MaxVF, UndefMaskElem); 5559 copy(ShuffleMask.front(), CombinedMask.begin()); 5560 APInt CombinedDemandedElts = DemandedElts.front().zextOrSelf(MaxVF); 5561 auto *VecTy = FixedVectorType::get( 5562 cast<VectorType>(FirstUsers.front()->getType())->getElementType(), 5563 MaxVF); 5564 for (int I = 0, E = ShuffleMask[1].size(); I < E; ++I) { 5565 if (ShuffleMask[1][I] != UndefMaskElem) { 5566 CombinedMask[I] = ShuffleMask[1][I] + MaxVF; 5567 CombinedDemandedElts.setBit(I); 5568 } 5569 } 5570 InstructionCost C = 5571 TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, VecTy, CombinedMask); 5572 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 5573 << " for final shuffle of vector node and external " 5574 "insertelement users " 5575 << *VectorizableTree.front()->Scalars.front() << ".\n" 5576 << "SLP: Current total cost = " << Cost << "\n"); 5577 Cost += C; 5578 InstructionCost InsertCost = TTI->getScalarizationOverhead( 5579 VecTy, CombinedDemandedElts, /*Insert*/ true, /*Extract*/ false); 5580 LLVM_DEBUG(dbgs() << "SLP: subtracting the cost " << InsertCost 5581 << " for insertelements gather.\n" 5582 << "SLP: Current total cost = " << Cost << "\n"); 5583 Cost -= InsertCost; 5584 for (int I = 2, E = FirstUsers.size(); I < E; ++I) { 5585 // Other elements - permutation of 2 vectors (the initial one and the 5586 // next Ith incoming vector). 
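// Illustrative mask update (hypothetical values, MaxVF == 4): if the running
// CombinedMask is {0, 6, 2, undef} and ShuffleMask[I] is {undef, undef, 1,
// undef}, the loop below rewrites it to {0, 1, 5, undef}: lanes already
// defined keep identity indices into the accumulated vector (first source),
// and lane 2 now comes from the Ith incoming vector (second source, offset by
// MaxVF).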
5587 unsigned VF = ShuffleMask[I].size(); 5588 for (unsigned Idx = 0; Idx < VF; ++Idx) { 5589 int Mask = ShuffleMask[I][Idx]; 5590 if (Mask != UndefMaskElem) 5591 CombinedMask[Idx] = MaxVF + Mask; 5592 else if (CombinedMask[Idx] != UndefMaskElem) 5593 CombinedMask[Idx] = Idx; 5594 } 5595 for (unsigned Idx = VF; Idx < MaxVF; ++Idx) 5596 if (CombinedMask[Idx] != UndefMaskElem) 5597 CombinedMask[Idx] = Idx; 5598 InstructionCost C = 5599 TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, VecTy, CombinedMask); 5600 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 5601 << " for final shuffle of vector node and external " 5602 "insertelement users " 5603 << *VectorizableTree.front()->Scalars.front() << ".\n" 5604 << "SLP: Current total cost = " << Cost << "\n"); 5605 Cost += C; 5606 InstructionCost InsertCost = TTI->getScalarizationOverhead( 5607 cast<FixedVectorType>(FirstUsers[I]->getType()), DemandedElts[I], 5608 /*Insert*/ true, /*Extract*/ false); 5609 LLVM_DEBUG(dbgs() << "SLP: subtracting the cost " << InsertCost 5610 << " for insertelements gather.\n" 5611 << "SLP: Current total cost = " << Cost << "\n"); 5612 Cost -= InsertCost; 5613 } 5614 } 5615 5616 #ifndef NDEBUG 5617 SmallString<256> Str; 5618 { 5619 raw_svector_ostream OS(Str); 5620 OS << "SLP: Spill Cost = " << SpillCost << ".\n" 5621 << "SLP: Extract Cost = " << ExtractCost << ".\n" 5622 << "SLP: Total Cost = " << Cost << ".\n"; 5623 } 5624 LLVM_DEBUG(dbgs() << Str); 5625 if (ViewSLPTree) 5626 ViewGraph(this, "SLP" + F->getName(), false, Str); 5627 #endif 5628 5629 return Cost; 5630 } 5631 5632 Optional<TargetTransformInfo::ShuffleKind> 5633 BoUpSLP::isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask, 5634 SmallVectorImpl<const TreeEntry *> &Entries) { 5635 // TODO: currently checking only for Scalars in the tree entry, need to count 5636 // reused elements too for better cost estimation. 5637 Mask.assign(TE->Scalars.size(), UndefMaskElem); 5638 Entries.clear(); 5639 // Build a lists of values to tree entries. 5640 DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>> ValueToTEs; 5641 for (const std::unique_ptr<TreeEntry> &EntryPtr : VectorizableTree) { 5642 if (EntryPtr.get() == TE) 5643 break; 5644 if (EntryPtr->State != TreeEntry::NeedToGather) 5645 continue; 5646 for (Value *V : EntryPtr->Scalars) 5647 ValueToTEs.try_emplace(V).first->getSecond().insert(EntryPtr.get()); 5648 } 5649 // Find all tree entries used by the gathered values. If no common entries 5650 // found - not a shuffle. 5651 // Here we build a set of tree nodes for each gathered value and trying to 5652 // find the intersection between these sets. If we have at least one common 5653 // tree node for each gathered value - we have just a permutation of the 5654 // single vector. If we have 2 different sets, we're in situation where we 5655 // have a permutation of 2 input vectors. 5656 SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs; 5657 DenseMap<Value *, int> UsedValuesEntry; 5658 for (Value *V : TE->Scalars) { 5659 if (isa<UndefValue>(V)) 5660 continue; 5661 // Build a list of tree entries where V is used. 5662 SmallPtrSet<const TreeEntry *, 4> VToTEs; 5663 auto It = ValueToTEs.find(V); 5664 if (It != ValueToTEs.end()) 5665 VToTEs = It->second; 5666 if (const TreeEntry *VTE = getTreeEntry(V)) 5667 VToTEs.insert(VTE); 5668 if (VToTEs.empty()) 5669 return None; 5670 if (UsedTEs.empty()) { 5671 // The first iteration, just insert the list of nodes to vector. 
5672 UsedTEs.push_back(VToTEs); 5673 } else { 5674 // Need to check if there are any previously used tree nodes which use V. 5675 // If there are no such nodes, consider that we have another one input 5676 // vector. 5677 SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs); 5678 unsigned Idx = 0; 5679 for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) { 5680 // Do we have a non-empty intersection of previously listed tree entries 5681 // and tree entries using current V? 5682 set_intersect(VToTEs, Set); 5683 if (!VToTEs.empty()) { 5684 // Yes, write the new subset and continue analysis for the next 5685 // scalar. 5686 Set.swap(VToTEs); 5687 break; 5688 } 5689 VToTEs = SavedVToTEs; 5690 ++Idx; 5691 } 5692 // No non-empty intersection found - need to add a second set of possible 5693 // source vectors. 5694 if (Idx == UsedTEs.size()) { 5695 // If the number of input vectors is greater than 2 - not a permutation, 5696 // fallback to the regular gather. 5697 if (UsedTEs.size() == 2) 5698 return None; 5699 UsedTEs.push_back(SavedVToTEs); 5700 Idx = UsedTEs.size() - 1; 5701 } 5702 UsedValuesEntry.try_emplace(V, Idx); 5703 } 5704 } 5705 5706 unsigned VF = 0; 5707 if (UsedTEs.size() == 1) { 5708 // Try to find the perfect match in another gather node at first. 5709 auto It = find_if(UsedTEs.front(), [TE](const TreeEntry *EntryPtr) { 5710 return EntryPtr->isSame(TE->Scalars); 5711 }); 5712 if (It != UsedTEs.front().end()) { 5713 Entries.push_back(*It); 5714 std::iota(Mask.begin(), Mask.end(), 0); 5715 return TargetTransformInfo::SK_PermuteSingleSrc; 5716 } 5717 // No perfect match, just shuffle, so choose the first tree node. 5718 Entries.push_back(*UsedTEs.front().begin()); 5719 } else { 5720 // Try to find nodes with the same vector factor. 5721 assert(UsedTEs.size() == 2 && "Expected at max 2 permuted entries."); 5722 DenseMap<int, const TreeEntry *> VFToTE; 5723 for (const TreeEntry *TE : UsedTEs.front()) 5724 VFToTE.try_emplace(TE->getVectorFactor(), TE); 5725 for (const TreeEntry *TE : UsedTEs.back()) { 5726 auto It = VFToTE.find(TE->getVectorFactor()); 5727 if (It != VFToTE.end()) { 5728 VF = It->first; 5729 Entries.push_back(It->second); 5730 Entries.push_back(TE); 5731 break; 5732 } 5733 } 5734 // No 2 source vectors with the same vector factor - give up and do regular 5735 // gather. 5736 if (Entries.empty()) 5737 return None; 5738 } 5739 5740 // Build a shuffle mask for better cost estimation and vector emission. 5741 for (int I = 0, E = TE->Scalars.size(); I < E; ++I) { 5742 Value *V = TE->Scalars[I]; 5743 if (isa<UndefValue>(V)) 5744 continue; 5745 unsigned Idx = UsedValuesEntry.lookup(V); 5746 const TreeEntry *VTE = Entries[Idx]; 5747 int FoundLane = VTE->findLaneForValue(V); 5748 Mask[I] = Idx * VF + FoundLane; 5749 // Extra check required by isSingleSourceMaskImpl function (called by 5750 // ShuffleVectorInst::isSingleSourceMask). 
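// E.g. (illustrative) with TE->Scalars.size() == 4 but matched entries of
// vector factor 8, Idx * VF + FoundLane can reach 15, which is outside the
// [0, 2 * E) range the mask verifier expects, so such a combination falls
// back to a regular gather below.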
5751 if (Mask[I] >= 2 * E) 5752 return None; 5753 } 5754 switch (Entries.size()) { 5755 case 1: 5756 return TargetTransformInfo::SK_PermuteSingleSrc; 5757 case 2: 5758 return TargetTransformInfo::SK_PermuteTwoSrc; 5759 default: 5760 break; 5761 } 5762 return None; 5763 } 5764 5765 InstructionCost 5766 BoUpSLP::getGatherCost(FixedVectorType *Ty, 5767 const DenseSet<unsigned> &ShuffledIndices, 5768 bool NeedToShuffle) const { 5769 unsigned NumElts = Ty->getNumElements(); 5770 APInt DemandedElts = APInt::getZero(NumElts); 5771 for (unsigned I = 0; I < NumElts; ++I) 5772 if (!ShuffledIndices.count(I)) 5773 DemandedElts.setBit(I); 5774 InstructionCost Cost = 5775 TTI->getScalarizationOverhead(Ty, DemandedElts, /*Insert*/ true, 5776 /*Extract*/ false); 5777 if (NeedToShuffle) 5778 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty); 5779 return Cost; 5780 } 5781 5782 InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const { 5783 // Find the type of the operands in VL. 5784 Type *ScalarTy = VL[0]->getType(); 5785 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 5786 ScalarTy = SI->getValueOperand()->getType(); 5787 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 5788 bool DuplicateNonConst = false; 5789 // Find the cost of inserting/extracting values from the vector. 5790 // Check if the same elements are inserted several times and count them as 5791 // shuffle candidates. 5792 DenseSet<unsigned> ShuffledElements; 5793 DenseSet<Value *> UniqueElements; 5794 // Iterate in reverse order to consider insert elements with the high cost. 5795 for (unsigned I = VL.size(); I > 0; --I) { 5796 unsigned Idx = I - 1; 5797 // No need to shuffle duplicates for constants. 5798 if (isConstant(VL[Idx])) { 5799 ShuffledElements.insert(Idx); 5800 continue; 5801 } 5802 if (!UniqueElements.insert(VL[Idx]).second) { 5803 DuplicateNonConst = true; 5804 ShuffledElements.insert(Idx); 5805 } 5806 } 5807 return getGatherCost(VecTy, ShuffledElements, DuplicateNonConst); 5808 } 5809 5810 // Perform operand reordering on the instructions in VL and return the reordered 5811 // operands in Left and Right. 5812 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, 5813 SmallVectorImpl<Value *> &Left, 5814 SmallVectorImpl<Value *> &Right, 5815 const DataLayout &DL, 5816 ScalarEvolution &SE, 5817 const BoUpSLP &R) { 5818 if (VL.empty()) 5819 return; 5820 VLOperands Ops(VL, DL, SE, R); 5821 // Reorder the operands in place. 5822 Ops.reorder(); 5823 Left = Ops.getVL(0); 5824 Right = Ops.getVL(1); 5825 } 5826 5827 void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) { 5828 // Get the basic block this bundle is in. All instructions in the bundle 5829 // should be in this block. 5830 auto *Front = E->getMainOp(); 5831 auto *BB = Front->getParent(); 5832 assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool { 5833 auto *I = cast<Instruction>(V); 5834 return !E->isOpcodeOrAlt(I) || I->getParent() == BB; 5835 })); 5836 5837 // The last instruction in the bundle in program order. 5838 Instruction *LastInst = nullptr; 5839 5840 // Find the last instruction. The common case should be that BB has been 5841 // scheduled, and the last instruction is VL.back(). So we start with 5842 // VL.back() and iterate over schedule data until we reach the end of the 5843 // bundle. The end of the bundle is marked by null ScheduleData. 
5844 if (BlocksSchedules.count(BB)) { 5845 auto *Bundle = 5846 BlocksSchedules[BB]->getScheduleData(E->isOneOf(E->Scalars.back())); 5847 if (Bundle && Bundle->isPartOfBundle()) 5848 for (; Bundle; Bundle = Bundle->NextInBundle) 5849 if (Bundle->OpValue == Bundle->Inst) 5850 LastInst = Bundle->Inst; 5851 } 5852 5853 // LastInst can still be null at this point if there's either not an entry 5854 // for BB in BlocksSchedules or there's no ScheduleData available for 5855 // VL.back(). This can be the case if buildTree_rec aborts for various 5856 // reasons (e.g., the maximum recursion depth is reached, the maximum region 5857 // size is reached, etc.). ScheduleData is initialized in the scheduling 5858 // "dry-run". 5859 // 5860 // If this happens, we can still find the last instruction by brute force. We 5861 // iterate forwards from Front (inclusive) until we either see all 5862 // instructions in the bundle or reach the end of the block. If Front is the 5863 // last instruction in program order, LastInst will be set to Front, and we 5864 // will visit all the remaining instructions in the block. 5865 // 5866 // One of the reasons we exit early from buildTree_rec is to place an upper 5867 // bound on compile-time. Thus, taking an additional compile-time hit here is 5868 // not ideal. However, this should be exceedingly rare since it requires that 5869 // we both exit early from buildTree_rec and that the bundle be out-of-order 5870 // (causing us to iterate all the way to the end of the block). 5871 if (!LastInst) { 5872 SmallPtrSet<Value *, 16> Bundle(E->Scalars.begin(), E->Scalars.end()); 5873 for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) { 5874 if (Bundle.erase(&I) && E->isOpcodeOrAlt(&I)) 5875 LastInst = &I; 5876 if (Bundle.empty()) 5877 break; 5878 } 5879 } 5880 assert(LastInst && "Failed to find last instruction in bundle"); 5881 5882 // Set the insertion point after the last instruction in the bundle. Set the 5883 // debug location to Front. 5884 Builder.SetInsertPoint(BB, ++LastInst->getIterator()); 5885 Builder.SetCurrentDebugLocation(Front->getDebugLoc()); 5886 } 5887 5888 Value *BoUpSLP::gather(ArrayRef<Value *> VL) { 5889 // List of instructions/lanes from current block and/or the blocks which are 5890 // part of the current loop. These instructions will be inserted at the end to 5891 // make it possible to optimize loops and hoist invariant instructions out of 5892 // the loops body with better chances for success. 
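// Illustrative (hypothetical values): when gathering {%inv0, %inv1, %x.loop}
// inside a loop, the inserts of the two loop-invariant values are emitted
// first (and may later be hoisted by LICM), while the insert of the
// loop-defined %x.loop is postponed to the end of the sequence.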
5893 SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts; 5894 SmallSet<int, 4> PostponedIndices; 5895 Loop *L = LI->getLoopFor(Builder.GetInsertBlock()); 5896 auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) { 5897 SmallPtrSet<BasicBlock *, 4> Visited; 5898 while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second) 5899 InsertBB = InsertBB->getSinglePredecessor(); 5900 return InsertBB && InsertBB == InstBB; 5901 }; 5902 for (int I = 0, E = VL.size(); I < E; ++I) { 5903 if (auto *Inst = dyn_cast<Instruction>(VL[I])) 5904 if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) || 5905 getTreeEntry(Inst) || (L && (L->contains(Inst)))) && 5906 PostponedIndices.insert(I).second) 5907 PostponedInsts.emplace_back(Inst, I); 5908 } 5909 5910 auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) { 5911 Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos)); 5912 auto *InsElt = dyn_cast<InsertElementInst>(Vec); 5913 if (!InsElt) 5914 return Vec; 5915 GatherShuffleSeq.insert(InsElt); 5916 CSEBlocks.insert(InsElt->getParent()); 5917 // Add to our 'need-to-extract' list. 5918 if (TreeEntry *Entry = getTreeEntry(V)) { 5919 // Find which lane we need to extract. 5920 unsigned FoundLane = Entry->findLaneForValue(V); 5921 ExternalUses.emplace_back(V, InsElt, FoundLane); 5922 } 5923 return Vec; 5924 }; 5925 Value *Val0 = 5926 isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0]; 5927 FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size()); 5928 Value *Vec = PoisonValue::get(VecTy); 5929 SmallVector<int> NonConsts; 5930 // Insert constant values at first. 5931 for (int I = 0, E = VL.size(); I < E; ++I) { 5932 if (PostponedIndices.contains(I)) 5933 continue; 5934 if (!isConstant(VL[I])) { 5935 NonConsts.push_back(I); 5936 continue; 5937 } 5938 Vec = CreateInsertElement(Vec, VL[I], I); 5939 } 5940 // Insert non-constant values. 5941 for (int I : NonConsts) 5942 Vec = CreateInsertElement(Vec, VL[I], I); 5943 // Append instructions, which are/may be part of the loop, in the end to make 5944 // it possible to hoist non-loop-based instructions. 5945 for (const std::pair<Value *, unsigned> &Pair : PostponedInsts) 5946 Vec = CreateInsertElement(Vec, Pair.first, Pair.second); 5947 5948 return Vec; 5949 } 5950 5951 namespace { 5952 /// Merges shuffle masks and emits final shuffle instruction, if required. 5953 class ShuffleInstructionBuilder { 5954 IRBuilderBase &Builder; 5955 const unsigned VF = 0; 5956 bool IsFinalized = false; 5957 SmallVector<int, 4> Mask; 5958 /// Holds all of the instructions that we gathered. 5959 SetVector<Instruction *> &GatherShuffleSeq; 5960 /// A list of blocks that we are going to CSE. 5961 SetVector<BasicBlock *> &CSEBlocks; 5962 5963 public: 5964 ShuffleInstructionBuilder(IRBuilderBase &Builder, unsigned VF, 5965 SetVector<Instruction *> &GatherShuffleSeq, 5966 SetVector<BasicBlock *> &CSEBlocks) 5967 : Builder(Builder), VF(VF), GatherShuffleSeq(GatherShuffleSeq), 5968 CSEBlocks(CSEBlocks) {} 5969 5970 /// Adds a mask, inverting it before applying. 5971 void addInversedMask(ArrayRef<unsigned> SubMask) { 5972 if (SubMask.empty()) 5973 return; 5974 SmallVector<int, 4> NewMask; 5975 inversePermutation(SubMask, NewMask); 5976 addMask(NewMask); 5977 } 5978 5979 /// Functions adds masks, merging them into single one. 
5980 void addMask(ArrayRef<unsigned> SubMask) { 5981 SmallVector<int, 4> NewMask(SubMask.begin(), SubMask.end()); 5982 addMask(NewMask); 5983 } 5984 5985 void addMask(ArrayRef<int> SubMask) { ::addMask(Mask, SubMask); } 5986 5987 Value *finalize(Value *V) { 5988 IsFinalized = true; 5989 unsigned ValueVF = cast<FixedVectorType>(V->getType())->getNumElements(); 5990 if (VF == ValueVF && Mask.empty()) 5991 return V; 5992 SmallVector<int, 4> NormalizedMask(VF, UndefMaskElem); 5993 std::iota(NormalizedMask.begin(), NormalizedMask.end(), 0); 5994 addMask(NormalizedMask); 5995 5996 if (VF == ValueVF && ShuffleVectorInst::isIdentityMask(Mask)) 5997 return V; 5998 Value *Vec = Builder.CreateShuffleVector(V, Mask, "shuffle"); 5999 if (auto *I = dyn_cast<Instruction>(Vec)) { 6000 GatherShuffleSeq.insert(I); 6001 CSEBlocks.insert(I->getParent()); 6002 } 6003 return Vec; 6004 } 6005 6006 ~ShuffleInstructionBuilder() { 6007 assert((IsFinalized || Mask.empty()) && 6008 "Shuffle construction must be finalized."); 6009 } 6010 }; 6011 } // namespace 6012 6013 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) { 6014 unsigned VF = VL.size(); 6015 InstructionsState S = getSameOpcode(VL); 6016 if (S.getOpcode()) { 6017 if (TreeEntry *E = getTreeEntry(S.OpValue)) 6018 if (E->isSame(VL)) { 6019 Value *V = vectorizeTree(E); 6020 if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) { 6021 if (!E->ReuseShuffleIndices.empty()) { 6022 // Reshuffle to get only unique values. 6023 // If some of the scalars are duplicated in the vectorization tree 6024 // entry, we do not vectorize them but instead generate a mask for 6025 // the reuses. But if there are several users of the same entry, 6026 // they may have different vectorization factors. This is especially 6027 // important for PHI nodes. In this case, we need to adapt the 6028 // resulting instruction for the user vectorization factor and have 6029 // to reshuffle it again to take only unique elements of the vector. 6030 // Without this code the function incorrectly returns reduced vector 6031 // instruction with the same elements, not with the unique ones. 6032 6033 // block: 6034 // %phi = phi <2 x > { .., %entry} {%shuffle, %block} 6035 // %2 = shuffle <2 x > %phi, poison, <4 x > <1, 1, 0, 0> 6036 // ... (use %2) 6037 // %shuffle = shuffle <2 x> %2, poison, <2 x> {2, 0} 6038 // br %block 6039 SmallVector<int> UniqueIdxs(VF, UndefMaskElem); 6040 SmallSet<int, 4> UsedIdxs; 6041 int Pos = 0; 6042 int Sz = VL.size(); 6043 for (int Idx : E->ReuseShuffleIndices) { 6044 if (Idx != Sz && Idx != UndefMaskElem && 6045 UsedIdxs.insert(Idx).second) 6046 UniqueIdxs[Idx] = Pos; 6047 ++Pos; 6048 } 6049 assert(VF >= UsedIdxs.size() && "Expected vectorization factor " 6050 "less than original vector size."); 6051 UniqueIdxs.append(VF - UsedIdxs.size(), UndefMaskElem); 6052 V = Builder.CreateShuffleVector(V, UniqueIdxs, "shrink.shuffle"); 6053 } else { 6054 assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() && 6055 "Expected vectorization factor less " 6056 "than original vector size."); 6057 SmallVector<int> UniformMask(VF, 0); 6058 std::iota(UniformMask.begin(), UniformMask.end(), 0); 6059 V = Builder.CreateShuffleVector(V, UniformMask, "shrink.shuffle"); 6060 } 6061 if (auto *I = dyn_cast<Instruction>(V)) { 6062 GatherShuffleSeq.insert(I); 6063 CSEBlocks.insert(I->getParent()); 6064 } 6065 } 6066 return V; 6067 } 6068 } 6069 6070 // Check that every instruction appears once in this bundle. 
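// Illustrative (hypothetical scalars): for VL == {%a, %b, %a, undef} the code
// below keeps UniqueValues == {%a, %b} (padded with poison up to VF == 4) and
// records ReuseShuffleIndicies == {0, 1, 0, undef}, so the gathered unique
// vector is later widened back with a single shuffle.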
6071 SmallVector<int> ReuseShuffleIndicies; 6072 SmallVector<Value *> UniqueValues; 6073 if (VL.size() > 2) { 6074 DenseMap<Value *, unsigned> UniquePositions; 6075 unsigned NumValues = 6076 std::distance(VL.begin(), find_if(reverse(VL), [](Value *V) { 6077 return !isa<UndefValue>(V); 6078 }).base()); 6079 VF = std::max<unsigned>(VF, PowerOf2Ceil(NumValues)); 6080 int UniqueVals = 0; 6081 for (Value *V : VL.drop_back(VL.size() - VF)) { 6082 if (isa<UndefValue>(V)) { 6083 ReuseShuffleIndicies.emplace_back(UndefMaskElem); 6084 continue; 6085 } 6086 if (isConstant(V)) { 6087 ReuseShuffleIndicies.emplace_back(UniqueValues.size()); 6088 UniqueValues.emplace_back(V); 6089 continue; 6090 } 6091 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 6092 ReuseShuffleIndicies.emplace_back(Res.first->second); 6093 if (Res.second) { 6094 UniqueValues.emplace_back(V); 6095 ++UniqueVals; 6096 } 6097 } 6098 if (UniqueVals == 1 && UniqueValues.size() == 1) { 6099 // Emit pure splat vector. 6100 ReuseShuffleIndicies.append(VF - ReuseShuffleIndicies.size(), 6101 UndefMaskElem); 6102 } else if (UniqueValues.size() >= VF - 1 || UniqueValues.size() <= 1) { 6103 ReuseShuffleIndicies.clear(); 6104 UniqueValues.clear(); 6105 UniqueValues.append(VL.begin(), std::next(VL.begin(), NumValues)); 6106 } 6107 UniqueValues.append(VF - UniqueValues.size(), 6108 PoisonValue::get(VL[0]->getType())); 6109 VL = UniqueValues; 6110 } 6111 6112 ShuffleInstructionBuilder ShuffleBuilder(Builder, VF, GatherShuffleSeq, 6113 CSEBlocks); 6114 Value *Vec = gather(VL); 6115 if (!ReuseShuffleIndicies.empty()) { 6116 ShuffleBuilder.addMask(ReuseShuffleIndicies); 6117 Vec = ShuffleBuilder.finalize(Vec); 6118 } 6119 return Vec; 6120 } 6121 6122 Value *BoUpSLP::vectorizeTree(TreeEntry *E) { 6123 IRBuilder<>::InsertPointGuard Guard(Builder); 6124 6125 if (E->VectorizedValue) { 6126 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); 6127 return E->VectorizedValue; 6128 } 6129 6130 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 6131 unsigned VF = E->getVectorFactor(); 6132 ShuffleInstructionBuilder ShuffleBuilder(Builder, VF, GatherShuffleSeq, 6133 CSEBlocks); 6134 if (E->State == TreeEntry::NeedToGather) { 6135 if (E->getMainOp()) 6136 setInsertPointAfterBundle(E); 6137 Value *Vec; 6138 SmallVector<int> Mask; 6139 SmallVector<const TreeEntry *> Entries; 6140 Optional<TargetTransformInfo::ShuffleKind> Shuffle = 6141 isGatherShuffledEntry(E, Mask, Entries); 6142 if (Shuffle.hasValue()) { 6143 assert((Entries.size() == 1 || Entries.size() == 2) && 6144 "Expected shuffle of 1 or 2 entries."); 6145 Vec = Builder.CreateShuffleVector(Entries.front()->VectorizedValue, 6146 Entries.back()->VectorizedValue, Mask); 6147 if (auto *I = dyn_cast<Instruction>(Vec)) { 6148 GatherShuffleSeq.insert(I); 6149 CSEBlocks.insert(I->getParent()); 6150 } 6151 } else { 6152 Vec = gather(E->Scalars); 6153 } 6154 if (NeedToShuffleReuses) { 6155 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6156 Vec = ShuffleBuilder.finalize(Vec); 6157 } 6158 E->VectorizedValue = Vec; 6159 return Vec; 6160 } 6161 6162 assert((E->State == TreeEntry::Vectorize || 6163 E->State == TreeEntry::ScatterVectorize) && 6164 "Unhandled state"); 6165 unsigned ShuffleOrOp = 6166 E->isAltShuffle() ? 
(unsigned)Instruction::ShuffleVector : E->getOpcode(); 6167 Instruction *VL0 = E->getMainOp(); 6168 Type *ScalarTy = VL0->getType(); 6169 if (auto *Store = dyn_cast<StoreInst>(VL0)) 6170 ScalarTy = Store->getValueOperand()->getType(); 6171 else if (auto *IE = dyn_cast<InsertElementInst>(VL0)) 6172 ScalarTy = IE->getOperand(1)->getType(); 6173 auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size()); 6174 switch (ShuffleOrOp) { 6175 case Instruction::PHI: { 6176 assert( 6177 (E->ReorderIndices.empty() || E != VectorizableTree.front().get()) && 6178 "PHI reordering is free."); 6179 auto *PH = cast<PHINode>(VL0); 6180 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI()); 6181 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 6182 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 6183 Value *V = NewPhi; 6184 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6185 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6186 V = ShuffleBuilder.finalize(V); 6187 6188 E->VectorizedValue = V; 6189 6190 // PHINodes may have multiple entries from the same block. We want to 6191 // visit every block once. 6192 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 6193 6194 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 6195 ValueList Operands; 6196 BasicBlock *IBB = PH->getIncomingBlock(i); 6197 6198 if (!VisitedBBs.insert(IBB).second) { 6199 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 6200 continue; 6201 } 6202 6203 Builder.SetInsertPoint(IBB->getTerminator()); 6204 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 6205 Value *Vec = vectorizeTree(E->getOperand(i)); 6206 NewPhi->addIncoming(Vec, IBB); 6207 } 6208 6209 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 6210 "Invalid number of incoming values"); 6211 return V; 6212 } 6213 6214 case Instruction::ExtractElement: { 6215 Value *V = E->getSingleOperand(0); 6216 Builder.SetInsertPoint(VL0); 6217 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6218 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6219 V = ShuffleBuilder.finalize(V); 6220 E->VectorizedValue = V; 6221 return V; 6222 } 6223 case Instruction::ExtractValue: { 6224 auto *LI = cast<LoadInst>(E->getSingleOperand(0)); 6225 Builder.SetInsertPoint(LI); 6226 auto *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace()); 6227 Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy); 6228 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign()); 6229 Value *NewV = propagateMetadata(V, E->Scalars); 6230 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6231 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6232 NewV = ShuffleBuilder.finalize(NewV); 6233 E->VectorizedValue = NewV; 6234 return NewV; 6235 } 6236 case Instruction::InsertElement: { 6237 assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique"); 6238 Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back())); 6239 Value *V = vectorizeTree(E->getOperand(1)); 6240 6241 // Create InsertVector shuffle if necessary 6242 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 6243 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0)); 6244 })); 6245 const unsigned NumElts = 6246 cast<FixedVectorType>(FirstInsert->getType())->getNumElements(); 6247 const unsigned NumScalars = E->Scalars.size(); 6248 6249 unsigned Offset = *getInsertIndex(VL0, 0); 6250 assert(Offset < NumElts && "Failed to find vector index offset"); 6251 6252 // Create shuffle to resize vector 6253 SmallVector<int> Mask; 
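// For example (illustrative), inserting NumScalars = 4 scalars into an
// 8-element destination with no reordering produces
// Mask = {0, 1, 2, 3, undef, undef, undef, undef} here.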
6254 if (!E->ReorderIndices.empty()) { 6255 inversePermutation(E->ReorderIndices, Mask); 6256 Mask.append(NumElts - NumScalars, UndefMaskElem); 6257 } else { 6258 Mask.assign(NumElts, UndefMaskElem); 6259 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0); 6260 } 6261 // Create InsertVector shuffle if necessary 6262 bool IsIdentity = true; 6263 SmallVector<int> PrevMask(NumElts, UndefMaskElem); 6264 Mask.swap(PrevMask); 6265 for (unsigned I = 0; I < NumScalars; ++I) { 6266 Value *Scalar = E->Scalars[PrevMask[I]]; 6267 Optional<int> InsertIdx = getInsertIndex(Scalar, 0); 6268 if (!InsertIdx || *InsertIdx == UndefMaskElem) 6269 continue; 6270 IsIdentity &= *InsertIdx - Offset == I; 6271 Mask[*InsertIdx - Offset] = I; 6272 } 6273 if (!IsIdentity || NumElts != NumScalars) { 6274 V = Builder.CreateShuffleVector(V, Mask); 6275 if (auto *I = dyn_cast<Instruction>(V)) { 6276 GatherShuffleSeq.insert(I); 6277 CSEBlocks.insert(I->getParent()); 6278 } 6279 } 6280 6281 if ((!IsIdentity || Offset != 0 || 6282 !isUndefVector(FirstInsert->getOperand(0))) && 6283 NumElts != NumScalars) { 6284 SmallVector<int> InsertMask(NumElts); 6285 std::iota(InsertMask.begin(), InsertMask.end(), 0); 6286 for (unsigned I = 0; I < NumElts; I++) { 6287 if (Mask[I] != UndefMaskElem) 6288 InsertMask[Offset + I] = NumElts + I; 6289 } 6290 6291 V = Builder.CreateShuffleVector( 6292 FirstInsert->getOperand(0), V, InsertMask, 6293 cast<Instruction>(E->Scalars.back())->getName()); 6294 if (auto *I = dyn_cast<Instruction>(V)) { 6295 GatherShuffleSeq.insert(I); 6296 CSEBlocks.insert(I->getParent()); 6297 } 6298 } 6299 6300 ++NumVectorInstructions; 6301 E->VectorizedValue = V; 6302 return V; 6303 } 6304 case Instruction::ZExt: 6305 case Instruction::SExt: 6306 case Instruction::FPToUI: 6307 case Instruction::FPToSI: 6308 case Instruction::FPExt: 6309 case Instruction::PtrToInt: 6310 case Instruction::IntToPtr: 6311 case Instruction::SIToFP: 6312 case Instruction::UIToFP: 6313 case Instruction::Trunc: 6314 case Instruction::FPTrunc: 6315 case Instruction::BitCast: { 6316 setInsertPointAfterBundle(E); 6317 6318 Value *InVec = vectorizeTree(E->getOperand(0)); 6319 6320 if (E->VectorizedValue) { 6321 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6322 return E->VectorizedValue; 6323 } 6324 6325 auto *CI = cast<CastInst>(VL0); 6326 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy); 6327 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6328 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6329 V = ShuffleBuilder.finalize(V); 6330 6331 E->VectorizedValue = V; 6332 ++NumVectorInstructions; 6333 return V; 6334 } 6335 case Instruction::FCmp: 6336 case Instruction::ICmp: { 6337 setInsertPointAfterBundle(E); 6338 6339 Value *L = vectorizeTree(E->getOperand(0)); 6340 Value *R = vectorizeTree(E->getOperand(1)); 6341 6342 if (E->VectorizedValue) { 6343 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6344 return E->VectorizedValue; 6345 } 6346 6347 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 6348 Value *V = Builder.CreateCmp(P0, L, R); 6349 propagateIRFlags(V, E->Scalars, VL0); 6350 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6351 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6352 V = ShuffleBuilder.finalize(V); 6353 6354 E->VectorizedValue = V; 6355 ++NumVectorInstructions; 6356 return V; 6357 } 6358 case Instruction::Select: { 6359 setInsertPointAfterBundle(E); 6360 6361 Value *Cond = vectorizeTree(E->getOperand(0)); 6362 Value *True = 
vectorizeTree(E->getOperand(1)); 6363 Value *False = vectorizeTree(E->getOperand(2)); 6364 6365 if (E->VectorizedValue) { 6366 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6367 return E->VectorizedValue; 6368 } 6369 6370 Value *V = Builder.CreateSelect(Cond, True, False); 6371 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6372 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6373 V = ShuffleBuilder.finalize(V); 6374 6375 E->VectorizedValue = V; 6376 ++NumVectorInstructions; 6377 return V; 6378 } 6379 case Instruction::FNeg: { 6380 setInsertPointAfterBundle(E); 6381 6382 Value *Op = vectorizeTree(E->getOperand(0)); 6383 6384 if (E->VectorizedValue) { 6385 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6386 return E->VectorizedValue; 6387 } 6388 6389 Value *V = Builder.CreateUnOp( 6390 static_cast<Instruction::UnaryOps>(E->getOpcode()), Op); 6391 propagateIRFlags(V, E->Scalars, VL0); 6392 if (auto *I = dyn_cast<Instruction>(V)) 6393 V = propagateMetadata(I, E->Scalars); 6394 6395 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6396 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6397 V = ShuffleBuilder.finalize(V); 6398 6399 E->VectorizedValue = V; 6400 ++NumVectorInstructions; 6401 6402 return V; 6403 } 6404 case Instruction::Add: 6405 case Instruction::FAdd: 6406 case Instruction::Sub: 6407 case Instruction::FSub: 6408 case Instruction::Mul: 6409 case Instruction::FMul: 6410 case Instruction::UDiv: 6411 case Instruction::SDiv: 6412 case Instruction::FDiv: 6413 case Instruction::URem: 6414 case Instruction::SRem: 6415 case Instruction::FRem: 6416 case Instruction::Shl: 6417 case Instruction::LShr: 6418 case Instruction::AShr: 6419 case Instruction::And: 6420 case Instruction::Or: 6421 case Instruction::Xor: { 6422 setInsertPointAfterBundle(E); 6423 6424 Value *LHS = vectorizeTree(E->getOperand(0)); 6425 Value *RHS = vectorizeTree(E->getOperand(1)); 6426 6427 if (E->VectorizedValue) { 6428 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6429 return E->VectorizedValue; 6430 } 6431 6432 Value *V = Builder.CreateBinOp( 6433 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, 6434 RHS); 6435 propagateIRFlags(V, E->Scalars, VL0); 6436 if (auto *I = dyn_cast<Instruction>(V)) 6437 V = propagateMetadata(I, E->Scalars); 6438 6439 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6440 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6441 V = ShuffleBuilder.finalize(V); 6442 6443 E->VectorizedValue = V; 6444 ++NumVectorInstructions; 6445 6446 return V; 6447 } 6448 case Instruction::Load: { 6449 // Loads are inserted at the head of the tree because we don't want to 6450 // sink them all the way down past store instructions. 6451 setInsertPointAfterBundle(E); 6452 6453 LoadInst *LI = cast<LoadInst>(VL0); 6454 Instruction *NewLI; 6455 unsigned AS = LI->getPointerAddressSpace(); 6456 Value *PO = LI->getPointerOperand(); 6457 if (E->State == TreeEntry::Vectorize) { 6458 6459 Value *VecPtr = Builder.CreateBitCast(PO, VecTy->getPointerTo(AS)); 6460 6461 // The pointer operand uses an in-tree scalar so we add the new BitCast 6462 // to ExternalUses list to make sure that an extract will be generated 6463 // in the future. 6464 if (TreeEntry *Entry = getTreeEntry(PO)) { 6465 // Find which lane we need to extract. 
6466 unsigned FoundLane = Entry->findLaneForValue(PO); 6467 ExternalUses.emplace_back(PO, cast<User>(VecPtr), FoundLane); 6468 } 6469 6470 NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign()); 6471 } else { 6472 assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state"); 6473 Value *VecPtr = vectorizeTree(E->getOperand(0)); 6474 // Use the minimum alignment of the gathered loads. 6475 Align CommonAlignment = LI->getAlign(); 6476 for (Value *V : E->Scalars) 6477 CommonAlignment = 6478 commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign()); 6479 NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment); 6480 } 6481 Value *V = propagateMetadata(NewLI, E->Scalars); 6482 6483 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6484 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6485 V = ShuffleBuilder.finalize(V); 6486 E->VectorizedValue = V; 6487 ++NumVectorInstructions; 6488 return V; 6489 } 6490 case Instruction::Store: { 6491 auto *SI = cast<StoreInst>(VL0); 6492 unsigned AS = SI->getPointerAddressSpace(); 6493 6494 setInsertPointAfterBundle(E); 6495 6496 Value *VecValue = vectorizeTree(E->getOperand(0)); 6497 ShuffleBuilder.addMask(E->ReorderIndices); 6498 VecValue = ShuffleBuilder.finalize(VecValue); 6499 6500 Value *ScalarPtr = SI->getPointerOperand(); 6501 Value *VecPtr = Builder.CreateBitCast( 6502 ScalarPtr, VecValue->getType()->getPointerTo(AS)); 6503 StoreInst *ST = Builder.CreateAlignedStore(VecValue, VecPtr, 6504 SI->getAlign()); 6505 6506 // The pointer operand uses an in-tree scalar, so add the new BitCast to 6507 // ExternalUses to make sure that an extract will be generated in the 6508 // future. 6509 if (TreeEntry *Entry = getTreeEntry(ScalarPtr)) { 6510 // Find which lane we need to extract. 6511 unsigned FoundLane = Entry->findLaneForValue(ScalarPtr); 6512 ExternalUses.push_back( 6513 ExternalUser(ScalarPtr, cast<User>(VecPtr), FoundLane)); 6514 } 6515 6516 Value *V = propagateMetadata(ST, E->Scalars); 6517 6518 E->VectorizedValue = V; 6519 ++NumVectorInstructions; 6520 return V; 6521 } 6522 case Instruction::GetElementPtr: { 6523 auto *GEP0 = cast<GetElementPtrInst>(VL0); 6524 setInsertPointAfterBundle(E); 6525 6526 Value *Op0 = vectorizeTree(E->getOperand(0)); 6527 6528 SmallVector<Value *> OpVecs; 6529 for (int J = 1, N = GEP0->getNumOperands(); J < N; ++J) { 6530 Value *OpVec = vectorizeTree(E->getOperand(J)); 6531 OpVecs.push_back(OpVec); 6532 } 6533 6534 Value *V = Builder.CreateGEP(GEP0->getSourceElementType(), Op0, OpVecs); 6535 if (Instruction *I = dyn_cast<Instruction>(V)) 6536 V = propagateMetadata(I, E->Scalars); 6537 6538 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6539 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6540 V = ShuffleBuilder.finalize(V); 6541 6542 E->VectorizedValue = V; 6543 ++NumVectorInstructions; 6544 6545 return V; 6546 } 6547 case Instruction::Call: { 6548 CallInst *CI = cast<CallInst>(VL0); 6549 setInsertPointAfterBundle(E); 6550 6551 Intrinsic::ID IID = Intrinsic::not_intrinsic; 6552 if (Function *FI = CI->getCalledFunction()) 6553 IID = FI->getIntrinsicID(); 6554 6555 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6556 6557 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 6558 bool UseIntrinsic = ID != Intrinsic::not_intrinsic && 6559 VecCallCosts.first <= VecCallCosts.second; 6560 6561 Value *ScalarArg = nullptr; 6562 std::vector<Value *> OpVecs; 6563 SmallVector<Type *, 2> TysForDecl = 6564 {FixedVectorType::get(CI->getType(), E->Scalars.size())}; 6565 for (int j = 
0, e = CI->arg_size(); j < e; ++j) { 6566 ValueList OpVL; 6567 // Some intrinsics have scalar arguments. This argument should not be 6568 // vectorized. 6569 if (UseIntrinsic && hasVectorInstrinsicScalarOpd(IID, j)) { 6570 CallInst *CEI = cast<CallInst>(VL0); 6571 ScalarArg = CEI->getArgOperand(j); 6572 OpVecs.push_back(CEI->getArgOperand(j)); 6573 if (hasVectorInstrinsicOverloadedScalarOpd(IID, j)) 6574 TysForDecl.push_back(ScalarArg->getType()); 6575 continue; 6576 } 6577 6578 Value *OpVec = vectorizeTree(E->getOperand(j)); 6579 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); 6580 OpVecs.push_back(OpVec); 6581 } 6582 6583 Function *CF; 6584 if (!UseIntrinsic) { 6585 VFShape Shape = 6586 VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>( 6587 VecTy->getNumElements())), 6588 false /*HasGlobalPred*/); 6589 CF = VFDatabase(*CI).getVectorizedFunction(Shape); 6590 } else { 6591 CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl); 6592 } 6593 6594 SmallVector<OperandBundleDef, 1> OpBundles; 6595 CI->getOperandBundlesAsDefs(OpBundles); 6596 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); 6597 6598 // The scalar argument uses an in-tree scalar so we add the new vectorized 6599 // call to ExternalUses list to make sure that an extract will be 6600 // generated in the future. 6601 if (ScalarArg) { 6602 if (TreeEntry *Entry = getTreeEntry(ScalarArg)) { 6603 // Find which lane we need to extract. 6604 unsigned FoundLane = Entry->findLaneForValue(ScalarArg); 6605 ExternalUses.push_back( 6606 ExternalUser(ScalarArg, cast<User>(V), FoundLane)); 6607 } 6608 } 6609 6610 propagateIRFlags(V, E->Scalars, VL0); 6611 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6612 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6613 V = ShuffleBuilder.finalize(V); 6614 6615 E->VectorizedValue = V; 6616 ++NumVectorInstructions; 6617 return V; 6618 } 6619 case Instruction::ShuffleVector: { 6620 assert(E->isAltShuffle() && 6621 ((Instruction::isBinaryOp(E->getOpcode()) && 6622 Instruction::isBinaryOp(E->getAltOpcode())) || 6623 (Instruction::isCast(E->getOpcode()) && 6624 Instruction::isCast(E->getAltOpcode()))) && 6625 "Invalid Shuffle Vector Operand"); 6626 6627 Value *LHS = nullptr, *RHS = nullptr; 6628 if (Instruction::isBinaryOp(E->getOpcode())) { 6629 setInsertPointAfterBundle(E); 6630 LHS = vectorizeTree(E->getOperand(0)); 6631 RHS = vectorizeTree(E->getOperand(1)); 6632 } else { 6633 setInsertPointAfterBundle(E); 6634 LHS = vectorizeTree(E->getOperand(0)); 6635 } 6636 6637 if (E->VectorizedValue) { 6638 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6639 return E->VectorizedValue; 6640 } 6641 6642 Value *V0, *V1; 6643 if (Instruction::isBinaryOp(E->getOpcode())) { 6644 V0 = Builder.CreateBinOp( 6645 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS); 6646 V1 = Builder.CreateBinOp( 6647 static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS); 6648 } else { 6649 V0 = Builder.CreateCast( 6650 static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy); 6651 V1 = Builder.CreateCast( 6652 static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy); 6653 } 6654 // Add V0 and V1 to later analysis to try to find and remove matching 6655 // instruction, if any. 6656 for (Value *V : {V0, V1}) { 6657 if (auto *I = dyn_cast<Instruction>(V)) { 6658 GatherShuffleSeq.insert(I); 6659 CSEBlocks.insert(I->getParent()); 6660 } 6661 } 6662 6663 // Create shuffle to take alternate operations from the vector. 
6664 // Also, gather up main and alt scalar ops to propagate IR flags to 6665 // each vector operation. 6666 ValueList OpScalars, AltScalars; 6667 SmallVector<int> Mask; 6668 buildSuffleEntryMask( 6669 E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices, 6670 [E](Instruction *I) { 6671 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 6672 return I->getOpcode() == E->getAltOpcode(); 6673 }, 6674 Mask, &OpScalars, &AltScalars); 6675 6676 propagateIRFlags(V0, OpScalars); 6677 propagateIRFlags(V1, AltScalars); 6678 6679 Value *V = Builder.CreateShuffleVector(V0, V1, Mask); 6680 if (auto *I = dyn_cast<Instruction>(V)) { 6681 V = propagateMetadata(I, E->Scalars); 6682 GatherShuffleSeq.insert(I); 6683 CSEBlocks.insert(I->getParent()); 6684 } 6685 V = ShuffleBuilder.finalize(V); 6686 6687 E->VectorizedValue = V; 6688 ++NumVectorInstructions; 6689 6690 return V; 6691 } 6692 default: 6693 llvm_unreachable("unknown inst"); 6694 } 6695 return nullptr; 6696 } 6697 6698 Value *BoUpSLP::vectorizeTree() { 6699 ExtraValueToDebugLocsMap ExternallyUsedValues; 6700 return vectorizeTree(ExternallyUsedValues); 6701 } 6702 6703 Value * 6704 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { 6705 // All blocks must be scheduled before any instructions are inserted. 6706 for (auto &BSIter : BlocksSchedules) { 6707 scheduleBlock(BSIter.second.get()); 6708 } 6709 6710 Builder.SetInsertPoint(&F->getEntryBlock().front()); 6711 auto *VectorRoot = vectorizeTree(VectorizableTree[0].get()); 6712 6713 // If the vectorized tree can be rewritten in a smaller type, we truncate the 6714 // vectorized root. InstCombine will then rewrite the entire expression. We 6715 // sign extend the extracted values below. 6716 auto *ScalarRoot = VectorizableTree[0]->Scalars[0]; 6717 if (MinBWs.count(ScalarRoot)) { 6718 if (auto *I = dyn_cast<Instruction>(VectorRoot)) { 6719 // If current instr is a phi and not the last phi, insert it after the 6720 // last phi node. 6721 if (isa<PHINode>(I)) 6722 Builder.SetInsertPoint(&*I->getParent()->getFirstInsertionPt()); 6723 else 6724 Builder.SetInsertPoint(&*++BasicBlock::iterator(I)); 6725 } 6726 auto BundleWidth = VectorizableTree[0]->Scalars.size(); 6727 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 6728 auto *VecTy = FixedVectorType::get(MinTy, BundleWidth); 6729 auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy); 6730 VectorizableTree[0]->VectorizedValue = Trunc; 6731 } 6732 6733 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() 6734 << " values .\n"); 6735 6736 // Extract all of the elements with the external uses. 6737 for (const auto &ExternalUse : ExternalUses) { 6738 Value *Scalar = ExternalUse.Scalar; 6739 llvm::User *User = ExternalUse.User; 6740 6741 // Skip users that we already RAUW. This happens when one instruction 6742 // has multiple uses of the same value. 6743 if (User && !is_contained(Scalar->users(), User)) 6744 continue; 6745 TreeEntry *E = getTreeEntry(Scalar); 6746 assert(E && "Invalid scalar"); 6747 assert(E->State != TreeEntry::NeedToGather && 6748 "Extracting from a gather list"); 6749 6750 Value *Vec = E->VectorizedValue; 6751 assert(Vec && "Can't find vectorizable value"); 6752 6753 Value *Lane = Builder.getInt32(ExternalUse.Lane); 6754 auto ExtractAndExtendIfNeeded = [&](Value *Vec) { 6755 if (Scalar->getType() != Vec->getType()) { 6756 Value *Ex; 6757 // "Reuse" the existing extract to improve final codegen. 
6758 if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) { 6759 Ex = Builder.CreateExtractElement(ES->getOperand(0), 6760 ES->getOperand(1)); 6761 } else { 6762 Ex = Builder.CreateExtractElement(Vec, Lane); 6763 } 6764 // If necessary, sign-extend or zero-extend ScalarRoot 6765 // to the larger type. 6766 if (!MinBWs.count(ScalarRoot)) 6767 return Ex; 6768 if (MinBWs[ScalarRoot].second) 6769 return Builder.CreateSExt(Ex, Scalar->getType()); 6770 return Builder.CreateZExt(Ex, Scalar->getType()); 6771 } 6772 assert(isa<FixedVectorType>(Scalar->getType()) && 6773 isa<InsertElementInst>(Scalar) && 6774 "In-tree scalar of vector type is not insertelement?"); 6775 return Vec; 6776 }; 6777 // If User == nullptr, the Scalar is used as extra arg. Generate 6778 // ExtractElement instruction and update the record for this scalar in 6779 // ExternallyUsedValues. 6780 if (!User) { 6781 assert(ExternallyUsedValues.count(Scalar) && 6782 "Scalar with nullptr as an external user must be registered in " 6783 "ExternallyUsedValues map"); 6784 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 6785 Builder.SetInsertPoint(VecI->getParent(), 6786 std::next(VecI->getIterator())); 6787 } else { 6788 Builder.SetInsertPoint(&F->getEntryBlock().front()); 6789 } 6790 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 6791 CSEBlocks.insert(cast<Instruction>(Scalar)->getParent()); 6792 auto &NewInstLocs = ExternallyUsedValues[NewInst]; 6793 auto It = ExternallyUsedValues.find(Scalar); 6794 assert(It != ExternallyUsedValues.end() && 6795 "Externally used scalar is not found in ExternallyUsedValues"); 6796 NewInstLocs.append(It->second); 6797 ExternallyUsedValues.erase(Scalar); 6798 // Required to update internally referenced instructions. 6799 Scalar->replaceAllUsesWith(NewInst); 6800 continue; 6801 } 6802 6803 // Generate extracts for out-of-tree users. 6804 // Find the insertion point for the extractelement lane. 6805 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 6806 if (PHINode *PH = dyn_cast<PHINode>(User)) { 6807 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { 6808 if (PH->getIncomingValue(i) == Scalar) { 6809 Instruction *IncomingTerminator = 6810 PH->getIncomingBlock(i)->getTerminator(); 6811 if (isa<CatchSwitchInst>(IncomingTerminator)) { 6812 Builder.SetInsertPoint(VecI->getParent(), 6813 std::next(VecI->getIterator())); 6814 } else { 6815 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); 6816 } 6817 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 6818 CSEBlocks.insert(PH->getIncomingBlock(i)); 6819 PH->setOperand(i, NewInst); 6820 } 6821 } 6822 } else { 6823 Builder.SetInsertPoint(cast<Instruction>(User)); 6824 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 6825 CSEBlocks.insert(cast<Instruction>(User)->getParent()); 6826 User->replaceUsesOfWith(Scalar, NewInst); 6827 } 6828 } else { 6829 Builder.SetInsertPoint(&F->getEntryBlock().front()); 6830 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 6831 CSEBlocks.insert(&F->getEntryBlock()); 6832 User->replaceUsesOfWith(Scalar, NewInst); 6833 } 6834 6835 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 6836 } 6837 6838 // For each vectorized value: 6839 for (auto &TEPtr : VectorizableTree) { 6840 TreeEntry *Entry = TEPtr.get(); 6841 6842 // No need to handle users of gathered values. 
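// Their scalars are not replaced by the vector code; they are only copied
// into gather/insertelement sequences, so they must not be erased.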
6843 if (Entry->State == TreeEntry::NeedToGather)
6844 continue;
6845
6846 assert(Entry->VectorizedValue && "Can't find vectorizable value");
6847
6848 // For each lane:
6849 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
6850 Value *Scalar = Entry->Scalars[Lane];
6851
6852 #ifndef NDEBUG
6853 Type *Ty = Scalar->getType();
6854 if (!Ty->isVoidTy()) {
6855 for (User *U : Scalar->users()) {
6856 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
6857
6858 // It is legal to delete users in the ignorelist.
6859 assert((getTreeEntry(U) || is_contained(UserIgnoreList, U) ||
6860 (isa_and_nonnull<Instruction>(U) &&
6861 isDeleted(cast<Instruction>(U)))) &&
6862 "Deleting out-of-tree value");
6863 }
6864 }
6865 #endif
6866 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
6867 eraseInstruction(cast<Instruction>(Scalar));
6868 }
6869 }
6870
6871 Builder.ClearInsertionPoint();
6872 InstrElementSize.clear();
6873
6874 return VectorizableTree[0]->VectorizedValue;
6875 }
6876
6877 void BoUpSLP::optimizeGatherSequence() {
6878 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherShuffleSeq.size()
6879 << " gather sequence instructions.\n");
6880 // LICM InsertElementInst sequences.
6881 for (Instruction *I : GatherShuffleSeq) {
6882 if (isDeleted(I))
6883 continue;
6884
6885 // Check if this block is inside a loop.
6886 Loop *L = LI->getLoopFor(I->getParent());
6887 if (!L)
6888 continue;
6889
6890 // Check if it has a preheader.
6891 BasicBlock *PreHeader = L->getLoopPreheader();
6892 if (!PreHeader)
6893 continue;
6894
6895 // If the vector or the element that we insert into it are
6896 // instructions that are defined inside the loop, then we can't
6897 // hoist this instruction.
6898 if (any_of(I->operands(), [L](Value *V) {
6899 auto *OpI = dyn_cast<Instruction>(V);
6900 return OpI && L->contains(OpI);
6901 }))
6902 continue;
6903
6904 // We can hoist this instruction. Move it to the pre-header.
6905 I->moveBefore(PreHeader->getTerminator());
6906 }
6907
6908 // Make a list of all reachable blocks in our CSE queue.
6909 SmallVector<const DomTreeNode *, 8> CSEWorkList;
6910 CSEWorkList.reserve(CSEBlocks.size());
6911 for (BasicBlock *BB : CSEBlocks)
6912 if (DomTreeNode *N = DT->getNode(BB)) {
6913 assert(DT->isReachableFromEntry(N));
6914 CSEWorkList.push_back(N);
6915 }
6916
6917 // Sort blocks by domination. This ensures we visit a block after all blocks
6918 // dominating it are visited.
6919 llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) {
6920 assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) &&
6921 "Different nodes should have different DFS numbers");
6922 return A->getDFSNumIn() < B->getDFSNumIn();
6923 });
6924
6925 // Less defined shuffles can be replaced by the more defined copies.
6926 // Between two shuffles one is less defined if it has the same vector operands
6927 // and its mask indices are the same as in the other one or undefs. E.g.
6928 // shuffle %0, poison, <0, 0, 0, undef> is less defined than shuffle %0,
6929 // poison, <0, 0, 0, 0>.
6930 auto &&IsIdenticalOrLessDefined = [this](Instruction *I1, Instruction *I2, 6931 SmallVectorImpl<int> &NewMask) { 6932 if (I1->getType() != I2->getType()) 6933 return false; 6934 auto *SI1 = dyn_cast<ShuffleVectorInst>(I1); 6935 auto *SI2 = dyn_cast<ShuffleVectorInst>(I2); 6936 if (!SI1 || !SI2) 6937 return I1->isIdenticalTo(I2); 6938 if (SI1->isIdenticalTo(SI2)) 6939 return true; 6940 for (int I = 0, E = SI1->getNumOperands(); I < E; ++I) 6941 if (SI1->getOperand(I) != SI2->getOperand(I)) 6942 return false; 6943 // Check if the second instruction is more defined than the first one. 6944 NewMask.assign(SI2->getShuffleMask().begin(), SI2->getShuffleMask().end()); 6945 ArrayRef<int> SM1 = SI1->getShuffleMask(); 6946 // Count trailing undefs in the mask to check the final number of used 6947 // registers. 6948 unsigned LastUndefsCnt = 0; 6949 for (int I = 0, E = NewMask.size(); I < E; ++I) { 6950 if (SM1[I] == UndefMaskElem) 6951 ++LastUndefsCnt; 6952 else 6953 LastUndefsCnt = 0; 6954 if (NewMask[I] != UndefMaskElem && SM1[I] != UndefMaskElem && 6955 NewMask[I] != SM1[I]) 6956 return false; 6957 if (NewMask[I] == UndefMaskElem) 6958 NewMask[I] = SM1[I]; 6959 } 6960 // Check if the last undefs actually change the final number of used vector 6961 // registers. 6962 return SM1.size() - LastUndefsCnt > 1 && 6963 TTI->getNumberOfParts(SI1->getType()) == 6964 TTI->getNumberOfParts( 6965 FixedVectorType::get(SI1->getType()->getElementType(), 6966 SM1.size() - LastUndefsCnt)); 6967 }; 6968 // Perform O(N^2) search over the gather/shuffle sequences and merge identical 6969 // instructions. TODO: We can further optimize this scan if we split the 6970 // instructions into different buckets based on the insert lane. 6971 SmallVector<Instruction *, 16> Visited; 6972 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 6973 assert(*I && 6974 (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 6975 "Worklist not sorted properly!"); 6976 BasicBlock *BB = (*I)->getBlock(); 6977 // For all instructions in blocks containing gather sequences: 6978 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 6979 if (isDeleted(&In)) 6980 continue; 6981 if (!isa<InsertElementInst>(&In) && !isa<ExtractElementInst>(&In) && 6982 !isa<ShuffleVectorInst>(&In) && !GatherShuffleSeq.contains(&In)) 6983 continue; 6984 6985 // Check if we can replace this instruction with any of the 6986 // visited instructions. 
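// Two directions are tried below: the current instruction can be replaced by
// an already visited copy whose block dominates it, or a visited gather
// shuffle can be replaced by the current one when the current block
// dominates it. When both are shuffles, the surviving one takes the merged
// mask, i.e. its undef lanes are filled in from the replaced copy.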
6987 bool Replaced = false;
6988 for (Instruction *&V : Visited) {
6989 SmallVector<int> NewMask;
6990 if (IsIdenticalOrLessDefined(&In, V, NewMask) &&
6991 DT->dominates(V->getParent(), In.getParent())) {
6992 In.replaceAllUsesWith(V);
6993 eraseInstruction(&In);
6994 if (auto *SI = dyn_cast<ShuffleVectorInst>(V))
6995 if (!NewMask.empty())
6996 SI->setShuffleMask(NewMask);
6997 Replaced = true;
6998 break;
6999 }
7000 if (isa<ShuffleVectorInst>(In) && isa<ShuffleVectorInst>(V) &&
7001 GatherShuffleSeq.contains(V) &&
7002 IsIdenticalOrLessDefined(V, &In, NewMask) &&
7003 DT->dominates(In.getParent(), V->getParent())) {
7004 In.moveAfter(V);
7005 V->replaceAllUsesWith(&In);
7006 eraseInstruction(V);
7007 if (auto *SI = dyn_cast<ShuffleVectorInst>(&In))
7008 if (!NewMask.empty())
7009 SI->setShuffleMask(NewMask);
7010 V = &In;
7011 Replaced = true;
7012 break;
7013 }
7014 }
7015 if (!Replaced) {
7016 assert(!is_contained(Visited, &In));
7017 Visited.push_back(&In);
7018 }
7019 }
7020 }
7021 CSEBlocks.clear();
7022 GatherShuffleSeq.clear();
7023 }
7024
7025 // Groups the instructions into a bundle (which is then a single scheduling
7026 // entity) and schedules instructions until the bundle gets ready.
7027 Optional<BoUpSLP::ScheduleData *>
7028 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
7029 const InstructionsState &S) {
7030 // No need to schedule PHIs, insertelement, extractelement and extractvalue
7031 // instructions.
7032 if (isa<PHINode>(S.OpValue) || isVectorLikeInstWithConstOps(S.OpValue))
7033 return nullptr;
7034
7035 // Initialize the instruction bundle.
7036 Instruction *OldScheduleEnd = ScheduleEnd;
7037 ScheduleData *PrevInBundle = nullptr;
7038 ScheduleData *Bundle = nullptr;
7039 bool ReSchedule = false;
7040 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n");
7041
7042 auto &&TryScheduleBundle = [this, OldScheduleEnd, SLP](bool ReSchedule,
7043 ScheduleData *Bundle) {
7044 // If the scheduling region got new instructions at the lower end (or it is
7045 // a new region for the first bundle), it is necessary to recalculate all
7046 // dependencies.
7047 // It is seldom that this needs to be done a second time after adding the
7048 // initial bundle to the region.
7049 if (ScheduleEnd != OldScheduleEnd) {
7050 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode())
7051 doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); });
7052 ReSchedule = true;
7053 }
7054 if (ReSchedule) {
7055 resetSchedule();
7056 initialFillReadyList(ReadyInsts);
7057 }
7058 if (Bundle) {
7059 LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle
7060 << " in block " << BB->getName() << "\n");
7061 calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP);
7062 }
7063
7064 // Now try to schedule the new bundle or (if no bundle) just calculate
7065 // dependencies. Once the bundle is "ready" there are no cyclic dependencies
7066 // and it can be scheduled. Note that it's important that we don't
7067 // "schedule" the bundle yet (see cancelScheduling).
7068 while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) &&
7069 !ReadyInsts.empty()) {
7070 ScheduleData *Picked = ReadyInsts.pop_back_val();
7071 if (Picked->isSchedulingEntity() && Picked->isReady())
7072 schedule(Picked, ReadyInsts);
7073 }
7074 };
7075
7076 // Make sure that the scheduling region contains all
7077 // instructions of the bundle.
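// extendSchedulingRegion() grows the region [ScheduleStart, ScheduleEnd)
// upwards or downwards within the block until it covers the bundle member,
// and fails if the region would exceed the ScheduleRegionSizeLimit budget.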
7078 for (Value *V : VL) {
7079 if (!extendSchedulingRegion(V, S)) {
7080 // Even if extending the region failed here, the region may already have
7081 // grown at the lower end (or a new region may have been created for the
7082 // first bundle), which makes it necessary to recalculate all dependencies.
7083 // Otherwise the compiler may crash trying to calculate dependencies
7084 // incorrectly and emit instructions in the wrong order at the actual
7085 // scheduling.
7086 TryScheduleBundle(/*ReSchedule=*/false, nullptr);
7087 return None;
7088 }
7089 }
7090
7091 for (Value *V : VL) {
7092 ScheduleData *BundleMember = getScheduleData(V);
7093 assert(BundleMember &&
7094 "no ScheduleData for bundle member (maybe not in same basic block)");
7095 if (BundleMember->IsScheduled) {
7096 // A bundle member was scheduled as a single instruction before and now
7097 // needs to be scheduled as part of the bundle. We just get rid of the
7098 // existing schedule.
7099 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
7100 << " was already scheduled\n");
7101 ReSchedule = true;
7102 }
7103 assert(BundleMember->isSchedulingEntity() &&
7104 "bundle member already part of other bundle");
7105 if (PrevInBundle) {
7106 PrevInBundle->NextInBundle = BundleMember;
7107 } else {
7108 Bundle = BundleMember;
7109 }
7110 BundleMember->UnscheduledDepsInBundle = 0;
7111 Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;
7112
7113 // Group the instructions into a bundle.
7114 BundleMember->FirstInBundle = Bundle;
7115 PrevInBundle = BundleMember;
7116 }
7117 assert(Bundle && "Failed to find schedule bundle");
7118 TryScheduleBundle(ReSchedule, Bundle);
7119 if (!Bundle->isReady()) {
7120 cancelScheduling(VL, S.OpValue);
7121 return None;
7122 }
7123 return Bundle;
7124 }
7125
7126 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
7127 Value *OpValue) {
7128 if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue))
7129 return;
7130
7131 ScheduleData *Bundle = getScheduleData(OpValue);
7132 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
7133 assert(!Bundle->IsScheduled &&
7134 "Can't cancel bundle which is already scheduled");
7135 assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
7136 "tried to unbundle something which is not a bundle");
7137
7138 // Un-bundle: make single instructions out of the bundle.
7139 ScheduleData *BundleMember = Bundle;
7140 while (BundleMember) {
7141 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
7142 BundleMember->FirstInBundle = BundleMember;
7143 ScheduleData *Next = BundleMember->NextInBundle;
7144 BundleMember->NextInBundle = nullptr;
7145 BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
7146 if (BundleMember->UnscheduledDepsInBundle == 0) {
7147 ReadyInsts.insert(BundleMember);
7148 }
7149 BundleMember = Next;
7150 }
7151 }
7152
7153 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
7154 // Allocate a new ScheduleData for the instruction.
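// ScheduleData objects are carved out of fixed-size chunks owned by
// ScheduleDataChunks, so previously handed-out pointers stay valid when a
// new chunk is added.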
7155 if (ChunkPos >= ChunkSize) { 7156 ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize)); 7157 ChunkPos = 0; 7158 } 7159 return &(ScheduleDataChunks.back()[ChunkPos++]); 7160 } 7161 7162 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, 7163 const InstructionsState &S) { 7164 if (getScheduleData(V, isOneOf(S, V))) 7165 return true; 7166 Instruction *I = dyn_cast<Instruction>(V); 7167 assert(I && "bundle member must be an instruction"); 7168 assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) && 7169 "phi nodes/insertelements/extractelements/extractvalues don't need to " 7170 "be scheduled"); 7171 auto &&CheckSheduleForI = [this, &S](Instruction *I) -> bool { 7172 ScheduleData *ISD = getScheduleData(I); 7173 if (!ISD) 7174 return false; 7175 assert(isInSchedulingRegion(ISD) && 7176 "ScheduleData not in scheduling region"); 7177 ScheduleData *SD = allocateScheduleDataChunks(); 7178 SD->Inst = I; 7179 SD->init(SchedulingRegionID, S.OpValue); 7180 ExtraScheduleDataMap[I][S.OpValue] = SD; 7181 return true; 7182 }; 7183 if (CheckSheduleForI(I)) 7184 return true; 7185 if (!ScheduleStart) { 7186 // It's the first instruction in the new region. 7187 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 7188 ScheduleStart = I; 7189 ScheduleEnd = I->getNextNode(); 7190 if (isOneOf(S, I) != I) 7191 CheckSheduleForI(I); 7192 assert(ScheduleEnd && "tried to vectorize a terminator?"); 7193 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 7194 return true; 7195 } 7196 // Search up and down at the same time, because we don't know if the new 7197 // instruction is above or below the existing scheduling region. 7198 BasicBlock::reverse_iterator UpIter = 7199 ++ScheduleStart->getIterator().getReverse(); 7200 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 7201 BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); 7202 BasicBlock::iterator LowerEnd = BB->end(); 7203 while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I && 7204 &*DownIter != I) { 7205 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 7206 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 7207 return false; 7208 } 7209 7210 ++UpIter; 7211 ++DownIter; 7212 } 7213 if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) { 7214 assert(I->getParent() == ScheduleStart->getParent() && 7215 "Instruction is in wrong basic block."); 7216 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 7217 ScheduleStart = I; 7218 if (isOneOf(S, I) != I) 7219 CheckSheduleForI(I); 7220 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I 7221 << "\n"); 7222 return true; 7223 } 7224 assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) && 7225 "Expected to reach top of the basic block or instruction down the " 7226 "lower end."); 7227 assert(I->getParent() == ScheduleEnd->getParent() && 7228 "Instruction is in wrong basic block."); 7229 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 7230 nullptr); 7231 ScheduleEnd = I->getNextNode(); 7232 if (isOneOf(S, I) != I) 7233 CheckSheduleForI(I); 7234 assert(ScheduleEnd && "tried to vectorize a terminator?"); 7235 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); 7236 return true; 7237 } 7238 7239 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 7240 Instruction *ToI, 7241 ScheduleData *PrevLoadStore, 7242 ScheduleData *NextLoadStore) { 7243 ScheduleData *CurrentLoadStore = 
PrevLoadStore; 7244 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 7245 ScheduleData *SD = ScheduleDataMap[I]; 7246 if (!SD) { 7247 SD = allocateScheduleDataChunks(); 7248 ScheduleDataMap[I] = SD; 7249 SD->Inst = I; 7250 } 7251 assert(!isInSchedulingRegion(SD) && 7252 "new ScheduleData already in scheduling region"); 7253 SD->init(SchedulingRegionID, I); 7254 7255 if (I->mayReadOrWriteMemory() && 7256 (!isa<IntrinsicInst>(I) || 7257 (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect && 7258 cast<IntrinsicInst>(I)->getIntrinsicID() != 7259 Intrinsic::pseudoprobe))) { 7260 // Update the linked list of memory accessing instructions. 7261 if (CurrentLoadStore) { 7262 CurrentLoadStore->NextLoadStore = SD; 7263 } else { 7264 FirstLoadStoreInRegion = SD; 7265 } 7266 CurrentLoadStore = SD; 7267 } 7268 } 7269 if (NextLoadStore) { 7270 if (CurrentLoadStore) 7271 CurrentLoadStore->NextLoadStore = NextLoadStore; 7272 } else { 7273 LastLoadStoreInRegion = CurrentLoadStore; 7274 } 7275 } 7276 7277 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, 7278 bool InsertInReadyList, 7279 BoUpSLP *SLP) { 7280 assert(SD->isSchedulingEntity()); 7281 7282 SmallVector<ScheduleData *, 10> WorkList; 7283 WorkList.push_back(SD); 7284 7285 while (!WorkList.empty()) { 7286 ScheduleData *SD = WorkList.pop_back_val(); 7287 7288 ScheduleData *BundleMember = SD; 7289 while (BundleMember) { 7290 assert(isInSchedulingRegion(BundleMember)); 7291 if (!BundleMember->hasValidDependencies()) { 7292 7293 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember 7294 << "\n"); 7295 BundleMember->Dependencies = 0; 7296 BundleMember->resetUnscheduledDeps(); 7297 7298 // Handle def-use chain dependencies. 7299 if (BundleMember->OpValue != BundleMember->Inst) { 7300 ScheduleData *UseSD = getScheduleData(BundleMember->Inst); 7301 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 7302 BundleMember->Dependencies++; 7303 ScheduleData *DestBundle = UseSD->FirstInBundle; 7304 if (!DestBundle->IsScheduled) 7305 BundleMember->incrementUnscheduledDeps(1); 7306 if (!DestBundle->hasValidDependencies()) 7307 WorkList.push_back(DestBundle); 7308 } 7309 } else { 7310 for (User *U : BundleMember->Inst->users()) { 7311 if (isa<Instruction>(U)) { 7312 ScheduleData *UseSD = getScheduleData(U); 7313 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 7314 BundleMember->Dependencies++; 7315 ScheduleData *DestBundle = UseSD->FirstInBundle; 7316 if (!DestBundle->IsScheduled) 7317 BundleMember->incrementUnscheduledDeps(1); 7318 if (!DestBundle->hasValidDependencies()) 7319 WorkList.push_back(DestBundle); 7320 } 7321 } else { 7322 // I'm not sure if this can ever happen. But we need to be safe. 7323 // This lets the instruction/bundle never be scheduled and 7324 // eventually disable vectorization. 7325 BundleMember->Dependencies++; 7326 BundleMember->incrementUnscheduledDeps(1); 7327 } 7328 } 7329 } 7330 7331 // Handle the memory dependencies. 
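// Memory dependencies are only considered between instructions inside the
// scheduling region, walking the NextLoadStore chain that was set up by
// initScheduleData().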
7332 ScheduleData *DepDest = BundleMember->NextLoadStore; 7333 if (DepDest) { 7334 Instruction *SrcInst = BundleMember->Inst; 7335 MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA); 7336 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory(); 7337 unsigned numAliased = 0; 7338 unsigned DistToSrc = 1; 7339 7340 while (DepDest) { 7341 assert(isInSchedulingRegion(DepDest)); 7342 7343 // We have two limits to reduce the complexity: 7344 // 1) AliasedCheckLimit: It's a small limit to reduce calls to 7345 // SLP->isAliased (which is the expensive part in this loop). 7346 // 2) MaxMemDepDistance: It's for very large blocks and it aborts 7347 // the whole loop (even if the loop is fast, it's quadratic). 7348 // It's important for the loop break condition (see below) to 7349 // check this limit even between two read-only instructions. 7350 if (DistToSrc >= MaxMemDepDistance || 7351 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) && 7352 (numAliased >= AliasedCheckLimit || 7353 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) { 7354 7355 // We increment the counter only if the locations are aliased 7356 // (instead of counting all alias checks). This gives a better 7357 // balance between reduced runtime and accurate dependencies. 7358 numAliased++; 7359 7360 DepDest->MemoryDependencies.push_back(BundleMember); 7361 BundleMember->Dependencies++; 7362 ScheduleData *DestBundle = DepDest->FirstInBundle; 7363 if (!DestBundle->IsScheduled) { 7364 BundleMember->incrementUnscheduledDeps(1); 7365 } 7366 if (!DestBundle->hasValidDependencies()) { 7367 WorkList.push_back(DestBundle); 7368 } 7369 } 7370 DepDest = DepDest->NextLoadStore; 7371 7372 // Example, explaining the loop break condition: Let's assume our 7373 // starting instruction is i0 and MaxMemDepDistance = 3. 7374 // 7375 // +--------v--v--v 7376 // i0,i1,i2,i3,i4,i5,i6,i7,i8 7377 // +--------^--^--^ 7378 // 7379 // MaxMemDepDistance let us stop alias-checking at i3 and we add 7380 // dependencies from i0 to i3,i4,.. (even if they are not aliased). 7381 // Previously we already added dependencies from i3 to i6,i7,i8 7382 // (because of MaxMemDepDistance). As we added a dependency from 7383 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 7384 // and we can abort this loop at i6. 7385 if (DistToSrc >= 2 * MaxMemDepDistance) 7386 break; 7387 DistToSrc++; 7388 } 7389 } 7390 } 7391 BundleMember = BundleMember->NextInBundle; 7392 } 7393 if (InsertInReadyList && SD->isReady()) { 7394 ReadyInsts.push_back(SD); 7395 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst 7396 << "\n"); 7397 } 7398 } 7399 } 7400 7401 void BoUpSLP::BlockScheduling::resetSchedule() { 7402 assert(ScheduleStart && 7403 "tried to reset schedule on block which has not been scheduled"); 7404 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 7405 doForAllOpcodes(I, [&](ScheduleData *SD) { 7406 assert(isInSchedulingRegion(SD) && 7407 "ScheduleData not in scheduling region"); 7408 SD->IsScheduled = false; 7409 SD->resetUnscheduledDeps(); 7410 }); 7411 } 7412 ReadyInsts.clear(); 7413 } 7414 7415 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 7416 if (!BS->ScheduleStart) 7417 return; 7418 7419 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 7420 7421 BS->resetSchedule(); 7422 7423 // For the real scheduling we use a more sophisticated ready-list: it is 7424 // sorted by the original instruction location. 
This lets the final schedule 7425 // be as close as possible to the original instruction order. 7426 struct ScheduleDataCompare { 7427 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const { 7428 return SD2->SchedulingPriority < SD1->SchedulingPriority; 7429 } 7430 }; 7431 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 7432 7433 // Ensure that all dependency data is updated and fill the ready-list with 7434 // initial instructions. 7435 int Idx = 0; 7436 int NumToSchedule = 0; 7437 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 7438 I = I->getNextNode()) { 7439 BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) { 7440 assert((isVectorLikeInstWithConstOps(SD->Inst) || 7441 SD->isPartOfBundle() == (getTreeEntry(SD->Inst) != nullptr)) && 7442 "scheduler and vectorizer bundle mismatch"); 7443 SD->FirstInBundle->SchedulingPriority = Idx++; 7444 if (SD->isSchedulingEntity()) { 7445 BS->calculateDependencies(SD, false, this); 7446 NumToSchedule++; 7447 } 7448 }); 7449 } 7450 BS->initialFillReadyList(ReadyInsts); 7451 7452 Instruction *LastScheduledInst = BS->ScheduleEnd; 7453 7454 // Do the "real" scheduling. 7455 while (!ReadyInsts.empty()) { 7456 ScheduleData *picked = *ReadyInsts.begin(); 7457 ReadyInsts.erase(ReadyInsts.begin()); 7458 7459 // Move the scheduled instruction(s) to their dedicated places, if not 7460 // there yet. 7461 ScheduleData *BundleMember = picked; 7462 while (BundleMember) { 7463 Instruction *pickedInst = BundleMember->Inst; 7464 if (pickedInst->getNextNode() != LastScheduledInst) { 7465 BS->BB->getInstList().remove(pickedInst); 7466 BS->BB->getInstList().insert(LastScheduledInst->getIterator(), 7467 pickedInst); 7468 } 7469 LastScheduledInst = pickedInst; 7470 BundleMember = BundleMember->NextInBundle; 7471 } 7472 7473 BS->schedule(picked, ReadyInsts); 7474 NumToSchedule--; 7475 } 7476 assert(NumToSchedule == 0 && "could not schedule all instructions"); 7477 7478 // Avoid duplicate scheduling of the block. 7479 BS->ScheduleStart = nullptr; 7480 } 7481 7482 unsigned BoUpSLP::getVectorElementSize(Value *V) { 7483 // If V is a store, just return the width of the stored value (or value 7484 // truncated just before storing) without traversing the expression tree. 7485 // This is the common case. 7486 if (auto *Store = dyn_cast<StoreInst>(V)) { 7487 if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand())) 7488 return DL->getTypeSizeInBits(Trunc->getSrcTy()); 7489 return DL->getTypeSizeInBits(Store->getValueOperand()->getType()); 7490 } 7491 7492 if (auto *IEI = dyn_cast<InsertElementInst>(V)) 7493 return getVectorElementSize(IEI->getOperand(1)); 7494 7495 auto E = InstrElementSize.find(V); 7496 if (E != InstrElementSize.end()) 7497 return E->second; 7498 7499 // If V is not a store, we can traverse the expression tree to find loads 7500 // that feed it. The type of the loaded value may indicate a more suitable 7501 // width than V's type. We want to base the vector element size on the width 7502 // of memory operations where possible. 7503 SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist; 7504 SmallPtrSet<Instruction *, 16> Visited; 7505 if (auto *I = dyn_cast<Instruction>(V)) { 7506 Worklist.emplace_back(I, I->getParent()); 7507 Visited.insert(I); 7508 } 7509 7510 // Traverse the expression tree in bottom-up order looking for loads. If we 7511 // encounter an instruction we don't yet handle, we give up. 
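// For example (illustrative): for %a = add i32 %z, %c where
// %z = zext i8 %l to i32 and %l is an i8 load in the same block, the walk
// reaches the load and the computed width is 8 bits rather than 32.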
7512 auto Width = 0u; 7513 while (!Worklist.empty()) { 7514 Instruction *I; 7515 BasicBlock *Parent; 7516 std::tie(I, Parent) = Worklist.pop_back_val(); 7517 7518 // We should only be looking at scalar instructions here. If the current 7519 // instruction has a vector type, skip. 7520 auto *Ty = I->getType(); 7521 if (isa<VectorType>(Ty)) 7522 continue; 7523 7524 // If the current instruction is a load, update MaxWidth to reflect the 7525 // width of the loaded value. 7526 if (isa<LoadInst>(I) || isa<ExtractElementInst>(I) || 7527 isa<ExtractValueInst>(I)) 7528 Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty)); 7529 7530 // Otherwise, we need to visit the operands of the instruction. We only 7531 // handle the interesting cases from buildTree here. If an operand is an 7532 // instruction we haven't yet visited and from the same basic block as the 7533 // user or the use is a PHI node, we add it to the worklist. 7534 else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 7535 isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I) || 7536 isa<UnaryOperator>(I)) { 7537 for (Use &U : I->operands()) 7538 if (auto *J = dyn_cast<Instruction>(U.get())) 7539 if (Visited.insert(J).second && 7540 (isa<PHINode>(I) || J->getParent() == Parent)) 7541 Worklist.emplace_back(J, J->getParent()); 7542 } else { 7543 break; 7544 } 7545 } 7546 7547 // If we didn't encounter a memory access in the expression tree, or if we 7548 // gave up for some reason, just return the width of V. Otherwise, return the 7549 // maximum width we found. 7550 if (!Width) { 7551 if (auto *CI = dyn_cast<CmpInst>(V)) 7552 V = CI->getOperand(0); 7553 Width = DL->getTypeSizeInBits(V->getType()); 7554 } 7555 7556 for (Instruction *I : Visited) 7557 InstrElementSize[I] = Width; 7558 7559 return Width; 7560 } 7561 7562 // Determine if a value V in a vectorizable expression Expr can be demoted to a 7563 // smaller type with a truncation. We collect the values that will be demoted 7564 // in ToDemote and additional roots that require investigating in Roots. 7565 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr, 7566 SmallVectorImpl<Value *> &ToDemote, 7567 SmallVectorImpl<Value *> &Roots) { 7568 // We can always demote constants. 7569 if (isa<Constant>(V)) { 7570 ToDemote.push_back(V); 7571 return true; 7572 } 7573 7574 // If the value is not an instruction in the expression with only one use, it 7575 // cannot be demoted. 7576 auto *I = dyn_cast<Instruction>(V); 7577 if (!I || !I->hasOneUse() || !Expr.count(I)) 7578 return false; 7579 7580 switch (I->getOpcode()) { 7581 7582 // We can always demote truncations and extensions. Since truncations can 7583 // seed additional demotion, we save the truncated value. 7584 case Instruction::Trunc: 7585 Roots.push_back(I->getOperand(0)); 7586 break; 7587 case Instruction::ZExt: 7588 case Instruction::SExt: 7589 if (isa<ExtractElementInst>(I->getOperand(0)) || 7590 isa<InsertElementInst>(I->getOperand(0))) 7591 return false; 7592 break; 7593 7594 // We can demote certain binary operations if we can demote both of their 7595 // operands. 
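// For instance (illustrative), in trunc(add(zext i8 %a, zext i8 %b)) to i8,
// the add and both extensions can be demoted, so the whole computation can
// be carried out in i8.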
7596 case Instruction::Add: 7597 case Instruction::Sub: 7598 case Instruction::Mul: 7599 case Instruction::And: 7600 case Instruction::Or: 7601 case Instruction::Xor: 7602 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) || 7603 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots)) 7604 return false; 7605 break; 7606 7607 // We can demote selects if we can demote their true and false values. 7608 case Instruction::Select: { 7609 SelectInst *SI = cast<SelectInst>(I); 7610 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) || 7611 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots)) 7612 return false; 7613 break; 7614 } 7615 7616 // We can demote phis if we can demote all their incoming operands. Note that 7617 // we don't need to worry about cycles since we ensure single use above. 7618 case Instruction::PHI: { 7619 PHINode *PN = cast<PHINode>(I); 7620 for (Value *IncValue : PN->incoming_values()) 7621 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots)) 7622 return false; 7623 break; 7624 } 7625 7626 // Otherwise, conservatively give up. 7627 default: 7628 return false; 7629 } 7630 7631 // Record the value that we can demote. 7632 ToDemote.push_back(V); 7633 return true; 7634 } 7635 7636 void BoUpSLP::computeMinimumValueSizes() { 7637 // If there are no external uses, the expression tree must be rooted by a 7638 // store. We can't demote in-memory values, so there is nothing to do here. 7639 if (ExternalUses.empty()) 7640 return; 7641 7642 // We only attempt to truncate integer expressions. 7643 auto &TreeRoot = VectorizableTree[0]->Scalars; 7644 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 7645 if (!TreeRootIT) 7646 return; 7647 7648 // If the expression is not rooted by a store, these roots should have 7649 // external uses. We will rely on InstCombine to rewrite the expression in 7650 // the narrower type. However, InstCombine only rewrites single-use values. 7651 // This means that if a tree entry other than a root is used externally, it 7652 // must have multiple uses and InstCombine will not rewrite it. The code 7653 // below ensures that only the roots are used externally. 7654 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end()); 7655 for (auto &EU : ExternalUses) 7656 if (!Expr.erase(EU.Scalar)) 7657 return; 7658 if (!Expr.empty()) 7659 return; 7660 7661 // Collect the scalar values of the vectorizable expression. We will use this 7662 // context to determine which values can be demoted. If we see a truncation, 7663 // we mark it as seeding another demotion. 7664 for (auto &EntryPtr : VectorizableTree) 7665 Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end()); 7666 7667 // Ensure the roots of the vectorizable tree don't form a cycle. They must 7668 // have a single external user that is not in the vectorizable tree. 7669 for (auto *Root : TreeRoot) 7670 if (!Root->hasOneUse() || Expr.count(*Root->user_begin())) 7671 return; 7672 7673 // Conservatively determine if we can actually truncate the roots of the 7674 // expression. Collect the values that can be demoted in ToDemote and 7675 // additional roots that require investigating in Roots. 7676 SmallVector<Value *, 32> ToDemote; 7677 SmallVector<Value *, 4> Roots; 7678 for (auto *Root : TreeRoot) 7679 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots)) 7680 return; 7681 7682 // The maximum bit width required to represent all the values that can be 7683 // demoted without loss of precision. 
It would be safe to truncate the roots 7684 // of the expression to this width. 7685 auto MaxBitWidth = 8u; 7686 7687 // We first check if all the bits of the roots are demanded. If they're not, 7688 // we can truncate the roots to this narrower type. 7689 for (auto *Root : TreeRoot) { 7690 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 7691 MaxBitWidth = std::max<unsigned>( 7692 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth); 7693 } 7694 7695 // True if the roots can be zero-extended back to their original type, rather 7696 // than sign-extended. We know that if the leading bits are not demanded, we 7697 // can safely zero-extend. So we initialize IsKnownPositive to True. 7698 bool IsKnownPositive = true; 7699 7700 // If all the bits of the roots are demanded, we can try a little harder to 7701 // compute a narrower type. This can happen, for example, if the roots are 7702 // getelementptr indices. InstCombine promotes these indices to the pointer 7703 // width. Thus, all their bits are technically demanded even though the 7704 // address computation might be vectorized in a smaller type. 7705 // 7706 // We start by looking at each entry that can be demoted. We compute the 7707 // maximum bit width required to store the scalar by using ValueTracking to 7708 // compute the number of high-order bits we can truncate. 7709 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) && 7710 llvm::all_of(TreeRoot, [](Value *R) { 7711 assert(R->hasOneUse() && "Root should have only one use!"); 7712 return isa<GetElementPtrInst>(R->user_back()); 7713 })) { 7714 MaxBitWidth = 8u; 7715 7716 // Determine if the sign bit of all the roots is known to be zero. If not, 7717 // IsKnownPositive is set to False. 7718 IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) { 7719 KnownBits Known = computeKnownBits(R, *DL); 7720 return Known.isNonNegative(); 7721 }); 7722 7723 // Determine the maximum number of bits required to store the scalar 7724 // values. 7725 for (auto *Scalar : ToDemote) { 7726 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT); 7727 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType()); 7728 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth); 7729 } 7730 7731 // If we can't prove that the sign bit is zero, we must add one to the 7732 // maximum bit width to account for the unknown sign bit. This preserves 7733 // the existing sign bit so we can safely sign-extend the root back to the 7734 // original type. Otherwise, if we know the sign bit is zero, we will 7735 // zero-extend the root instead. 7736 // 7737 // FIXME: This is somewhat suboptimal, as there will be cases where adding 7738 // one to the maximum bit width will yield a larger-than-necessary 7739 // type. In general, we need to add an extra bit only if we can't 7740 // prove that the upper bit of the original type is equal to the 7741 // upper bit of the proposed smaller type. If these two bits are the 7742 // same (either zero or one) we know that sign-extending from the 7743 // smaller type will result in the same value. Here, since we can't 7744 // yet prove this, we are just making the proposed smaller type 7745 // larger to ensure correctness. 7746 if (!IsKnownPositive) 7747 ++MaxBitWidth; 7748 } 7749 7750 // Round MaxBitWidth up to the next power-of-two. 
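// (Illustration: a maximum demanded width of, say, 17 bits becomes a 32-bit
//  demotion target here, while widths that are already powers of two, such as
//  8 or 16, are left unchanged.)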
7751 if (!isPowerOf2_64(MaxBitWidth))
7752 MaxBitWidth = NextPowerOf2(MaxBitWidth);
7753
7754 // If the maximum bit width we compute is less than the width of the roots'
7755 // type, we can proceed with the narrowing. Otherwise, do nothing.
7756 if (MaxBitWidth >= TreeRootIT->getBitWidth())
7757 return;
7758
7759 // If we can truncate the root, we must collect additional values that might
7760 // be demoted as a result. That is, those seeded by truncations we will
7761 // modify.
7762 while (!Roots.empty())
7763 collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
7764
7765 // Finally, map the values we can demote to the maximum bit width we computed.
7766 for (auto *Scalar : ToDemote)
7767 MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
7768 }
7769
7770 namespace {
7771
7772 /// The SLPVectorizer Pass.
7773 struct SLPVectorizer : public FunctionPass {
7774 SLPVectorizerPass Impl;
7775
7776 /// Pass identification, replacement for typeid
7777 static char ID;
7778
7779 explicit SLPVectorizer() : FunctionPass(ID) {
7780 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
7781 }
7782
7783 bool doInitialization(Module &M) override { return false; }
7784
7785 bool runOnFunction(Function &F) override {
7786 if (skipFunction(F))
7787 return false;
7788
7789 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
7790 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
7791 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
7792 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
7793 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
7794 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
7795 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
7796 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
7797 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
7798 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
7799
7800 return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
7801 }
7802
7803 void getAnalysisUsage(AnalysisUsage &AU) const override {
7804 FunctionPass::getAnalysisUsage(AU);
7805 AU.addRequired<AssumptionCacheTracker>();
7806 AU.addRequired<ScalarEvolutionWrapperPass>();
7807 AU.addRequired<AAResultsWrapperPass>();
7808 AU.addRequired<TargetTransformInfoWrapperPass>();
7809 AU.addRequired<LoopInfoWrapperPass>();
7810 AU.addRequired<DominatorTreeWrapperPass>();
7811 AU.addRequired<DemandedBitsWrapperPass>();
7812 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
7813 AU.addRequired<InjectTLIMappingsLegacy>();
7814 AU.addPreserved<LoopInfoWrapperPass>();
7815 AU.addPreserved<DominatorTreeWrapperPass>();
7816 AU.addPreserved<AAResultsWrapperPass>();
7817 AU.addPreserved<GlobalsAAWrapperPass>();
7818 AU.setPreservesCFG();
7819 }
7820 };
7821
7822 } // end anonymous namespace
7823
7824 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
7825 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
7826 auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
7827 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
7828 auto *AA = &AM.getResult<AAManager>(F);
7829 auto *LI = &AM.getResult<LoopAnalysis>(F);
7830 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
7831 auto *AC = &AM.getResult<AssumptionAnalysis>(F);
7832 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
7833 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
7834
7835 bool Changed =
runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 7836 if (!Changed) 7837 return PreservedAnalyses::all(); 7838 7839 PreservedAnalyses PA; 7840 PA.preserveSet<CFGAnalyses>(); 7841 return PA; 7842 } 7843 7844 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, 7845 TargetTransformInfo *TTI_, 7846 TargetLibraryInfo *TLI_, AAResults *AA_, 7847 LoopInfo *LI_, DominatorTree *DT_, 7848 AssumptionCache *AC_, DemandedBits *DB_, 7849 OptimizationRemarkEmitter *ORE_) { 7850 if (!RunSLPVectorization) 7851 return false; 7852 SE = SE_; 7853 TTI = TTI_; 7854 TLI = TLI_; 7855 AA = AA_; 7856 LI = LI_; 7857 DT = DT_; 7858 AC = AC_; 7859 DB = DB_; 7860 DL = &F.getParent()->getDataLayout(); 7861 7862 Stores.clear(); 7863 GEPs.clear(); 7864 bool Changed = false; 7865 7866 // If the target claims to have no vector registers don't attempt 7867 // vectorization. 7868 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) 7869 return false; 7870 7871 // Don't vectorize when the attribute NoImplicitFloat is used. 7872 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 7873 return false; 7874 7875 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 7876 7877 // Use the bottom up slp vectorizer to construct chains that start with 7878 // store instructions. 7879 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_); 7880 7881 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 7882 // delete instructions. 7883 7884 // Update DFS numbers now so that we can use them for ordering. 7885 DT->updateDFSNumbers(); 7886 7887 // Scan the blocks in the function in post order. 7888 for (auto BB : post_order(&F.getEntryBlock())) { 7889 collectSeedInstructions(BB); 7890 7891 // Vectorize trees that end at stores. 7892 if (!Stores.empty()) { 7893 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() 7894 << " underlying objects.\n"); 7895 Changed |= vectorizeStoreChains(R); 7896 } 7897 7898 // Vectorize trees that end at reductions. 7899 Changed |= vectorizeChainsInBlock(BB, R); 7900 7901 // Vectorize the index computations of getelementptr instructions. This 7902 // is primarily intended to catch gather-like idioms ending at 7903 // non-consecutive loads. 
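// A rough sketch of the idiom (the IR names below are illustrative only):
//   %g0 = getelementptr inbounds float, float* %base, i64 %i0
//   %g1 = getelementptr inbounds float, float* %base, i64 %i1
//   %l0 = load float, float* %g0
//   %l1 = load float, float* %g1
// The loads are not consecutive, but the computations producing %i0 and %i1
// may still be worth vectorizing.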
7904 if (!GEPs.empty()) { 7905 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() 7906 << " underlying objects.\n"); 7907 Changed |= vectorizeGEPIndices(BB, R); 7908 } 7909 } 7910 7911 if (Changed) { 7912 R.optimizeGatherSequence(); 7913 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 7914 } 7915 return Changed; 7916 } 7917 7918 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, 7919 unsigned Idx) { 7920 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size() 7921 << "\n"); 7922 const unsigned Sz = R.getVectorElementSize(Chain[0]); 7923 const unsigned MinVF = R.getMinVecRegSize() / Sz; 7924 unsigned VF = Chain.size(); 7925 7926 if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF) 7927 return false; 7928 7929 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx 7930 << "\n"); 7931 7932 R.buildTree(Chain); 7933 if (R.isTreeTinyAndNotFullyVectorizable()) 7934 return false; 7935 if (R.isLoadCombineCandidate()) 7936 return false; 7937 R.reorderTopToBottom(); 7938 R.reorderBottomToTop(); 7939 R.buildExternalUses(); 7940 7941 R.computeMinimumValueSizes(); 7942 7943 InstructionCost Cost = R.getTreeCost(); 7944 7945 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF =" << VF << "\n"); 7946 if (Cost < -SLPCostThreshold) { 7947 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n"); 7948 7949 using namespace ore; 7950 7951 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized", 7952 cast<StoreInst>(Chain[0])) 7953 << "Stores SLP vectorized with cost " << NV("Cost", Cost) 7954 << " and with tree size " 7955 << NV("TreeSize", R.getTreeSize())); 7956 7957 R.vectorizeTree(); 7958 return true; 7959 } 7960 7961 return false; 7962 } 7963 7964 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores, 7965 BoUpSLP &R) { 7966 // We may run into multiple chains that merge into a single chain. We mark the 7967 // stores that we vectorized so that we don't visit the same store twice. 
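// (For example, two candidate chains that share a run of consecutive stores
//  would otherwise both try to vectorize that shared run.)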
7968 BoUpSLP::ValueSet VectorizedStores; 7969 bool Changed = false; 7970 7971 int E = Stores.size(); 7972 SmallBitVector Tails(E, false); 7973 int MaxIter = MaxStoreLookup.getValue(); 7974 SmallVector<std::pair<int, int>, 16> ConsecutiveChain( 7975 E, std::make_pair(E, INT_MAX)); 7976 SmallVector<SmallBitVector, 4> CheckedPairs(E, SmallBitVector(E, false)); 7977 int IterCnt; 7978 auto &&FindConsecutiveAccess = [this, &Stores, &Tails, &IterCnt, MaxIter, 7979 &CheckedPairs, 7980 &ConsecutiveChain](int K, int Idx) { 7981 if (IterCnt >= MaxIter) 7982 return true; 7983 if (CheckedPairs[Idx].test(K)) 7984 return ConsecutiveChain[K].second == 1 && 7985 ConsecutiveChain[K].first == Idx; 7986 ++IterCnt; 7987 CheckedPairs[Idx].set(K); 7988 CheckedPairs[K].set(Idx); 7989 Optional<int> Diff = getPointersDiff( 7990 Stores[K]->getValueOperand()->getType(), Stores[K]->getPointerOperand(), 7991 Stores[Idx]->getValueOperand()->getType(), 7992 Stores[Idx]->getPointerOperand(), *DL, *SE, /*StrictCheck=*/true); 7993 if (!Diff || *Diff == 0) 7994 return false; 7995 int Val = *Diff; 7996 if (Val < 0) { 7997 if (ConsecutiveChain[Idx].second > -Val) { 7998 Tails.set(K); 7999 ConsecutiveChain[Idx] = std::make_pair(K, -Val); 8000 } 8001 return false; 8002 } 8003 if (ConsecutiveChain[K].second <= Val) 8004 return false; 8005 8006 Tails.set(Idx); 8007 ConsecutiveChain[K] = std::make_pair(Idx, Val); 8008 return Val == 1; 8009 }; 8010 // Do a quadratic search on all of the given stores in reverse order and find 8011 // all of the pairs of stores that follow each other. 8012 for (int Idx = E - 1; Idx >= 0; --Idx) { 8013 // If a store has multiple consecutive store candidates, search according 8014 // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ... 8015 // This is because usually pairing with immediate succeeding or preceding 8016 // candidate create the best chance to find slp vectorization opportunity. 8017 const int MaxLookDepth = std::max(E - Idx, Idx + 1); 8018 IterCnt = 0; 8019 for (int Offset = 1, F = MaxLookDepth; Offset < F; ++Offset) 8020 if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) || 8021 (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx))) 8022 break; 8023 } 8024 8025 // Tracks if we tried to vectorize stores starting from the given tail 8026 // already. 8027 SmallBitVector TriedTails(E, false); 8028 // For stores that start but don't end a link in the chain: 8029 for (int Cnt = E; Cnt > 0; --Cnt) { 8030 int I = Cnt - 1; 8031 if (ConsecutiveChain[I].first == E || Tails.test(I)) 8032 continue; 8033 // We found a store instr that starts a chain. Now follow the chain and try 8034 // to vectorize it. 8035 BoUpSLP::ValueList Operands; 8036 // Collect the chain into a list. 8037 while (I != E && !VectorizedStores.count(Stores[I])) { 8038 Operands.push_back(Stores[I]); 8039 Tails.set(I); 8040 if (ConsecutiveChain[I].second != 1) { 8041 // Mark the new end in the chain and go back, if required. It might be 8042 // required if the original stores come in reversed order, for example. 8043 if (ConsecutiveChain[I].first != E && 8044 Tails.test(ConsecutiveChain[I].first) && !TriedTails.test(I) && 8045 !VectorizedStores.count(Stores[ConsecutiveChain[I].first])) { 8046 TriedTails.set(I); 8047 Tails.reset(ConsecutiveChain[I].first); 8048 if (Cnt < ConsecutiveChain[I].first + 2) 8049 Cnt = ConsecutiveChain[I].first + 2; 8050 } 8051 break; 8052 } 8053 // Move to the next value in the chain. 
8054 I = ConsecutiveChain[I].first; 8055 } 8056 assert(!Operands.empty() && "Expected non-empty list of stores."); 8057 8058 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 8059 unsigned EltSize = R.getVectorElementSize(Operands[0]); 8060 unsigned MaxElts = llvm::PowerOf2Floor(MaxVecRegSize / EltSize); 8061 8062 unsigned MinVF = R.getMinVF(EltSize); 8063 unsigned MaxVF = std::min(R.getMaximumVF(EltSize, Instruction::Store), 8064 MaxElts); 8065 8066 // FIXME: Is division-by-2 the correct step? Should we assert that the 8067 // register size is a power-of-2? 8068 unsigned StartIdx = 0; 8069 for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) { 8070 for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) { 8071 ArrayRef<Value *> Slice = makeArrayRef(Operands).slice(Cnt, Size); 8072 if (!VectorizedStores.count(Slice.front()) && 8073 !VectorizedStores.count(Slice.back()) && 8074 vectorizeStoreChain(Slice, R, Cnt)) { 8075 // Mark the vectorized stores so that we don't vectorize them again. 8076 VectorizedStores.insert(Slice.begin(), Slice.end()); 8077 Changed = true; 8078 // If we vectorized initial block, no need to try to vectorize it 8079 // again. 8080 if (Cnt == StartIdx) 8081 StartIdx += Size; 8082 Cnt += Size; 8083 continue; 8084 } 8085 ++Cnt; 8086 } 8087 // Check if the whole array was vectorized already - exit. 8088 if (StartIdx >= Operands.size()) 8089 break; 8090 } 8091 } 8092 8093 return Changed; 8094 } 8095 8096 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) { 8097 // Initialize the collections. We will make a single pass over the block. 8098 Stores.clear(); 8099 GEPs.clear(); 8100 8101 // Visit the store and getelementptr instructions in BB and organize them in 8102 // Stores and GEPs according to the underlying objects of their pointer 8103 // operands. 8104 for (Instruction &I : *BB) { 8105 // Ignore store instructions that are volatile or have a pointer operand 8106 // that doesn't point to a scalar type. 8107 if (auto *SI = dyn_cast<StoreInst>(&I)) { 8108 if (!SI->isSimple()) 8109 continue; 8110 if (!isValidElementType(SI->getValueOperand()->getType())) 8111 continue; 8112 Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI); 8113 } 8114 8115 // Ignore getelementptr instructions that have more than one index, a 8116 // constant index, or a pointer operand that doesn't point to a scalar 8117 // type. 8118 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 8119 auto Idx = GEP->idx_begin()->get(); 8120 if (GEP->getNumIndices() > 1 || isa<Constant>(Idx)) 8121 continue; 8122 if (!isValidElementType(Idx->getType())) 8123 continue; 8124 if (GEP->getType()->isVectorTy()) 8125 continue; 8126 GEPs[GEP->getPointerOperand()].push_back(GEP); 8127 } 8128 } 8129 } 8130 8131 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) { 8132 if (!A || !B) 8133 return false; 8134 Value *VL[] = {A, B}; 8135 return tryToVectorizeList(VL, R); 8136 } 8137 8138 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 8139 bool LimitForRegisterSize) { 8140 if (VL.size() < 2) 8141 return false; 8142 8143 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " 8144 << VL.size() << ".\n"); 8145 8146 // Check that all of the parts are instructions of the same type, 8147 // we permit an alternate opcode via InstructionsState. 
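// For instance, a list such as {add, sub, add, sub} still forms a single
// bundle: the alternate-opcode path typically emits both vector opcodes and
// blends the results with a shufflevector.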
8148 InstructionsState S = getSameOpcode(VL); 8149 if (!S.getOpcode()) 8150 return false; 8151 8152 Instruction *I0 = cast<Instruction>(S.OpValue); 8153 // Make sure invalid types (including vector type) are rejected before 8154 // determining vectorization factor for scalar instructions. 8155 for (Value *V : VL) { 8156 Type *Ty = V->getType(); 8157 if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) { 8158 // NOTE: the following will give user internal llvm type name, which may 8159 // not be useful. 8160 R.getORE()->emit([&]() { 8161 std::string type_str; 8162 llvm::raw_string_ostream rso(type_str); 8163 Ty->print(rso); 8164 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0) 8165 << "Cannot SLP vectorize list: type " 8166 << rso.str() + " is unsupported by vectorizer"; 8167 }); 8168 return false; 8169 } 8170 } 8171 8172 unsigned Sz = R.getVectorElementSize(I0); 8173 unsigned MinVF = R.getMinVF(Sz); 8174 unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF); 8175 MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF); 8176 if (MaxVF < 2) { 8177 R.getORE()->emit([&]() { 8178 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0) 8179 << "Cannot SLP vectorize list: vectorization factor " 8180 << "less than 2 is not supported"; 8181 }); 8182 return false; 8183 } 8184 8185 bool Changed = false; 8186 bool CandidateFound = false; 8187 InstructionCost MinCost = SLPCostThreshold.getValue(); 8188 Type *ScalarTy = VL[0]->getType(); 8189 if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 8190 ScalarTy = IE->getOperand(1)->getType(); 8191 8192 unsigned NextInst = 0, MaxInst = VL.size(); 8193 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) { 8194 // No actual vectorization should happen, if number of parts is the same as 8195 // provided vectorization factor (i.e. the scalar type is used for vector 8196 // code during codegen). 8197 auto *VecTy = FixedVectorType::get(ScalarTy, VF); 8198 if (TTI->getNumberOfParts(VecTy) == VF) 8199 continue; 8200 for (unsigned I = NextInst; I < MaxInst; ++I) { 8201 unsigned OpsWidth = 0; 8202 8203 if (I + VF > MaxInst) 8204 OpsWidth = MaxInst - I; 8205 else 8206 OpsWidth = VF; 8207 8208 if (!isPowerOf2_32(OpsWidth)) 8209 continue; 8210 8211 if ((LimitForRegisterSize && OpsWidth < MaxVF) || 8212 (VF > MinVF && OpsWidth <= VF / 2) || (VF == MinVF && OpsWidth < 2)) 8213 break; 8214 8215 ArrayRef<Value *> Ops = VL.slice(I, OpsWidth); 8216 // Check that a previous iteration of this loop did not delete the Value. 8217 if (llvm::any_of(Ops, [&R](Value *V) { 8218 auto *I = dyn_cast<Instruction>(V); 8219 return I && R.isDeleted(I); 8220 })) 8221 continue; 8222 8223 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations " 8224 << "\n"); 8225 8226 R.buildTree(Ops); 8227 if (R.isTreeTinyAndNotFullyVectorizable()) 8228 continue; 8229 R.reorderTopToBottom(); 8230 R.reorderBottomToTop(); 8231 R.buildExternalUses(); 8232 8233 R.computeMinimumValueSizes(); 8234 InstructionCost Cost = R.getTreeCost(); 8235 CandidateFound = true; 8236 MinCost = std::min(MinCost, Cost); 8237 8238 if (Cost < -SLPCostThreshold) { 8239 LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); 8240 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList", 8241 cast<Instruction>(Ops[0])) 8242 << "SLP vectorized with cost " << ore::NV("Cost", Cost) 8243 << " and with tree size " 8244 << ore::NV("TreeSize", R.getTreeSize())); 8245 8246 R.vectorizeTree(); 8247 // Move to the next bundle. 
8248 I += VF - 1; 8249 NextInst = I + 1; 8250 Changed = true; 8251 } 8252 } 8253 } 8254 8255 if (!Changed && CandidateFound) { 8256 R.getORE()->emit([&]() { 8257 return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0) 8258 << "List vectorization was possible but not beneficial with cost " 8259 << ore::NV("Cost", MinCost) << " >= " 8260 << ore::NV("Treshold", -SLPCostThreshold); 8261 }); 8262 } else if (!Changed) { 8263 R.getORE()->emit([&]() { 8264 return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0) 8265 << "Cannot SLP vectorize list: vectorization was impossible" 8266 << " with available vectorization factors"; 8267 }); 8268 } 8269 return Changed; 8270 } 8271 8272 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) { 8273 if (!I) 8274 return false; 8275 8276 if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I)) 8277 return false; 8278 8279 Value *P = I->getParent(); 8280 8281 // Vectorize in current basic block only. 8282 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); 8283 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); 8284 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P) 8285 return false; 8286 8287 // Try to vectorize V. 8288 if (tryToVectorizePair(Op0, Op1, R)) 8289 return true; 8290 8291 auto *A = dyn_cast<BinaryOperator>(Op0); 8292 auto *B = dyn_cast<BinaryOperator>(Op1); 8293 // Try to skip B. 8294 if (B && B->hasOneUse()) { 8295 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); 8296 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); 8297 if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R)) 8298 return true; 8299 if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R)) 8300 return true; 8301 } 8302 8303 // Try to skip A. 8304 if (A && A->hasOneUse()) { 8305 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 8306 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 8307 if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R)) 8308 return true; 8309 if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R)) 8310 return true; 8311 } 8312 return false; 8313 } 8314 8315 namespace { 8316 8317 /// Model horizontal reductions. 8318 /// 8319 /// A horizontal reduction is a tree of reduction instructions that has values 8320 /// that can be put into a vector as its leaves. For example: 8321 /// 8322 /// mul mul mul mul 8323 /// \ / \ / 8324 /// + + 8325 /// \ / 8326 /// + 8327 /// This tree has "mul" as its leaf values and "+" as its reduction 8328 /// instructions. A reduction can feed into a store or a binary operation 8329 /// feeding a phi. 8330 /// ... 8331 /// \ / 8332 /// + 8333 /// | 8334 /// phi += 8335 /// 8336 /// Or: 8337 /// ... 8338 /// \ / 8339 /// + 8340 /// | 8341 /// *p = 8342 /// 8343 class HorizontalReduction { 8344 using ReductionOpsType = SmallVector<Value *, 16>; 8345 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>; 8346 ReductionOpsListType ReductionOps; 8347 SmallVector<Value *, 32> ReducedVals; 8348 // Use map vector to make stable output. 8349 MapVector<Instruction *, Value *> ExtraArgs; 8350 WeakTrackingVH ReductionRoot; 8351 /// The type of reduction operation. 
8352 RecurKind RdxKind; 8353 8354 const unsigned INVALID_OPERAND_INDEX = std::numeric_limits<unsigned>::max(); 8355 8356 static bool isCmpSelMinMax(Instruction *I) { 8357 return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) && 8358 RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I)); 8359 } 8360 8361 // And/or are potentially poison-safe logical patterns like: 8362 // select x, y, false 8363 // select x, true, y 8364 static bool isBoolLogicOp(Instruction *I) { 8365 return match(I, m_LogicalAnd(m_Value(), m_Value())) || 8366 match(I, m_LogicalOr(m_Value(), m_Value())); 8367 } 8368 8369 /// Checks if instruction is associative and can be vectorized. 8370 static bool isVectorizable(RecurKind Kind, Instruction *I) { 8371 if (Kind == RecurKind::None) 8372 return false; 8373 8374 // Integer ops that map to select instructions or intrinsics are fine. 8375 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) || 8376 isBoolLogicOp(I)) 8377 return true; 8378 8379 if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) { 8380 // FP min/max are associative except for NaN and -0.0. We do not 8381 // have to rule out -0.0 here because the intrinsic semantics do not 8382 // specify a fixed result for it. 8383 return I->getFastMathFlags().noNaNs(); 8384 } 8385 8386 return I->isAssociative(); 8387 } 8388 8389 static Value *getRdxOperand(Instruction *I, unsigned Index) { 8390 // Poison-safe 'or' takes the form: select X, true, Y 8391 // To make that work with the normal operand processing, we skip the 8392 // true value operand. 8393 // TODO: Change the code and data structures to handle this without a hack. 8394 if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1) 8395 return I->getOperand(2); 8396 return I->getOperand(Index); 8397 } 8398 8399 /// Checks if the ParentStackElem.first should be marked as a reduction 8400 /// operation with an extra argument or as extra argument itself. 8401 void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem, 8402 Value *ExtraArg) { 8403 if (ExtraArgs.count(ParentStackElem.first)) { 8404 ExtraArgs[ParentStackElem.first] = nullptr; 8405 // We ran into something like: 8406 // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg. 8407 // The whole ParentStackElem.first should be considered as an extra value 8408 // in this case. 8409 // Do not perform analysis of remaining operands of ParentStackElem.first 8410 // instruction, this whole instruction is an extra argument. 8411 ParentStackElem.second = INVALID_OPERAND_INDEX; 8412 } else { 8413 // We ran into something like: 8414 // ParentStackElem.first += ... + ExtraArg + ... 8415 ExtraArgs[ParentStackElem.first] = ExtraArg; 8416 } 8417 } 8418 8419 /// Creates reduction operation with the current opcode. 
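/// For example, RecurKind::SMax with \p UseSelect set expands to an icmp sgt
/// plus a select, and to an @llvm.smax call otherwise; this mirrors the cases
/// handled below.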
8420 static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS, 8421 Value *RHS, const Twine &Name, bool UseSelect) { 8422 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind); 8423 switch (Kind) { 8424 case RecurKind::Or: 8425 if (UseSelect && 8426 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 8427 return Builder.CreateSelect(LHS, Builder.getTrue(), RHS, Name); 8428 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 8429 Name); 8430 case RecurKind::And: 8431 if (UseSelect && 8432 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 8433 return Builder.CreateSelect(LHS, RHS, Builder.getFalse(), Name); 8434 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 8435 Name); 8436 case RecurKind::Add: 8437 case RecurKind::Mul: 8438 case RecurKind::Xor: 8439 case RecurKind::FAdd: 8440 case RecurKind::FMul: 8441 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 8442 Name); 8443 case RecurKind::FMax: 8444 return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS); 8445 case RecurKind::FMin: 8446 return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS); 8447 case RecurKind::SMax: 8448 if (UseSelect) { 8449 Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name); 8450 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 8451 } 8452 return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS); 8453 case RecurKind::SMin: 8454 if (UseSelect) { 8455 Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name); 8456 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 8457 } 8458 return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS); 8459 case RecurKind::UMax: 8460 if (UseSelect) { 8461 Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name); 8462 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 8463 } 8464 return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS); 8465 case RecurKind::UMin: 8466 if (UseSelect) { 8467 Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name); 8468 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 8469 } 8470 return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS); 8471 default: 8472 llvm_unreachable("Unknown reduction operation."); 8473 } 8474 } 8475 8476 /// Creates reduction operation with the current opcode with the IR flags 8477 /// from \p ReductionOps. 8478 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 8479 Value *RHS, const Twine &Name, 8480 const ReductionOpsListType &ReductionOps) { 8481 bool UseSelect = ReductionOps.size() == 2 || 8482 // Logical or/and. 8483 (ReductionOps.size() == 1 && 8484 isa<SelectInst>(ReductionOps.front().front())); 8485 assert((!UseSelect || ReductionOps.size() != 2 || 8486 isa<SelectInst>(ReductionOps[1][0])) && 8487 "Expected cmp + select pairs for reduction"); 8488 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect); 8489 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 8490 if (auto *Sel = dyn_cast<SelectInst>(Op)) { 8491 propagateIRFlags(Sel->getCondition(), ReductionOps[0]); 8492 propagateIRFlags(Op, ReductionOps[1]); 8493 return Op; 8494 } 8495 } 8496 propagateIRFlags(Op, ReductionOps[0]); 8497 return Op; 8498 } 8499 8500 /// Creates reduction operation with the current opcode with the IR flags 8501 /// from \p I. 
8502 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 8503 Value *RHS, const Twine &Name, Instruction *I) { 8504 auto *SelI = dyn_cast<SelectInst>(I); 8505 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, SelI != nullptr); 8506 if (SelI && RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 8507 if (auto *Sel = dyn_cast<SelectInst>(Op)) 8508 propagateIRFlags(Sel->getCondition(), SelI->getCondition()); 8509 } 8510 propagateIRFlags(Op, I); 8511 return Op; 8512 } 8513 8514 static RecurKind getRdxKind(Instruction *I) { 8515 assert(I && "Expected instruction for reduction matching"); 8516 TargetTransformInfo::ReductionFlags RdxFlags; 8517 if (match(I, m_Add(m_Value(), m_Value()))) 8518 return RecurKind::Add; 8519 if (match(I, m_Mul(m_Value(), m_Value()))) 8520 return RecurKind::Mul; 8521 if (match(I, m_And(m_Value(), m_Value())) || 8522 match(I, m_LogicalAnd(m_Value(), m_Value()))) 8523 return RecurKind::And; 8524 if (match(I, m_Or(m_Value(), m_Value())) || 8525 match(I, m_LogicalOr(m_Value(), m_Value()))) 8526 return RecurKind::Or; 8527 if (match(I, m_Xor(m_Value(), m_Value()))) 8528 return RecurKind::Xor; 8529 if (match(I, m_FAdd(m_Value(), m_Value()))) 8530 return RecurKind::FAdd; 8531 if (match(I, m_FMul(m_Value(), m_Value()))) 8532 return RecurKind::FMul; 8533 8534 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value()))) 8535 return RecurKind::FMax; 8536 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value()))) 8537 return RecurKind::FMin; 8538 8539 // This matches either cmp+select or intrinsics. SLP is expected to handle 8540 // either form. 8541 // TODO: If we are canonicalizing to intrinsics, we can remove several 8542 // special-case paths that deal with selects. 8543 if (match(I, m_SMax(m_Value(), m_Value()))) 8544 return RecurKind::SMax; 8545 if (match(I, m_SMin(m_Value(), m_Value()))) 8546 return RecurKind::SMin; 8547 if (match(I, m_UMax(m_Value(), m_Value()))) 8548 return RecurKind::UMax; 8549 if (match(I, m_UMin(m_Value(), m_Value()))) 8550 return RecurKind::UMin; 8551 8552 if (auto *Select = dyn_cast<SelectInst>(I)) { 8553 // Try harder: look for min/max pattern based on instructions producing 8554 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2). 8555 // During the intermediate stages of SLP, it's very common to have 8556 // pattern like this (since optimizeGatherSequence is run only once 8557 // at the end): 8558 // %1 = extractelement <2 x i32> %a, i32 0 8559 // %2 = extractelement <2 x i32> %a, i32 1 8560 // %cond = icmp sgt i32 %1, %2 8561 // %3 = extractelement <2 x i32> %a, i32 0 8562 // %4 = extractelement <2 x i32> %a, i32 1 8563 // %select = select i1 %cond, i32 %3, i32 %4 8564 CmpInst::Predicate Pred; 8565 Instruction *L1; 8566 Instruction *L2; 8567 8568 Value *LHS = Select->getTrueValue(); 8569 Value *RHS = Select->getFalseValue(); 8570 Value *Cond = Select->getCondition(); 8571 8572 // TODO: Support inverse predicates. 
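// (Roughly: the same min/max spelled with the opposite predicate and with the
//  select arms swapped is not recognized by the checks below.)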
8573 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) { 8574 if (!isa<ExtractElementInst>(RHS) || 8575 !L2->isIdenticalTo(cast<Instruction>(RHS))) 8576 return RecurKind::None; 8577 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) { 8578 if (!isa<ExtractElementInst>(LHS) || 8579 !L1->isIdenticalTo(cast<Instruction>(LHS))) 8580 return RecurKind::None; 8581 } else { 8582 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS)) 8583 return RecurKind::None; 8584 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) || 8585 !L1->isIdenticalTo(cast<Instruction>(LHS)) || 8586 !L2->isIdenticalTo(cast<Instruction>(RHS))) 8587 return RecurKind::None; 8588 } 8589 8590 TargetTransformInfo::ReductionFlags RdxFlags; 8591 switch (Pred) { 8592 default: 8593 return RecurKind::None; 8594 case CmpInst::ICMP_SGT: 8595 case CmpInst::ICMP_SGE: 8596 return RecurKind::SMax; 8597 case CmpInst::ICMP_SLT: 8598 case CmpInst::ICMP_SLE: 8599 return RecurKind::SMin; 8600 case CmpInst::ICMP_UGT: 8601 case CmpInst::ICMP_UGE: 8602 return RecurKind::UMax; 8603 case CmpInst::ICMP_ULT: 8604 case CmpInst::ICMP_ULE: 8605 return RecurKind::UMin; 8606 } 8607 } 8608 return RecurKind::None; 8609 } 8610 8611 /// Get the index of the first operand. 8612 static unsigned getFirstOperandIndex(Instruction *I) { 8613 return isCmpSelMinMax(I) ? 1 : 0; 8614 } 8615 8616 /// Total number of operands in the reduction operation. 8617 static unsigned getNumberOfOperands(Instruction *I) { 8618 return isCmpSelMinMax(I) ? 3 : 2; 8619 } 8620 8621 /// Checks if the instruction is in basic block \p BB. 8622 /// For a cmp+sel min/max reduction check that both ops are in \p BB. 8623 static bool hasSameParent(Instruction *I, BasicBlock *BB) { 8624 if (isCmpSelMinMax(I) || (isBoolLogicOp(I) && isa<SelectInst>(I))) { 8625 auto *Sel = cast<SelectInst>(I); 8626 auto *Cmp = dyn_cast<Instruction>(Sel->getCondition()); 8627 return Sel->getParent() == BB && Cmp && Cmp->getParent() == BB; 8628 } 8629 return I->getParent() == BB; 8630 } 8631 8632 /// Expected number of uses for reduction operations/reduced values. 8633 static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) { 8634 if (IsCmpSelMinMax) { 8635 // SelectInst must be used twice while the condition op must have single 8636 // use only. 8637 if (auto *Sel = dyn_cast<SelectInst>(I)) 8638 return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse(); 8639 return I->hasNUses(2); 8640 } 8641 8642 // Arithmetic reduction operation must be used once only. 8643 return I->hasOneUse(); 8644 } 8645 8646 /// Initializes the list of reduction operations. 8647 void initReductionOps(Instruction *I) { 8648 if (isCmpSelMinMax(I)) 8649 ReductionOps.assign(2, ReductionOpsType()); 8650 else 8651 ReductionOps.assign(1, ReductionOpsType()); 8652 } 8653 8654 /// Add all reduction operations for the reduction instruction \p I. 
8655 void addReductionOps(Instruction *I) { 8656 if (isCmpSelMinMax(I)) { 8657 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition()); 8658 ReductionOps[1].emplace_back(I); 8659 } else { 8660 ReductionOps[0].emplace_back(I); 8661 } 8662 } 8663 8664 static Value *getLHS(RecurKind Kind, Instruction *I) { 8665 if (Kind == RecurKind::None) 8666 return nullptr; 8667 return I->getOperand(getFirstOperandIndex(I)); 8668 } 8669 static Value *getRHS(RecurKind Kind, Instruction *I) { 8670 if (Kind == RecurKind::None) 8671 return nullptr; 8672 return I->getOperand(getFirstOperandIndex(I) + 1); 8673 } 8674 8675 public: 8676 HorizontalReduction() = default; 8677 8678 /// Try to find a reduction tree. 8679 bool matchAssociativeReduction(PHINode *Phi, Instruction *Inst) { 8680 assert((!Phi || is_contained(Phi->operands(), Inst)) && 8681 "Phi needs to use the binary operator"); 8682 assert((isa<BinaryOperator>(Inst) || isa<SelectInst>(Inst) || 8683 isa<IntrinsicInst>(Inst)) && 8684 "Expected binop, select, or intrinsic for reduction matching"); 8685 RdxKind = getRdxKind(Inst); 8686 8687 // We could have a initial reductions that is not an add. 8688 // r *= v1 + v2 + v3 + v4 8689 // In such a case start looking for a tree rooted in the first '+'. 8690 if (Phi) { 8691 if (getLHS(RdxKind, Inst) == Phi) { 8692 Phi = nullptr; 8693 Inst = dyn_cast<Instruction>(getRHS(RdxKind, Inst)); 8694 if (!Inst) 8695 return false; 8696 RdxKind = getRdxKind(Inst); 8697 } else if (getRHS(RdxKind, Inst) == Phi) { 8698 Phi = nullptr; 8699 Inst = dyn_cast<Instruction>(getLHS(RdxKind, Inst)); 8700 if (!Inst) 8701 return false; 8702 RdxKind = getRdxKind(Inst); 8703 } 8704 } 8705 8706 if (!isVectorizable(RdxKind, Inst)) 8707 return false; 8708 8709 // Analyze "regular" integer/FP types for reductions - no target-specific 8710 // types or pointers. 8711 Type *Ty = Inst->getType(); 8712 if (!isValidElementType(Ty) || Ty->isPointerTy()) 8713 return false; 8714 8715 // Though the ultimate reduction may have multiple uses, its condition must 8716 // have only single use. 8717 if (auto *Sel = dyn_cast<SelectInst>(Inst)) 8718 if (!Sel->getCondition()->hasOneUse()) 8719 return false; 8720 8721 ReductionRoot = Inst; 8722 8723 // The opcode for leaf values that we perform a reduction on. 8724 // For example: load(x) + load(y) + load(z) + fptoui(w) 8725 // The leaf opcode for 'w' does not match, so we don't include it as a 8726 // potential candidate for the reduction. 8727 unsigned LeafOpcode = 0; 8728 8729 // Post-order traverse the reduction tree starting at Inst. We only handle 8730 // true trees containing binary operators or selects. 8731 SmallVector<std::pair<Instruction *, unsigned>, 32> Stack; 8732 Stack.push_back(std::make_pair(Inst, getFirstOperandIndex(Inst))); 8733 initReductionOps(Inst); 8734 while (!Stack.empty()) { 8735 Instruction *TreeN = Stack.back().first; 8736 unsigned EdgeToVisit = Stack.back().second++; 8737 const RecurKind TreeRdxKind = getRdxKind(TreeN); 8738 bool IsReducedValue = TreeRdxKind != RdxKind; 8739 8740 // Postorder visit. 8741 if (IsReducedValue || EdgeToVisit >= getNumberOfOperands(TreeN)) { 8742 if (IsReducedValue) 8743 ReducedVals.push_back(TreeN); 8744 else { 8745 auto ExtraArgsIter = ExtraArgs.find(TreeN); 8746 if (ExtraArgsIter != ExtraArgs.end() && !ExtraArgsIter->second) { 8747 // Check if TreeN is an extra argument of its parent operation. 8748 if (Stack.size() <= 1) { 8749 // TreeN can't be an extra argument as it is a root reduction 8750 // operation. 
8751 return false; 8752 } 8753 // Yes, TreeN is an extra argument, do not add it to a list of 8754 // reduction operations. 8755 // Stack[Stack.size() - 2] always points to the parent operation. 8756 markExtraArg(Stack[Stack.size() - 2], TreeN); 8757 ExtraArgs.erase(TreeN); 8758 } else 8759 addReductionOps(TreeN); 8760 } 8761 // Retract. 8762 Stack.pop_back(); 8763 continue; 8764 } 8765 8766 // Visit operands. 8767 Value *EdgeVal = getRdxOperand(TreeN, EdgeToVisit); 8768 auto *EdgeInst = dyn_cast<Instruction>(EdgeVal); 8769 if (!EdgeInst) { 8770 // Edge value is not a reduction instruction or a leaf instruction. 8771 // (It may be a constant, function argument, or something else.) 8772 markExtraArg(Stack.back(), EdgeVal); 8773 continue; 8774 } 8775 RecurKind EdgeRdxKind = getRdxKind(EdgeInst); 8776 // Continue analysis if the next operand is a reduction operation or 8777 // (possibly) a leaf value. If the leaf value opcode is not set, 8778 // the first met operation != reduction operation is considered as the 8779 // leaf opcode. 8780 // Only handle trees in the current basic block. 8781 // Each tree node needs to have minimal number of users except for the 8782 // ultimate reduction. 8783 const bool IsRdxInst = EdgeRdxKind == RdxKind; 8784 if (EdgeInst != Phi && EdgeInst != Inst && 8785 hasSameParent(EdgeInst, Inst->getParent()) && 8786 hasRequiredNumberOfUses(isCmpSelMinMax(Inst), EdgeInst) && 8787 (!LeafOpcode || LeafOpcode == EdgeInst->getOpcode() || IsRdxInst)) { 8788 if (IsRdxInst) { 8789 // We need to be able to reassociate the reduction operations. 8790 if (!isVectorizable(EdgeRdxKind, EdgeInst)) { 8791 // I is an extra argument for TreeN (its parent operation). 8792 markExtraArg(Stack.back(), EdgeInst); 8793 continue; 8794 } 8795 } else if (!LeafOpcode) { 8796 LeafOpcode = EdgeInst->getOpcode(); 8797 } 8798 Stack.push_back( 8799 std::make_pair(EdgeInst, getFirstOperandIndex(EdgeInst))); 8800 continue; 8801 } 8802 // I is an extra argument for TreeN (its parent operation). 8803 markExtraArg(Stack.back(), EdgeInst); 8804 } 8805 return true; 8806 } 8807 8808 /// Attempt to vectorize the tree found by matchAssociativeReduction. 8809 Value *tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) { 8810 // If there are a sufficient number of reduction values, reduce 8811 // to a nearby power-of-2. We can safely generate oversized 8812 // vectors and rely on the backend to split them to legal sizes. 8813 unsigned NumReducedVals = ReducedVals.size(); 8814 if (NumReducedVals < 4) 8815 return nullptr; 8816 8817 // Intersect the fast-math-flags from all reduction operations. 8818 FastMathFlags RdxFMF; 8819 RdxFMF.set(); 8820 for (ReductionOpsType &RdxOp : ReductionOps) { 8821 for (Value *RdxVal : RdxOp) { 8822 if (auto *FPMO = dyn_cast<FPMathOperator>(RdxVal)) 8823 RdxFMF &= FPMO->getFastMathFlags(); 8824 } 8825 } 8826 8827 IRBuilder<> Builder(cast<Instruction>(ReductionRoot)); 8828 Builder.setFastMathFlags(RdxFMF); 8829 8830 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues; 8831 // The same extra argument may be used several times, so log each attempt 8832 // to use it. 8833 for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) { 8834 assert(Pair.first && "DebugLoc must be set."); 8835 ExternallyUsedValues[Pair.second].push_back(Pair.first); 8836 } 8837 8838 // The compare instruction of a min/max is the insertion point for new 8839 // instructions and may be replaced with a new compare instruction. 
8840 auto getCmpForMinMaxReduction = [](Instruction *RdxRootInst) { 8841 assert(isa<SelectInst>(RdxRootInst) && 8842 "Expected min/max reduction to have select root instruction"); 8843 Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition(); 8844 assert(isa<Instruction>(ScalarCond) && 8845 "Expected min/max reduction to have compare condition"); 8846 return cast<Instruction>(ScalarCond); 8847 }; 8848 8849 // The reduction root is used as the insertion point for new instructions, 8850 // so set it as externally used to prevent it from being deleted. 8851 ExternallyUsedValues[ReductionRoot]; 8852 SmallVector<Value *, 16> IgnoreList; 8853 for (ReductionOpsType &RdxOp : ReductionOps) 8854 IgnoreList.append(RdxOp.begin(), RdxOp.end()); 8855 8856 unsigned ReduxWidth = PowerOf2Floor(NumReducedVals); 8857 if (NumReducedVals > ReduxWidth) { 8858 // In the loop below, we are building a tree based on a window of 8859 // 'ReduxWidth' values. 8860 // If the operands of those values have common traits (compare predicate, 8861 // constant operand, etc), then we want to group those together to 8862 // minimize the cost of the reduction. 8863 8864 // TODO: This should be extended to count common operands for 8865 // compares and binops. 8866 8867 // Step 1: Count the number of times each compare predicate occurs. 8868 SmallDenseMap<unsigned, unsigned> PredCountMap; 8869 for (Value *RdxVal : ReducedVals) { 8870 CmpInst::Predicate Pred; 8871 if (match(RdxVal, m_Cmp(Pred, m_Value(), m_Value()))) 8872 ++PredCountMap[Pred]; 8873 } 8874 // Step 2: Sort the values so the most common predicates come first. 8875 stable_sort(ReducedVals, [&PredCountMap](Value *A, Value *B) { 8876 CmpInst::Predicate PredA, PredB; 8877 if (match(A, m_Cmp(PredA, m_Value(), m_Value())) && 8878 match(B, m_Cmp(PredB, m_Value(), m_Value()))) { 8879 return PredCountMap[PredA] > PredCountMap[PredB]; 8880 } 8881 return false; 8882 }); 8883 } 8884 8885 Value *VectorizedTree = nullptr; 8886 unsigned i = 0; 8887 while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) { 8888 ArrayRef<Value *> VL(&ReducedVals[i], ReduxWidth); 8889 V.buildTree(VL, IgnoreList); 8890 if (V.isTreeTinyAndNotFullyVectorizable(/*ForReduction=*/true)) 8891 break; 8892 if (V.isLoadCombineReductionCandidate(RdxKind)) 8893 break; 8894 V.reorderTopToBottom(); 8895 V.reorderBottomToTop(/*IgnoreReorder=*/true); 8896 V.buildExternalUses(ExternallyUsedValues); 8897 8898 // For a poison-safe boolean logic reduction, do not replace select 8899 // instructions with logic ops. All reduced values will be frozen (see 8900 // below) to prevent leaking poison. 8901 if (isa<SelectInst>(ReductionRoot) && 8902 isBoolLogicOp(cast<Instruction>(ReductionRoot)) && 8903 NumReducedVals != ReduxWidth) 8904 break; 8905 8906 V.computeMinimumValueSizes(); 8907 8908 // Estimate cost. 
8909 InstructionCost TreeCost =
8910 V.getTreeCost(makeArrayRef(&ReducedVals[i], ReduxWidth));
8911 InstructionCost ReductionCost =
8912 getReductionCost(TTI, ReducedVals[i], ReduxWidth, RdxFMF);
8913 InstructionCost Cost = TreeCost + ReductionCost;
8914 if (!Cost.isValid()) {
8915 LLVM_DEBUG(dbgs() << "Encountered invalid baseline cost.\n");
8916 return nullptr;
8917 }
8918 if (Cost >= -SLPCostThreshold) {
8919 V.getORE()->emit([&]() {
8920 return OptimizationRemarkMissed(SV_NAME, "HorSLPNotBeneficial",
8921 cast<Instruction>(VL[0]))
8922 << "Vectorizing horizontal reduction is possible "
8923 << "but not beneficial with cost " << ore::NV("Cost", Cost)
8924 << " and threshold "
8925 << ore::NV("Threshold", -SLPCostThreshold);
8926 });
8927 break;
8928 }
8929
8930 LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
8931 << Cost << ". (HorRdx)\n");
8932 V.getORE()->emit([&]() {
8933 return OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction",
8934 cast<Instruction>(VL[0]))
8935 << "Vectorized horizontal reduction with cost "
8936 << ore::NV("Cost", Cost) << " and with tree size "
8937 << ore::NV("TreeSize", V.getTreeSize());
8938 });
8939
8940 // Vectorize a tree.
8941 DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
8942 Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);
8943
8944 // Emit a reduction. If the root is a select (min/max idiom), the insert
8945 // point is the compare condition of that select.
8946 Instruction *RdxRootInst = cast<Instruction>(ReductionRoot);
8947 if (isCmpSelMinMax(RdxRootInst))
8948 Builder.SetInsertPoint(getCmpForMinMaxReduction(RdxRootInst));
8949 else
8950 Builder.SetInsertPoint(RdxRootInst);
8951
8952 // To prevent poison from leaking across what used to be sequential, safe,
8953 // scalar boolean logic operations, the reduction operand must be frozen.
8954 if (isa<SelectInst>(RdxRootInst) && isBoolLogicOp(RdxRootInst))
8955 VectorizedRoot = Builder.CreateFreeze(VectorizedRoot);
8956
8957 Value *ReducedSubTree =
8958 emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);
8959
8960 if (!VectorizedTree) {
8961 // Initialize the final value in the reduction.
8962 VectorizedTree = ReducedSubTree;
8963 } else {
8964 // Update the final value in the reduction.
8965 Builder.SetCurrentDebugLocation(Loc);
8966 VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
8967 ReducedSubTree, "op.rdx", ReductionOps);
8968 }
8969 i += ReduxWidth;
8970 ReduxWidth = PowerOf2Floor(NumReducedVals - i);
8971 }
8972
8973 if (VectorizedTree) {
8974 // Finish the reduction.
8975 for (; i < NumReducedVals; ++i) {
8976 auto *I = cast<Instruction>(ReducedVals[i]);
8977 Builder.SetCurrentDebugLocation(I->getDebugLoc());
8978 VectorizedTree =
8979 createOp(Builder, RdxKind, VectorizedTree, I, "", ReductionOps);
8980 }
8981 for (auto &Pair : ExternallyUsedValues) {
8982 // Add each externally used value to the final reduction.
8983 for (auto *I : Pair.second) {
8984 Builder.SetCurrentDebugLocation(I->getDebugLoc());
8985 VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
8986 Pair.first, "op.extra", I);
8987 }
8988 }
8989
8990 ReductionRoot->replaceAllUsesWith(VectorizedTree);
8991
8992 // Mark all scalar reduction ops for deletion; they are replaced by the
8993 // vector reductions.
8994 V.eraseInstructions(IgnoreList);
8995 }
8996 return VectorizedTree;
8997 }
8998
8999 unsigned numReductionValues() const { return ReducedVals.size(); }
9000
9001 private:
9002 /// Calculate the cost of a reduction.
9003 InstructionCost getReductionCost(TargetTransformInfo *TTI, 9004 Value *FirstReducedVal, unsigned ReduxWidth, 9005 FastMathFlags FMF) { 9006 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 9007 Type *ScalarTy = FirstReducedVal->getType(); 9008 FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth); 9009 InstructionCost VectorCost, ScalarCost; 9010 switch (RdxKind) { 9011 case RecurKind::Add: 9012 case RecurKind::Mul: 9013 case RecurKind::Or: 9014 case RecurKind::And: 9015 case RecurKind::Xor: 9016 case RecurKind::FAdd: 9017 case RecurKind::FMul: { 9018 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind); 9019 VectorCost = 9020 TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF, CostKind); 9021 ScalarCost = TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy, CostKind); 9022 break; 9023 } 9024 case RecurKind::FMax: 9025 case RecurKind::FMin: { 9026 auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy); 9027 auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy)); 9028 VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy, 9029 /*unsigned=*/false, CostKind); 9030 CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind); 9031 ScalarCost = TTI->getCmpSelInstrCost(Instruction::FCmp, ScalarTy, 9032 SclCondTy, RdxPred, CostKind) + 9033 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 9034 SclCondTy, RdxPred, CostKind); 9035 break; 9036 } 9037 case RecurKind::SMax: 9038 case RecurKind::SMin: 9039 case RecurKind::UMax: 9040 case RecurKind::UMin: { 9041 auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy); 9042 auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy)); 9043 bool IsUnsigned = 9044 RdxKind == RecurKind::UMax || RdxKind == RecurKind::UMin; 9045 VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy, IsUnsigned, 9046 CostKind); 9047 CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind); 9048 ScalarCost = TTI->getCmpSelInstrCost(Instruction::ICmp, ScalarTy, 9049 SclCondTy, RdxPred, CostKind) + 9050 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 9051 SclCondTy, RdxPred, CostKind); 9052 break; 9053 } 9054 default: 9055 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 9056 } 9057 9058 // Scalar cost is repeated for N-1 elements. 9059 ScalarCost *= (ReduxWidth - 1); 9060 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost 9061 << " for reduction that starts with " << *FirstReducedVal 9062 << " (It is a splitting reduction)\n"); 9063 return VectorCost - ScalarCost; 9064 } 9065 9066 /// Emit a horizontal reduction of the vectorized value. 
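/// For example, an add reduction of an <8 x i32> vectorized value is typically
/// lowered to a single llvm.vector.reduce.add call by
/// createSimpleTargetReduction.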
9067 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 9068 unsigned ReduxWidth, const TargetTransformInfo *TTI) { 9069 assert(VectorizedValue && "Need to have a vectorized tree node"); 9070 assert(isPowerOf2_32(ReduxWidth) && 9071 "We only handle power-of-two reductions for now"); 9072 assert(RdxKind != RecurKind::FMulAdd && 9073 "A call to the llvm.fmuladd intrinsic is not handled yet"); 9074 9075 ++NumVectorInstructions; 9076 return createSimpleTargetReduction(Builder, TTI, VectorizedValue, RdxKind); 9077 } 9078 }; 9079 9080 } // end anonymous namespace 9081 9082 static Optional<unsigned> getAggregateSize(Instruction *InsertInst) { 9083 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) 9084 return cast<FixedVectorType>(IE->getType())->getNumElements(); 9085 9086 unsigned AggregateSize = 1; 9087 auto *IV = cast<InsertValueInst>(InsertInst); 9088 Type *CurrentType = IV->getType(); 9089 do { 9090 if (auto *ST = dyn_cast<StructType>(CurrentType)) { 9091 for (auto *Elt : ST->elements()) 9092 if (Elt != ST->getElementType(0)) // check homogeneity 9093 return None; 9094 AggregateSize *= ST->getNumElements(); 9095 CurrentType = ST->getElementType(0); 9096 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) { 9097 AggregateSize *= AT->getNumElements(); 9098 CurrentType = AT->getElementType(); 9099 } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) { 9100 AggregateSize *= VT->getNumElements(); 9101 return AggregateSize; 9102 } else if (CurrentType->isSingleValueType()) { 9103 return AggregateSize; 9104 } else { 9105 return None; 9106 } 9107 } while (true); 9108 } 9109 9110 static bool findBuildAggregate_rec(Instruction *LastInsertInst, 9111 TargetTransformInfo *TTI, 9112 SmallVectorImpl<Value *> &BuildVectorOpds, 9113 SmallVectorImpl<Value *> &InsertElts, 9114 unsigned OperandOffset) { 9115 do { 9116 Value *InsertedOperand = LastInsertInst->getOperand(1); 9117 Optional<int> OperandIndex = getInsertIndex(LastInsertInst, OperandOffset); 9118 if (!OperandIndex) 9119 return false; 9120 if (isa<InsertElementInst>(InsertedOperand) || 9121 isa<InsertValueInst>(InsertedOperand)) { 9122 if (!findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI, 9123 BuildVectorOpds, InsertElts, *OperandIndex)) 9124 return false; 9125 } else { 9126 BuildVectorOpds[*OperandIndex] = InsertedOperand; 9127 InsertElts[*OperandIndex] = LastInsertInst; 9128 } 9129 LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0)); 9130 } while (LastInsertInst != nullptr && 9131 (isa<InsertValueInst>(LastInsertInst) || 9132 isa<InsertElementInst>(LastInsertInst)) && 9133 LastInsertInst->hasOneUse()); 9134 return true; 9135 } 9136 9137 /// Recognize construction of vectors like 9138 /// %ra = insertelement <4 x float> poison, float %s0, i32 0 9139 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 9140 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 9141 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 9142 /// starting from the last insertelement or insertvalue instruction. 9143 /// 9144 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>}, 9145 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on. 9146 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples. 9147 /// 9148 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type. 9149 /// 9150 /// \return true if it matches. 
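/// On success, \p BuildVectorOpds and \p InsertElts are parallel lists: each
/// BuildVectorOpds entry is an inserted value and the matching InsertElts
/// entry is the insertelement/insertvalue that produced it (positions that
/// were never written are dropped from both lists).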
9151 static bool findBuildAggregate(Instruction *LastInsertInst, 9152 TargetTransformInfo *TTI, 9153 SmallVectorImpl<Value *> &BuildVectorOpds, 9154 SmallVectorImpl<Value *> &InsertElts) { 9155 9156 assert((isa<InsertElementInst>(LastInsertInst) || 9157 isa<InsertValueInst>(LastInsertInst)) && 9158 "Expected insertelement or insertvalue instruction!"); 9159 9160 assert((BuildVectorOpds.empty() && InsertElts.empty()) && 9161 "Expected empty result vectors!"); 9162 9163 Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst); 9164 if (!AggregateSize) 9165 return false; 9166 BuildVectorOpds.resize(*AggregateSize); 9167 InsertElts.resize(*AggregateSize); 9168 9169 if (findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 9170 0)) { 9171 llvm::erase_value(BuildVectorOpds, nullptr); 9172 llvm::erase_value(InsertElts, nullptr); 9173 if (BuildVectorOpds.size() >= 2) 9174 return true; 9175 } 9176 9177 return false; 9178 } 9179 9180 /// Try and get a reduction value from a phi node. 9181 /// 9182 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 9183 /// if they come from either \p ParentBB or a containing loop latch. 9184 /// 9185 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 9186 /// if not possible. 9187 static Value *getReductionValue(const DominatorTree *DT, PHINode *P, 9188 BasicBlock *ParentBB, LoopInfo *LI) { 9189 // There are situations where the reduction value is not dominated by the 9190 // reduction phi. Vectorizing such cases has been reported to cause 9191 // miscompiles. See PR25787. 9192 auto DominatedReduxValue = [&](Value *R) { 9193 return isa<Instruction>(R) && 9194 DT->dominates(P->getParent(), cast<Instruction>(R)->getParent()); 9195 }; 9196 9197 Value *Rdx = nullptr; 9198 9199 // Return the incoming value if it comes from the same BB as the phi node. 9200 if (P->getIncomingBlock(0) == ParentBB) { 9201 Rdx = P->getIncomingValue(0); 9202 } else if (P->getIncomingBlock(1) == ParentBB) { 9203 Rdx = P->getIncomingValue(1); 9204 } 9205 9206 if (Rdx && DominatedReduxValue(Rdx)) 9207 return Rdx; 9208 9209 // Otherwise, check whether we have a loop latch to look at. 9210 Loop *BBL = LI->getLoopFor(ParentBB); 9211 if (!BBL) 9212 return nullptr; 9213 BasicBlock *BBLatch = BBL->getLoopLatch(); 9214 if (!BBLatch) 9215 return nullptr; 9216 9217 // There is a loop latch, return the incoming value if it comes from 9218 // that. This reduction pattern occasionally turns up. 
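// (This is the shape of a classic in-loop reduction, e.g. a sum accumulated
//  across iterations, where the phi's other incoming value arrives through the
//  loop latch.)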
9219   if (P->getIncomingBlock(0) == BBLatch) {
9220     Rdx = P->getIncomingValue(0);
9221   } else if (P->getIncomingBlock(1) == BBLatch) {
9222     Rdx = P->getIncomingValue(1);
9223   }
9224
9225   if (Rdx && DominatedReduxValue(Rdx))
9226     return Rdx;
9227
9228   return nullptr;
9229 }
9230
9231 static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) {
9232   if (match(I, m_BinOp(m_Value(V0), m_Value(V1))))
9233     return true;
9234   if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1))))
9235     return true;
9236   if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1))))
9237     return true;
9238   if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1))))
9239     return true;
9240   if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1))))
9241     return true;
9242   if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1))))
9243     return true;
9244   if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1))))
9245     return true;
9246   return false;
9247 }
9248
9249 /// Attempt to reduce a horizontal reduction.
9250 /// If it is legal to match a horizontal reduction feeding the phi node \a P
9251 /// with reduction operators \a Root (or one of its operands) in a basic block
9252 /// \a BB, then check if it can be done. If a horizontal reduction is not found,
9253 /// and the root instruction is a binary operation, vectorization of the
9254 /// operands is attempted.
9255 /// \returns true if a horizontal reduction was matched and reduced or operands
9256 /// of one of the binary instructions were vectorized.
9257 /// \returns false if a horizontal reduction was not matched (or not possible)
9258 /// or no vectorization of any binary operation feeding the \a Root instruction
9259 /// was performed.
9260 static bool tryToVectorizeHorReductionOrInstOperands(
9261     PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
9262     TargetTransformInfo *TTI,
9263     const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
9264   if (!ShouldVectorizeHor)
9265     return false;
9266
9267   if (!Root)
9268     return false;
9269
9270   if (Root->getParent() != BB || isa<PHINode>(Root))
9271     return false;
9272   // Start analysis from the Root instruction. If a horizontal reduction is
9273   // found, try to vectorize it. If it is not a horizontal reduction or
9274   // vectorization is not possible or not effective, and the currently analyzed
9275   // instruction is a binary operation, try to vectorize the operands, using
9276   // pre-order DFS traversal order. If the operands were not vectorized, repeat
9277   // the same procedure considering each operand as a possible root of the
9278   // horizontal reduction.
9279   // Interrupt the process if the Root instruction itself was vectorized or all
9280   // sub-trees not deeper than RecursionMaxDepth were analyzed/vectorized.
9281   // Skip the analysis of CmpInsts. The compiler implements post-analysis of the
9282   // CmpInsts so we can skip extra attempts in
9283   // tryToVectorizeHorReductionOrInstOperands and save compile time.
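  // For illustration only (hypothetical IR): a typical tree matched below is
  //   %a = fadd fast float %x0, %x1
  //   %b = fadd fast float %a, %x2
  //   %r = fadd fast float %b, %x3
  // with %r as the Root; min/max intrinsics and select-based min/max patterns
  // are handled through the same worklist.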
9284   std::queue<std::pair<Instruction *, unsigned>> Stack;
9285   Stack.emplace(Root, 0);
9286   SmallPtrSet<Value *, 8> VisitedInstrs;
9287   SmallVector<WeakTrackingVH> PostponedInsts;
9288   bool Res = false;
9289   auto &&TryToReduce = [TTI, &P, &R](Instruction *Inst, Value *&B0,
9290                                      Value *&B1) -> Value * {
9291     bool IsBinop = matchRdxBop(Inst, B0, B1);
9292     bool IsSelect = match(Inst, m_Select(m_Value(), m_Value(), m_Value()));
9293     if (IsBinop || IsSelect) {
9294       HorizontalReduction HorRdx;
9295       if (HorRdx.matchAssociativeReduction(P, Inst))
9296         return HorRdx.tryToReduce(R, TTI);
9297     }
9298     return nullptr;
9299   };
9300   while (!Stack.empty()) {
9301     Instruction *Inst;
9302     unsigned Level;
9303     std::tie(Inst, Level) = Stack.front();
9304     Stack.pop();
9305     // Do not try to analyze an instruction that has already been vectorized.
9306     // This may happen when we vectorize instruction operands on a previous
9307     // iteration while the stack was populated before that happened.
9308     if (R.isDeleted(Inst))
9309       continue;
9310     Value *B0 = nullptr, *B1 = nullptr;
9311     if (Value *V = TryToReduce(Inst, B0, B1)) {
9312       Res = true;
9313       // Set P to nullptr to avoid re-analysis of the phi node in
9314       // matchAssociativeReduction function unless this is the root node.
9315       P = nullptr;
9316       if (auto *I = dyn_cast<Instruction>(V)) {
9317         // Try to find another reduction.
9318         Stack.emplace(I, Level);
9319         continue;
9320       }
9321     } else {
9322       bool IsBinop = B0 && B1;
9323       if (P && IsBinop) {
9324         Inst = dyn_cast<Instruction>(B0);
9325         if (Inst == P)
9326           Inst = dyn_cast<Instruction>(B1);
9327         if (!Inst) {
9328           // Set P to nullptr to avoid re-analysis of the phi node in
9329           // matchAssociativeReduction function unless this is the root node.
9330           P = nullptr;
9331           continue;
9332         }
9333       }
9334       // Set P to nullptr to avoid re-analysis of the phi node in
9335       // matchAssociativeReduction function unless this is the root node.
9336       P = nullptr;
9337       // Do not try to vectorize CmpInst operands, this is done separately.
9338       // Final attempt for binop args vectorization should happen after the loop
9339       // to try to find reductions.
9340       if (!isa<CmpInst>(Inst))
9341         PostponedInsts.push_back(Inst);
9342     }
9343
9344     // Try to vectorize operands.
9345     // Continue analysis for the instruction from the same basic block only to
9346     // save compile time.
9347     if (++Level < RecursionMaxDepth)
9348       for (auto *Op : Inst->operand_values())
9349         if (VisitedInstrs.insert(Op).second)
9350           if (auto *I = dyn_cast<Instruction>(Op))
9351             // Do not try to vectorize CmpInst operands, this is done
9352             // separately.
9353             if (!isa<PHINode>(I) && !isa<CmpInst>(I) && !R.isDeleted(I) &&
9354                 I->getParent() == BB)
9355               Stack.emplace(I, Level);
9356   }
9357   // Try to vectorize binops where reductions were not found.
9358   for (Value *V : PostponedInsts)
9359     if (auto *Inst = dyn_cast<Instruction>(V))
9360       if (!R.isDeleted(Inst))
9361         Res |= Vectorize(Inst, R);
9362   return Res;
9363 }
9364
9365 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
9366                                                  BasicBlock *BB, BoUpSLP &R,
9367                                                  TargetTransformInfo *TTI) {
9368   auto *I = dyn_cast_or_null<Instruction>(V);
9369   if (!I)
9370     return false;
9371
9372   if (!isa<BinaryOperator>(I))
9373     P = nullptr;
9374   // Try to match and vectorize a horizontal reduction.
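  // If no reduction is matched, the callback below falls back to plain bundle
  // vectorization (tryToVectorize) of the postponed instructions.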
9375   auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool {
9376     return tryToVectorize(I, R);
9377   };
9378   return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI,
9379                                                   ExtraVectorization);
9380 }
9381
9382 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
9383                                                  BasicBlock *BB, BoUpSLP &R) {
9384   const DataLayout &DL = BB->getModule()->getDataLayout();
9385   if (!R.canMapToVector(IVI->getType(), DL))
9386     return false;
9387
9388   SmallVector<Value *, 16> BuildVectorOpds;
9389   SmallVector<Value *, 16> BuildVectorInsts;
9390   if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts))
9391     return false;
9392
9393   LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
9394   // The aggregate value is unlikely to be processed in a vector register; we
9395   // need to extract the scalars into scalar registers, so vectorize the operands.
9396   return tryToVectorizeList(BuildVectorOpds, R);
9397 }
9398
9399 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
9400                                                    BasicBlock *BB, BoUpSLP &R) {
9401   SmallVector<Value *, 16> BuildVectorInsts;
9402   SmallVector<Value *, 16> BuildVectorOpds;
9403   SmallVector<int> Mask;
9404   if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) ||
9405       (llvm::all_of(
9406            BuildVectorOpds,
9407            [](Value *V) { return isa<ExtractElementInst, UndefValue>(V); }) &&
9408        isFixedVectorShuffle(BuildVectorOpds, Mask)))
9409     return false;
9410
9411   LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
9412   return tryToVectorizeList(BuildVectorInsts, R);
9413 }
9414
9415 template <typename T>
9416 static bool
9417 tryToVectorizeSequence(SmallVectorImpl<T *> &Incoming,
9418                        function_ref<unsigned(T *)> Limit,
9419                        function_ref<bool(T *, T *)> Comparator,
9420                        function_ref<bool(T *, T *)> AreCompatible,
9421                        function_ref<bool(ArrayRef<T *>, bool)> TryToVectorize,
9422                        bool LimitForRegisterSize) {
9423   bool Changed = false;
9424   // Sort by type, parent, operands.
9425   stable_sort(Incoming, Comparator);
9426
9427   // Try to vectorize elements based on their type.
9428   SmallVector<T *> Candidates;
9429   for (auto *IncIt = Incoming.begin(), *E = Incoming.end(); IncIt != E;) {
9430     // Look for the next elements with the same type, parent and operand
9431     // kinds.
9432     auto *SameTypeIt = IncIt;
9433     while (SameTypeIt != E && AreCompatible(*SameTypeIt, *IncIt))
9434       ++SameTypeIt;
9435
9436     // Try to vectorize them.
9437     unsigned NumElts = (SameTypeIt - IncIt);
9438     LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at nodes ("
9439                       << NumElts << ")\n");
9440     // The vectorization is a 3-step attempt:
9441     // 1. Try to vectorize instructions with the same/alternate opcodes, limited
9442     // to the size of the maximal register, at first.
9443     // 2. Try to vectorize the remaining instructions with the same type, if
9444     // possible. This may give better results than vectorizing only instructions
9445     // with the same/alternate opcodes.
9446     // 3. As a final attempt, try to vectorize all instructions with the
9447     // same/alternate ops only; this may yield some extra final
9448     // vectorization.
9449     if (NumElts > 1 &&
9450         TryToVectorize(makeArrayRef(IncIt, NumElts), LimitForRegisterSize)) {
9451       // Success, start over because instructions might have been changed.
9452       Changed = true;
9453     } else if (NumElts < Limit(*IncIt) &&
9454                (Candidates.empty() ||
9455                 Candidates.front()->getType() == (*IncIt)->getType())) {
9456       Candidates.append(IncIt, std::next(IncIt, NumElts));
9457     }
9458     // Final attempt to vectorize instructions with the same types.
9459     if (Candidates.size() > 1 &&
9460         (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType())) {
9461       if (TryToVectorize(Candidates, /*LimitForRegisterSize=*/false)) {
9462         // Success, start over because instructions might have been changed.
9463         Changed = true;
9464       } else if (LimitForRegisterSize) {
9465         // Try to vectorize using small vectors.
9466         for (auto *It = Candidates.begin(), *End = Candidates.end();
9467              It != End;) {
9468           auto *SameTypeIt = It;
9469           while (SameTypeIt != End && AreCompatible(*SameTypeIt, *It))
9470             ++SameTypeIt;
9471           unsigned NumElts = (SameTypeIt - It);
9472           if (NumElts > 1 && TryToVectorize(makeArrayRef(It, NumElts),
9473                                             /*LimitForRegisterSize=*/false))
9474             Changed = true;
9475           It = SameTypeIt;
9476         }
9477       }
9478       Candidates.clear();
9479     }
9480
9481     // Start over at the next instruction of a different type (or the end).
9482     IncIt = SameTypeIt;
9483   }
9484   return Changed;
9485 }
9486
9487 /// Compare two cmp instructions. If IsCompatibility is true, the function
9488 /// returns true if the two cmps have the same/swapped predicates and compatible
9489 /// corresponding operands. If IsCompatibility is false, the function implements
9490 /// a strict weak ordering relation between the two cmp instructions, returning
9491 /// true if the first instruction is "less" than the second, i.e. its predicate
9492 /// is less than the predicate of the second or the operand IDs are less than
9493 /// the operand IDs of the second cmp instruction.
9494 template <bool IsCompatibility>
9495 static bool compareCmp(Value *V, Value *V2,
9496                        function_ref<bool(Instruction *)> IsDeleted) {
9497   auto *CI1 = cast<CmpInst>(V);
9498   auto *CI2 = cast<CmpInst>(V2);
9499   if (IsDeleted(CI2) || !isValidElementType(CI2->getType()))
9500     return false;
9501   if (CI1->getOperand(0)->getType()->getTypeID() <
9502       CI2->getOperand(0)->getType()->getTypeID())
9503     return !IsCompatibility;
9504   if (CI1->getOperand(0)->getType()->getTypeID() >
9505       CI2->getOperand(0)->getType()->getTypeID())
9506     return false;
9507   CmpInst::Predicate Pred1 = CI1->getPredicate();
9508   CmpInst::Predicate Pred2 = CI2->getPredicate();
9509   CmpInst::Predicate SwapPred1 = CmpInst::getSwappedPredicate(Pred1);
9510   CmpInst::Predicate SwapPred2 = CmpInst::getSwappedPredicate(Pred2);
9511   CmpInst::Predicate BasePred1 = std::min(Pred1, SwapPred1);
9512   CmpInst::Predicate BasePred2 = std::min(Pred2, SwapPred2);
9513   if (BasePred1 < BasePred2)
9514     return !IsCompatibility;
9515   if (BasePred1 > BasePred2)
9516     return false;
9517   // Compare operands.
9518   bool LEPreds = Pred1 <= Pred2;
9519   bool GEPreds = Pred1 >= Pred2;
9520   for (int I = 0, E = CI1->getNumOperands(); I < E; ++I) {
9521     auto *Op1 = CI1->getOperand(LEPreds ? I : E - I - 1);
9522     auto *Op2 = CI2->getOperand(GEPreds ?
I : E - I - 1); 9523 if (Op1->getValueID() < Op2->getValueID()) 9524 return !IsCompatibility; 9525 if (Op1->getValueID() > Op2->getValueID()) 9526 return false; 9527 if (auto *I1 = dyn_cast<Instruction>(Op1)) 9528 if (auto *I2 = dyn_cast<Instruction>(Op2)) { 9529 if (I1->getParent() != I2->getParent()) 9530 return false; 9531 InstructionsState S = getSameOpcode({I1, I2}); 9532 if (S.getOpcode()) 9533 continue; 9534 return false; 9535 } 9536 } 9537 return IsCompatibility; 9538 } 9539 9540 bool SLPVectorizerPass::vectorizeSimpleInstructions( 9541 SmallVectorImpl<Instruction *> &Instructions, BasicBlock *BB, BoUpSLP &R, 9542 bool AtTerminator) { 9543 bool OpsChanged = false; 9544 SmallVector<Instruction *, 4> PostponedCmps; 9545 for (auto *I : reverse(Instructions)) { 9546 if (R.isDeleted(I)) 9547 continue; 9548 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) 9549 OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R); 9550 else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) 9551 OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R); 9552 else if (isa<CmpInst>(I)) 9553 PostponedCmps.push_back(I); 9554 } 9555 if (AtTerminator) { 9556 // Try to find reductions first. 9557 for (Instruction *I : PostponedCmps) { 9558 if (R.isDeleted(I)) 9559 continue; 9560 for (Value *Op : I->operands()) 9561 OpsChanged |= vectorizeRootInstruction(nullptr, Op, BB, R, TTI); 9562 } 9563 // Try to vectorize operands as vector bundles. 9564 for (Instruction *I : PostponedCmps) { 9565 if (R.isDeleted(I)) 9566 continue; 9567 OpsChanged |= tryToVectorize(I, R); 9568 } 9569 // Try to vectorize list of compares. 9570 // Sort by type, compare predicate, etc. 9571 auto &&CompareSorter = [&R](Value *V, Value *V2) { 9572 return compareCmp<false>(V, V2, 9573 [&R](Instruction *I) { return R.isDeleted(I); }); 9574 }; 9575 9576 auto &&AreCompatibleCompares = [&R](Value *V1, Value *V2) { 9577 if (V1 == V2) 9578 return true; 9579 return compareCmp<true>(V1, V2, 9580 [&R](Instruction *I) { return R.isDeleted(I); }); 9581 }; 9582 auto Limit = [&R](Value *V) { 9583 unsigned EltSize = R.getVectorElementSize(V); 9584 return std::max(2U, R.getMaxVecRegSize() / EltSize); 9585 }; 9586 9587 SmallVector<Value *> Vals(PostponedCmps.begin(), PostponedCmps.end()); 9588 OpsChanged |= tryToVectorizeSequence<Value>( 9589 Vals, Limit, CompareSorter, AreCompatibleCompares, 9590 [this, &R](ArrayRef<Value *> Candidates, bool LimitForRegisterSize) { 9591 // Exclude possible reductions from other blocks. 9592 bool ArePossiblyReducedInOtherBlock = 9593 any_of(Candidates, [](Value *V) { 9594 return any_of(V->users(), [V](User *U) { 9595 return isa<SelectInst>(U) && 9596 cast<SelectInst>(U)->getParent() != 9597 cast<Instruction>(V)->getParent(); 9598 }); 9599 }); 9600 if (ArePossiblyReducedInOtherBlock) 9601 return false; 9602 return tryToVectorizeList(Candidates, R, LimitForRegisterSize); 9603 }, 9604 /*LimitForRegisterSize=*/true); 9605 Instructions.clear(); 9606 } else { 9607 // Insert in reverse order since the PostponedCmps vector was filled in 9608 // reverse order. 9609 Instructions.assign(PostponedCmps.rbegin(), PostponedCmps.rend()); 9610 } 9611 return OpsChanged; 9612 } 9613 9614 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { 9615 bool Changed = false; 9616 SmallVector<Value *, 4> Incoming; 9617 SmallPtrSet<Value *, 16> VisitedInstrs; 9618 // Maps phi nodes to the non-phi nodes found in the use tree for each phi 9619 // node. 
This helps to better identify the chains that can be
9620 // vectorized.
9621   DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes;
9622   auto PHICompare = [this, &PHIToOpcodes](Value *V1, Value *V2) {
9623     assert(isValidElementType(V1->getType()) &&
9624            isValidElementType(V2->getType()) &&
9625            "Expected vectorizable types only.");
9626     // It is fine to compare type IDs here, since we expect only vectorizable
9627     // types, like ints, floats and pointers; we don't care about other types.
9628     if (V1->getType()->getTypeID() < V2->getType()->getTypeID())
9629       return true;
9630     if (V1->getType()->getTypeID() > V2->getType()->getTypeID())
9631       return false;
9632     ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
9633     ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
9634     if (Opcodes1.size() < Opcodes2.size())
9635       return true;
9636     if (Opcodes1.size() > Opcodes2.size())
9637       return false;
9638     for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
9639       // Undefs are compatible with any other value.
9640       if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I]))
9641         continue;
9642       if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
9643         if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
9644           DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent());
9645           DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent());
9646           if (!NodeI1)
9647             return NodeI2 != nullptr;
9648           if (!NodeI2)
9649             return false;
9650           assert((NodeI1 == NodeI2) ==
9651                      (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
9652                  "Different nodes should have different DFS numbers");
9653           if (NodeI1 != NodeI2)
9654             return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
9655           InstructionsState S = getSameOpcode({I1, I2});
9656           if (S.getOpcode())
9657             continue;
9658           return I1->getOpcode() < I2->getOpcode();
9659         }
9660       if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I]))
9661         continue;
9662       if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID())
9663         return true;
9664       if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID())
9665         return false;
9666     }
9667     return false;
9668   };
9669   auto AreCompatiblePHIs = [&PHIToOpcodes](Value *V1, Value *V2) {
9670     if (V1 == V2)
9671       return true;
9672     if (V1->getType() != V2->getType())
9673       return false;
9674     ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
9675     ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
9676     if (Opcodes1.size() != Opcodes2.size())
9677       return false;
9678     for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
9679       // Undefs are compatible with any other value.
9680       if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I]))
9681         continue;
9682       if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
9683         if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
9684           if (I1->getParent() != I2->getParent())
9685             return false;
9686           InstructionsState S = getSameOpcode({I1, I2});
9687           if (S.getOpcode())
9688             continue;
9689           return false;
9690         }
9691       if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I]))
9692         continue;
9693       if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID())
9694         return false;
9695     }
9696     return true;
9697   };
9698   auto Limit = [&R](Value *V) {
9699     unsigned EltSize = R.getVectorElementSize(V);
9700     return std::max(2U, R.getMaxVecRegSize() / EltSize);
9701   };
9702
9703   bool HaveVectorizedPhiNodes = false;
9704   do {
9705     // Collect the incoming values from the PHIs.
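    // Only PHIs that have not been visited, are not deleted, and have a
    // vectorizable type are collected; the loop repeats until no new PHI
    // bundles are vectorized.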
9706     Incoming.clear();
9707     for (Instruction &I : *BB) {
9708       PHINode *P = dyn_cast<PHINode>(&I);
9709       if (!P)
9710         break;
9711
9712       // No need to analyze deleted, vectorized and non-vectorizable
9713       // instructions.
9714       if (!VisitedInstrs.count(P) && !R.isDeleted(P) &&
9715           isValidElementType(P->getType()))
9716         Incoming.push_back(P);
9717     }
9718
9719     // Find the corresponding non-phi nodes for better matching when trying to
9720     // build the tree.
9721     for (Value *V : Incoming) {
9722       SmallVectorImpl<Value *> &Opcodes =
9723           PHIToOpcodes.try_emplace(V).first->getSecond();
9724       if (!Opcodes.empty())
9725         continue;
9726       SmallVector<Value *, 4> Nodes(1, V);
9727       SmallPtrSet<Value *, 4> Visited;
9728       while (!Nodes.empty()) {
9729         auto *PHI = cast<PHINode>(Nodes.pop_back_val());
9730         if (!Visited.insert(PHI).second)
9731           continue;
9732         for (Value *V : PHI->incoming_values()) {
9733           if (auto *PHI1 = dyn_cast<PHINode>((V))) {
9734             Nodes.push_back(PHI1);
9735             continue;
9736           }
9737           Opcodes.emplace_back(V);
9738         }
9739       }
9740     }
9741
9742     HaveVectorizedPhiNodes = tryToVectorizeSequence<Value>(
9743         Incoming, Limit, PHICompare, AreCompatiblePHIs,
9744         [this, &R](ArrayRef<Value *> Candidates, bool LimitForRegisterSize) {
9745           return tryToVectorizeList(Candidates, R, LimitForRegisterSize);
9746         },
9747         /*LimitForRegisterSize=*/true);
9748     Changed |= HaveVectorizedPhiNodes;
9749     VisitedInstrs.insert(Incoming.begin(), Incoming.end());
9750   } while (HaveVectorizedPhiNodes);
9751
9752   VisitedInstrs.clear();
9753
9754   SmallVector<Instruction *, 8> PostProcessInstructions;
9755   SmallDenseSet<Instruction *, 4> KeyNodes;
9756   for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
9757     // Skip instructions with scalable types. The number of elements is unknown
9758     // at compile time for scalable types.
9759     if (isa<ScalableVectorType>(it->getType()))
9760       continue;
9761
9762     // Skip instructions marked for deletion.
9763     if (R.isDeleted(&*it))
9764       continue;
9765     // We may go through BB multiple times, so skip the ones we have already checked.
9766     if (!VisitedInstrs.insert(&*it).second) {
9767       if (it->use_empty() && KeyNodes.contains(&*it) &&
9768           vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
9769                                       it->isTerminator())) {
9770         // We would like to start over since some instructions are deleted
9771         // and the iterator may be invalidated.
9772         Changed = true;
9773         it = BB->begin();
9774         e = BB->end();
9775       }
9776       continue;
9777     }
9778
9779     if (isa<DbgInfoIntrinsic>(it))
9780       continue;
9781
9782     // Try to vectorize reductions that use PHINodes.
9783     if (PHINode *P = dyn_cast<PHINode>(it)) {
9784       // Check that the PHI is a reduction PHI.
9785       if (P->getNumIncomingValues() == 2) {
9786         // Try to match and vectorize a horizontal reduction.
9787         if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
9788                                      TTI)) {
9789           Changed = true;
9790           it = BB->begin();
9791           e = BB->end();
9792           continue;
9793         }
9794       }
9795       // Try to vectorize the incoming values of the PHI, to catch reductions
9796       // that feed into PHIs.
9797       for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) {
9798         // Skip if the incoming block is the current BB for now. Also, bypass
9799         // unreachable IR for efficiency and to avoid crashing.
9800         // TODO: Collect the skipped incoming values and try to vectorize them
9801         // after processing BB.
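        // For each remaining incoming value, the reduction matching is
        // retried in the incoming block rather than in BB.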
9802         if (BB == P->getIncomingBlock(I) ||
9803             !DT->isReachableFromEntry(P->getIncomingBlock(I)))
9804           continue;
9805
9806         Changed |= vectorizeRootInstruction(nullptr, P->getIncomingValue(I),
9807                                             P->getIncomingBlock(I), R, TTI);
9808       }
9809       continue;
9810     }
9811
9812     // We ran into an instruction without users, such as a terminator, a store,
9813     // or a function call with an ignored return value. Ignore unused
9814     // instructions (based on the instruction type, except for CallInst and InvokeInst).
9815     if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
9816                             isa<InvokeInst>(it))) {
9817       KeyNodes.insert(&*it);
9818       bool OpsChanged = false;
9819       if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
9820         for (auto *V : it->operand_values()) {
9821           // Try to match and vectorize a horizontal reduction.
9822           OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
9823         }
9824       }
9825       // Start vectorization of the post-process list of instructions from the
9826       // top-tree instructions to try to vectorize as many instructions as
9827       // possible.
9828       OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
9829                                                 it->isTerminator());
9830       if (OpsChanged) {
9831         // We would like to start over since some instructions are deleted
9832         // and the iterator may be invalidated.
9833         Changed = true;
9834         it = BB->begin();
9835         e = BB->end();
9836         continue;
9837       }
9838     }
9839
9840     if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
9841         isa<InsertValueInst>(it))
9842       PostProcessInstructions.push_back(&*it);
9843   }
9844
9845   return Changed;
9846 }
9847
9848 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
9849   auto Changed = false;
9850   for (auto &Entry : GEPs) {
9851     // If the getelementptr list has fewer than two elements, there's nothing
9852     // to do.
9853     if (Entry.second.size() < 2)
9854       continue;
9855
9856     LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
9857                       << Entry.second.size() << ".\n");
9858
9859     // Process the GEP list in chunks suitable for the target's supported
9860     // vector size. If a vector register can't hold 1 element, we are done. We
9861     // are trying to vectorize the index computations, so the maximum number of
9862     // elements is based on the size of the index expression, rather than the
9863     // size of the GEP itself (the target's pointer size).
9864     unsigned MaxVecRegSize = R.getMaxVecRegSize();
9865     unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin());
9866     if (MaxVecRegSize < EltSize)
9867       continue;
9868
9869     unsigned MaxElts = MaxVecRegSize / EltSize;
9870     for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
9871       auto Len = std::min<unsigned>(BE - BI, MaxElts);
9872       ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);
9873
9874       // Initialize a set of candidate getelementptrs. Note that we use a
9875       // SetVector here to preserve program order. If the index computations
9876       // are vectorizable and begin with loads, we want to minimize the chance
9877       // of having to reorder them later.
9878       SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());
9879
9880       // Some of the candidates may have already been vectorized after we
9881       // initially collected them. If so, they are marked as deleted, so remove
9882       // them from the set of candidates.
9883 Candidates.remove_if( 9884 [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); }); 9885 9886 // Remove from the set of candidates all pairs of getelementptrs with 9887 // constant differences. Such getelementptrs are likely not good 9888 // candidates for vectorization in a bottom-up phase since one can be 9889 // computed from the other. We also ensure all candidate getelementptr 9890 // indices are unique. 9891 for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) { 9892 auto *GEPI = GEPList[I]; 9893 if (!Candidates.count(GEPI)) 9894 continue; 9895 auto *SCEVI = SE->getSCEV(GEPList[I]); 9896 for (int J = I + 1; J < E && Candidates.size() > 1; ++J) { 9897 auto *GEPJ = GEPList[J]; 9898 auto *SCEVJ = SE->getSCEV(GEPList[J]); 9899 if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) { 9900 Candidates.remove(GEPI); 9901 Candidates.remove(GEPJ); 9902 } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) { 9903 Candidates.remove(GEPJ); 9904 } 9905 } 9906 } 9907 9908 // We break out of the above computation as soon as we know there are 9909 // fewer than two candidates remaining. 9910 if (Candidates.size() < 2) 9911 continue; 9912 9913 // Add the single, non-constant index of each candidate to the bundle. We 9914 // ensured the indices met these constraints when we originally collected 9915 // the getelementptrs. 9916 SmallVector<Value *, 16> Bundle(Candidates.size()); 9917 auto BundleIndex = 0u; 9918 for (auto *V : Candidates) { 9919 auto *GEP = cast<GetElementPtrInst>(V); 9920 auto *GEPIdx = GEP->idx_begin()->get(); 9921 assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx)); 9922 Bundle[BundleIndex++] = GEPIdx; 9923 } 9924 9925 // Try and vectorize the indices. We are currently only interested in 9926 // gather-like cases of the form: 9927 // 9928 // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ... 9929 // 9930 // where the loads of "a", the loads of "b", and the subtractions can be 9931 // performed in parallel. It's likely that detecting this pattern in a 9932 // bottom-up phase will be simpler and less costly than building a 9933 // full-blown top-down phase beginning at the consecutive loads. 9934 Changed |= tryToVectorizeList(Bundle, R); 9935 } 9936 } 9937 return Changed; 9938 } 9939 9940 bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) { 9941 bool Changed = false; 9942 // Sort by type, base pointers and values operand. Value operands must be 9943 // compatible (have the same opcode, same parent), otherwise it is 9944 // definitely not profitable to try to vectorize them. 9945 auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) { 9946 if (V->getPointerOperandType()->getTypeID() < 9947 V2->getPointerOperandType()->getTypeID()) 9948 return true; 9949 if (V->getPointerOperandType()->getTypeID() > 9950 V2->getPointerOperandType()->getTypeID()) 9951 return false; 9952 // UndefValues are compatible with all other values. 
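    // Returning false here makes an undef value operand compare as equal to
    // any other value operand under this strict weak ordering.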
9953     if (isa<UndefValue>(V->getValueOperand()) ||
9954         isa<UndefValue>(V2->getValueOperand()))
9955       return false;
9956     if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand()))
9957       if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
9958         DomTreeNodeBase<llvm::BasicBlock> *NodeI1 =
9959             DT->getNode(I1->getParent());
9960         DomTreeNodeBase<llvm::BasicBlock> *NodeI2 =
9961             DT->getNode(I2->getParent());
9962         assert(NodeI1 && "Should only process reachable instructions");
9963         assert(NodeI2 && "Should only process reachable instructions");
9964         assert((NodeI1 == NodeI2) ==
9965                    (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
9966                "Different nodes should have different DFS numbers");
9967         if (NodeI1 != NodeI2)
9968           return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
9969         InstructionsState S = getSameOpcode({I1, I2});
9970         if (S.getOpcode())
9971           return false;
9972         return I1->getOpcode() < I2->getOpcode();
9973       }
9974     if (isa<Constant>(V->getValueOperand()) &&
9975         isa<Constant>(V2->getValueOperand()))
9976       return false;
9977     return V->getValueOperand()->getValueID() <
9978            V2->getValueOperand()->getValueID();
9979   };
9980
9981   auto &&AreCompatibleStores = [](StoreInst *V1, StoreInst *V2) {
9982     if (V1 == V2)
9983       return true;
9984     if (V1->getPointerOperandType() != V2->getPointerOperandType())
9985       return false;
9986     // Undefs are compatible with any other value.
9987     if (isa<UndefValue>(V1->getValueOperand()) ||
9988         isa<UndefValue>(V2->getValueOperand()))
9989       return true;
9990     if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
9991       if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
9992         if (I1->getParent() != I2->getParent())
9993           return false;
9994         InstructionsState S = getSameOpcode({I1, I2});
9995         return S.getOpcode() > 0;
9996       }
9997     if (isa<Constant>(V1->getValueOperand()) &&
9998         isa<Constant>(V2->getValueOperand()))
9999       return true;
10000     return V1->getValueOperand()->getValueID() ==
10001            V2->getValueOperand()->getValueID();
10002   };
10003   auto Limit = [&R, this](StoreInst *SI) {
10004     unsigned EltSize = DL->getTypeSizeInBits(SI->getValueOperand()->getType());
10005     return R.getMinVF(EltSize);
10006   };
10007
10008   // Attempt to sort and vectorize each of the store-groups.
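  // For illustration only (hypothetical IR): stores collected into the same
  // group, e.g.
  //   store i32 %add0, i32* %p0, align 4
  //   store i32 %add1, i32* %p1, align 4
  // are sorted and grouped by value-operand compatibility before
  // vectorizeStores is tried on each group.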
10009 for (auto &Pair : Stores) { 10010 if (Pair.second.size() < 2) 10011 continue; 10012 10013 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " 10014 << Pair.second.size() << ".\n"); 10015 10016 if (!isValidElementType(Pair.second.front()->getValueOperand()->getType())) 10017 continue; 10018 10019 Changed |= tryToVectorizeSequence<StoreInst>( 10020 Pair.second, Limit, StoreSorter, AreCompatibleStores, 10021 [this, &R](ArrayRef<StoreInst *> Candidates, bool) { 10022 return vectorizeStores(Candidates, R); 10023 }, 10024 /*LimitForRegisterSize=*/false); 10025 } 10026 return Changed; 10027 } 10028 10029 char SLPVectorizer::ID = 0; 10030 10031 static const char lv_name[] = "SLP Vectorizer"; 10032 10033 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false) 10034 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 10035 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 10036 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 10037 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 10038 INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 10039 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 10040 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 10041 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 10042 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false) 10043 10044 Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); } 10045