//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h" 88 #include "llvm/Support/raw_ostream.h" 89 #include "llvm/Transforms/Utils/InjectTLIMappings.h" 90 #include "llvm/Transforms/Utils/LoopUtils.h" 91 #include "llvm/Transforms/Vectorize.h" 92 #include <algorithm> 93 #include <cassert> 94 #include <cstdint> 95 #include <iterator> 96 #include <memory> 97 #include <set> 98 #include <string> 99 #include <tuple> 100 #include <utility> 101 #include <vector> 102 103 using namespace llvm; 104 using namespace llvm::PatternMatch; 105 using namespace slpvectorizer; 106 107 #define SV_NAME "slp-vectorizer" 108 #define DEBUG_TYPE "SLP" 109 110 STATISTIC(NumVectorInstructions, "Number of vector instructions generated"); 111 112 cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden, 113 cl::desc("Run the SLP vectorization passes")); 114 115 static cl::opt<int> 116 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden, 117 cl::desc("Only vectorize if you gain more than this " 118 "number ")); 119 120 static cl::opt<bool> 121 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden, 122 cl::desc("Attempt to vectorize horizontal reductions")); 123 124 static cl::opt<bool> ShouldStartVectorizeHorAtStore( 125 "slp-vectorize-hor-store", cl::init(false), cl::Hidden, 126 cl::desc( 127 "Attempt to vectorize horizontal reductions feeding into a store")); 128 129 static cl::opt<int> 130 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden, 131 cl::desc("Attempt to vectorize for this register size in bits")); 132 133 static cl::opt<unsigned> 134 MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden, 135 cl::desc("Maximum SLP vectorization factor (0=unlimited)")); 136 137 static cl::opt<int> 138 MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden, 139 cl::desc("Maximum depth of the lookup for consecutive stores.")); 140 141 /// Limits the size of scheduling regions in a block. 142 /// It avoid long compile times for _very_ large blocks where vector 143 /// instructions are spread over a wide range. 144 /// This limit is way higher than needed by real-world functions. 145 static cl::opt<int> 146 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden, 147 cl::desc("Limit the size of the SLP scheduling region per block")); 148 149 static cl::opt<int> MinVectorRegSizeOption( 150 "slp-min-reg-size", cl::init(128), cl::Hidden, 151 cl::desc("Attempt to vectorize for this register size in bits")); 152 153 static cl::opt<unsigned> RecursionMaxDepth( 154 "slp-recursion-max-depth", cl::init(12), cl::Hidden, 155 cl::desc("Limit the recursion depth when building a vectorizable tree")); 156 157 static cl::opt<unsigned> MinTreeSize( 158 "slp-min-tree-size", cl::init(3), cl::Hidden, 159 cl::desc("Only vectorize small trees if they are fully vectorizable")); 160 161 // The maximum depth that the look-ahead score heuristic will explore. 162 // The higher this value, the higher the compilation time overhead. 163 static cl::opt<int> LookAheadMaxDepth( 164 "slp-max-look-ahead-depth", cl::init(2), cl::Hidden, 165 cl::desc("The maximum look-ahead depth for operand reordering scores")); 166 167 // The Look-ahead heuristic goes through the users of the bundle to calculate 168 // the users cost in getExternalUsesCost(). To avoid compilation time increase 169 // we limit the number of users visited to this value. 
static cl::opt<unsigned> LookAheadUsersBudget(
    "slp-look-ahead-users-budget", cl::init(2), cl::Hidden,
    cl::desc("The maximum number of users to visit while visiting the "
             "predecessors. This prevents compilation time increase."));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important things to filter here are types which are invalid in
/// LLVM vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns True if the value is a constant (but not globals/constant
/// expressions).
static bool isConstant(Value *V) {
  return isa<Constant>(V) && !isa<ConstantExpr>(V) && !isa<GlobalValue>(V);
}

/// Checks if \p V is one of the vector-like instructions, i.e. undef,
/// insertelement/extractelement with constant indices for fixed vector type or
/// extractvalue instruction.
static bool isVectorLikeInstWithConstOps(Value *V) {
  if (!isa<InsertElementInst, ExtractElementInst>(V) &&
      !isa<ExtractValueInst, UndefValue>(V))
    return false;
  auto *I = dyn_cast<Instruction>(V);
  if (!I || isa<ExtractValueInst>(I))
    return true;
  if (!isa<FixedVectorType>(I->getOperand(0)->getType()))
    return false;
  if (isa<ExtractElementInst>(I))
    return isConstant(I->getOperand(1));
  assert(isa<InsertElementInst>(V) && "Expected only insertelement.");
  return isConstant(I->getOperand(2));
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  if (all_of(VL, isVectorLikeInstWithConstOps))
    return true;

  BasicBlock *BB = I0->getParent();
  for (int I = 1, E = VL.size(); I < E; I++) {
    auto *II = dyn_cast<Instruction>(VL[I]);
    if (!II)
      return false;

    if (BB != II->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants (but not
/// globals/constant expressions).
static bool allConstant(ArrayRef<Value *> VL) {
  // Constant expressions and globals can't be vectorized like normal
  // integer/FP constants.
  return all_of(VL, isConstant);
}

/// \returns True if all of the values in \p VL are identical or some of them
/// are UndefValue.
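/// For illustration: {%a, undef, %a} and {%a, %a} are splats, {%a, %b} is
/// not, and an all-undef list is not treated as a splat since there is no
/// non-undef value to broadcast.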
static bool isSplat(ArrayRef<Value *> VL) {
  Value *FirstNonUndef = nullptr;
  for (Value *V : VL) {
    if (isa<UndefValue>(V))
      continue;
    if (!FirstNonUndef) {
      FirstNonUndef = V;
      continue;
    }
    if (V != FirstNonUndef)
      return false;
  }
  return FirstNonUndef != nullptr;
}

/// \returns True if \p I is commutative; handles CmpInst and BinaryOperator.
static bool isCommutative(Instruction *I) {
  if (auto *Cmp = dyn_cast<CmpInst>(I))
    return Cmp->isCommutative();
  if (auto *BO = dyn_cast<BinaryOperator>(I))
    return BO->isCommutative();
  // TODO: This should check for generic Instruction::isCommutative(), but
  // we need to confirm that the caller code correctly handles Intrinsics
  // for example (does not have 2 operands).
  return false;
}

/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> poison, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> poison, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul.
/// Mask will return the Shuffle Mask equivalent to the extracted elements.
/// TODO: Can we split off and reuse the shuffle mask detection from
/// TargetTransformInfo::getInstructionThroughput?
static Optional<TargetTransformInfo::ShuffleKind>
isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
  auto *EI0 = cast<ExtractElementInst>(VL[0]);
  if (isa<ScalableVectorType>(EI0->getVectorOperandType()))
    return None;
  unsigned Size =
      cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, Select, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    auto *EI = cast<ExtractElementInst>(VL[I]);
    auto *Vec = EI->getVectorOperand();
    // All vector operands must have the same number of vector elements.
    if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
      return None;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size)) {
      Mask.push_back(UndefMaskElem);
      continue;
    }
    unsigned IntIdx = Idx->getValue().getZExtValue();
    Mask.push_back(IntIdx);
    // We can extractelement from undef or poison vector.
    if (isa<UndefValue>(Vec))
      continue;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
    if (!Vec1 || Vec1 == Vec)
      Vec1 = Vec;
    else if (!Vec2 || Vec2 == Vec)
      Vec2 = Vec;
    else
      return None;
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector, otherwise
  // we have a permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
  bool isAltShuffle() const { return getOpcode() != getAltOpcode(); }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace

/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
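/// For example, if the list was matched as {main: add, alt: sub}, an add or
/// sub \p Op is its own key, while an unrelated instruction (say, a mul)
/// falls back to \p OpValue.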
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}

/// \returns true if \p Opcode is allowed as part of the main/alternate
/// instruction for SLP vectorization.
///
/// An example of an unsupported opcode is SDIV, which can potentially cause UB
/// if the "shuffled out" lane would result in division by zero.
static bool isValidForAlternation(unsigned Opcode) {
  if (Instruction::isIntDivRem(Opcode))
    return false;

  return true;
}

/// \returns analysis of the Instructions in \p VL described in
/// InstructionsState, i.e. the opcode with which we suppose the whole list
/// could be vectorized even if its structure is diverse.
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex = 0) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
  // TODO - generalize to support all operators (types, calls etc.).
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
    if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
      if (InstOpcode == Opcode || InstOpcode == AltOpcode)
        continue;
      if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
          isValidForAlternation(Opcode)) {
        AltOpcode = InstOpcode;
        AltIndex = Cnt;
        continue;
      }
    } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
      Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
      Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        if (InstOpcode == Opcode || InstOpcode == AltOpcode)
          continue;
        if (Opcode == AltOpcode) {
          assert(isValidForAlternation(Opcode) &&
                 isValidForAlternation(InstOpcode) &&
                 "Cast isn't safe for alternation, logic needs to be updated!");
          AltOpcode = InstOpcode;
          AltIndex = Cnt;
          continue;
        }
      }
    } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
      continue;
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }

  return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
                           cast<Instruction>(VL[AltIndex]));
}

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns the index of the element extracted by an Extract{Value,Element}
/// instruction, or None if the index is not a single constant.
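/// E.g., this returns 2 for 'extractelement <4 x i32> %v, i32 2' and None for
/// an extractelement with a non-constant index.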
static Optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return None;
    return CI->getZExtValue();
  }
  ExtractValueInst *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return None;
  return *EI->idx_begin();
}

/// \returns True if in-tree use also needs extract. This refers to
/// possible scalar operand in vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
      if (hasVectorInstrinsicScalarOpd(ID, i))
        return (CI->getArgOperand(i) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AAResults *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

/// Shuffles \p Mask in accordance with the given \p SubMask.
static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask) {
  if (SubMask.empty())
    return;
  if (Mask.empty()) {
    Mask.append(SubMask.begin(), SubMask.end());
    return;
  }
  SmallVector<int> NewMask(SubMask.size(), UndefMaskElem);
  int TermValue = std::min(Mask.size(), SubMask.size());
  for (int I = 0, E = SubMask.size(); I < E; ++I) {
    if (SubMask[I] >= TermValue || SubMask[I] == UndefMaskElem ||
        Mask[SubMask[I]] >= TermValue)
      continue;
    NewMask[I] = Mask[SubMask[I]];
  }
  Mask.swap(NewMask);
}

/// Order may have elements assigned a special value (the vector size), which
/// is out of bounds. Such indices only appear in places which correspond to
/// undef values (see canReuseExtract for details) and are used to keep undef
/// values from affecting operand ordering.
/// The loops below first find all the unused indices and then assign those
/// indices to the positions of the undef values.
/// In the example below, Order has two undef positions, which get assigned
/// the values 3 and 7 respectively:
/// before: 6 9 5 4 9 2 1 0
/// after:  6 3 5 4 7 2 1 0
static void fixupOrderingIndices(SmallVectorImpl<unsigned> &Order) {
  const unsigned Sz = Order.size();
  SmallBitVector UsedIndices(Sz);
  SmallVector<int> MaskedIndices;
  for (unsigned I = 0; I < Sz; ++I) {
    if (Order[I] < Sz)
      UsedIndices.set(Order[I]);
    else
      MaskedIndices.push_back(I);
  }
  if (MaskedIndices.empty())
    return;
  SmallVector<int> AvailableIndices(MaskedIndices.size());
  unsigned Cnt = 0;
  int Idx = UsedIndices.find_first_unset();
  do {
    AvailableIndices[Cnt] = Idx;
    Idx = UsedIndices.find_next_unset(Idx);
    ++Cnt;
  } while (Idx > 0);
  assert(Cnt == MaskedIndices.size() && "Non-synced masked/available indices.");
  for (int I = 0, E = MaskedIndices.size(); I < E; ++I)
    Order[MaskedIndices[I]] = AvailableIndices[I];
}

namespace llvm {

static void inversePermutation(ArrayRef<unsigned> Indices,
                               SmallVectorImpl<int> &Mask) {
  Mask.clear();
  const unsigned E = Indices.size();
  Mask.resize(E, UndefMaskElem);
  for (unsigned I = 0; I < E; ++I)
    Mask[Indices[I]] = I;
}

/// \returns inserting index of InsertElement or InsertValue instruction,
/// using Offset as base offset for index.
static Optional<int> getInsertIndex(Value *InsertInst, unsigned Offset) {
  int Index = Offset;
  if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
    if (auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2))) {
      auto *VT = cast<FixedVectorType>(IE->getType());
      if (CI->getValue().uge(VT->getNumElements()))
        return UndefMaskElem;
      Index *= VT->getNumElements();
      Index += CI->getZExtValue();
      return Index;
    }
    if (isa<UndefValue>(IE->getOperand(2)))
      return UndefMaskElem;
    return None;
  }

  auto *IV = cast<InsertValueInst>(InsertInst);
  Type *CurrentType = IV->getType();
  for (unsigned I : IV->indices()) {
    if (auto *ST = dyn_cast<StructType>(CurrentType)) {
      Index *= ST->getNumElements();
      CurrentType = ST->getElementType(I);
    } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
      Index *= AT->getNumElements();
      CurrentType = AT->getElementType();
    } else {
      return None;
    }
    Index += I;
  }
  return Index;
}

/// Reorders the list of scalars in accordance with the given \p Mask: the
/// scalar at position I in the original list ends up at position Mask[I],
/// while positions whose mask element is undef keep a default undef value.
static void reorderScalars(SmallVectorImpl<Value *> &Scalars,
                           ArrayRef<int> Mask) {
  assert(!Mask.empty() && "Expected non-empty mask.");
  SmallVector<Value *> Prev(Scalars.size(),
                            UndefValue::get(Scalars.front()->getType()));
  Prev.swap(Scalars);
  for (unsigned I = 0, E = Prev.size(); I < E; ++I)
    if (Mask[I] != UndefMaskElem)
      Scalars[Mask[I]] = Prev[I];
}

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
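///
/// A simplified sketch of how a caller typically drives this class (the
/// actual drivers in this pass additionally run reordering and
/// minimum-bitwidth analysis, and emit optimization remarks):
/// \code
///   BoUpSLP R(F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE);
///   R.buildTree(VL);                 // Build the vectorizable tree for VL.
///   R.computeMinimumValueSizes();
///   InstructionCost Cost = R.getTreeCost();
///   if (Cost < -SLPCostThreshold)    // Negative cost means profitable.
///     R.vectorizeTree();             // Emit the vector code.
/// \endcode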
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  using OrdersType = SmallVector<unsigned, 4>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC),
        DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize =
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize();

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractvalue instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  InstructionCost getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Builds external uses of the vectorized scalars, i.e. the list of
  /// vectorized scalars to be extracted, their lanes and their scalar users. \p
  /// ExternallyUsedValues contains an additional list of external uses to
  /// handle vectorization of reductions.
  void
  buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {});

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
    InstrElementSize.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// Checks if the specified gather tree entry \p TE can be represented as a
  /// shuffled vector entry + (possibly) permutation with other gathers. It
  /// implements the checks only for possibly ordered scalars (Loads,
  /// ExtractElement, ExtractValue), which can be part of the graph.
  Optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE);

  /// Reorders the current graph to the most profitable order starting from the
  /// root node to the leaf nodes. The best order is chosen only from the nodes
  /// of the same size (vectorization factor). Smaller nodes are considered
  /// parts of a subgraph with a smaller VF and are reordered independently. We
  /// can do this because we still need to extend smaller nodes to the wider VF
  /// and we can merge reordering shuffles with the widening shuffles.
  void reorderTopToBottom();

  /// Reorders the current graph to the most profitable order starting from
  /// leaves to the root. It allows us to rotate small subgraphs and reduce the
  /// number of reshuffles if the leaf nodes use the same order. In this case we
  /// can merge the orders and just shuffle the user node instead of shuffling
  /// its operands. Plus, even if the leaf nodes have different orders, it
  /// allows us to sink reordering in the graph closer to the root node and
  /// merge it later during analysis.
  void reorderBottomToTop(bool IgnoreReorder = false);

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  unsigned getMinVF(unsigned Sz) const {
    return std::max(2U, getMinVecRegSize() / Sz);
  }

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
    unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
      MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
    return MaxVF ? MaxVF : UINT_MAX;
  }

  /// Check if homogeneous aggregate is isomorphic to some VectorType.
  /// Accepts homogeneous multidimensional aggregate of scalars/vectors like
  /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
  /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable(bool ForReduction = false) const;

  /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  ///       may not be necessary.
  bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;

  /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  ///       may not be necessary.
  bool isLoadCombineCandidate() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }

  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
  };

  /// A helper data structure to hold the operands of a vector of instructions.
  /// This supports a fixed vector length for all operand vectors.
  class VLOperands {
    /// For each operand we need (i) the value, and (ii) the opcode that it
    /// would be attached to if the expression was in a left-linearized form.
    /// This is required to avoid illegal operand reordering.
    /// For example:
    /// \verbatim
    ///                         0 Op1
    ///                         |/
    /// Op1 Op2   Linearized    + Op2
    ///   \ /     ---------->   |/
    ///    -                    -
    ///
    /// Op1 - Op2            (0 + Op1) - Op2
    /// \endverbatim
    ///
    /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
    ///
    /// Another way to think of this is to track all the operations across the
    /// path from the operand all the way to the root of the tree and to
    /// calculate the operation that corresponds to this path. For example, the
    /// path from Op2 to the root crosses the RHS of the '-', therefore the
    /// corresponding operation is a '-' (which matches the one in the
    /// linearized tree, as shown above).
    ///
    /// For lack of a better term, we refer to this operation as Accumulated
    /// Path Operation (APO).
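    ///
    /// For illustration, take the two lanes {a - b, c - d}: the first
    /// operands (a, c) sit on the '+' side of the linearized form, so their
    /// APO is 'false', while the second operands (b, d) are attached to '-'
    /// and get an APO of 'true' (see appendOperandsOfVL below).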
    struct OperandData {
      OperandData() = default;
      OperandData(Value *V, bool APO, bool IsUsed)
          : V(V), APO(APO), IsUsed(IsUsed) {}
      /// The operand value.
      Value *V = nullptr;
      /// TreeEntries only allow a single opcode, or an alternate sequence of
      /// them (e.g., +, -). Therefore, we can safely use a boolean value for
      /// the APO. It is set to 'true' if 'V' is attached to an inverse
      /// operation in the left-linearized form (e.g., Sub/Div), and 'false'
      /// otherwise (e.g., Add/Mul).
      bool APO = false;
      /// Helper data for the reordering function.
      bool IsUsed = false;
    };

    /// During operand reordering, we are trying to select the operand at the
    /// lane that best matches the operand at the neighboring lane. Our
    /// selection is based on the type of value we are looking for. For example,
    /// if the neighboring lane has a load, we need to look for a load that is
    /// accessing a consecutive address. These strategies are summarized in the
    /// 'ReorderingMode' enumerator.
    enum class ReorderingMode {
      Load,     ///< Matching loads to consecutive memory addresses
      Opcode,   ///< Matching instructions based on opcode (same or alternate)
      Constant, ///< Matching constants
      Splat,    ///< Matching the same instruction multiple times (broadcast)
      Failed,   ///< We failed to create a vectorizable group
    };

    using OperandDataVec = SmallVector<OperandData, 2>;

    /// A vector of operand vectors.
    SmallVector<OperandDataVec, 4> OpsVec;

    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;

    /// \returns the operand data at \p OpIdx and \p Lane.
    OperandData &getData(unsigned OpIdx, unsigned Lane) {
      return OpsVec[OpIdx][Lane];
    }

    /// \returns the operand data at \p OpIdx and \p Lane. Const version.
    const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
      return OpsVec[OpIdx][Lane];
    }

    /// Clears the used flag for all entries.
    void clearUsed() {
      for (unsigned OpIdx = 0, NumOperands = getNumOperands();
           OpIdx != NumOperands; ++OpIdx)
        for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
             ++Lane)
          OpsVec[OpIdx][Lane].IsUsed = false;
    }

    /// Swap the operand at \p OpIdx1 with the one at \p OpIdx2.
    void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
      std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
    }

    // The hard-coded scores listed here are not very important. When computing
    // the scores of matching one sub-tree with another, we are basically
    // counting the number of values that are matching. So even if all scores
    // are set to 1, we would still get a decent matching result.
    // However, sometimes we have to break ties. For example we may have to
    // choose between matching loads vs matching opcodes. This is what these
    // scores are helping us with: they provide the order of preference.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 3;
    /// ExtractElementInst from same vector and consecutive indexes.
    static const int ScoreConsecutiveExtracts = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g., add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// User external to the vectorized code.
    static const int ExternalUseCost = 1;
    /// The user is internal but in a different lane.
    static const int UserInDiffLaneCost = ExternalUseCost;

    /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
    static int getShallowScore(Value *V1, Value *V2, const DataLayout &DL,
                               ScalarEvolution &SE) {
      auto *LI1 = dyn_cast<LoadInst>(V1);
      auto *LI2 = dyn_cast<LoadInst>(V2);
      if (LI1 && LI2) {
        if (LI1->getParent() != LI2->getParent())
          return VLOperands::ScoreFail;

        Optional<int> Dist = getPointersDiff(
            LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
            LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
        return (Dist && *Dist == 1) ? VLOperands::ScoreConsecutiveLoads
                                    : VLOperands::ScoreFail;
      }

      auto *C1 = dyn_cast<Constant>(V1);
      auto *C2 = dyn_cast<Constant>(V2);
      if (C1 && C2)
        return VLOperands::ScoreConstants;

      // Extracts from consecutive indexes of the same vector score better, as
      // the extracts could be optimized away.
      Value *EV;
      ConstantInt *Ex1Idx, *Ex2Idx;
      if (match(V1, m_ExtractElt(m_Value(EV), m_ConstantInt(Ex1Idx))) &&
          match(V2, m_ExtractElt(m_Deferred(EV), m_ConstantInt(Ex2Idx))) &&
          Ex1Idx->getZExtValue() + 1 == Ex2Idx->getZExtValue())
        return VLOperands::ScoreConsecutiveExtracts;

      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (I1 && I2) {
        if (I1 == I2)
          return VLOperands::ScoreSplat;
        InstructionsState S = getSameOpcode({I1, I2});
        // Note: Only consider instructions with <= 2 operands to avoid
        // complexity explosion.
        if (S.getOpcode() && S.MainOp->getNumOperands() <= 2)
          return S.isAltShuffle() ? VLOperands::ScoreAltOpcodes
                                  : VLOperands::ScoreSameOpcode;
      }

      if (isa<UndefValue>(V2))
        return VLOperands::ScoreUndef;

      return VLOperands::ScoreFail;
    }

    /// Holds the values and their lanes that are taking part in the look-ahead
    /// score calculation. This is used in the external uses cost calculation.
    SmallDenseMap<Value *, int> InLookAheadValues;

    /// \Returns the additional cost due to uses of \p LHS and \p RHS that are
    /// either external to the vectorized code, or require shuffling.
    int getExternalUsesCost(const std::pair<Value *, int> &LHS,
                            const std::pair<Value *, int> &RHS) {
      int Cost = 0;
      std::array<std::pair<Value *, int>, 2> Values = {{LHS, RHS}};
      for (int Idx = 0, IdxE = Values.size(); Idx != IdxE; ++Idx) {
        Value *V = Values[Idx].first;
        if (isa<Constant>(V)) {
          // Since this is a function pass, it doesn't make semantic sense to
          // walk the users of a subclass of Constant. The users could be in
          // another function, or even another module that happens to be in
          // the same LLVMContext.
          continue;
        }

        // Calculate the absolute lane, using the minimum relative lane of LHS
        // and RHS as base and Idx as the offset.
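        // E.g., with LHS at lane 3 and RHS at lane 4, Idx 0 yields lane 3
        // (for LHS) and Idx 1 yields lane 4 (for RHS).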
        int Ln = std::min(LHS.second, RHS.second) + Idx;
        assert(Ln >= 0 && "Bad lane calculation");
        unsigned UsersBudget = LookAheadUsersBudget;
        for (User *U : V->users()) {
          if (const TreeEntry *UserTE = R.getTreeEntry(U)) {
            // The user is in the VectorizableTree. Check if we need to insert.
            auto It = llvm::find(UserTE->Scalars, U);
            assert(It != UserTE->Scalars.end() && "U is in UserTE");
            int UserLn = std::distance(UserTE->Scalars.begin(), It);
            assert(UserLn >= 0 && "Bad lane");
            if (UserLn != Ln)
              Cost += UserInDiffLaneCost;
          } else {
            // Check if the user is in the look-ahead code.
            auto It2 = InLookAheadValues.find(U);
            if (It2 != InLookAheadValues.end()) {
              // The user is in the look-ahead code. Check the lane.
              if (It2->second != Ln)
                Cost += UserInDiffLaneCost;
            } else {
              // The user is neither in SLP tree nor in the look-ahead code.
              Cost += ExternalUseCost;
            }
          }
          // Limit the number of visited uses to cap compilation time.
          if (--UsersBudget == 0)
            break;
        }
      }
      return Cost;
    }

    /// Go through the operands of \p LHS and \p RHS recursively until \p
    /// MaxLevel, and return the cumulative score. For example:
    /// \verbatim
    ///  A[0]  B[0]  A[1]  B[1]  C[0] D[0]  B[1] A[1]
    ///     \ /         \ /         \ /        \ /
    ///      +           +           +          +
    ///      G1          G2          G3         G4
    /// \endverbatim
    /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
    /// each level recursively, accumulating the score. It starts from matching
    /// the additions at level 0, then moves on to the loads (level 1). The
    /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
    /// {B[0],B[1]} match with VLOperands::ScoreConsecutiveLoads, while
    /// {A[0],C[0]} has a score of VLOperands::ScoreFail.
    /// Please note that the order of the operands does not matter, as we
    /// evaluate the score of all profitable combinations of operands. In
    /// other words the score of G1 and G4 is the same as G1 and G2. This
    /// heuristic is based on ideas described in:
    ///   Look-ahead SLP: Auto-vectorization in the presence of commutative
    ///   operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
    ///   Luís F. W. Góes
    int getScoreAtLevelRec(const std::pair<Value *, int> &LHS,
                           const std::pair<Value *, int> &RHS, int CurrLevel,
                           int MaxLevel) {

      Value *V1 = LHS.first;
      Value *V2 = RHS.first;
      // Get the shallow score of V1 and V2.
      int ShallowScoreAtThisLevel =
          std::max((int)ScoreFail, getShallowScore(V1, V2, DL, SE) -
                                       getExternalUsesCost(LHS, RHS));
      int Lane1 = LHS.second;
      int Lane2 = RHS.second;

      // If reached MaxLevel,
      //  or if V1 and V2 are not instructions,
      //  or if they are SPLAT,
      //  or if they are not consecutive, early return the current cost.
      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
          ShallowScoreAtThisLevel == VLOperands::ScoreFail ||
          (isa<LoadInst>(I1) && isa<LoadInst>(I2) && ShallowScoreAtThisLevel))
        return ShallowScoreAtThisLevel;
      assert(I1 && I2 && "Should have early exited.");

      // Keep track of in-tree values for determining the external-use cost.
      InLookAheadValues[V1] = Lane1;
      InLookAheadValues[V2] = Lane2;

      // Contains the I2 operand indexes that got matched with I1 operands.
      SmallSet<unsigned, 4> Op2Used;

      // Recursion towards the operands of I1 and I2. We are trying all
      // possible operand pairs, and keeping track of the best score.
      for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
           OpIdx1 != NumOperands1; ++OpIdx1) {
        // Try to pair the operand at OpIdx1 with the best operand of I2.
        int MaxTmpScore = 0;
        unsigned MaxOpIdx2 = 0;
        bool FoundBest = false;
        // If I2 is commutative try all combinations.
        unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
        unsigned ToIdx = isCommutative(I2)
                             ? I2->getNumOperands()
                             : std::min(I2->getNumOperands(), OpIdx1 + 1);
        assert(FromIdx <= ToIdx && "Bad index");
        for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
          // Skip operands already paired with OpIdx1.
          if (Op2Used.count(OpIdx2))
            continue;
          // Recursively calculate the cost at each level.
          int TmpScore = getScoreAtLevelRec({I1->getOperand(OpIdx1), Lane1},
                                            {I2->getOperand(OpIdx2), Lane2},
                                            CurrLevel + 1, MaxLevel);
          // Look for the best score.
          if (TmpScore > VLOperands::ScoreFail && TmpScore > MaxTmpScore) {
            MaxTmpScore = TmpScore;
            MaxOpIdx2 = OpIdx2;
            FoundBest = true;
          }
        }
        if (FoundBest) {
          // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
          Op2Used.insert(MaxOpIdx2);
          ShallowScoreAtThisLevel += MaxTmpScore;
        }
      }
      return ShallowScoreAtThisLevel;
    }

    /// \Returns the look-ahead score, which tells us how much the sub-trees
    /// rooted at \p LHS and \p RHS match: the more they match, the higher the
    /// score. This helps break ties in an informed way when we cannot decide on
    /// the order of the operands by just considering the immediate
    /// predecessors.
    int getLookAheadScore(const std::pair<Value *, int> &LHS,
                          const std::pair<Value *, int> &RHS) {
      InLookAheadValues.clear();
      return getScoreAtLevelRec(LHS, RHS, 1, LookAheadMaxDepth);
    }

    // Search all operands in Ops[*][Lane] for the one that matches best
    // Ops[OpIdx][LastLane] and return its operand index.
    // If no good match can be found, return None.
    Optional<unsigned>
    getBestOperand(unsigned OpIdx, int Lane, int LastLane,
                   ArrayRef<ReorderingMode> ReorderingModes) {
      unsigned NumOperands = getNumOperands();

      // The operand of the previous lane at OpIdx.
      Value *OpLastLane = getData(OpIdx, LastLane).V;

      // Our strategy mode for OpIdx.
      ReorderingMode RMode = ReorderingModes[OpIdx];

      // The linearized opcode of the operand at OpIdx, Lane.
      bool OpIdxAPO = getData(OpIdx, Lane).APO;

      // The best operand index and its score.
      // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
      // are using the score to differentiate between the two.
      struct BestOpData {
        Optional<unsigned> Idx = None;
        unsigned Score = 0;
      } BestOp;

      // Iterate through all unused operands and look for the best.
      for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
        // Get the operand at Idx and Lane.
        OperandData &OpData = getData(Idx, Lane);
        Value *Op = OpData.V;
        bool OpAPO = OpData.APO;

        // Skip already selected operands.
        if (OpData.IsUsed)
          continue;

        // Skip if we are trying to move the operand to a position with a
        // different opcode in the linearized tree form. This would break the
        // semantics.
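        // E.g., within a '-' lane, swapping the LHS (APO 'false') with the
        // RHS (APO 'true') would rewrite a - b into b - a, so such candidates
        // are skipped.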
        if (OpAPO != OpIdxAPO)
          continue;

        // Look for an operand that matches the current mode.
        switch (RMode) {
        case ReorderingMode::Load:
        case ReorderingMode::Constant:
        case ReorderingMode::Opcode: {
          bool LeftToRight = Lane > LastLane;
          Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
          Value *OpRight = (LeftToRight) ? Op : OpLastLane;
          unsigned Score =
              getLookAheadScore({OpLeft, LastLane}, {OpRight, Lane});
          if (Score > BestOp.Score) {
            BestOp.Idx = Idx;
            BestOp.Score = Score;
          }
          break;
        }
        case ReorderingMode::Splat:
          if (Op == OpLastLane)
            BestOp.Idx = Idx;
          break;
        case ReorderingMode::Failed:
          return None;
        }
      }

      if (BestOp.Idx) {
        getData(BestOp.Idx.getValue(), Lane).IsUsed = true;
        return BestOp.Idx;
      }
      // If we could not find a good match return None.
      return None;
    }

    /// Helper for reorder(). \Returns the lane that we should start
    /// reordering from. This is the one which has the fewest operands that
    /// can freely move about.
    unsigned getBestLaneToStartReordering() const {
      unsigned BestLane = 0;
      unsigned Min = UINT_MAX;
      for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
           ++Lane) {
        unsigned NumFreeOps = getMaxNumOperandsThatCanBeReordered(Lane);
        if (NumFreeOps < Min) {
          Min = NumFreeOps;
          BestLane = Lane;
        }
      }
      return BestLane;
    }

    /// \Returns the maximum number of operands that are allowed to be reordered
    /// for \p Lane. This is used as a heuristic for selecting the first lane to
    /// start operand reordering.
    unsigned getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
      unsigned CntTrue = 0;
      unsigned NumOperands = getNumOperands();
      // Operands with the same APO can be reordered. We therefore need to count
      // how many of them we have for each APO, like this: Cnt[APO] = x.
      // Since we only have two APOs, namely true and false, we can avoid using
      // a map. Instead we can simply count the number of operands that
      // correspond to one of them (in this case the 'true' APO), and calculate
      // the other by subtracting it from the total number of operands.
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx)
        if (getData(OpIdx, Lane).APO)
          ++CntTrue;
      unsigned CntFalse = NumOperands - CntTrue;
      return std::max(CntTrue, CntFalse);
    }

    /// Go through the instructions in VL and append their operands.
    void appendOperandsOfVL(ArrayRef<Value *> VL) {
      assert(!VL.empty() && "Bad VL");
      assert((empty() || VL.size() == getNumLanes()) &&
             "Expected same number of lanes");
      assert(isa<Instruction>(VL[0]) && "Expected instruction");
      unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
      OpsVec.resize(NumOperands);
      unsigned NumLanes = VL.size();
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
        OpsVec[OpIdx].resize(NumLanes);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
          // Our tree has just 3 nodes: the root and two operands.
          // It is therefore trivial to get the APO. We only need to check the
          // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
          // RHS operand.
          // The LHS operand of both add and sub is never attached to an
          // inverse operation in the linearized form, therefore its APO is
          // false. The RHS is true only if VL[Lane] is an inverse operation.

          // Since operand reordering is performed on groups of commutative
          // operations or alternating sequences (e.g., +, -), we can safely
          // tell the inverse operations by checking commutativity.
          bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
          bool APO = (OpIdx == 0) ? false : IsInverseOperation;
          OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
                                 APO, false};
        }
      }
    }

    /// \returns the number of operands.
    unsigned getNumOperands() const { return OpsVec.size(); }

    /// \returns the number of lanes.
    unsigned getNumLanes() const { return OpsVec[0].size(); }

    /// \returns the operand value at \p OpIdx and \p Lane.
    Value *getValue(unsigned OpIdx, unsigned Lane) const {
      return getData(OpIdx, Lane).V;
    }

    /// \returns true if the data structure is empty.
    bool empty() const { return OpsVec.empty(); }

    /// Clears the data.
    void clear() { OpsVec.clear(); }

    /// \Returns true if there are enough operands identical to \p Op to fill
    /// the whole vector.
    /// Note: This modifies the 'IsUsed' flag, so a clearUsed() must follow.
    bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
      bool OpAPO = getData(OpIdx, Lane).APO;
      for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
        if (Ln == Lane)
          continue;
        // This is set to true if we found a candidate for broadcast at Lane.
        bool FoundCandidate = false;
        for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
          OperandData &Data = getData(OpI, Ln);
          if (Data.APO != OpAPO || Data.IsUsed)
            continue;
          if (Data.V == Op) {
            FoundCandidate = true;
            Data.IsUsed = true;
            break;
          }
        }
        if (!FoundCandidate)
          return false;
      }
      return true;
    }

  public:
    /// Initialize with all the operands of the instruction vector \p RootVL.
    VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
               ScalarEvolution &SE, const BoUpSLP &R)
        : DL(DL), SE(SE), R(R) {
      // Append all the operands of RootVL.
      appendOperandsOfVL(RootVL);
    }

    /// \Returns a value vector with the operands across all lanes for the
    /// operand at \p OpIdx.
    ValueList getVL(unsigned OpIdx) const {
      ValueList OpVL(OpsVec[OpIdx].size());
      assert(OpsVec[OpIdx].size() == getNumLanes() &&
             "Expected same num of lanes across all operands");
      for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
        OpVL[Lane] = OpsVec[OpIdx][Lane].V;
      return OpVL;
    }

    // Performs operand reordering for 2 or more operands.
    // The operands are reordered in place, within OpsVec[OpIdx][Lane].
    void reorder() {
      unsigned NumOperands = getNumOperands();
      unsigned NumLanes = getNumLanes();
      // Each operand has its own mode. We are using this mode to help us select
      // the instructions for each lane, so that they match best with the ones
      // we have selected so far.
      SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);

      // This is a greedy single-pass algorithm.
We are going over each lane 1432 // once and deciding on the best order right away with no back-tracking. 1433 // However, in order to increase its effectiveness, we start with the lane 1434 // that has operands that can move the least. For example, given the 1435 // following lanes: 1436 // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd 1437 // Lane 1 : A[1] = C[1] - B[1] // Visited 1st 1438 // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd 1439 // Lane 3 : A[3] = C[3] - B[3] // Visited 4th 1440 // we will start at Lane 1, since the operands of the subtraction cannot 1441 // be reordered. Then we will visit the rest of the lanes in a circular 1442 // fashion. That is, Lane 2, then Lane 0, and finally Lane 3. 1443 1444 // Find the first lane that we will start our search from. 1445 unsigned FirstLane = getBestLaneToStartReordering(); 1446 1447 // Initialize the modes. 1448 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1449 Value *OpLane0 = getValue(OpIdx, FirstLane); 1450 // Keep track if we have instructions with all the same opcode on one 1451 // side. 1452 if (isa<LoadInst>(OpLane0)) 1453 ReorderingModes[OpIdx] = ReorderingMode::Load; 1454 else if (isa<Instruction>(OpLane0)) { 1455 // Check if OpLane0 should be broadcast. 1456 if (shouldBroadcast(OpLane0, OpIdx, FirstLane)) 1457 ReorderingModes[OpIdx] = ReorderingMode::Splat; 1458 else 1459 ReorderingModes[OpIdx] = ReorderingMode::Opcode; 1460 } 1461 else if (isa<Constant>(OpLane0)) 1462 ReorderingModes[OpIdx] = ReorderingMode::Constant; 1463 else if (isa<Argument>(OpLane0)) 1464 // Our best hope is a Splat. It may save some cost in some cases. 1465 ReorderingModes[OpIdx] = ReorderingMode::Splat; 1466 else 1467 // NOTE: This should be unreachable. 1468 ReorderingModes[OpIdx] = ReorderingMode::Failed; 1469 } 1470 1471 // If the initial strategy fails for any of the operand indexes, then we 1472 // perform reordering again in a second pass. This helps avoid assigning 1473 // high priority to the failed strategy, and should improve reordering for 1474 // the non-failed operand indexes. 1475 for (int Pass = 0; Pass != 2; ++Pass) { 1476 // Skip the second pass if the first pass did not fail. 1477 bool StrategyFailed = false; 1478 // Mark all operand data as free to use. 1479 clearUsed(); 1480 // We keep the original operand order for the FirstLane, so reorder the 1481 // rest of the lanes. We are visiting the nodes in a circular fashion, 1482 // using FirstLane as the center point and increasing the radius 1483 // distance. 1484 for (unsigned Distance = 1; Distance != NumLanes; ++Distance) { 1485 // Visit the lane on the right and then the lane on the left. 1486 for (int Direction : {+1, -1}) { 1487 int Lane = FirstLane + Direction * Distance; 1488 if (Lane < 0 || Lane >= (int)NumLanes) 1489 continue; 1490 int LastLane = Lane - Direction; 1491 assert(LastLane >= 0 && LastLane < (int)NumLanes && 1492 "Out of bounds"); 1493 // Look for a good match for each operand. 1494 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1495 // Search for the operand that best matches the one selected for LastLane. 1496 Optional<unsigned> BestIdx = 1497 getBestOperand(OpIdx, Lane, LastLane, ReorderingModes); 1498 // By not selecting a value, we allow the operands that follow to 1499 // select a better matching value. We will get a non-None value in 1500 // a later run of getBestOperand(). 1501 if (BestIdx) { 1502 // Swap the current operand with the one returned by 1503 // getBestOperand().
1504 swap(OpIdx, BestIdx.getValue(), Lane); 1505 } else { 1506 // We failed to find a best operand, set mode to 'Failed'. 1507 ReorderingModes[OpIdx] = ReorderingMode::Failed; 1508 // Enable the second pass. 1509 StrategyFailed = true; 1510 } 1511 } 1512 } 1513 } 1514 // Skip second pass if the strategy did not fail. 1515 if (!StrategyFailed) 1516 break; 1517 } 1518 } 1519 1520 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1521 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) { 1522 switch (RMode) { 1523 case ReorderingMode::Load: 1524 return "Load"; 1525 case ReorderingMode::Opcode: 1526 return "Opcode"; 1527 case ReorderingMode::Constant: 1528 return "Constant"; 1529 case ReorderingMode::Splat: 1530 return "Splat"; 1531 case ReorderingMode::Failed: 1532 return "Failed"; 1533 } 1534 llvm_unreachable("Unimplemented Reordering Type"); 1535 } 1536 1537 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode, 1538 raw_ostream &OS) { 1539 return OS << getModeStr(RMode); 1540 } 1541 1542 /// Debug print. 1543 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) { 1544 printMode(RMode, dbgs()); 1545 } 1546 1547 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) { 1548 return printMode(RMode, OS); 1549 } 1550 1551 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const { 1552 const unsigned Indent = 2; 1553 unsigned Cnt = 0; 1554 for (const OperandDataVec &OpDataVec : OpsVec) { 1555 OS << "Operand " << Cnt++ << "\n"; 1556 for (const OperandData &OpData : OpDataVec) { 1557 OS.indent(Indent) << "{"; 1558 if (Value *V = OpData.V) 1559 OS << *V; 1560 else 1561 OS << "null"; 1562 OS << ", APO:" << OpData.APO << "}\n"; 1563 } 1564 OS << "\n"; 1565 } 1566 return OS; 1567 } 1568 1569 /// Debug print. 1570 LLVM_DUMP_METHOD void dump() const { print(dbgs()); } 1571 #endif 1572 }; 1573 1574 /// Checks if the instruction is marked for deletion. 1575 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); } 1576 1577 /// Marks the given values for later deletion, replacing their uses with Undefs. 1578 void eraseInstructions(ArrayRef<Value *> AV); 1579 1580 ~BoUpSLP(); 1581 1582 private: 1583 /// Checks if all users of \p I are part of the vectorization tree. 1584 bool areAllUsersVectorized(Instruction *I, 1585 ArrayRef<Value *> VectorizedVals) const; 1586 1587 /// \returns the cost of the vectorizable entry. 1588 InstructionCost getEntryCost(const TreeEntry *E, 1589 ArrayRef<Value *> VectorizedVals); 1590 1591 /// This is the recursive part of buildTree. 1592 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, 1593 const EdgeInfo &EI); 1594 1595 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can 1596 /// be vectorized to use the original vector (or aggregate "bitcast" to a 1597 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise 1598 /// returns false, setting \p CurrentOrder to either an empty vector or a 1599 /// non-identity permutation that allows extract instructions to be reused. 1600 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 1601 SmallVectorImpl<unsigned> &CurrentOrder) const; 1602 1603 /// Vectorize a single entry in the tree. 1604 Value *vectorizeTree(TreeEntry *E); 1605 1606 /// Vectorize a single entry in the tree, starting in \p VL. 1607 Value *vectorizeTree(ArrayRef<Value *> VL); 1608 1609 /// \returns the scalarization cost for this type. Scalarization in this 1610 /// context means the creation of vectors from a group of scalars.
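  /// A sketch of the idea (illustrative only, not the exact TTI query):
  /// gathering four scalars %a..%d into a <4 x i32> conceptually builds
  ///   %v0 = insertelement <4 x i32> undef, i32 %a, i32 0
  ///   %v1 = insertelement <4 x i32> %v0, i32 %b, i32 1
  ///   ...
  /// and the returned cost roughly sums the insertion costs of all element
  /// indices not present in \p ShuffledIndices (those elements are assumed
  /// to be produced by a shuffle instead).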
1611 InstructionCost 1612 getGatherCost(FixedVectorType *Ty, 1613 const DenseSet<unsigned> &ShuffledIndices) const; 1614 1615 /// Checks if the gathered \p VL can be represented as shuffle(s) of previous 1616 /// tree entries. 1617 /// \returns ShuffleKind, if gathered values can be represented as shuffles of 1618 /// previous tree entries. \p Mask is filled with the shuffle mask. 1619 Optional<TargetTransformInfo::ShuffleKind> 1620 isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask, 1621 SmallVectorImpl<const TreeEntry *> &Entries); 1622 1623 /// \returns the scalarization cost for this list of values. Assuming that 1624 /// this subtree gets vectorized, we may need to extract the values from the 1625 /// roots. This method calculates the cost of extracting the values. 1626 InstructionCost getGatherCost(ArrayRef<Value *> VL) const; 1627 1628 /// Set the Builder insert point to one after the last instruction in 1629 /// the bundle. 1630 void setInsertPointAfterBundle(const TreeEntry *E); 1631 1632 /// \returns a vector from a collection of scalars in \p VL. 1633 Value *gather(ArrayRef<Value *> VL); 1634 1635 /// \returns whether the VectorizableTree is fully vectorizable and will 1636 /// be beneficial even if the tree height is tiny. 1637 bool isFullyVectorizableTinyTree(bool ForReduction) const; 1638 1639 /// Reorder commutative or alt operands to get a better probability of 1640 /// generating vectorized code. 1641 static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, 1642 SmallVectorImpl<Value *> &Left, 1643 SmallVectorImpl<Value *> &Right, 1644 const DataLayout &DL, 1645 ScalarEvolution &SE, 1646 const BoUpSLP &R); 1647 struct TreeEntry { 1648 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>; 1649 TreeEntry(VecTreeTy &Container) : Container(Container) {} 1650 1651 /// \returns true if the scalars in VL are equal to this entry. 1652 bool isSame(ArrayRef<Value *> VL) const { 1653 auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) { 1654 if (Mask.size() != VL.size() && VL.size() == Scalars.size()) 1655 return std::equal(VL.begin(), VL.end(), Scalars.begin()); 1656 return VL.size() == Mask.size() && 1657 std::equal(VL.begin(), VL.end(), Mask.begin(), 1658 [Scalars](Value *V, int Idx) { 1659 return (isa<UndefValue>(V) && 1660 Idx == UndefMaskElem) || 1661 (Idx != UndefMaskElem && V == Scalars[Idx]); 1662 }); 1663 }; 1664 if (!ReorderIndices.empty()) { 1665 // TODO: implement matching if the nodes are just reordered; we still 1666 // can treat the vector as the same if the list of scalars matches VL 1667 // directly, without reordering. 1668 SmallVector<int> Mask; 1669 inversePermutation(ReorderIndices, Mask); 1670 if (VL.size() == Scalars.size()) 1671 return IsSame(Scalars, Mask); 1672 if (VL.size() == ReuseShuffleIndices.size()) { 1673 ::addMask(Mask, ReuseShuffleIndices); 1674 return IsSame(Scalars, Mask); 1675 } 1676 return false; 1677 } 1678 return IsSame(Scalars, ReuseShuffleIndices); 1679 } 1680 1681 /// A vector of scalars. 1682 ValueList Scalars; 1683 1684 /// The Scalars are vectorized into this value. It is initialized to Null. 1685 Value *VectorizedValue = nullptr; 1686 1687 /// Do we need to gather this sequence or vectorize it 1688 /// (either with vector instruction or with scatter/gather 1689 /// intrinsics for store/load)? 1690 enum EntryState { Vectorize, ScatterVectorize, NeedToGather }; 1691 EntryState State; 1692 1693 /// Does this sequence require some shuffling?
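    /// Illustrative example (hypothetical bundle): if this entry vectorizes
    /// the unique scalars <a, b> but the requested bundle was <a, b, a, b>,
    /// the reuse mask would be {0, 1, 0, 1}, i.e. a shuffle that replays
    /// lanes of the narrower vector.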
1694 SmallVector<int, 4> ReuseShuffleIndices; 1695 1696 /// Does this entry require reordering? 1697 SmallVector<unsigned, 4> ReorderIndices; 1698 1699 /// Points back to the VectorizableTree. 1700 /// 1701 /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has 1702 /// to be a pointer and needs to be able to initialize the child iterator. 1703 /// Thus we need a reference back to the container to translate the indices 1704 /// to entries. 1705 VecTreeTy &Container; 1706 1707 /// The TreeEntry index containing the user of this entry. We can actually 1708 /// have multiple users so the data structure is not truly a tree. 1709 SmallVector<EdgeInfo, 1> UserTreeIndices; 1710 1711 /// The index of this TreeEntry in VectorizableTree. 1712 int Idx = -1; 1713 1714 private: 1715 /// The operands of each instruction in each lane Operands[op_index][lane]. 1716 /// Note: This helps avoid the replication of the code that performs the 1717 /// reordering of operands during buildTree_rec() and vectorizeTree(). 1718 SmallVector<ValueList, 2> Operands; 1719 1720 /// The main/alternate instruction. 1721 Instruction *MainOp = nullptr; 1722 Instruction *AltOp = nullptr; 1723 1724 public: 1725 /// Set this bundle's \p OpIdx'th operand to \p OpVL. 1726 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) { 1727 if (Operands.size() < OpIdx + 1) 1728 Operands.resize(OpIdx + 1); 1729 assert(Operands[OpIdx].empty() && "Already resized?"); 1730 Operands[OpIdx].resize(Scalars.size()); 1731 for (unsigned Lane = 0, E = Scalars.size(); Lane != E; ++Lane) 1732 Operands[OpIdx][Lane] = OpVL[Lane]; 1733 } 1734 1735 /// Set the operands of this bundle in their original order. 1736 void setOperandsInOrder() { 1737 assert(Operands.empty() && "Already initialized?"); 1738 auto *I0 = cast<Instruction>(Scalars[0]); 1739 Operands.resize(I0->getNumOperands()); 1740 unsigned NumLanes = Scalars.size(); 1741 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands(); 1742 OpIdx != NumOperands; ++OpIdx) { 1743 Operands[OpIdx].resize(NumLanes); 1744 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { 1745 auto *I = cast<Instruction>(Scalars[Lane]); 1746 assert(I->getNumOperands() == NumOperands && 1747 "Expected same number of operands"); 1748 Operands[OpIdx][Lane] = I->getOperand(OpIdx); 1749 } 1750 } 1751 } 1752 1753 /// Reorders operands of the node to the given mask \p Mask. 1754 void reorderOperands(ArrayRef<int> Mask) { 1755 for (ValueList &Operand : Operands) 1756 reorderScalars(Operand, Mask); 1757 } 1758 1759 /// \returns the \p OpIdx operand of this TreeEntry. 1760 ValueList &getOperand(unsigned OpIdx) { 1761 assert(OpIdx < Operands.size() && "Out of bounds"); 1762 return Operands[OpIdx]; 1763 } 1764 1765 /// \returns the number of operands. 1766 unsigned getNumOperands() const { return Operands.size(); } 1767 1768 /// \returns the single \p OpIdx operand. 1769 Value *getSingleOperand(unsigned OpIdx) const { 1770 assert(OpIdx < Operands.size() && "Out of bounds"); 1771 assert(!Operands[OpIdx].empty() && "No operand available"); 1772 return Operands[OpIdx][0]; 1773 } 1774 1775 /// \returns true if some instructions in the list have alternate opcodes. 1776 bool isAltShuffle() const { 1777 return getOpcode() != getAltOpcode(); 1778 } 1779 1780 bool isOpcodeOrAlt(Instruction *I) const { 1781 unsigned CheckedOpcode = I->getOpcode(); 1782 return (getOpcode() == CheckedOpcode || 1783 getAltOpcode() == CheckedOpcode); 1784 } 1785 1786 /// Chooses the correct key for scheduling data.
If \p Op has the same (or 1787 /// alternate) opcode as the main operation, the key is \p Op. Otherwise the 1788 /// key is MainOp. 1789 Value *isOneOf(Value *Op) const { 1790 auto *I = dyn_cast<Instruction>(Op); 1791 if (I && isOpcodeOrAlt(I)) 1792 return Op; 1793 return MainOp; 1794 } 1795 1796 void setOperations(const InstructionsState &S) { 1797 MainOp = S.MainOp; 1798 AltOp = S.AltOp; 1799 } 1800 1801 Instruction *getMainOp() const { 1802 return MainOp; 1803 } 1804 1805 Instruction *getAltOp() const { 1806 return AltOp; 1807 } 1808 1809 /// The main/alternate opcodes for the list of instructions. 1810 unsigned getOpcode() const { 1811 return MainOp ? MainOp->getOpcode() : 0; 1812 } 1813 1814 unsigned getAltOpcode() const { 1815 return AltOp ? AltOp->getOpcode() : 0; 1816 } 1817 1818 /// When ReuseShuffleIndices is empty it just returns the position of \p 1819 /// V within the vector of Scalars. Otherwise, tries to remap via the reuse index. 1820 int findLaneForValue(Value *V) const { 1821 unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V)); 1822 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 1823 if (!ReorderIndices.empty()) 1824 FoundLane = ReorderIndices[FoundLane]; 1825 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 1826 if (!ReuseShuffleIndices.empty()) { 1827 FoundLane = std::distance(ReuseShuffleIndices.begin(), 1828 find(ReuseShuffleIndices, FoundLane)); 1829 } 1830 return FoundLane; 1831 } 1832 1833 #ifndef NDEBUG 1834 /// Debug printer. 1835 LLVM_DUMP_METHOD void dump() const { 1836 dbgs() << Idx << ".\n"; 1837 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) { 1838 dbgs() << "Operand " << OpI << ":\n"; 1839 for (const Value *V : Operands[OpI]) 1840 dbgs().indent(2) << *V << "\n"; 1841 } 1842 dbgs() << "Scalars: \n"; 1843 for (Value *V : Scalars) 1844 dbgs().indent(2) << *V << "\n"; 1845 dbgs() << "State: "; 1846 switch (State) { 1847 case Vectorize: 1848 dbgs() << "Vectorize\n"; 1849 break; 1850 case ScatterVectorize: 1851 dbgs() << "ScatterVectorize\n"; 1852 break; 1853 case NeedToGather: 1854 dbgs() << "NeedToGather\n"; 1855 break; 1856 } 1857 dbgs() << "MainOp: "; 1858 if (MainOp) 1859 dbgs() << *MainOp << "\n"; 1860 else 1861 dbgs() << "NULL\n"; 1862 dbgs() << "AltOp: "; 1863 if (AltOp) 1864 dbgs() << *AltOp << "\n"; 1865 else 1866 dbgs() << "NULL\n"; 1867 dbgs() << "VectorizedValue: "; 1868 if (VectorizedValue) 1869 dbgs() << *VectorizedValue << "\n"; 1870 else 1871 dbgs() << "NULL\n"; 1872 dbgs() << "ReuseShuffleIndices: "; 1873 if (ReuseShuffleIndices.empty()) 1874 dbgs() << "Empty"; 1875 else 1876 for (unsigned ReuseIdx : ReuseShuffleIndices) 1877 dbgs() << ReuseIdx << ", "; 1878 dbgs() << "\n"; 1879 dbgs() << "ReorderIndices: "; 1880 for (unsigned ReorderIdx : ReorderIndices) 1881 dbgs() << ReorderIdx << ", "; 1882 dbgs() << "\n"; 1883 dbgs() << "UserTreeIndices: "; 1884 for (const auto &EInfo : UserTreeIndices) 1885 dbgs() << EInfo << ", "; 1886 dbgs() << "\n"; 1887 } 1888 #endif 1889 }; 1890 1891 #ifndef NDEBUG 1892 void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost, 1893 InstructionCost VecCost, 1894 InstructionCost ScalarCost) const { 1895 dbgs() << "SLP: Calculated costs for Tree:\n"; E->dump(); 1896 dbgs() << "SLP: Costs:\n"; 1897 dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n"; 1898 dbgs() << "SLP: VectorCost = " << VecCost << "\n"; 1899 dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n"; 1900 dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = "
<< 1901 ReuseShuffleCost + VecCost - ScalarCost << "\n"; 1902 } 1903 #endif 1904 1905 /// Create a new VectorizableTree entry. 1906 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle, 1907 const InstructionsState &S, 1908 const EdgeInfo &UserTreeIdx, 1909 ArrayRef<int> ReuseShuffleIndices = None, 1910 ArrayRef<unsigned> ReorderIndices = None) { 1911 TreeEntry::EntryState EntryState = 1912 Bundle ? TreeEntry::Vectorize : TreeEntry::NeedToGather; 1913 return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx, 1914 ReuseShuffleIndices, ReorderIndices); 1915 } 1916 1917 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, 1918 TreeEntry::EntryState EntryState, 1919 Optional<ScheduleData *> Bundle, 1920 const InstructionsState &S, 1921 const EdgeInfo &UserTreeIdx, 1922 ArrayRef<int> ReuseShuffleIndices = None, 1923 ArrayRef<unsigned> ReorderIndices = None) { 1924 assert(((!Bundle && EntryState == TreeEntry::NeedToGather) || 1925 (Bundle && EntryState != TreeEntry::NeedToGather)) && 1926 "Need to vectorize gather entry?"); 1927 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree)); 1928 TreeEntry *Last = VectorizableTree.back().get(); 1929 Last->Idx = VectorizableTree.size() - 1; 1930 Last->State = EntryState; 1931 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(), 1932 ReuseShuffleIndices.end()); 1933 if (ReorderIndices.empty()) { 1934 Last->Scalars.assign(VL.begin(), VL.end()); 1935 Last->setOperations(S); 1936 } else { 1937 // Reorder scalars and build final mask. 1938 Last->Scalars.assign(VL.size(), nullptr); 1939 transform(ReorderIndices, Last->Scalars.begin(), 1940 [VL](unsigned Idx) -> Value * { 1941 if (Idx >= VL.size()) 1942 return UndefValue::get(VL.front()->getType()); 1943 return VL[Idx]; 1944 }); 1945 InstructionsState S = getSameOpcode(Last->Scalars); 1946 Last->setOperations(S); 1947 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end()); 1948 } 1949 if (Last->State != TreeEntry::NeedToGather) { 1950 for (Value *V : VL) { 1951 assert(!getTreeEntry(V) && "Scalar already in tree!"); 1952 ScalarToTreeEntry[V] = Last; 1953 } 1954 // Update the scheduler bundle to point to this TreeEntry. 1955 unsigned Lane = 0; 1956 for (ScheduleData *BundleMember = Bundle.getValue(); BundleMember; 1957 BundleMember = BundleMember->NextInBundle) { 1958 BundleMember->TE = Last; 1959 BundleMember->Lane = Lane; 1960 ++Lane; 1961 } 1962 assert((!Bundle.getValue() || Lane == VL.size()) && 1963 "Bundle and VL out of sync"); 1964 } else { 1965 MustGather.insert(VL.begin(), VL.end()); 1966 } 1967 1968 if (UserTreeIdx.UserTE) 1969 Last->UserTreeIndices.push_back(UserTreeIdx); 1970 1971 return Last; 1972 } 1973 1974 /// -- Vectorization State -- 1975 /// Holds all of the tree entries. 1976 TreeEntry::VecTreeTy VectorizableTree; 1977 1978 #ifndef NDEBUG 1979 /// Debug printer. 1980 LLVM_DUMP_METHOD void dumpVectorizableTree() const { 1981 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) { 1982 VectorizableTree[Id]->dump(); 1983 dbgs() << "\n"; 1984 } 1985 } 1986 #endif 1987 1988 TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); } 1989 1990 const TreeEntry *getTreeEntry(Value *V) const { 1991 return ScalarToTreeEntry.lookup(V); 1992 } 1993 1994 /// Maps a specific scalar to its tree entry. 1995 SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry; 1996 1997 /// Maps a value to the proposed vectorizable size. 
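  /// (Illustrative note, assuming the usual usage of this member: it acts as
  /// a cache for getVectorElementSize(), so that determining the narrowest
  /// element width along a value's use-def chain, e.g. an i8 load that is
  /// widened and later truncated back to i8, only has to be done once per
  /// value. The exact lookup scheme is a sketch, not a contract.)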
1998 SmallDenseMap<Value *, unsigned> InstrElementSize; 1999 2000 /// A list of scalars that we found we need to keep as scalars. 2001 ValueSet MustGather; 2002 2003 /// This POD struct describes one external user in the vectorized tree. 2004 struct ExternalUser { 2005 ExternalUser(Value *S, llvm::User *U, int L) 2006 : Scalar(S), User(U), Lane(L) {} 2007 2008 // Which scalar in our function. 2009 Value *Scalar; 2010 2011 // Which user uses the scalar. 2012 llvm::User *User; 2013 2014 // Which lane does the scalar belong to. 2015 int Lane; 2016 }; 2017 using UserList = SmallVector<ExternalUser, 16>; 2018 2019 /// Checks if two instructions may access the same memory. 2020 /// 2021 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it 2022 /// is invariant in the calling loop. 2023 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1, 2024 Instruction *Inst2) { 2025 // First check if the result is already in the cache. 2026 AliasCacheKey key = std::make_pair(Inst1, Inst2); 2027 Optional<bool> &result = AliasCache[key]; 2028 if (result.hasValue()) { 2029 return result.getValue(); 2030 } 2031 bool aliased = true; 2032 if (Loc1.Ptr && isSimple(Inst1)) 2033 aliased = isModOrRefSet(AA->getModRefInfo(Inst2, Loc1)); 2034 // Store the result in the cache. 2035 result = aliased; 2036 return aliased; 2037 } 2038 2039 using AliasCacheKey = std::pair<Instruction *, Instruction *>; 2040 2041 /// Cache for alias results. 2042 /// TODO: consider moving this to the AliasAnalysis itself. 2043 DenseMap<AliasCacheKey, Optional<bool>> AliasCache; 2044 2045 /// Removes an instruction from its block and eventually deletes it. 2046 /// It's like Instruction::eraseFromParent() except that the actual deletion 2047 /// is delayed until BoUpSLP is destructed. 2048 /// This is required to ensure that there are no incorrect collisions in the 2049 /// AliasCache, which can happen if a new instruction is allocated at the 2050 /// same address as a previously deleted instruction. 2051 void eraseInstruction(Instruction *I, bool ReplaceOpsWithUndef = false) { 2052 auto It = DeletedInstructions.try_emplace(I, ReplaceOpsWithUndef).first; 2053 It->getSecond() = It->getSecond() && ReplaceOpsWithUndef; 2054 } 2055 2056 /// Temporary store for deleted instructions. Instructions will be deleted 2057 /// eventually when the BoUpSLP is destructed. 2058 DenseMap<Instruction *, bool> DeletedInstructions; 2059 2060 /// A list of values that need to be extracted out of the tree. 2061 /// This list holds pairs of (Internal Scalar : External User). External User 2062 /// can be nullptr, which means that this Internal Scalar will be used later, 2063 /// after vectorization. 2064 UserList ExternalUses; 2065 2066 /// Values used only by @llvm.assume calls. 2067 SmallPtrSet<const Value *, 32> EphValues; 2068 2069 /// Holds all of the instructions that we gathered. 2070 SetVector<Instruction *> GatherSeq; 2071 2072 /// A list of blocks that we are going to CSE. 2073 SetVector<BasicBlock *> CSEBlocks; 2074 2075 /// Contains all scheduling relevant data for an instruction. 2076 /// A ScheduleData either represents a single instruction or a member of an 2077 /// instruction bundle (= a group of instructions which is combined into a 2078 /// vector instruction). 2079 struct ScheduleData { 2080 // The initial value for the dependency counters. It means that the 2081 // dependencies are not calculated yet.
2082 enum { InvalidDeps = -1 }; 2083 2084 ScheduleData() = default; 2085 2086 void init(int BlockSchedulingRegionID, Value *OpVal) { 2087 FirstInBundle = this; 2088 NextInBundle = nullptr; 2089 NextLoadStore = nullptr; 2090 IsScheduled = false; 2091 SchedulingRegionID = BlockSchedulingRegionID; 2092 UnscheduledDepsInBundle = UnscheduledDeps; 2093 clearDependencies(); 2094 OpValue = OpVal; 2095 TE = nullptr; 2096 Lane = -1; 2097 } 2098 2099 /// Returns true if the dependency information has been calculated. 2100 bool hasValidDependencies() const { return Dependencies != InvalidDeps; } 2101 2102 /// Returns true for single instructions and for bundle representatives 2103 /// (= the head of a bundle). 2104 bool isSchedulingEntity() const { return FirstInBundle == this; } 2105 2106 /// Returns true if it represents an instruction bundle and not only a 2107 /// single instruction. 2108 bool isPartOfBundle() const { 2109 return NextInBundle != nullptr || FirstInBundle != this; 2110 } 2111 2112 /// Returns true if it is ready for scheduling, i.e. it has no more 2113 /// unscheduled dependent instructions/bundles. 2114 bool isReady() const { 2115 assert(isSchedulingEntity() && 2116 "can't consider non-scheduling entity for ready list"); 2117 return UnscheduledDepsInBundle == 0 && !IsScheduled; 2118 } 2119 2120 /// Modifies the number of unscheduled dependencies, also updating it for 2121 /// the whole bundle. 2122 int incrementUnscheduledDeps(int Incr) { 2123 UnscheduledDeps += Incr; 2124 return FirstInBundle->UnscheduledDepsInBundle += Incr; 2125 } 2126 2127 /// Sets the number of unscheduled dependencies to the number of 2128 /// dependencies. 2129 void resetUnscheduledDeps() { 2130 incrementUnscheduledDeps(Dependencies - UnscheduledDeps); 2131 } 2132 2133 /// Clears all dependency information. 2134 void clearDependencies() { 2135 Dependencies = InvalidDeps; 2136 resetUnscheduledDeps(); 2137 MemoryDependencies.clear(); 2138 } 2139 2140 void dump(raw_ostream &os) const { 2141 if (!isSchedulingEntity()) { 2142 os << "/ " << *Inst; 2143 } else if (NextInBundle) { 2144 os << '[' << *Inst; 2145 ScheduleData *SD = NextInBundle; 2146 while (SD) { 2147 os << ';' << *SD->Inst; 2148 SD = SD->NextInBundle; 2149 } 2150 os << ']'; 2151 } else { 2152 os << *Inst; 2153 } 2154 } 2155 2156 Instruction *Inst = nullptr; 2157 2158 /// Points to the head in an instruction bundle (and always to this for 2159 /// single instructions). 2160 ScheduleData *FirstInBundle = nullptr; 2161 2162 /// Singly linked list of all instructions in a bundle. Null if it is a 2163 /// single instruction. 2164 ScheduleData *NextInBundle = nullptr; 2165 2166 /// Singly linked list of all memory instructions (e.g. load, store, call) 2167 /// in the block - until the end of the scheduling region. 2168 ScheduleData *NextLoadStore = nullptr; 2169 2170 /// The dependent memory instructions. 2171 /// This list is derived on demand in calculateDependencies(). 2172 SmallVector<ScheduleData *, 4> MemoryDependencies; 2173 2174 /// This ScheduleData is in the current scheduling region if this matches 2175 /// the current SchedulingRegionID of BlockScheduling. 2176 int SchedulingRegionID = 0; 2177 2178 /// Used for getting a "good" final ordering of instructions. 2179 int SchedulingPriority = 0; 2180 2181 /// The number of dependencies. It is the number of users of the 2182 /// instruction plus the number of dependent memory instructions (if any). 2183 /// This value is calculated on demand.
2184 /// If InvalidDeps, the number of dependencies is not calculated yet. 2185 int Dependencies = InvalidDeps; 2186 2187 /// The number of dependencies minus the number of dependencies of scheduled 2188 /// instructions. As soon as this is zero, the instruction/bundle gets ready 2189 /// for scheduling. 2190 /// Note that this is negative as long as Dependencies is not calculated. 2191 int UnscheduledDeps = InvalidDeps; 2192 2193 /// The sum of UnscheduledDeps in a bundle. Equals UnscheduledDeps for 2194 /// single instructions. 2195 int UnscheduledDepsInBundle = InvalidDeps; 2196 2197 /// True if this instruction is scheduled (or considered scheduled in the 2198 /// dry-run). 2199 bool IsScheduled = false; 2200 2201 /// The key value for this schedule data; normally the instruction itself. 2202 Value *OpValue = nullptr; 2203 2204 /// The TreeEntry that this instruction corresponds to. 2205 TreeEntry *TE = nullptr; 2206 2207 /// The lane of this node in the TreeEntry. 2208 int Lane = -1; 2209 }; 2210 2211 #ifndef NDEBUG 2212 friend inline raw_ostream &operator<<(raw_ostream &os, 2213 const BoUpSLP::ScheduleData &SD) { 2214 SD.dump(os); 2215 return os; 2216 } 2217 #endif 2218 2219 friend struct GraphTraits<BoUpSLP *>; 2220 friend struct DOTGraphTraits<BoUpSLP *>; 2221 2222 /// Contains all scheduling data for a basic block. 2223 struct BlockScheduling { 2224 BlockScheduling(BasicBlock *BB) 2225 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {} 2226 2227 void clear() { 2228 ReadyInsts.clear(); 2229 ScheduleStart = nullptr; 2230 ScheduleEnd = nullptr; 2231 FirstLoadStoreInRegion = nullptr; 2232 LastLoadStoreInRegion = nullptr; 2233 2234 // Reduce the maximum schedule region size by the size of the 2235 // previous scheduling run. 2236 ScheduleRegionSizeLimit -= ScheduleRegionSize; 2237 if (ScheduleRegionSizeLimit < MinScheduleRegionSize) 2238 ScheduleRegionSizeLimit = MinScheduleRegionSize; 2239 ScheduleRegionSize = 0; 2240 2241 // Make a new scheduling region, i.e. all existing ScheduleData is not 2242 // in the new region yet. 2243 ++SchedulingRegionID; 2244 } 2245 2246 ScheduleData *getScheduleData(Value *V) { 2247 ScheduleData *SD = ScheduleDataMap[V]; 2248 if (SD && SD->SchedulingRegionID == SchedulingRegionID) 2249 return SD; 2250 return nullptr; 2251 } 2252 2253 ScheduleData *getScheduleData(Value *V, Value *Key) { 2254 if (V == Key) 2255 return getScheduleData(V); 2256 auto I = ExtraScheduleDataMap.find(V); 2257 if (I != ExtraScheduleDataMap.end()) { 2258 ScheduleData *SD = I->second[Key]; 2259 if (SD && SD->SchedulingRegionID == SchedulingRegionID) 2260 return SD; 2261 } 2262 return nullptr; 2263 } 2264 2265 bool isInSchedulingRegion(ScheduleData *SD) const { 2266 return SD->SchedulingRegionID == SchedulingRegionID; 2267 } 2268 2269 /// Marks an instruction as scheduled and puts all dependent ready 2270 /// instructions into the ready-list. 2271 template <typename ReadyListType> 2272 void schedule(ScheduleData *SD, ReadyListType &ReadyList) { 2273 SD->IsScheduled = true; 2274 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); 2275 2276 ScheduleData *BundleMember = SD; 2277 while (BundleMember) { 2278 if (BundleMember->Inst != BundleMember->OpValue) { 2279 BundleMember = BundleMember->NextInBundle; 2280 continue; 2281 } 2282 // Handle the def-use chain dependencies. 2283 2284 // Decrement the unscheduled counter and insert into the ready list if ready.
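        // Illustrative walk-through (hypothetical IR): given %add = add i32
        // %load, 1, scheduling %add's bundle makes the lambda below decrement
        // the UnscheduledDeps of its operand %load (dependencies count users,
        // since scheduling proceeds bottom-up); once the counter of %load's
        // whole bundle reaches zero, that bundle becomes ready.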
2285 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) { 2286 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) { 2287 if (OpDef && OpDef->hasValidDependencies() && 2288 OpDef->incrementUnscheduledDeps(-1) == 0) { 2289 // There are no more unscheduled dependencies after 2290 // decrementing, so we can put the dependent instruction 2291 // into the ready list. 2292 ScheduleData *DepBundle = OpDef->FirstInBundle; 2293 assert(!DepBundle->IsScheduled && 2294 "already scheduled bundle gets ready"); 2295 ReadyList.insert(DepBundle); 2296 LLVM_DEBUG(dbgs() 2297 << "SLP: gets ready (def): " << *DepBundle << "\n"); 2298 } 2299 }); 2300 }; 2301 2302 // If BundleMember is a vector bundle, its operands may have been 2303 // reordered during buildTree(). We therefore need to get its operands 2304 // through the TreeEntry. 2305 if (TreeEntry *TE = BundleMember->TE) { 2306 int Lane = BundleMember->Lane; 2307 assert(Lane >= 0 && "Lane not set"); 2308 2309 // Since the vectorization tree is built recursively, this assertion 2310 // ensures that the tree entry has all operands set before reaching 2311 // this code. A couple of known exceptions are extracts, where the 2312 // second (immediate) operand is not added. Since 2313 // immediates do not affect scheduler behavior, this is considered 2314 // okay. 2315 auto *In = TE->getMainOp(); 2316 assert(In && 2317 (isa<ExtractValueInst>(In) || isa<ExtractElementInst>(In) || 2318 In->getNumOperands() == TE->getNumOperands()) && 2319 "Missed TreeEntry operands?"); 2320 (void)In; // fake use to avoid build failure when assertions disabled 2321 2322 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands(); 2323 OpIdx != NumOperands; ++OpIdx) 2324 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane])) 2325 DecrUnsched(I); 2326 } else { 2327 // If BundleMember is a stand-alone instruction, no operand reordering 2328 // has taken place, so we directly access its operands. 2329 for (Use &U : BundleMember->Inst->operands()) 2330 if (auto *I = dyn_cast<Instruction>(U.get())) 2331 DecrUnsched(I); 2332 } 2333 // Handle the memory dependencies. 2334 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) { 2335 if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) { 2336 // There are no more unscheduled dependencies after decrementing, 2337 // so we can put the dependent instruction into the ready list. 2338 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle; 2339 assert(!DepBundle->IsScheduled && 2340 "already scheduled bundle gets ready"); 2341 ReadyList.insert(DepBundle); 2342 LLVM_DEBUG(dbgs() 2343 << "SLP: gets ready (mem): " << *DepBundle << "\n"); 2344 } 2345 } 2346 BundleMember = BundleMember->NextInBundle; 2347 } 2348 } 2349 2350 void doForAllOpcodes(Value *V, 2351 function_ref<void(ScheduleData *SD)> Action) { 2352 if (ScheduleData *SD = getScheduleData(V)) 2353 Action(SD); 2354 auto I = ExtraScheduleDataMap.find(V); 2355 if (I != ExtraScheduleDataMap.end()) 2356 for (auto &P : I->second) 2357 if (P.second->SchedulingRegionID == SchedulingRegionID) 2358 Action(P.second); 2359 } 2360 2361 /// Put all instructions into the ReadyList which are ready for scheduling.
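    /// "Ready" here is ScheduleData::isReady() above: a scheduling entity
    /// whose UnscheduledDepsInBundle has dropped to zero and that has not
    /// been scheduled yet.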
2362 template <typename ReadyListType> 2363 void initialFillReadyList(ReadyListType &ReadyList) { 2364 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 2365 doForAllOpcodes(I, [&](ScheduleData *SD) { 2366 if (SD->isSchedulingEntity() && SD->isReady()) { 2367 ReadyList.insert(SD); 2368 LLVM_DEBUG(dbgs() 2369 << "SLP: initially in ready list: " << *I << "\n"); 2370 } 2371 }); 2372 } 2373 } 2374 2375 /// Checks if a bundle of instructions can be scheduled, i.e. has no 2376 /// cyclic dependencies. This is only a dry-run, no instructions are 2377 /// actually moved at this stage. 2378 /// \returns the scheduling bundle. The returned Optional value is non-None 2379 /// if \p VL is allowed to be scheduled. 2380 Optional<ScheduleData *> 2381 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 2382 const InstructionsState &S); 2383 2384 /// Un-bundles a group of instructions. 2385 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue); 2386 2387 /// Allocates schedule data chunk. 2388 ScheduleData *allocateScheduleDataChunks(); 2389 2390 /// Extends the scheduling region so that V is inside the region. 2391 /// \returns true if the region size is within the limit. 2392 bool extendSchedulingRegion(Value *V, const InstructionsState &S); 2393 2394 /// Initialize the ScheduleData structures for new instructions in the 2395 /// scheduling region. 2396 void initScheduleData(Instruction *FromI, Instruction *ToI, 2397 ScheduleData *PrevLoadStore, 2398 ScheduleData *NextLoadStore); 2399 2400 /// Updates the dependency information of a bundle and of all instructions/ 2401 /// bundles which depend on the original bundle. 2402 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList, 2403 BoUpSLP *SLP); 2404 2405 /// Sets all instructions in the scheduling region to un-scheduled. 2406 void resetSchedule(); 2407 2408 BasicBlock *BB; 2409 2410 /// Simple memory allocation for ScheduleData. 2411 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks; 2412 2413 /// The size of a ScheduleData array in ScheduleDataChunks. 2414 int ChunkSize; 2415 2416 /// The allocator position in the current chunk, which is the last entry 2417 /// of ScheduleDataChunks. 2418 int ChunkPos; 2419 2420 /// Attaches ScheduleData to Instruction. 2421 /// Note that the mapping survives during all vectorization iterations, i.e. 2422 /// ScheduleData structures are recycled. 2423 DenseMap<Value *, ScheduleData *> ScheduleDataMap; 2424 2425 /// Attaches ScheduleData to Instruction with the leading key. 2426 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>> 2427 ExtraScheduleDataMap; 2428 2429 struct ReadyList : SmallVector<ScheduleData *, 8> { 2430 void insert(ScheduleData *SD) { push_back(SD); } 2431 }; 2432 2433 /// The ready-list for scheduling (only used for the dry-run). 2434 ReadyList ReadyInsts; 2435 2436 /// The first instruction of the scheduling region. 2437 Instruction *ScheduleStart = nullptr; 2438 2439 /// The first instruction _after_ the scheduling region. 2440 Instruction *ScheduleEnd = nullptr; 2441 2442 /// The first memory accessing instruction in the scheduling region 2443 /// (can be null). 2444 ScheduleData *FirstLoadStoreInRegion = nullptr; 2445 2446 /// The last memory accessing instruction in the scheduling region 2447 /// (can be null). 2448 ScheduleData *LastLoadStoreInRegion = nullptr; 2449 2450 /// The current size of the scheduling region. 2451 int ScheduleRegionSize = 0; 2452 2453 /// The maximum size allowed for the scheduling region.
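    /// Starts at ScheduleRegionSizeBudget and, as clear() above shows, is
    /// reduced by the size of every previous scheduling region (never below
    /// MinScheduleRegionSize), bounding compile time across repeated
    /// vectorization attempts in one block.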
2454 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget; 2455 2456 /// The ID of the scheduling region. For a new vectorization iteration this 2457 /// is incremented which "removes" all ScheduleData from the region. 2458 // Make sure that the initial SchedulingRegionID is greater than the 2459 // initial SchedulingRegionID in ScheduleData (which is 0). 2460 int SchedulingRegionID = 1; 2461 }; 2462 2463 /// Attaches the BlockScheduling structures to basic blocks. 2464 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules; 2465 2466 /// Performs the "real" scheduling. Done before vectorization is actually 2467 /// performed in a basic block. 2468 void scheduleBlock(BlockScheduling *BS); 2469 2470 /// List of users to ignore during scheduling and that don't need extracting. 2471 ArrayRef<Value *> UserIgnoreList; 2472 2473 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of 2474 /// sorted SmallVectors of unsigned. 2475 struct OrdersTypeDenseMapInfo { 2476 static OrdersType getEmptyKey() { 2477 OrdersType V; 2478 V.push_back(~1U); 2479 return V; 2480 } 2481 2482 static OrdersType getTombstoneKey() { 2483 OrdersType V; 2484 V.push_back(~2U); 2485 return V; 2486 } 2487 2488 static unsigned getHashValue(const OrdersType &V) { 2489 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); 2490 } 2491 2492 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) { 2493 return LHS == RHS; 2494 } 2495 }; 2496 2497 // Analysis and block reference. 2498 Function *F; 2499 ScalarEvolution *SE; 2500 TargetTransformInfo *TTI; 2501 TargetLibraryInfo *TLI; 2502 AAResults *AA; 2503 LoopInfo *LI; 2504 DominatorTree *DT; 2505 AssumptionCache *AC; 2506 DemandedBits *DB; 2507 const DataLayout *DL; 2508 OptimizationRemarkEmitter *ORE; 2509 2510 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. 2511 unsigned MinVecRegSize; // Set by cl::opt (default: 128). 2512 2513 /// Instruction builder to construct the vectorized tree. 2514 IRBuilder<> Builder; 2515 2516 /// A map of scalar integer values to the smallest bit width with which they 2517 /// can legally be represented. The values map to (width, signed) pairs, 2518 /// where "width" indicates the minimum bit width and "signed" is True if the 2519 /// value must be signed-extended, rather than zero-extended, back to its 2520 /// original width. 2521 MapVector<Value *, std::pair<uint64_t, bool>> MinBWs; 2522 }; 2523 2524 } // end namespace slpvectorizer 2525 2526 template <> struct GraphTraits<BoUpSLP *> { 2527 using TreeEntry = BoUpSLP::TreeEntry; 2528 2529 /// NodeRef has to be a pointer per the GraphWriter. 2530 using NodeRef = TreeEntry *; 2531 2532 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy; 2533 2534 /// Add the VectorizableTree to the index iterator to be able to return 2535 /// TreeEntry pointers. 
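  /// (Illustrative note: with these GraphTraits and the DOTGraphTraits below,
  /// the stock GraphWriter machinery can render the SLP graph, e.g. via
  /// llvm::ViewGraph on a BoUpSLP pointer; the exact invocation here is a
  /// sketch, not a contract.)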
2536 struct ChildIteratorType 2537 : public iterator_adaptor_base< 2538 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> { 2539 ContainerTy &VectorizableTree; 2540 2541 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W, 2542 ContainerTy &VT) 2543 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {} 2544 2545 NodeRef operator*() { return I->UserTE; } 2546 }; 2547 2548 static NodeRef getEntryNode(BoUpSLP &R) { 2549 return R.VectorizableTree[0].get(); 2550 } 2551 2552 static ChildIteratorType child_begin(NodeRef N) { 2553 return {N->UserTreeIndices.begin(), N->Container}; 2554 } 2555 2556 static ChildIteratorType child_end(NodeRef N) { 2557 return {N->UserTreeIndices.end(), N->Container}; 2558 } 2559 2560 /// For the node iterator we just need to turn the TreeEntry iterator into a 2561 /// TreeEntry* iterator so that it dereferences to NodeRef. 2562 class nodes_iterator { 2563 using ItTy = ContainerTy::iterator; 2564 ItTy It; 2565 2566 public: 2567 nodes_iterator(const ItTy &It2) : It(It2) {} 2568 NodeRef operator*() { return It->get(); } 2569 nodes_iterator operator++() { 2570 ++It; 2571 return *this; 2572 } 2573 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; } 2574 }; 2575 2576 static nodes_iterator nodes_begin(BoUpSLP *R) { 2577 return nodes_iterator(R->VectorizableTree.begin()); 2578 } 2579 2580 static nodes_iterator nodes_end(BoUpSLP *R) { 2581 return nodes_iterator(R->VectorizableTree.end()); 2582 } 2583 2584 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); } 2585 }; 2586 2587 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits { 2588 using TreeEntry = BoUpSLP::TreeEntry; 2589 2590 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {} 2591 2592 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) { 2593 std::string Str; 2594 raw_string_ostream OS(Str); 2595 if (isSplat(Entry->Scalars)) 2596 OS << "<splat> "; 2597 for (auto V : Entry->Scalars) { 2598 OS << *V; 2599 if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) { 2600 return EU.Scalar == V; 2601 })) 2602 OS << " <extract>"; 2603 OS << "\n"; 2604 } 2605 return Str; 2606 } 2607 2608 static std::string getNodeAttributes(const TreeEntry *Entry, 2609 const BoUpSLP *) { 2610 if (Entry->State == TreeEntry::NeedToGather) 2611 return "color=red"; 2612 return ""; 2613 } 2614 }; 2615 2616 } // end namespace llvm 2617 2618 BoUpSLP::~BoUpSLP() { 2619 for (const auto &Pair : DeletedInstructions) { 2620 // Replace remaining uses of the deleted instructions with Undefs if they 2621 // were marked for such replacement. 2622 if (Pair.getSecond()) { 2623 Value *Undef = UndefValue::get(Pair.getFirst()->getType()); 2624 Pair.getFirst()->replaceAllUsesWith(Undef); 2625 } 2626 Pair.getFirst()->dropAllReferences(); 2627 } 2628 for (const auto &Pair : DeletedInstructions) { 2629 assert(Pair.getFirst()->use_empty() && 2630 "trying to erase instruction with users."); 2631 Pair.getFirst()->eraseFromParent(); 2632 } 2633 #ifdef EXPENSIVE_CHECKS 2634 // If we could guarantee that this call is not extremely slow, we could 2635 // remove the ifdef limitation (see PR47712).
2636 assert(!verifyFunction(*F, &dbgs())); 2637 #endif 2638 } 2639 2640 void BoUpSLP::eraseInstructions(ArrayRef<Value *> AV) { 2641 for (auto *V : AV) { 2642 if (auto *I = dyn_cast<Instruction>(V)) 2643 eraseInstruction(I, /*ReplaceOpsWithUndef=*/true); 2644 } 2645 } 2646 2647 /// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses 2648 /// contains the original mask for the scalars reused in the node. The 2649 /// procedure transforms this mask in accordance with the given \p Mask. 2650 static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) { 2651 assert(!Mask.empty() && Reuses.size() == Mask.size() && 2652 "Expected non-empty mask."); 2653 SmallVector<int> Prev(Reuses.begin(), Reuses.end()); 2654 Prev.swap(Reuses); 2655 for (unsigned I = 0, E = Prev.size(); I < E; ++I) 2656 if (Mask[I] != UndefMaskElem) 2657 Reuses[Mask[I]] = Prev[I]; 2658 } 2659 2660 /// Reorders the given \p Order according to the given \p Mask. \p Order is 2661 /// the original order of the scalars. The procedure transforms the provided 2662 /// order in accordance with the given \p Mask. If the resulting \p Order is 2663 /// just an identity order, \p Order is cleared. 2664 static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) { 2665 assert(!Mask.empty() && "Expected non-empty mask."); 2666 SmallVector<int> MaskOrder; 2667 if (Order.empty()) { 2668 MaskOrder.resize(Mask.size()); 2669 std::iota(MaskOrder.begin(), MaskOrder.end(), 0); 2670 } else { 2671 inversePermutation(Order, MaskOrder); 2672 } 2673 reorderReuses(MaskOrder, Mask); 2674 if (ShuffleVectorInst::isIdentityMask(MaskOrder)) { 2675 Order.clear(); 2676 return; 2677 } 2678 Order.assign(Mask.size(), Mask.size()); 2679 for (unsigned I = 0, E = Mask.size(); I < E; ++I) 2680 if (MaskOrder[I] != UndefMaskElem) 2681 Order[MaskOrder[I]] = I; 2682 fixupOrderingIndices(Order); 2683 } 2684 2685 Optional<BoUpSLP::OrdersType> 2686 BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) { 2687 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only."); 2688 unsigned NumScalars = TE.Scalars.size(); 2689 OrdersType CurrentOrder(NumScalars, NumScalars); 2690 SmallVector<int> Positions; 2691 SmallBitVector UsedPositions(NumScalars); 2692 const TreeEntry *STE = nullptr; 2693 // Try to find all gathered scalars that get vectorized in another 2694 // vectorized node. Here we can have only a single tree vector node to 2695 // correctly identify the order of the gathered scalars. 2696 for (unsigned I = 0; I < NumScalars; ++I) { 2697 Value *V = TE.Scalars[I]; 2698 if (!isa<LoadInst, ExtractElementInst, ExtractValueInst>(V)) 2699 continue; 2700 if (const auto *LocalSTE = getTreeEntry(V)) { 2701 if (!STE) 2702 STE = LocalSTE; 2703 else if (STE != LocalSTE) 2704 // Take the order only from the single vector node. 2705 return None; 2706 unsigned Lane = 2707 std::distance(STE->Scalars.begin(), find(STE->Scalars, V)); 2708 if (Lane >= NumScalars) 2709 return None; 2710 if (CurrentOrder[Lane] != NumScalars) { 2711 if (Lane != I) 2712 continue; 2713 UsedPositions.reset(CurrentOrder[Lane]); 2714 } 2715 // The partial identity (where only some elements of the gather node are 2716 // in the identity order) is good. 2717 CurrentOrder[Lane] = I; 2718 UsedPositions.set(I); 2719 } 2720 } 2721 // Need to keep the order if we have a vector entry and at least 2 scalars or 2722 // the vectorized entry has just 2 scalars.
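  // Illustrative example (hypothetical scalars): if this gather node is
  // <c, a, b> and some vectorized node STE holds <a, b, c>, the loop above
  // records the correspondence between the gather positions and STE's lanes;
  // returning that order lets the gather be emitted as a single shuffle of
  // STE's vector instead of a chain of inserts.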
2723 if (STE && (UsedPositions.count() > 1 || STE->Scalars.size() == 2)) { 2724 auto &&IsIdentityOrder = [NumScalars](ArrayRef<unsigned> CurrentOrder) { 2725 for (unsigned I = 0; I < NumScalars; ++I) 2726 if (CurrentOrder[I] != I && CurrentOrder[I] != NumScalars) 2727 return false; 2728 return true; 2729 }; 2730 if (IsIdentityOrder(CurrentOrder)) { 2731 CurrentOrder.clear(); 2732 return CurrentOrder; 2733 } 2734 auto *It = CurrentOrder.begin(); 2735 for (unsigned I = 0; I < NumScalars;) { 2736 if (UsedPositions.test(I)) { 2737 ++I; 2738 continue; 2739 } 2740 if (*It == NumScalars) { 2741 *It = I; 2742 ++I; 2743 } 2744 ++It; 2745 } 2746 return CurrentOrder; 2747 } 2748 return None; 2749 } 2750 2751 void BoUpSLP::reorderTopToBottom() { 2752 // Maps VF to the graph nodes. 2753 DenseMap<unsigned, SmallPtrSet<TreeEntry *, 4>> VFToOrderedEntries; 2754 // ExtractElement gather nodes which can be vectorized and need to handle 2755 // their ordering. 2756 DenseMap<const TreeEntry *, OrdersType> GathersToOrders; 2757 // Find all reorderable nodes with the given VF. 2758 // Currently these are vectorized loads, extracts + some gathering of extracts. 2759 for_each(VectorizableTree, [this, &VFToOrderedEntries, &GathersToOrders]( 2760 const std::unique_ptr<TreeEntry> &TE) { 2761 // No need to reorder if we need to shuffle reuses; the node still needs 2762 // to be shuffled. 2763 if (!TE->ReuseShuffleIndices.empty()) 2764 return; 2765 if (TE->State == TreeEntry::Vectorize && 2766 isa<LoadInst, ExtractElementInst, ExtractValueInst, StoreInst, 2767 InsertElementInst>(TE->getMainOp()) && 2768 !TE->isAltShuffle()) { 2769 VFToOrderedEntries[TE->Scalars.size()].insert(TE.get()); 2770 return; 2771 } 2772 if (TE->State == TreeEntry::NeedToGather) { 2773 if (TE->getOpcode() == Instruction::ExtractElement && 2774 !TE->isAltShuffle() && 2775 isa<FixedVectorType>(cast<ExtractElementInst>(TE->getMainOp()) 2776 ->getVectorOperandType()) && 2777 allSameType(TE->Scalars) && allSameBlock(TE->Scalars)) { 2778 // Check that gather of extractelements can be represented as 2779 // just a shuffle of a single vector. 2780 OrdersType CurrentOrder; 2781 bool Reuse = 2782 canReuseExtract(TE->Scalars, TE->getMainOp(), CurrentOrder); 2783 if (Reuse || !CurrentOrder.empty()) { 2784 VFToOrderedEntries[TE->Scalars.size()].insert(TE.get()); 2785 GathersToOrders.try_emplace(TE.get(), CurrentOrder); 2786 return; 2787 } 2788 } 2789 if (Optional<OrdersType> CurrentOrder = 2790 findReusedOrderedScalars(*TE.get())) { 2791 VFToOrderedEntries[TE->Scalars.size()].insert(TE.get()); 2792 GathersToOrders.try_emplace(TE.get(), *CurrentOrder); 2793 } 2794 } 2795 }); 2796 2797 // Reorder the graph nodes according to their vectorization factor. 2798 for (unsigned VF = VectorizableTree.front()->Scalars.size(); VF > 1; 2799 VF /= 2) { 2800 auto It = VFToOrderedEntries.find(VF); 2801 if (It == VFToOrderedEntries.end()) 2802 continue; 2803 // Try to find the most profitable order. We are just looking for the most 2804 // used order and reorder the scalar elements in the nodes according to 2805 // this most used order. 2806 const SmallPtrSetImpl<TreeEntry *> &OrderedEntries = It->getSecond(); 2807 // All operands are reordered and used only in this node - propagate the 2808 // most used order to the user node.
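    // For example (illustrative): if three entries with this VF prefer the
    // order {1, 0, 3, 2} and only one prefers the identity, OrdersUses below
    // ends up counting 3 vs. 1 and {1, 0, 3, 2} is picked as BestOrder.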
2809 MapVector<OrdersType, unsigned, 2810 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>> 2811 OrdersUses; 2812 SmallPtrSet<const TreeEntry *, 4> VisitedOps; 2813 for (const TreeEntry *OpTE : OrderedEntries) { 2814 // No need to reorder these nodes; they still need an extend-and-shuffle, 2815 // we just have to merge the reordering shuffle and the reuse shuffle. 2816 if (!OpTE->ReuseShuffleIndices.empty()) 2817 continue; 2818 // Count the number of uses of each order. 2819 const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & { 2820 if (OpTE->State == TreeEntry::NeedToGather) 2821 return GathersToOrders.find(OpTE)->second; 2822 return OpTE->ReorderIndices; 2823 }(); 2824 // Stores actually store the mask, not the order; we need to invert it. 2825 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && 2826 OpTE->getOpcode() == Instruction::Store && !Order.empty()) { 2827 SmallVector<int> Mask; 2828 inversePermutation(Order, Mask); 2829 unsigned E = Order.size(); 2830 OrdersType CurrentOrder(E, E); 2831 transform(Mask, CurrentOrder.begin(), [E](int Idx) { 2832 return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx); 2833 }); 2834 fixupOrderingIndices(CurrentOrder); 2835 ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second; 2836 } else { 2837 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second; 2838 } 2839 } 2840 // Set order of the user node. 2841 if (OrdersUses.empty()) 2842 continue; 2843 // Choose the most used order. 2844 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 2845 unsigned Cnt = OrdersUses.front().second; 2846 for (const auto &Pair : drop_begin(OrdersUses)) { 2847 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 2848 BestOrder = Pair.first; 2849 Cnt = Pair.second; 2850 } 2851 } 2852 // Set order of the user node. 2853 if (BestOrder.empty()) 2854 continue; 2855 SmallVector<int> Mask; 2856 inversePermutation(BestOrder, Mask); 2857 SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem); 2858 unsigned E = BestOrder.size(); 2859 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 2860 return I < E ? static_cast<int>(I) : UndefMaskElem; 2861 }); 2862 // Do an actual reordering, if profitable. 2863 for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 2864 // Just do the reordering for the nodes with the given VF. 2865 if (TE->Scalars.size() != VF) { 2866 if (TE->ReuseShuffleIndices.size() == VF) { 2867 // Need to reorder the reuses masks of the operands with smaller VF to 2868 // be able to find the match between the graph nodes and scalar 2869 // operands of the given node during vectorization/cost estimation. 2870 assert(all_of(TE->UserTreeIndices, 2871 [VF, &TE](const EdgeInfo &EI) { 2872 return EI.UserTE->Scalars.size() == VF || 2873 EI.UserTE->Scalars.size() == 2874 TE->Scalars.size(); 2875 }) && 2876 "All users must be of VF size."); 2877 // Update ordering of the operands with the smaller VF than the given 2878 // one. 2879 reorderReuses(TE->ReuseShuffleIndices, Mask); 2880 } 2881 continue; 2882 } 2883 if (TE->State == TreeEntry::Vectorize && 2884 isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst, 2885 InsertElementInst>(TE->getMainOp()) && 2886 !TE->isAltShuffle()) { 2887 // Build correct orders for extract{element,value}, loads and 2888 // stores. 2889 reorderOrder(TE->ReorderIndices, Mask); 2890 if (isa<InsertElementInst, StoreInst>(TE->getMainOp())) 2891 TE->reorderOperands(Mask); 2892 } else { 2893 // Reorder the node and its operands.
2894 TE->reorderOperands(Mask); 2895 assert(TE->ReorderIndices.empty() && 2896 "Expected empty reorder sequence."); 2897 reorderScalars(TE->Scalars, Mask); 2898 } 2899 if (!TE->ReuseShuffleIndices.empty()) { 2900 // Apply reversed order to keep the original ordering of the reused 2901 // elements to avoid extra reorder indices shuffling. 2902 OrdersType CurrentOrder; 2903 reorderOrder(CurrentOrder, MaskOrder); 2904 SmallVector<int> NewReuses; 2905 inversePermutation(CurrentOrder, NewReuses); 2906 addMask(NewReuses, TE->ReuseShuffleIndices); 2907 TE->ReuseShuffleIndices.swap(NewReuses); 2908 } 2909 } 2910 } 2911 } 2912 2913 void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) { 2914 SetVector<TreeEntry *> OrderedEntries; 2915 DenseMap<const TreeEntry *, OrdersType> GathersToOrders; 2916 // Find all reorderable leaf nodes with the given VF. 2917 // Currently these are vectorized loads, extracts without alternate operands 2918 // + some gathering of extracts. 2919 SmallVector<TreeEntry *> NonVectorized; 2920 for_each(VectorizableTree, [this, &OrderedEntries, &GathersToOrders, 2921 &NonVectorized]( 2922 const std::unique_ptr<TreeEntry> &TE) { 2923 if (TE->State != TreeEntry::Vectorize) 2924 NonVectorized.push_back(TE.get()); 2925 // No need to reorder if we need to shuffle reuses; the node still needs 2926 // to be shuffled. 2927 if (!TE->ReuseShuffleIndices.empty()) 2928 return; 2929 if (TE->State == TreeEntry::Vectorize && 2930 isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE->getMainOp()) && 2931 !TE->isAltShuffle()) { 2932 OrderedEntries.insert(TE.get()); 2933 return; 2934 } 2935 if (TE->State == TreeEntry::NeedToGather) { 2936 if (TE->getOpcode() == Instruction::ExtractElement && 2937 !TE->isAltShuffle() && 2938 isa<FixedVectorType>(cast<ExtractElementInst>(TE->getMainOp()) 2939 ->getVectorOperandType()) && 2940 allSameType(TE->Scalars) && allSameBlock(TE->Scalars)) { 2941 // Check that gather of extractelements can be represented as 2942 // just a shuffle of a single vector with a single user only. 2943 OrdersType CurrentOrder; 2944 bool Reuse = 2945 canReuseExtract(TE->Scalars, TE->getMainOp(), CurrentOrder); 2946 if ((Reuse || !CurrentOrder.empty()) && 2947 !any_of(VectorizableTree, 2948 [&TE](const std::unique_ptr<TreeEntry> &Entry) { 2949 return Entry->State == TreeEntry::NeedToGather && 2950 Entry.get() != TE.get() && 2951 Entry->isSame(TE->Scalars); 2952 })) { 2953 OrderedEntries.insert(TE.get()); 2954 GathersToOrders.try_emplace(TE.get(), CurrentOrder); 2955 return; 2956 } 2957 } 2958 if (Optional<OrdersType> CurrentOrder = 2959 findReusedOrderedScalars(*TE.get())) { 2960 OrderedEntries.insert(TE.get()); 2961 GathersToOrders.try_emplace(TE.get(), *CurrentOrder); 2962 } 2963 } 2964 }); 2965 2966 // Checks if the operands of the users are reorderable and have only a 2967 // single use.
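  // E.g. (illustrative): CheckOperands below rejects the user if one of its
  // operand lists is already materialized by a matching vectorized entry, or
  // if more than one non-vectorized (gather) entry matches the same operand
  // list, since reordering would then not pay off.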
2968 auto &&CheckOperands = 2969 [this, &NonVectorized](const auto &Data, 2970 SmallVectorImpl<TreeEntry *> &GatherOps) { 2971 for (unsigned I = 0, E = Data.first->getNumOperands(); I < E; ++I) { 2972 if (any_of(Data.second, 2973 [I](const std::pair<unsigned, TreeEntry *> &OpData) { 2974 return OpData.first == I && 2975 OpData.second->State == TreeEntry::Vectorize; 2976 })) 2977 continue; 2978 ArrayRef<Value *> VL = Data.first->getOperand(I); 2979 const TreeEntry *TE = nullptr; 2980 const auto *It = find_if(VL, [this, &TE](Value *V) { 2981 TE = getTreeEntry(V); 2982 return TE; 2983 }); 2984 if (It != VL.end() && TE->isSame(VL)) 2985 return false; 2986 TreeEntry *Gather = nullptr; 2987 if (count_if(NonVectorized, [VL, &Gather](TreeEntry *TE) { 2988 assert(TE->State != TreeEntry::Vectorize && 2989 "Only non-vectorized nodes are expected."); 2990 if (TE->isSame(VL)) { 2991 Gather = TE; 2992 return true; 2993 } 2994 return false; 2995 }) > 1) 2996 return false; 2997 if (Gather) 2998 GatherOps.push_back(Gather); 2999 } 3000 return true; 3001 }; 3002 // 1. Propagate order to the graph nodes, which use only reordered nodes. 3003 // I.e., if the node has operands, that are reordered, try to make at least 3004 // one operand order in the natural order and reorder others + reorder the 3005 // user node itself. 3006 SmallPtrSet<const TreeEntry *, 4> Visited; 3007 while (!OrderedEntries.empty()) { 3008 // 1. Filter out only reordered nodes. 3009 // 2. If the entry has multiple uses - skip it and jump to the next node. 3010 MapVector<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users; 3011 SmallVector<TreeEntry *> Filtered; 3012 for (TreeEntry *TE : OrderedEntries) { 3013 if (!(TE->State == TreeEntry::Vectorize || 3014 (TE->State == TreeEntry::NeedToGather && 3015 GathersToOrders.count(TE))) || 3016 TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() || 3017 !all_of(drop_begin(TE->UserTreeIndices), 3018 [TE](const EdgeInfo &EI) { 3019 return EI.UserTE == TE->UserTreeIndices.front().UserTE; 3020 }) || 3021 !Visited.insert(TE).second) { 3022 Filtered.push_back(TE); 3023 continue; 3024 } 3025 // Build a map between user nodes and their operands order to speedup 3026 // search. The graph currently does not provide this dependency directly. 3027 for (EdgeInfo &EI : TE->UserTreeIndices) { 3028 TreeEntry *UserTE = EI.UserTE; 3029 auto It = Users.find(UserTE); 3030 if (It == Users.end()) 3031 It = Users.insert({UserTE, {}}).first; 3032 It->second.emplace_back(EI.EdgeIdx, TE); 3033 } 3034 } 3035 // Erase filtered entries. 3036 for_each(Filtered, 3037 [&OrderedEntries](TreeEntry *TE) { OrderedEntries.remove(TE); }); 3038 for (const auto &Data : Users) { 3039 // Check that operands are used only in the User node. 3040 SmallVector<TreeEntry *> GatherOps; 3041 if (!CheckOperands(Data, GatherOps)) { 3042 for_each(Data.second, 3043 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { 3044 OrderedEntries.remove(Op.second); 3045 }); 3046 continue; 3047 } 3048 // All operands are reordered and used only in this node - propagate the 3049 // most used order to the user node. 
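      // Sketch of the voting below, assuming single-use operands: each operand
      // adds one vote for its order, where the empty OrdersType{} stands for
      // the natural (identity) order; e.g. two operands ordered {1, 0} plus
      // one natural operand elect {1, 0}.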
3050 MapVector<OrdersType, unsigned, 3051 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>> 3052 OrdersUses; 3053 SmallPtrSet<const TreeEntry *, 4> VisitedOps; 3054 for (const auto &Op : Data.second) { 3055 TreeEntry *OpTE = Op.second; 3056 if (!OpTE->ReuseShuffleIndices.empty() || 3057 (IgnoreReorder && OpTE == VectorizableTree.front().get())) 3058 continue; 3059 const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & { 3060 if (OpTE->State == TreeEntry::NeedToGather) 3061 return GathersToOrders.find(OpTE)->second; 3062 return OpTE->ReorderIndices; 3063 }(); 3064 // Stores actually store the mask, not the order, need to invert. 3065 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && 3066 OpTE->getOpcode() == Instruction::Store && !Order.empty()) { 3067 SmallVector<int> Mask; 3068 inversePermutation(Order, Mask); 3069 unsigned E = Order.size(); 3070 OrdersType CurrentOrder(E, E); 3071 transform(Mask, CurrentOrder.begin(), [E](int Idx) { 3072 return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx); 3073 }); 3074 fixupOrderingIndices(CurrentOrder); 3075 ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second; 3076 } else { 3077 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second; 3078 } 3079 if (VisitedOps.insert(OpTE).second) 3080 OrdersUses.insert(std::make_pair(OrdersType(), 0)).first->second += 3081 OpTE->UserTreeIndices.size(); 3082 assert(OrdersUses[{}] > 0 && "Counter cannot be less than 0."); 3083 --OrdersUses[{}]; 3084 } 3085 // If no orders - skip current nodes and jump to the next one, if any. 3086 if (OrdersUses.empty()) { 3087 for_each(Data.second, 3088 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { 3089 OrderedEntries.remove(Op.second); 3090 }); 3091 continue; 3092 } 3093 // Choose the best order. 3094 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 3095 unsigned Cnt = OrdersUses.front().second; 3096 for (const auto &Pair : drop_begin(OrdersUses)) { 3097 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 3098 BestOrder = Pair.first; 3099 Cnt = Pair.second; 3100 } 3101 } 3102 // Set order of the user node (reordering of operands and user nodes). 3103 if (BestOrder.empty()) { 3104 for_each(Data.second, 3105 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { 3106 OrderedEntries.remove(Op.second); 3107 }); 3108 continue; 3109 } 3110 // Erase operands from OrderedEntries list and adjust their orders. 3111 VisitedOps.clear(); 3112 SmallVector<int> Mask; 3113 inversePermutation(BestOrder, Mask); 3114 SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem); 3115 unsigned E = BestOrder.size(); 3116 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 3117 return I < E ? static_cast<int>(I) : UndefMaskElem; 3118 }); 3119 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) { 3120 TreeEntry *TE = Op.second; 3121 OrderedEntries.remove(TE); 3122 if (!VisitedOps.insert(TE).second) 3123 continue; 3124 if (!TE->ReuseShuffleIndices.empty() && TE->ReorderIndices.empty()) { 3125 // Just reorder reuses indices. 3126 reorderReuses(TE->ReuseShuffleIndices, Mask); 3127 continue; 3128 } 3129 // Gathers are processed separately. 3130 if (TE->State != TreeEntry::Vectorize) 3131 continue; 3132 assert((BestOrder.size() == TE->ReorderIndices.size() || 3133 TE->ReorderIndices.empty()) && 3134 "Non-matching sizes of user/operand entries."); 3135 reorderOrder(TE->ReorderIndices, Mask); 3136 } 3137 // For gathers just need to reorder its scalars. 
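      // (Illustrative: reorderScalars() applies the mask directly to the
      // scalar list, so Scalars = {a, b, c, d} under the self-inverse mask
      // {1, 0, 3, 2} becomes {b, a, d, c}.)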
3138 for (TreeEntry *Gather : GatherOps) { 3139 assert(Gather->ReorderIndices.empty() && 3140 "Unexpected reordering of gathers."); 3141 if (!Gather->ReuseShuffleIndices.empty()) { 3142 // Just reorder reuses indices. 3143 reorderReuses(Gather->ReuseShuffleIndices, Mask); 3144 continue; 3145 } 3146 reorderScalars(Gather->Scalars, Mask); 3147 OrderedEntries.remove(Gather); 3148 } 3149 // Reorder operands of the user node and set the ordering for the user 3150 // node itself. 3151 if (Data.first->State != TreeEntry::Vectorize || 3152 !isa<ExtractElementInst, ExtractValueInst, LoadInst>( 3153 Data.first->getMainOp()) || 3154 Data.first->isAltShuffle()) 3155 Data.first->reorderOperands(Mask); 3156 if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) || 3157 Data.first->isAltShuffle()) { 3158 reorderScalars(Data.first->Scalars, Mask); 3159 reorderOrder(Data.first->ReorderIndices, MaskOrder); 3160 if (Data.first->ReuseShuffleIndices.empty() && 3161 !Data.first->ReorderIndices.empty() && 3162 !Data.first->isAltShuffle()) { 3163 // Insert user node to the list to try to sink reordering deeper in 3164 // the graph. 3165 OrderedEntries.insert(Data.first); 3166 } 3167 } else { 3168 reorderOrder(Data.first->ReorderIndices, Mask); 3169 } 3170 } 3171 } 3172 // If the reordering is unnecessary, just remove the reorder. 3173 if (IgnoreReorder && !VectorizableTree.front()->ReorderIndices.empty() && 3174 VectorizableTree.front()->ReuseShuffleIndices.empty()) 3175 VectorizableTree.front()->ReorderIndices.clear(); 3176 } 3177 3178 void BoUpSLP::buildExternalUses( 3179 const ExtraValueToDebugLocsMap &ExternallyUsedValues) { 3180 // Collect the values that we need to extract from the tree. 3181 for (auto &TEPtr : VectorizableTree) { 3182 TreeEntry *Entry = TEPtr.get(); 3183 3184 // No need to handle users of gathered values. 3185 if (Entry->State == TreeEntry::NeedToGather) 3186 continue; 3187 3188 // For each lane: 3189 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 3190 Value *Scalar = Entry->Scalars[Lane]; 3191 int FoundLane = Entry->findLaneForValue(Scalar); 3192 3193 // Check if the scalar is externally used as an extra arg. 3194 auto ExtI = ExternallyUsedValues.find(Scalar); 3195 if (ExtI != ExternallyUsedValues.end()) { 3196 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " 3197 << Lane << " from " << *Scalar << ".\n"); 3198 ExternalUses.emplace_back(Scalar, nullptr, FoundLane); 3199 } 3200 for (User *U : Scalar->users()) { 3201 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); 3202 3203 Instruction *UserInst = dyn_cast<Instruction>(U); 3204 if (!UserInst) 3205 continue; 3206 3207 if (isDeleted(UserInst)) 3208 continue; 3209 3210 // Skip in-tree scalars that become vectors 3211 if (TreeEntry *UseEntry = getTreeEntry(U)) { 3212 Value *UseScalar = UseEntry->Scalars[0]; 3213 // Some in-tree scalars will remain as scalar in vectorized 3214 // instructions. If that is the case, the one in Lane 0 will 3215 // be used. 3216 if (UseScalar != U || 3217 UseEntry->State == TreeEntry::ScatterVectorize || 3218 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) { 3219 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U 3220 << ".\n"); 3221 assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state"); 3222 continue; 3223 } 3224 } 3225 3226 // Ignore users in the user ignore list. 
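        // (These are typically the root operations of a matched horizontal
        // reduction, which must stay scalar; see the corresponding note in
        // buildTree_rec().)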
3227 if (is_contained(UserIgnoreList, UserInst)) 3228 continue; 3229 3230 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " 3231 << Lane << " from " << *Scalar << ".\n"); 3232 ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane)); 3233 } 3234 } 3235 } 3236 } 3237 3238 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 3239 ArrayRef<Value *> UserIgnoreLst) { 3240 deleteTree(); 3241 UserIgnoreList = UserIgnoreLst; 3242 if (!allSameType(Roots)) 3243 return; 3244 buildTree_rec(Roots, 0, EdgeInfo()); 3245 } 3246 3247 namespace { 3248 /// Tracks the state we can represent the loads in the given sequence. 3249 enum class LoadsState { Gather, Vectorize, ScatterVectorize }; 3250 } // anonymous namespace 3251 3252 /// Checks if the given array of loads can be represented as a vectorized, 3253 /// scatter or just simple gather. 3254 static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0, 3255 const TargetTransformInfo &TTI, 3256 const DataLayout &DL, ScalarEvolution &SE, 3257 SmallVectorImpl<unsigned> &Order, 3258 SmallVectorImpl<Value *> &PointerOps) { 3259 // Check that a vectorized load would load the same memory as a scalar 3260 // load. For example, we don't want to vectorize loads that are smaller 3261 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 3262 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 3263 // from such a struct, we read/write packed bits disagreeing with the 3264 // unvectorized version. 3265 Type *ScalarTy = VL0->getType(); 3266 3267 if (DL.getTypeSizeInBits(ScalarTy) != DL.getTypeAllocSizeInBits(ScalarTy)) 3268 return LoadsState::Gather; 3269 3270 // Make sure all loads in the bundle are simple - we can't vectorize 3271 // atomic or volatile loads. 3272 PointerOps.clear(); 3273 PointerOps.resize(VL.size()); 3274 auto *POIter = PointerOps.begin(); 3275 for (Value *V : VL) { 3276 auto *L = cast<LoadInst>(V); 3277 if (!L->isSimple()) 3278 return LoadsState::Gather; 3279 *POIter = L->getPointerOperand(); 3280 ++POIter; 3281 } 3282 3283 Order.clear(); 3284 // Check the order of pointer operands. 3285 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, DL, SE, Order)) { 3286 Value *Ptr0; 3287 Value *PtrN; 3288 if (Order.empty()) { 3289 Ptr0 = PointerOps.front(); 3290 PtrN = PointerOps.back(); 3291 } else { 3292 Ptr0 = PointerOps[Order.front()]; 3293 PtrN = PointerOps[Order.back()]; 3294 } 3295 Optional<int> Diff = 3296 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE); 3297 // Check that the sorted loads are consecutive. 3298 if (static_cast<unsigned>(*Diff) == VL.size() - 1) 3299 return LoadsState::Vectorize; 3300 Align CommonAlignment = cast<LoadInst>(VL0)->getAlign(); 3301 for (Value *V : VL) 3302 CommonAlignment = 3303 commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign()); 3304 if (TTI.isLegalMaskedGather(FixedVectorType::get(ScalarTy, VL.size()), 3305 CommonAlignment)) 3306 return LoadsState::ScatterVectorize; 3307 } 3308 3309 return LoadsState::Gather; 3310 } 3311 3312 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, 3313 const EdgeInfo &UserTreeIdx) { 3314 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!"); 3315 3316 SmallVector<int> ReuseShuffleIndicies; 3317 SmallVector<Value *> UniqueValues; 3318 auto &&TryToFindDuplicates = [&VL, &ReuseShuffleIndicies, &UniqueValues, 3319 &UserTreeIdx, 3320 this](const InstructionsState &S) { 3321 // Check that every instruction appears once in this bundle. 
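    // For example (sketch): VL = {a, b, a, b} yields UniqueValues = {a, b} and
    // ReuseShuffleIndicies = {0, 1, 0, 1}; the bundle is then vectorized for
    // the unique scalars only and the duplicates are recreated by a shuffle.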
3322 DenseMap<Value *, unsigned> UniquePositions; 3323 for (Value *V : VL) { 3324 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 3325 ReuseShuffleIndicies.emplace_back(isa<UndefValue>(V) ? -1 3326 : Res.first->second); 3327 if (Res.second) 3328 UniqueValues.emplace_back(V); 3329 } 3330 size_t NumUniqueScalarValues = UniqueValues.size(); 3331 if (NumUniqueScalarValues == VL.size()) { 3332 ReuseShuffleIndicies.clear(); 3333 } else { 3334 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); 3335 if (NumUniqueScalarValues <= 1 || 3336 !llvm::isPowerOf2_32(NumUniqueScalarValues)) { 3337 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 3338 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3339 return false; 3340 } 3341 VL = UniqueValues; 3342 } 3343 return true; 3344 }; 3345 3346 InstructionsState S = getSameOpcode(VL); 3347 if (Depth == RecursionMaxDepth) { 3348 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); 3349 if (TryToFindDuplicates(S)) 3350 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3351 ReuseShuffleIndicies); 3352 return; 3353 } 3354 3355 // Don't handle scalable vectors 3356 if (S.getOpcode() == Instruction::ExtractElement && 3357 isa<ScalableVectorType>( 3358 cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) { 3359 LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n"); 3360 if (TryToFindDuplicates(S)) 3361 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3362 ReuseShuffleIndicies); 3363 return; 3364 } 3365 3366 // Don't handle vectors. 3367 if (S.OpValue->getType()->isVectorTy() && 3368 !isa<InsertElementInst>(S.OpValue)) { 3369 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); 3370 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3371 return; 3372 } 3373 3374 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 3375 if (SI->getValueOperand()->getType()->isVectorTy()) { 3376 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); 3377 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3378 return; 3379 } 3380 3381 // If all of the operands are identical or constant we have a simple solution. 3382 // If we deal with insert/extract instructions, they all must have constant 3383 // indices, otherwise we should gather them, not try to vectorize. 3384 if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode() || 3385 (isa<InsertElementInst, ExtractValueInst, ExtractElementInst>(S.MainOp) && 3386 !all_of(VL, isVectorLikeInstWithConstOps))) { 3387 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n"); 3388 if (TryToFindDuplicates(S)) 3389 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3390 ReuseShuffleIndicies); 3391 return; 3392 } 3393 3394 // We now know that this is a vector of instructions of the same type from 3395 // the same block. 3396 3397 // Don't vectorize ephemeral values. 3398 for (Value *V : VL) { 3399 if (EphValues.count(V)) { 3400 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 3401 << ") is ephemeral.\n"); 3402 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3403 return; 3404 } 3405 } 3406 3407 // Check if this is a duplicate of another entry. 
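  // (A "perfect diamond merge" below means the same bundle is requested by two
  // different user nodes; the existing entry is reused rather than duplicating
  // the whole subtree.)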
3408 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 3409 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); 3410 if (!E->isSame(VL)) { 3411 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); 3412 if (TryToFindDuplicates(S)) 3413 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3414 ReuseShuffleIndicies); 3415 return; 3416 } 3417 // Record the reuse of the tree node. FIXME, currently this is only used to 3418 // properly draw the graph rather than for the actual vectorization. 3419 E->UserTreeIndices.push_back(UserTreeIdx); 3420 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue 3421 << ".\n"); 3422 return; 3423 } 3424 3425 // Check that none of the instructions in the bundle are already in the tree. 3426 for (Value *V : VL) { 3427 auto *I = dyn_cast<Instruction>(V); 3428 if (!I) 3429 continue; 3430 if (getTreeEntry(I)) { 3431 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 3432 << ") is already in tree.\n"); 3433 if (TryToFindDuplicates(S)) 3434 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3435 ReuseShuffleIndicies); 3436 return; 3437 } 3438 } 3439 3440 // If any of the scalars is marked as a value that needs to stay scalar, then 3441 // we need to gather the scalars. 3442 // The reduction nodes (stored in UserIgnoreList) also should stay scalar. 3443 for (Value *V : VL) { 3444 if (MustGather.count(V) || is_contained(UserIgnoreList, V)) { 3445 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); 3446 if (TryToFindDuplicates(S)) 3447 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3448 ReuseShuffleIndicies); 3449 return; 3450 } 3451 } 3452 3453 // Check that all of the users of the scalars that we want to vectorize are 3454 // schedulable. 3455 auto *VL0 = cast<Instruction>(S.OpValue); 3456 BasicBlock *BB = VL0->getParent(); 3457 3458 if (!DT->isReachableFromEntry(BB)) { 3459 // Don't go into unreachable blocks. They may contain instructions with 3460 // dependency cycles which confuse the final scheduling. 3461 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); 3462 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3463 return; 3464 } 3465 3466 // Check that every instruction appears once in this bundle. 3467 if (!TryToFindDuplicates(S)) 3468 return; 3469 3470 auto &BSRef = BlocksSchedules[BB]; 3471 if (!BSRef) 3472 BSRef = std::make_unique<BlockScheduling>(BB); 3473 3474 BlockScheduling &BS = *BSRef.get(); 3475 3476 Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S); 3477 if (!Bundle) { 3478 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); 3479 assert((!BS.getScheduleData(VL0) || 3480 !BS.getScheduleData(VL0)->isPartOfBundle()) && 3481 "tryScheduleBundle should cancelScheduling on failure"); 3482 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3483 ReuseShuffleIndicies); 3484 return; 3485 } 3486 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 3487 3488 unsigned ShuffleOrOp = S.isAltShuffle() ? 3489 (unsigned) Instruction::ShuffleVector : S.getOpcode(); 3490 switch (ShuffleOrOp) { 3491 case Instruction::PHI: { 3492 auto *PH = cast<PHINode>(VL0); 3493 3494 // Check for terminator values (e.g. invoke). 
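      // (For example, an invoke's result is the invoke itself, a block
      // terminator, so no vector instruction could be inserted after it in the
      // incoming block; such PHI bundles are gathered instead.)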
3495 for (Value *V : VL) 3496 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { 3497 Instruction *Term = dyn_cast<Instruction>( 3498 cast<PHINode>(V)->getIncomingValueForBlock( 3499 PH->getIncomingBlock(I))); 3500 if (Term && Term->isTerminator()) { 3501 LLVM_DEBUG(dbgs() 3502 << "SLP: Need to swizzle PHINodes (terminator use).\n"); 3503 BS.cancelScheduling(VL, VL0); 3504 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3505 ReuseShuffleIndicies); 3506 return; 3507 } 3508 } 3509 3510 TreeEntry *TE = 3511 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies); 3512 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 3513 3514 // Keeps the reordered operands to avoid code duplication. 3515 SmallVector<ValueList, 2> OperandsVec; 3516 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { 3517 if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) { 3518 ValueList Operands(VL.size(), PoisonValue::get(PH->getType())); 3519 TE->setOperand(I, Operands); 3520 OperandsVec.push_back(Operands); 3521 continue; 3522 } 3523 ValueList Operands; 3524 // Prepare the operand vector. 3525 for (Value *V : VL) 3526 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock( 3527 PH->getIncomingBlock(I))); 3528 TE->setOperand(I, Operands); 3529 OperandsVec.push_back(Operands); 3530 } 3531 for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx) 3532 buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx}); 3533 return; 3534 } 3535 case Instruction::ExtractValue: 3536 case Instruction::ExtractElement: { 3537 OrdersType CurrentOrder; 3538 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder); 3539 if (Reuse) { 3540 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n"); 3541 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3542 ReuseShuffleIndicies); 3543 // This is a special case, as it does not gather, but at the same time 3544 // we are not extending buildTree_rec() towards the operands. 3545 ValueList Op0; 3546 Op0.assign(VL.size(), VL0->getOperand(0)); 3547 VectorizableTree.back()->setOperand(0, Op0); 3548 return; 3549 } 3550 if (!CurrentOrder.empty()) { 3551 LLVM_DEBUG({ 3552 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence " 3553 "with order"; 3554 for (unsigned Idx : CurrentOrder) 3555 dbgs() << " " << Idx; 3556 dbgs() << "\n"; 3557 }); 3558 fixupOrderingIndices(CurrentOrder); 3559 // Insert new order with initial value 0, if it does not exist, 3560 // otherwise return the iterator to the existing one. 3561 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3562 ReuseShuffleIndicies, CurrentOrder); 3563 // This is a special case, as it does not gather, but at the same time 3564 // we are not extending buildTree_rec() towards the operands. 3565 ValueList Op0; 3566 Op0.assign(VL.size(), VL0->getOperand(0)); 3567 VectorizableTree.back()->setOperand(0, Op0); 3568 return; 3569 } 3570 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n"); 3571 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3572 ReuseShuffleIndicies); 3573 BS.cancelScheduling(VL, VL0); 3574 return; 3575 } 3576 case Instruction::InsertElement: { 3577 assert(ReuseShuffleIndicies.empty() && "All inserts should be unique"); 3578 3579 // Check that we have a buildvector and not a shuffle of 2 or more 3580 // different vectors. 
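      // A buildvector chain, for illustration:
      //   %i0 = insertelement <4 x float> poison, float %a, i32 0
      //   %i1 = insertelement <4 x float> %i0, float %b, i32 1
      //   %i2 = insertelement <4 x float> %i1, float %c, i32 2
      //   %i3 = insertelement <4 x float> %i2, float %d, i32 3
      // Every insert except the first uses the previous insert as its source,
      // so all but one value of VL appear in SourceVectors below.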
3581       ValueSet SourceVectors;
3582       int MinIdx = std::numeric_limits<int>::max();
3583       for (Value *V : VL) {
3584         SourceVectors.insert(cast<Instruction>(V)->getOperand(0));
3585         Optional<int> Idx = getInsertIndex(V, 0);
3586         if (!Idx || *Idx == UndefMaskElem)
3587           continue;
3588         MinIdx = std::min(MinIdx, *Idx);
3589       }
3590 
3591       if (count_if(VL, [&SourceVectors](Value *V) {
3592             return !SourceVectors.contains(V);
3593           }) >= 2) {
3594         // Found a 2nd source vector - cancel.
3595         LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with "
3596                              "different source vectors.\n");
3597         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
3598         BS.cancelScheduling(VL, VL0);
3599         return;
3600       }
3601 
3602       auto OrdCompare = [](const std::pair<int, int> &P1,
3603                            const std::pair<int, int> &P2) {
3604         return P1.first > P2.first;
3605       };
3606       PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>,
3607                     decltype(OrdCompare)>
3608           Indices(OrdCompare);
3609       for (int I = 0, E = VL.size(); I < E; ++I) {
3610         Optional<int> Idx = getInsertIndex(VL[I], 0);
3611         if (!Idx || *Idx == UndefMaskElem)
3612           continue;
3613         Indices.emplace(*Idx, I);
3614       }
3615       OrdersType CurrentOrder(VL.size(), VL.size());
3616       bool IsIdentity = true;
3617       for (int I = 0, E = VL.size(); I < E; ++I) {
3618         CurrentOrder[Indices.top().second] = I;
3619         IsIdentity &= Indices.top().second == I;
3620         Indices.pop();
3621       }
3622       if (IsIdentity)
3623         CurrentOrder.clear();
3624       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3625                                    None, CurrentOrder);
3626       LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n");
3627 
3628       constexpr int NumOps = 2;
3629       ValueList VectorOperands[NumOps];
3630       for (int I = 0; I < NumOps; ++I) {
3631         for (Value *V : VL)
3632           VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I));
3633 
3634         TE->setOperand(I, VectorOperands[I]);
3635       }
3636       buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1});
3637       return;
3638     }
3639     case Instruction::Load: {
3640       // Check that a vectorized load would load the same memory as a scalar
3641       // load. For example, we don't want to vectorize loads that are smaller
3642       // than 8 bits. Even though we have a packed struct {<i2, i2, i2, i2>}
3643       // LLVM treats loading/storing it as an i8 struct. If we vectorize
3644       // loads/stores from such a struct, we read/write packed bits
3645       // disagreeing with the unvectorized version.
3646       SmallVector<Value *> PointerOps;
3647       OrdersType CurrentOrder;
3648       TreeEntry *TE = nullptr;
3649       switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, CurrentOrder,
3650                                 PointerOps)) {
3651       case LoadsState::Vectorize:
3652         if (CurrentOrder.empty()) {
3653           // Original loads are consecutive and do not require reordering.
3654           TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3655                             ReuseShuffleIndicies);
3656           LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
3657         } else {
3658           fixupOrderingIndices(CurrentOrder);
3659           // Need to reorder.
3660           TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3661                             ReuseShuffleIndicies, CurrentOrder);
3662           LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
3663         }
3664         TE->setOperandsInOrder();
3665         break;
3666       case LoadsState::ScatterVectorize:
3667         // Vectorizing non-consecutive loads with `llvm.masked.gather`.
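        // (The pointer operands become a regular operand here and are
        // vectorized recursively by the buildTree_rec(PointerOps, ...) call
        // below.)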
3668 TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S, 3669 UserTreeIdx, ReuseShuffleIndicies); 3670 TE->setOperandsInOrder(); 3671 buildTree_rec(PointerOps, Depth + 1, {TE, 0}); 3672 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); 3673 break; 3674 case LoadsState::Gather: 3675 BS.cancelScheduling(VL, VL0); 3676 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3677 ReuseShuffleIndicies); 3678 #ifndef NDEBUG 3679 Type *ScalarTy = VL0->getType(); 3680 if (DL->getTypeSizeInBits(ScalarTy) != 3681 DL->getTypeAllocSizeInBits(ScalarTy)) 3682 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); 3683 else if (any_of(VL, [](Value *V) { 3684 return !cast<LoadInst>(V)->isSimple(); 3685 })) 3686 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); 3687 else 3688 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); 3689 #endif // NDEBUG 3690 break; 3691 } 3692 return; 3693 } 3694 case Instruction::ZExt: 3695 case Instruction::SExt: 3696 case Instruction::FPToUI: 3697 case Instruction::FPToSI: 3698 case Instruction::FPExt: 3699 case Instruction::PtrToInt: 3700 case Instruction::IntToPtr: 3701 case Instruction::SIToFP: 3702 case Instruction::UIToFP: 3703 case Instruction::Trunc: 3704 case Instruction::FPTrunc: 3705 case Instruction::BitCast: { 3706 Type *SrcTy = VL0->getOperand(0)->getType(); 3707 for (Value *V : VL) { 3708 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType(); 3709 if (Ty != SrcTy || !isValidElementType(Ty)) { 3710 BS.cancelScheduling(VL, VL0); 3711 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3712 ReuseShuffleIndicies); 3713 LLVM_DEBUG(dbgs() 3714 << "SLP: Gathering casts with different src types.\n"); 3715 return; 3716 } 3717 } 3718 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3719 ReuseShuffleIndicies); 3720 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); 3721 3722 TE->setOperandsInOrder(); 3723 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 3724 ValueList Operands; 3725 // Prepare the operand vector. 3726 for (Value *V : VL) 3727 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 3728 3729 buildTree_rec(Operands, Depth + 1, {TE, i}); 3730 } 3731 return; 3732 } 3733 case Instruction::ICmp: 3734 case Instruction::FCmp: { 3735 // Check that all of the compares have the same predicate. 3736 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 3737 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0); 3738 Type *ComparedTy = VL0->getOperand(0)->getType(); 3739 for (Value *V : VL) { 3740 CmpInst *Cmp = cast<CmpInst>(V); 3741 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) || 3742 Cmp->getOperand(0)->getType() != ComparedTy) { 3743 BS.cancelScheduling(VL, VL0); 3744 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3745 ReuseShuffleIndicies); 3746 LLVM_DEBUG(dbgs() 3747 << "SLP: Gathering cmp with different predicate.\n"); 3748 return; 3749 } 3750 } 3751 3752 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3753 ReuseShuffleIndicies); 3754 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 3755 3756 ValueList Left, Right; 3757 if (cast<CmpInst>(VL0)->isCommutative()) { 3758 // Commutative predicate - collect + sort operands of the instructions 3759 // so that each side is more likely to have the same opcode. 
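        // (Only equality-like predicates, e.g. eq/ne, are commutative; for
        // them the swapped predicate equals the predicate itself, which the
        // assert below relies on.)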
3760 assert(P0 == SwapP0 && "Commutative Predicate mismatch"); 3761 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 3762 } else { 3763 // Collect operands - commute if it uses the swapped predicate. 3764 for (Value *V : VL) { 3765 auto *Cmp = cast<CmpInst>(V); 3766 Value *LHS = Cmp->getOperand(0); 3767 Value *RHS = Cmp->getOperand(1); 3768 if (Cmp->getPredicate() != P0) 3769 std::swap(LHS, RHS); 3770 Left.push_back(LHS); 3771 Right.push_back(RHS); 3772 } 3773 } 3774 TE->setOperand(0, Left); 3775 TE->setOperand(1, Right); 3776 buildTree_rec(Left, Depth + 1, {TE, 0}); 3777 buildTree_rec(Right, Depth + 1, {TE, 1}); 3778 return; 3779 } 3780 case Instruction::Select: 3781 case Instruction::FNeg: 3782 case Instruction::Add: 3783 case Instruction::FAdd: 3784 case Instruction::Sub: 3785 case Instruction::FSub: 3786 case Instruction::Mul: 3787 case Instruction::FMul: 3788 case Instruction::UDiv: 3789 case Instruction::SDiv: 3790 case Instruction::FDiv: 3791 case Instruction::URem: 3792 case Instruction::SRem: 3793 case Instruction::FRem: 3794 case Instruction::Shl: 3795 case Instruction::LShr: 3796 case Instruction::AShr: 3797 case Instruction::And: 3798 case Instruction::Or: 3799 case Instruction::Xor: { 3800 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3801 ReuseShuffleIndicies); 3802 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n"); 3803 3804 // Sort operands of the instructions so that each side is more likely to 3805 // have the same opcode. 3806 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 3807 ValueList Left, Right; 3808 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 3809 TE->setOperand(0, Left); 3810 TE->setOperand(1, Right); 3811 buildTree_rec(Left, Depth + 1, {TE, 0}); 3812 buildTree_rec(Right, Depth + 1, {TE, 1}); 3813 return; 3814 } 3815 3816 TE->setOperandsInOrder(); 3817 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 3818 ValueList Operands; 3819 // Prepare the operand vector. 3820 for (Value *V : VL) 3821 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 3822 3823 buildTree_rec(Operands, Depth + 1, {TE, i}); 3824 } 3825 return; 3826 } 3827 case Instruction::GetElementPtr: { 3828 // We don't combine GEPs with complicated (nested) indexing. 3829 for (Value *V : VL) { 3830 if (cast<Instruction>(V)->getNumOperands() != 2) { 3831 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 3832 BS.cancelScheduling(VL, VL0); 3833 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3834 ReuseShuffleIndicies); 3835 return; 3836 } 3837 } 3838 3839 // We can't combine several GEPs into one vector if they operate on 3840 // different types. 3841 Type *Ty0 = VL0->getOperand(0)->getType(); 3842 for (Value *V : VL) { 3843 Type *CurTy = cast<Instruction>(V)->getOperand(0)->getType(); 3844 if (Ty0 != CurTy) { 3845 LLVM_DEBUG(dbgs() 3846 << "SLP: not-vectorizable GEP (different types).\n"); 3847 BS.cancelScheduling(VL, VL0); 3848 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3849 ReuseShuffleIndicies); 3850 return; 3851 } 3852 } 3853 3854 // We don't combine GEPs with non-constant indexes. 
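      // (Roughly: constant indices can be merged into a single constant index
      // vector; an index of a different type is still fine as long as it is
      // not wider than the index width of the GEP's address space.)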
3855       Type *Ty1 = VL0->getOperand(1)->getType();
3856       for (Value *V : VL) {
3857         auto *Op = cast<Instruction>(V)->getOperand(1);
3858         if (!isa<ConstantInt>(Op) ||
3859             (Op->getType() != Ty1 &&
3860              Op->getType()->getScalarSizeInBits() >
3861                  DL->getIndexSizeInBits(
3862                      V->getType()->getPointerAddressSpace()))) {
3863           LLVM_DEBUG(dbgs()
3864                      << "SLP: not-vectorizable GEP (non-constant indexes).\n");
3865           BS.cancelScheduling(VL, VL0);
3866           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3867                        ReuseShuffleIndicies);
3868           return;
3869         }
3870       }
3871 
3872       TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3873                                    ReuseShuffleIndicies);
3874       LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
3875       TE->setOperandsInOrder();
3876       for (unsigned i = 0, e = 2; i < e; ++i) {
3877         ValueList Operands;
3878         // Prepare the operand vector.
3879         for (Value *V : VL)
3880           Operands.push_back(cast<Instruction>(V)->getOperand(i));
3881 
3882         buildTree_rec(Operands, Depth + 1, {TE, i});
3883       }
3884       return;
3885     }
3886     case Instruction::Store: {
3887       // Check if the stores are consecutive or if we need to swizzle them.
3888       llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType();
3889       // Avoid types that are padded when being allocated as scalars, while
3890       // being packed together in a vector (such as i1).
3891       if (DL->getTypeSizeInBits(ScalarTy) !=
3892           DL->getTypeAllocSizeInBits(ScalarTy)) {
3893         BS.cancelScheduling(VL, VL0);
3894         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3895                      ReuseShuffleIndicies);
3896         LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n");
3897         return;
3898       }
3899       // Make sure all stores in the bundle are simple - we can't vectorize
3900       // atomic or volatile stores.
3901       SmallVector<Value *, 4> PointerOps(VL.size());
3902       ValueList Operands(VL.size());
3903       auto POIter = PointerOps.begin();
3904       auto OIter = Operands.begin();
3905       for (Value *V : VL) {
3906         auto *SI = cast<StoreInst>(V);
3907         if (!SI->isSimple()) {
3908           BS.cancelScheduling(VL, VL0);
3909           newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3910                        ReuseShuffleIndicies);
3911           LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n");
3912           return;
3913         }
3914         *POIter = SI->getPointerOperand();
3915         *OIter = SI->getValueOperand();
3916         ++POIter;
3917         ++OIter;
3918       }
3919 
3920       OrdersType CurrentOrder;
3921       // Check the order of pointer operands.
3922       if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) {
3923         Value *Ptr0;
3924         Value *PtrN;
3925         if (CurrentOrder.empty()) {
3926           Ptr0 = PointerOps.front();
3927           PtrN = PointerOps.back();
3928         } else {
3929           Ptr0 = PointerOps[CurrentOrder.front()];
3930           PtrN = PointerOps[CurrentOrder.back()];
3931         }
3932         Optional<int> Dist =
3933             getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE);
3934         // Check that the sorted pointer operands are consecutive.
3935         if (static_cast<unsigned>(*Dist) == VL.size() - 1) {
3936           if (CurrentOrder.empty()) {
3937             // Original stores are consecutive and do not require reordering.
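            // E.g. (sketch) four i32 stores at element offsets p, p+1, p+2,
            // p+3: the element distance between the first and last pointer is
            // 3 == VL.size() - 1, so the bundle forms one consecutive
            // <4 x i32> store.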
3938 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, 3939 UserTreeIdx, ReuseShuffleIndicies); 3940 TE->setOperandsInOrder(); 3941 buildTree_rec(Operands, Depth + 1, {TE, 0}); 3942 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n"); 3943 } else { 3944 fixupOrderingIndices(CurrentOrder); 3945 TreeEntry *TE = 3946 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3947 ReuseShuffleIndicies, CurrentOrder); 3948 TE->setOperandsInOrder(); 3949 buildTree_rec(Operands, Depth + 1, {TE, 0}); 3950 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n"); 3951 } 3952 return; 3953 } 3954 } 3955 3956 BS.cancelScheduling(VL, VL0); 3957 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3958 ReuseShuffleIndicies); 3959 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); 3960 return; 3961 } 3962 case Instruction::Call: { 3963 // Check if the calls are all to the same vectorizable intrinsic or 3964 // library function. 3965 CallInst *CI = cast<CallInst>(VL0); 3966 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3967 3968 VFShape Shape = VFShape::get( 3969 *CI, ElementCount::getFixed(static_cast<unsigned int>(VL.size())), 3970 false /*HasGlobalPred*/); 3971 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3972 3973 if (!VecFunc && !isTriviallyVectorizable(ID)) { 3974 BS.cancelScheduling(VL, VL0); 3975 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3976 ReuseShuffleIndicies); 3977 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n"); 3978 return; 3979 } 3980 Function *F = CI->getCalledFunction(); 3981 unsigned NumArgs = CI->arg_size(); 3982 SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr); 3983 for (unsigned j = 0; j != NumArgs; ++j) 3984 if (hasVectorInstrinsicScalarOpd(ID, j)) 3985 ScalarArgs[j] = CI->getArgOperand(j); 3986 for (Value *V : VL) { 3987 CallInst *CI2 = dyn_cast<CallInst>(V); 3988 if (!CI2 || CI2->getCalledFunction() != F || 3989 getVectorIntrinsicIDForCall(CI2, TLI) != ID || 3990 (VecFunc && 3991 VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) || 3992 !CI->hasIdenticalOperandBundleSchema(*CI2)) { 3993 BS.cancelScheduling(VL, VL0); 3994 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3995 ReuseShuffleIndicies); 3996 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V 3997 << "\n"); 3998 return; 3999 } 4000 // Some intrinsics have scalar arguments and should be same in order for 4001 // them to be vectorized. 4002 for (unsigned j = 0; j != NumArgs; ++j) { 4003 if (hasVectorInstrinsicScalarOpd(ID, j)) { 4004 Value *A1J = CI2->getArgOperand(j); 4005 if (ScalarArgs[j] != A1J) { 4006 BS.cancelScheduling(VL, VL0); 4007 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4008 ReuseShuffleIndicies); 4009 LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI 4010 << " argument " << ScalarArgs[j] << "!=" << A1J 4011 << "\n"); 4012 return; 4013 } 4014 } 4015 } 4016 // Verify that the bundle operands are identical between the two calls. 
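        // (Operand bundles are call-site annotations such as "deopt"; the
        // std::equal below compares them element-wise.)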
4017       if (CI->hasOperandBundles() &&
4018           !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
4019                       CI->op_begin() + CI->getBundleOperandsEndIndex(),
4020                       CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
4021         BS.cancelScheduling(VL, VL0);
4022         newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4023                      ReuseShuffleIndicies);
4024         LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
4025                           << *CI << "!=" << *V << '\n');
4026         return;
4027       }
4028     }
4029 
4030     TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
4031                                  ReuseShuffleIndicies);
4032     TE->setOperandsInOrder();
4033     for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
4034       // For scalar operands there is no need to create an entry since there
4035       // is nothing to vectorize.
4036       if (hasVectorInstrinsicScalarOpd(ID, i))
4037         continue;
4038       ValueList Operands;
4039       // Prepare the operand vector.
4040       for (Value *V : VL) {
4041         auto *CI2 = cast<CallInst>(V);
4042         Operands.push_back(CI2->getArgOperand(i));
4043       }
4044       buildTree_rec(Operands, Depth + 1, {TE, i});
4045     }
4046     return;
4047   }
4048   case Instruction::ShuffleVector: {
4049     // If this is not an alternate sequence of opcodes like add-sub
4050     // then do not vectorize this instruction.
4051     if (!S.isAltShuffle()) {
4052       BS.cancelScheduling(VL, VL0);
4053       newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4054                    ReuseShuffleIndicies);
4055       LLVM_DEBUG(dbgs() << "SLP: ShuffleVector instructions are not vectorized.\n");
4056       return;
4057     }
4058     TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
4059                                  ReuseShuffleIndicies);
4060     LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
4061 
4062     // Reorder operands if reordering would enable vectorization.
4063     if (isa<BinaryOperator>(VL0)) {
4064       ValueList Left, Right;
4065       reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
4066       TE->setOperand(0, Left);
4067       TE->setOperand(1, Right);
4068       buildTree_rec(Left, Depth + 1, {TE, 0});
4069       buildTree_rec(Right, Depth + 1, {TE, 1});
4070       return;
4071     }
4072 
4073     TE->setOperandsInOrder();
4074     for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
4075       ValueList Operands;
4076       // Prepare the operand vector.
4077       for (Value *V : VL)
4078         Operands.push_back(cast<Instruction>(V)->getOperand(i));
4079 
4080       buildTree_rec(Operands, Depth + 1, {TE, i});
4081     }
4082     return;
4083   }
4084   default:
4085     BS.cancelScheduling(VL, VL0);
4086     newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
4087                  ReuseShuffleIndicies);
4088     LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
4089     return;
4090   }
4091 }
4092 
4093 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
4094   unsigned N = 1;
4095   Type *EltTy = T;
4096 
4097   while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) ||
4098          isa<VectorType>(EltTy)) {
4099     if (auto *ST = dyn_cast<StructType>(EltTy)) {
4100       // Check that the struct is homogeneous.
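      // (Illustrative: {i32, i32, i32, i32} flattens to N = 4 elements of i32,
      // while a mixed struct such as {i32, float} makes the type unmappable.)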
4101 for (const auto *Ty : ST->elements()) 4102 if (Ty != *ST->element_begin()) 4103 return 0; 4104 N *= ST->getNumElements(); 4105 EltTy = *ST->element_begin(); 4106 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) { 4107 N *= AT->getNumElements(); 4108 EltTy = AT->getElementType(); 4109 } else { 4110 auto *VT = cast<FixedVectorType>(EltTy); 4111 N *= VT->getNumElements(); 4112 EltTy = VT->getElementType(); 4113 } 4114 } 4115 4116 if (!isValidElementType(EltTy)) 4117 return 0; 4118 uint64_t VTSize = DL.getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N)); 4119 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T)) 4120 return 0; 4121 return N; 4122 } 4123 4124 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 4125 SmallVectorImpl<unsigned> &CurrentOrder) const { 4126 Instruction *E0 = cast<Instruction>(OpValue); 4127 assert(E0->getOpcode() == Instruction::ExtractElement || 4128 E0->getOpcode() == Instruction::ExtractValue); 4129 assert(E0->getOpcode() == getSameOpcode(VL).getOpcode() && "Invalid opcode"); 4130 // Check if all of the extracts come from the same vector and from the 4131 // correct offset. 4132 Value *Vec = E0->getOperand(0); 4133 4134 CurrentOrder.clear(); 4135 4136 // We have to extract from a vector/aggregate with the same number of elements. 4137 unsigned NElts; 4138 if (E0->getOpcode() == Instruction::ExtractValue) { 4139 const DataLayout &DL = E0->getModule()->getDataLayout(); 4140 NElts = canMapToVector(Vec->getType(), DL); 4141 if (!NElts) 4142 return false; 4143 // Check if load can be rewritten as load of vector. 4144 LoadInst *LI = dyn_cast<LoadInst>(Vec); 4145 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 4146 return false; 4147 } else { 4148 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); 4149 } 4150 4151 if (NElts != VL.size()) 4152 return false; 4153 4154 // Check that all of the indices extract from the correct offset. 4155 bool ShouldKeepOrder = true; 4156 unsigned E = VL.size(); 4157 // Assign to all items the initial value E + 1 so we can check if the extract 4158 // instruction index was used already. 4159 // Also, later we can check that all the indices are used and we have a 4160 // consecutive access in the extract instructions, by checking that no 4161 // element of CurrentOrder still has value E + 1. 
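  // For example (sketch): extracts of lanes {1, 0, 3, 2} produce CurrentOrder
  // = {1, 0, 3, 2} (CurrentOrder[ExtIdx] = position in VL) and clear
  // ShouldKeepOrder, while lanes {0, 1, 2, 3} keep the order and leave
  // ShouldKeepOrder set.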
4162 CurrentOrder.assign(E, E + 1); 4163 unsigned I = 0; 4164 for (; I < E; ++I) { 4165 auto *Inst = cast<Instruction>(VL[I]); 4166 if (Inst->getOperand(0) != Vec) 4167 break; 4168 Optional<unsigned> Idx = getExtractIndex(Inst); 4169 if (!Idx) 4170 break; 4171 const unsigned ExtIdx = *Idx; 4172 if (ExtIdx != I) { 4173 if (ExtIdx >= E || CurrentOrder[ExtIdx] != E + 1) 4174 break; 4175 ShouldKeepOrder = false; 4176 CurrentOrder[ExtIdx] = I; 4177 } else { 4178 if (CurrentOrder[I] != E + 1) 4179 break; 4180 CurrentOrder[I] = I; 4181 } 4182 } 4183 if (I < E) { 4184 CurrentOrder.clear(); 4185 return false; 4186 } 4187 4188 return ShouldKeepOrder; 4189 } 4190 4191 bool BoUpSLP::areAllUsersVectorized(Instruction *I, 4192 ArrayRef<Value *> VectorizedVals) const { 4193 return (I->hasOneUse() && is_contained(VectorizedVals, I)) || 4194 llvm::all_of(I->users(), [this](User *U) { 4195 return ScalarToTreeEntry.count(U) > 0; 4196 }); 4197 } 4198 4199 static std::pair<InstructionCost, InstructionCost> 4200 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy, 4201 TargetTransformInfo *TTI, TargetLibraryInfo *TLI) { 4202 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4203 4204 // Calculate the cost of the scalar and vector calls. 4205 SmallVector<Type *, 4> VecTys; 4206 for (Use &Arg : CI->args()) 4207 VecTys.push_back( 4208 FixedVectorType::get(Arg->getType(), VecTy->getNumElements())); 4209 FastMathFlags FMF; 4210 if (auto *FPCI = dyn_cast<FPMathOperator>(CI)) 4211 FMF = FPCI->getFastMathFlags(); 4212 SmallVector<const Value *> Arguments(CI->args()); 4213 IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF, 4214 dyn_cast<IntrinsicInst>(CI)); 4215 auto IntrinsicCost = 4216 TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput); 4217 4218 auto Shape = VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>( 4219 VecTy->getNumElements())), 4220 false /*HasGlobalPred*/); 4221 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 4222 auto LibCost = IntrinsicCost; 4223 if (!CI->isNoBuiltin() && VecFunc) { 4224 // Calculate the cost of the vector library call. 4225 // If the corresponding vector call is cheaper, return its cost. 4226 LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys, 4227 TTI::TCK_RecipThroughput); 4228 } 4229 return {IntrinsicCost, LibCost}; 4230 } 4231 4232 /// Compute the cost of creating a vector of type \p VecTy containing the 4233 /// extracted values from \p VL. 4234 static InstructionCost 4235 computeExtractCost(ArrayRef<Value *> VL, FixedVectorType *VecTy, 4236 TargetTransformInfo::ShuffleKind ShuffleKind, 4237 ArrayRef<int> Mask, TargetTransformInfo &TTI) { 4238 unsigned NumOfParts = TTI.getNumberOfParts(VecTy); 4239 4240 if (ShuffleKind != TargetTransformInfo::SK_PermuteSingleSrc || !NumOfParts || 4241 VecTy->getNumElements() < NumOfParts) 4242 return TTI.getShuffleCost(ShuffleKind, VecTy, Mask); 4243 4244 bool AllConsecutive = true; 4245 unsigned EltsPerVector = VecTy->getNumElements() / NumOfParts; 4246 unsigned Idx = -1; 4247 InstructionCost Cost = 0; 4248 4249 // Process extracts in blocks of EltsPerVector to check if the source vector 4250 // operand can be re-used directly. If not, add the cost of creating a shuffle 4251 // to extract the values into a vector register. 4252 for (auto *V : VL) { 4253 ++Idx; 4254 4255 // Reached the start of a new vector registers. 
4256     if (Idx % EltsPerVector == 0) {
4257       AllConsecutive = true;
4258       continue;
4259     }
4260 
4261     // Check whether all extracts for a vector register on the target directly
4262     // extract values in order.
4263     unsigned CurrentIdx = *getExtractIndex(cast<Instruction>(V));
4264     unsigned PrevIdx = *getExtractIndex(cast<Instruction>(VL[Idx - 1]));
4265     AllConsecutive &= PrevIdx + 1 == CurrentIdx &&
4266                       CurrentIdx % EltsPerVector == Idx % EltsPerVector;
4267 
4268     if (AllConsecutive)
4269       continue;
4270 
4271     // Skip all indices, except for the last index per vector block.
4272     if ((Idx + 1) % EltsPerVector != 0 && Idx + 1 != VL.size())
4273       continue;
4274 
4275     // If we have a series of extracts which are not consecutive and hence
4276     // cannot re-use the source vector register directly, compute the shuffle
4277     // cost to extract a vector with EltsPerVector elements.
4278     Cost += TTI.getShuffleCost(
4279         TargetTransformInfo::SK_PermuteSingleSrc,
4280         FixedVectorType::get(VecTy->getElementType(), EltsPerVector));
4281   }
4282   return Cost;
4283 }
4284 
4285 /// Build a shuffle mask for a shuffle graph entry and lists of main and
4286 /// alternate operation operands.
4287 static void
4288 buildSuffleEntryMask(ArrayRef<Value *> VL, ArrayRef<unsigned> ReorderIndices,
4289                      ArrayRef<int> ReusesIndices,
4290                      const function_ref<bool(Instruction *)> IsAltOp,
4291                      SmallVectorImpl<int> &Mask,
4292                      SmallVectorImpl<Value *> *OpScalars = nullptr,
4293                      SmallVectorImpl<Value *> *AltScalars = nullptr) {
4294   unsigned Sz = VL.size();
4295   Mask.assign(Sz, UndefMaskElem);
4296   SmallVector<int> OrderMask;
4297   if (!ReorderIndices.empty())
4298     inversePermutation(ReorderIndices, OrderMask);
4299   for (unsigned I = 0; I < Sz; ++I) {
4300     unsigned Idx = I;
4301     if (!ReorderIndices.empty())
4302       Idx = OrderMask[I];
4303     auto *OpInst = cast<Instruction>(VL[Idx]);
4304     if (IsAltOp(OpInst)) {
4305       Mask[I] = Sz + Idx;
4306       if (AltScalars)
4307         AltScalars->push_back(OpInst);
4308     } else {
4309       Mask[I] = Idx;
4310       if (OpScalars)
4311         OpScalars->push_back(OpInst);
4312     }
4313   }
4314   if (!ReusesIndices.empty()) {
4315     SmallVector<int> NewMask(ReusesIndices.size(), UndefMaskElem);
4316     transform(ReusesIndices, NewMask.begin(), [&Mask](int Idx) {
4317       return Idx != UndefMaskElem ? Mask[Idx] : UndefMaskElem;
4318     });
4319     Mask.swap(NewMask);
4320   }
4321 }
4322 
4323 InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
4324                                       ArrayRef<Value *> VectorizedVals) {
4325   ArrayRef<Value *> VL = E->Scalars;
4326 
4327   Type *ScalarTy = VL[0]->getType();
4328   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
4329     ScalarTy = SI->getValueOperand()->getType();
4330   else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0]))
4331     ScalarTy = CI->getOperand(0)->getType();
4332   else if (auto *IE = dyn_cast<InsertElementInst>(VL[0]))
4333     ScalarTy = IE->getOperand(1)->getType();
4334   auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
4335   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4336 
4337   // If we have computed a smaller type for the expression, update VecTy so
4338   // that the costs will be accurate.
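  // (E.g., if demanded-bits analysis proved the expression fits in i8, an
  // original <4 x i32> VecTy is recomputed as <4 x i8>, matching the narrowed
  // code that will eventually be emitted.)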
4339 if (MinBWs.count(VL[0])) 4340 VecTy = FixedVectorType::get( 4341 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size()); 4342 auto *FinalVecTy = VecTy; 4343 4344 unsigned ReuseShuffleNumbers = E->ReuseShuffleIndices.size(); 4345 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 4346 if (NeedToShuffleReuses) 4347 FinalVecTy = 4348 FixedVectorType::get(VecTy->getElementType(), ReuseShuffleNumbers); 4349 // FIXME: it tries to fix a problem with MSVC buildbots. 4350 TargetTransformInfo &TTIRef = *TTI; 4351 auto &&AdjustExtractsCost = [this, &TTIRef, CostKind, VL, VecTy, 4352 VectorizedVals](InstructionCost &Cost, 4353 bool IsGather) { 4354 DenseMap<Value *, int> ExtractVectorsTys; 4355 for (auto *V : VL) { 4356 // If all users of instruction are going to be vectorized and this 4357 // instruction itself is not going to be vectorized, consider this 4358 // instruction as dead and remove its cost from the final cost of the 4359 // vectorized tree. 4360 if (!areAllUsersVectorized(cast<Instruction>(V), VectorizedVals) || 4361 (IsGather && ScalarToTreeEntry.count(V))) 4362 continue; 4363 auto *EE = cast<ExtractElementInst>(V); 4364 unsigned Idx = *getExtractIndex(EE); 4365 if (TTIRef.getNumberOfParts(VecTy) != 4366 TTIRef.getNumberOfParts(EE->getVectorOperandType())) { 4367 auto It = 4368 ExtractVectorsTys.try_emplace(EE->getVectorOperand(), Idx).first; 4369 It->getSecond() = std::min<int>(It->second, Idx); 4370 } 4371 // Take credit for instruction that will become dead. 4372 if (EE->hasOneUse()) { 4373 Instruction *Ext = EE->user_back(); 4374 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 4375 all_of(Ext->users(), 4376 [](User *U) { return isa<GetElementPtrInst>(U); })) { 4377 // Use getExtractWithExtendCost() to calculate the cost of 4378 // extractelement/ext pair. 4379 Cost -= 4380 TTIRef.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(), 4381 EE->getVectorOperandType(), Idx); 4382 // Add back the cost of s|zext which is subtracted separately. 4383 Cost += TTIRef.getCastInstrCost( 4384 Ext->getOpcode(), Ext->getType(), EE->getType(), 4385 TTI::getCastContextHint(Ext), CostKind, Ext); 4386 continue; 4387 } 4388 } 4389 Cost -= TTIRef.getVectorInstrCost(Instruction::ExtractElement, 4390 EE->getVectorOperandType(), Idx); 4391 } 4392 // Add a cost for subvector extracts/inserts if required. 4393 for (const auto &Data : ExtractVectorsTys) { 4394 auto *EEVTy = cast<FixedVectorType>(Data.first->getType()); 4395 unsigned NumElts = VecTy->getNumElements(); 4396 if (TTIRef.getNumberOfParts(EEVTy) > TTIRef.getNumberOfParts(VecTy)) { 4397 unsigned Idx = (Data.second / NumElts) * NumElts; 4398 unsigned EENumElts = EEVTy->getNumElements(); 4399 if (Idx + NumElts <= EENumElts) { 4400 Cost += 4401 TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 4402 EEVTy, None, Idx, VecTy); 4403 } else { 4404 // Need to round up the subvector type vectorization factor to avoid a 4405 // crash in cost model functions. Make SubVT so that Idx + VF of SubVT 4406 // <= EENumElts. 
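          // (E.g. extracting from a <6 x i32> source with a 4-element VecTy
          // at Idx = 4: 4 + 4 > 6, so SubVT is shrunk to <2 x i32> for the
          // cost query below.)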
4407 auto *SubVT = 4408 FixedVectorType::get(VecTy->getElementType(), EENumElts - Idx); 4409 Cost += 4410 TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 4411 EEVTy, None, Idx, SubVT); 4412 } 4413 } else { 4414 Cost += TTIRef.getShuffleCost(TargetTransformInfo::SK_InsertSubvector, 4415 VecTy, None, 0, EEVTy); 4416 } 4417 } 4418 }; 4419 if (E->State == TreeEntry::NeedToGather) { 4420 if (allConstant(VL)) 4421 return 0; 4422 if (isa<InsertElementInst>(VL[0])) 4423 return InstructionCost::getInvalid(); 4424 SmallVector<int> Mask; 4425 SmallVector<const TreeEntry *> Entries; 4426 Optional<TargetTransformInfo::ShuffleKind> Shuffle = 4427 isGatherShuffledEntry(E, Mask, Entries); 4428 if (Shuffle.hasValue()) { 4429 InstructionCost GatherCost = 0; 4430 if (ShuffleVectorInst::isIdentityMask(Mask)) { 4431 // Perfect match in the graph, will reuse the previously vectorized 4432 // node. Cost is 0. 4433 LLVM_DEBUG( 4434 dbgs() 4435 << "SLP: perfect diamond match for gather bundle that starts with " 4436 << *VL.front() << ".\n"); 4437 if (NeedToShuffleReuses) 4438 GatherCost = 4439 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, 4440 FinalVecTy, E->ReuseShuffleIndices); 4441 } else { 4442 LLVM_DEBUG(dbgs() << "SLP: shuffled " << Entries.size() 4443 << " entries for bundle that starts with " 4444 << *VL.front() << ".\n"); 4445 // Detected that instead of gather we can emit a shuffle of single/two 4446 // previously vectorized nodes. Add the cost of the permutation rather 4447 // than gather. 4448 ::addMask(Mask, E->ReuseShuffleIndices); 4449 GatherCost = TTI->getShuffleCost(*Shuffle, FinalVecTy, Mask); 4450 } 4451 return GatherCost; 4452 } 4453 if (isSplat(VL)) { 4454 // Found the broadcasting of the single scalar, calculate the cost as the 4455 // broadcast. 4456 return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy); 4457 } 4458 if (E->getOpcode() == Instruction::ExtractElement && allSameType(VL) && 4459 allSameBlock(VL) && 4460 !isa<ScalableVectorType>( 4461 cast<ExtractElementInst>(E->getMainOp())->getVectorOperandType())) { 4462 // Check that gather of extractelements can be represented as just a 4463 // shuffle of a single/two vectors the scalars are extracted from. 4464 SmallVector<int> Mask; 4465 Optional<TargetTransformInfo::ShuffleKind> ShuffleKind = 4466 isFixedVectorShuffle(VL, Mask); 4467 if (ShuffleKind.hasValue()) { 4468 // Found the bunch of extractelement instructions that must be gathered 4469 // into a vector and can be represented as a permutation elements in a 4470 // single input vector or of 2 input vectors. 4471 InstructionCost Cost = 4472 computeExtractCost(VL, VecTy, *ShuffleKind, Mask, *TTI); 4473 AdjustExtractsCost(Cost, /*IsGather=*/true); 4474 if (NeedToShuffleReuses) 4475 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, 4476 FinalVecTy, E->ReuseShuffleIndices); 4477 return Cost; 4478 } 4479 } 4480 InstructionCost ReuseShuffleCost = 0; 4481 if (NeedToShuffleReuses) 4482 ReuseShuffleCost = TTI->getShuffleCost( 4483 TTI::SK_PermuteSingleSrc, FinalVecTy, E->ReuseShuffleIndices); 4484 // Improve gather cost for gather of loads, if we can group some of the 4485 // loads into vector loads. 
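    // Sketch of the search below: try VF = VL.size() / 2 and keep halving down
    // to the target minimum VF; every slice that can be loaded as one vector
    // (or masked-gather) load is costed as such, the rest stays gathered.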
    if (VL.size() > 2 && E->getOpcode() == Instruction::Load &&
        !E->isAltShuffle()) {
      BoUpSLP::ValueSet VectorizedLoads;
      unsigned StartIdx = 0;
      unsigned VF = VL.size() / 2;
      unsigned VectorizedCnt = 0;
      unsigned ScatterVectorizeCnt = 0;
      const unsigned Sz = DL->getTypeSizeInBits(E->getMainOp()->getType());
      for (unsigned MinVF = getMinVF(2 * Sz); VF >= MinVF; VF /= 2) {
        for (unsigned Cnt = StartIdx, End = VL.size(); Cnt + VF <= End;
             Cnt += VF) {
          ArrayRef<Value *> Slice = VL.slice(Cnt, VF);
          if (!VectorizedLoads.count(Slice.front()) &&
              !VectorizedLoads.count(Slice.back()) && allSameBlock(Slice)) {
            SmallVector<Value *> PointerOps;
            OrdersType CurrentOrder;
            LoadsState LS = canVectorizeLoads(Slice, Slice.front(), *TTI, *DL,
                                              *SE, CurrentOrder, PointerOps);
            switch (LS) {
            case LoadsState::Vectorize:
            case LoadsState::ScatterVectorize:
              // Mark the vectorized loads so that we don't vectorize them
              // again.
              if (LS == LoadsState::Vectorize)
                ++VectorizedCnt;
              else
                ++ScatterVectorizeCnt;
              VectorizedLoads.insert(Slice.begin(), Slice.end());
              // If we vectorized the initial block, no need to try to
              // vectorize it again.
              if (Cnt == StartIdx)
                StartIdx += VF;
              break;
            case LoadsState::Gather:
              break;
            }
          }
        }
        // Check if the whole array was vectorized already - exit.
        if (StartIdx >= VL.size())
          break;
        // Found vectorizable parts - exit.
        if (!VectorizedLoads.empty())
          break;
      }
      if (!VectorizedLoads.empty()) {
        InstructionCost GatherCost = 0;
        unsigned NumParts = TTI->getNumberOfParts(VecTy);
        bool NeedInsertSubvectorAnalysis =
            !NumParts || (VL.size() / VF) > NumParts;
        // Get the cost for gathered loads.
        for (unsigned I = 0, End = VL.size(); I < End; I += VF) {
          if (VectorizedLoads.contains(VL[I]))
            continue;
          GatherCost += getGatherCost(VL.slice(I, VF));
        }
        // The cost for vectorized loads.
        InstructionCost ScalarsCost = 0;
        for (Value *V : VectorizedLoads) {
          auto *LI = cast<LoadInst>(V);
          ScalarsCost += TTI->getMemoryOpCost(
              Instruction::Load, LI->getType(), LI->getAlign(),
              LI->getPointerAddressSpace(), CostKind, LI);
        }
        auto *LI = cast<LoadInst>(E->getMainOp());
        auto *LoadTy = FixedVectorType::get(LI->getType(), VF);
        Align Alignment = LI->getAlign();
        GatherCost +=
            VectorizedCnt *
            TTI->getMemoryOpCost(Instruction::Load, LoadTy, Alignment,
                                 LI->getPointerAddressSpace(), CostKind, LI);
        GatherCost += ScatterVectorizeCnt *
                      TTI->getGatherScatterOpCost(
                          Instruction::Load, LoadTy, LI->getPointerOperand(),
                          /*VariableMask=*/false, Alignment, CostKind, LI);
        if (NeedInsertSubvectorAnalysis) {
          // Add the cost for the subvectors insert.
          for (int I = VF, E = VL.size(); I < E; I += VF)
            GatherCost += TTI->getShuffleCost(TTI::SK_InsertSubvector, VecTy,
                                              None, I, LoadTy);
        }
        return ReuseShuffleCost + GatherCost - ScalarsCost;
      }
    }
    return ReuseShuffleCost + getGatherCost(VL);
  }
  InstructionCost CommonCost = 0;
  SmallVector<int> Mask;
  if (!E->ReorderIndices.empty()) {
    SmallVector<int> NewMask;
    if (E->getOpcode() == Instruction::Store) {
      // For stores the order is actually a mask.
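      // E.g., if ReorderIndices is {2, 0, 1}, a store node uses it directly
      // as the shuffle mask, while other nodes below take the inverse
      // permutation {1, 2, 0} (via inversePermutation) so that mask lane I
      // names the scalar that belongs in lane I.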
      NewMask.resize(E->ReorderIndices.size());
      copy(E->ReorderIndices, NewMask.begin());
    } else {
      inversePermutation(E->ReorderIndices, NewMask);
    }
    ::addMask(Mask, NewMask);
  }
  if (NeedToShuffleReuses)
    ::addMask(Mask, E->ReuseShuffleIndices);
  if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask))
    CommonCost =
        TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask);
  assert((E->State == TreeEntry::Vectorize ||
          E->State == TreeEntry::ScatterVectorize) &&
         "Unhandled state");
  assert(E->getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
  Instruction *VL0 = E->getMainOp();
  unsigned ShuffleOrOp =
      E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
  switch (ShuffleOrOp) {
  case Instruction::PHI:
    return 0;

  case Instruction::ExtractValue:
  case Instruction::ExtractElement: {
    // The common cost of removing ExtractElement/ExtractValue instructions +
    // the cost of shuffles, if required to reshuffle the original vector.
    if (NeedToShuffleReuses) {
      unsigned Idx = 0;
      for (unsigned I : E->ReuseShuffleIndices) {
        if (ShuffleOrOp == Instruction::ExtractElement) {
          auto *EE = cast<ExtractElementInst>(VL[I]);
          CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
                                                EE->getVectorOperandType(),
                                                *getExtractIndex(EE));
        } else {
          CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
                                                VecTy, Idx);
          ++Idx;
        }
      }
      Idx = ReuseShuffleNumbers;
      for (Value *V : VL) {
        if (ShuffleOrOp == Instruction::ExtractElement) {
          auto *EE = cast<ExtractElementInst>(V);
          CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
                                                EE->getVectorOperandType(),
                                                *getExtractIndex(EE));
        } else {
          --Idx;
          CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
                                                VecTy, Idx);
        }
      }
    }
    if (ShuffleOrOp == Instruction::ExtractValue) {
      for (unsigned I = 0, E = VL.size(); I < E; ++I) {
        auto *EI = cast<Instruction>(VL[I]);
        // Take credit for instruction that will become dead.
        if (EI->hasOneUse()) {
          Instruction *Ext = EI->user_back();
          if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
              all_of(Ext->users(),
                     [](User *U) { return isa<GetElementPtrInst>(U); })) {
            // Use getExtractWithExtendCost() to calculate the cost of
            // extractelement/ext pair.
            CommonCost -= TTI->getExtractWithExtendCost(
                Ext->getOpcode(), Ext->getType(), VecTy, I);
            // Add back the cost of s|zext which is subtracted separately.
            CommonCost += TTI->getCastInstrCost(
                Ext->getOpcode(), Ext->getType(), EI->getType(),
                TTI::getCastContextHint(Ext), CostKind, Ext);
            continue;
          }
        }
        CommonCost -=
            TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, I);
      }
    } else {
      AdjustExtractsCost(CommonCost, /*IsGather=*/false);
    }
    return CommonCost;
  }
  case Instruction::InsertElement: {
    assert(E->ReuseShuffleIndices.empty() &&
           "Unique insertelements only are expected.");
    auto *SrcVecTy = cast<FixedVectorType>(VL0->getType());

    unsigned const NumElts = SrcVecTy->getNumElements();
    unsigned const NumScalars = VL.size();
    APInt DemandedElts = APInt::getZero(NumElts);
    // TODO: Add support for Instruction::InsertValue.
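    // The loop below fills Mask so that destination lane (*InsertIdx -
    // Offset) receives scalar I, while DemandedElts records which lanes are
    // actually written; the saved scalarization overhead for those lanes is
    // subtracted from the cost afterwards.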
    SmallVector<int> Mask;
    if (!E->ReorderIndices.empty()) {
      inversePermutation(E->ReorderIndices, Mask);
      Mask.append(NumElts - NumScalars, UndefMaskElem);
    } else {
      Mask.assign(NumElts, UndefMaskElem);
      std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0);
    }
    unsigned Offset = *getInsertIndex(VL0, 0);
    bool IsIdentity = true;
    SmallVector<int> PrevMask(NumElts, UndefMaskElem);
    Mask.swap(PrevMask);
    for (unsigned I = 0; I < NumScalars; ++I) {
      Optional<int> InsertIdx = getInsertIndex(VL[PrevMask[I]], 0);
      if (!InsertIdx || *InsertIdx == UndefMaskElem)
        continue;
      DemandedElts.setBit(*InsertIdx);
      IsIdentity &= *InsertIdx - Offset == I;
      Mask[*InsertIdx - Offset] = I;
    }
    assert(Offset < NumElts && "Failed to find vector index offset");

    InstructionCost Cost = 0;
    Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts,
                                          /*Insert*/ true, /*Extract*/ false);

    if (IsIdentity && NumElts != NumScalars && Offset % NumScalars != 0) {
      // FIXME: Replace with SK_InsertSubvector once it is properly supported.
      unsigned Sz = PowerOf2Ceil(Offset + NumScalars);
      Cost += TTI->getShuffleCost(
          TargetTransformInfo::SK_PermuteSingleSrc,
          FixedVectorType::get(SrcVecTy->getElementType(), Sz));
    } else if (!IsIdentity) {
      auto *FirstInsert =
          cast<Instruction>(*find_if(E->Scalars, [E](Value *V) {
            return !is_contained(E->Scalars,
                                 cast<Instruction>(V)->getOperand(0));
          }));
      if (isa<UndefValue>(FirstInsert->getOperand(0))) {
        Cost += TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, SrcVecTy, Mask);
      } else {
        SmallVector<int> InsertMask(NumElts);
        std::iota(InsertMask.begin(), InsertMask.end(), 0);
        for (unsigned I = 0; I < NumElts; I++) {
          if (Mask[I] != UndefMaskElem)
            InsertMask[Offset + I] = NumElts + I;
        }
        Cost +=
            TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVecTy, InsertMask);
      }
    }

    return Cost;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    Type *SrcTy = VL0->getOperand(0)->getType();
    InstructionCost ScalarEltCost =
        TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy,
                              TTI::getCastContextHint(VL0), CostKind, VL0);
    if (NeedToShuffleReuses) {
      CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
    }

    // Calculate the cost of this instruction.
    InstructionCost ScalarCost = VL.size() * ScalarEltCost;

    auto *SrcVecTy = FixedVectorType::get(SrcTy, VL.size());
    InstructionCost VecCost = 0;
    // Check if the values are candidates to demote.
    if (!MinBWs.count(VL0) || VecTy != SrcVecTy) {
      VecCost = CommonCost + TTI->getCastInstrCost(
                                 E->getOpcode(), VecTy, SrcVecTy,
                                 TTI::getCastContextHint(VL0), CostKind, VL0);
    }
    LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
    return VecCost - ScalarCost;
  }
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select: {
    // Calculate the cost of this instruction.
    InstructionCost ScalarEltCost =
        TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, Builder.getInt1Ty(),
                                CmpInst::BAD_ICMP_PREDICATE, CostKind, VL0);
    if (NeedToShuffleReuses) {
      CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
    }
    auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size());
    InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;

    // Check if all entries in VL are either compares or selects with compares
    // as their condition, all with the same predicate.
    CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE;
    bool First = true;
    for (auto *V : VL) {
      CmpInst::Predicate CurrentPred;
      auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value());
      if ((!match(V, m_Select(MatchCmp, m_Value(), m_Value())) &&
           !match(V, MatchCmp)) ||
          (!First && VecPred != CurrentPred)) {
        VecPred = CmpInst::BAD_ICMP_PREDICATE;
        break;
      }
      First = false;
      VecPred = CurrentPred;
    }

    InstructionCost VecCost = TTI->getCmpSelInstrCost(
        E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0);
    // Check if it is possible and profitable to use min/max for selects
    // in VL.
    auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL);
    if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) {
      IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy,
                                        {VecTy, VecTy});
      InstructionCost IntrinsicCost =
          TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
      // If the selects are the only uses of the compares, they will be dead
      // and we can adjust the cost by removing their cost.
      if (IntrinsicAndUse.second)
        IntrinsicCost -=
            TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy, MaskTy,
                                    CmpInst::BAD_ICMP_PREDICATE, CostKind);
      VecCost = std::min(VecCost, IntrinsicCost);
    }
    LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
    return CommonCost + VecCost - ScalarCost;
  }
  case Instruction::FNeg:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Certain instructions can be cheaper to vectorize if they have a
    // constant second vector operand.
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_UniformConstantValue;
    TargetTransformInfo::OperandValueProperties Op1VP =
        TargetTransformInfo::OP_None;
    TargetTransformInfo::OperandValueProperties Op2VP =
        TargetTransformInfo::OP_PowerOf2;

    // If all operands are exactly the same ConstantInt then set the
    // operand kind to OK_UniformConstantValue.
    // If instead not all operands are constants, then set the operand kind
    // to OK_AnyValue. If all operands are constants but not the same,
    // then set the operand kind to OK_NonUniformConstantValue.
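    // E.g., {x << 4, y << 4} keeps OK_UniformConstantValue with OP_PowerOf2,
    // {x << 4, y << 2} becomes OK_NonUniformConstantValue (both shift
    // amounts are still powers of two), and {x << z, ...} falls back to
    // OK_AnyValue with OP_None.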
    ConstantInt *CInt0 = nullptr;
    for (unsigned i = 0, e = VL.size(); i < e; ++i) {
      const Instruction *I = cast<Instruction>(VL[i]);
      unsigned OpIdx = isa<BinaryOperator>(I) ? 1 : 0;
      ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx));
      if (!CInt) {
        Op2VK = TargetTransformInfo::OK_AnyValue;
        Op2VP = TargetTransformInfo::OP_None;
        break;
      }
      if (Op2VP == TargetTransformInfo::OP_PowerOf2 &&
          !CInt->getValue().isPowerOf2())
        Op2VP = TargetTransformInfo::OP_None;
      if (i == 0) {
        CInt0 = CInt;
        continue;
      }
      if (CInt0 != CInt)
        Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
    }

    SmallVector<const Value *, 4> Operands(VL0->operand_values());
    InstructionCost ScalarEltCost =
        TTI->getArithmeticInstrCost(E->getOpcode(), ScalarTy, CostKind, Op1VK,
                                    Op2VK, Op1VP, Op2VP, Operands, VL0);
    if (NeedToShuffleReuses) {
      CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
    }
    InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
    InstructionCost VecCost =
        TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind, Op1VK,
                                    Op2VK, Op1VP, Op2VP, Operands, VL0);
    LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
    return CommonCost + VecCost - ScalarCost;
  }
  case Instruction::GetElementPtr: {
    TargetTransformInfo::OperandValueKind Op1VK =
        TargetTransformInfo::OK_AnyValue;
    TargetTransformInfo::OperandValueKind Op2VK =
        TargetTransformInfo::OK_UniformConstantValue;

    InstructionCost ScalarEltCost = TTI->getArithmeticInstrCost(
        Instruction::Add, ScalarTy, CostKind, Op1VK, Op2VK);
    if (NeedToShuffleReuses) {
      CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
    }
    InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost;
    InstructionCost VecCost = TTI->getArithmeticInstrCost(
        Instruction::Add, VecTy, CostKind, Op1VK, Op2VK);
    LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
    return CommonCost + VecCost - ScalarCost;
  }
  case Instruction::Load: {
    // Cost of wide load - cost of scalar loads.
    Align Alignment = cast<LoadInst>(VL0)->getAlign();
    InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
        Instruction::Load, ScalarTy, Alignment, 0, CostKind, VL0);
    if (NeedToShuffleReuses) {
      CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
    }
    InstructionCost ScalarLdCost = VecTy->getNumElements() * ScalarEltCost;
    InstructionCost VecLdCost;
    if (E->State == TreeEntry::Vectorize) {
      VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, Alignment, 0,
                                       CostKind, VL0);
    } else {
      assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState");
      Align CommonAlignment = Alignment;
      for (Value *V : VL)
        CommonAlignment =
            commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign());
      VecLdCost = TTI->getGatherScatterOpCost(
          Instruction::Load, VecTy, cast<LoadInst>(VL0)->getPointerOperand(),
          /*VariableMask=*/false, CommonAlignment, CostKind, VL0);
    }
    LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecLdCost, ScalarLdCost));
    return CommonCost + VecLdCost - ScalarLdCost;
  }
  case Instruction::Store: {
    // We know that we can merge the stores. Calculate the cost.
    bool IsReorder = !E->ReorderIndices.empty();
    auto *SI = cast<StoreInst>(IsReorder ?
                                   VL[E->ReorderIndices.front()] : VL0);
    Align Alignment = SI->getAlign();
    InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
        Instruction::Store, ScalarTy, Alignment, 0, CostKind, VL0);
    InstructionCost ScalarStCost = VecTy->getNumElements() * ScalarEltCost;
    InstructionCost VecStCost = TTI->getMemoryOpCost(
        Instruction::Store, VecTy, Alignment, 0, CostKind, VL0);
    LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecStCost, ScalarStCost));
    return CommonCost + VecStCost - ScalarStCost;
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

    // Calculate the cost of the scalar and vector calls.
    IntrinsicCostAttributes CostAttrs(ID, *CI, 1);
    InstructionCost ScalarEltCost =
        TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
    if (NeedToShuffleReuses) {
      CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
    }
    InstructionCost ScalarCallCost = VecTy->getNumElements() * ScalarEltCost;

    auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
    InstructionCost VecCallCost =
        std::min(VecCallCosts.first, VecCallCosts.second);

    LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
                      << " (" << VecCallCost << "-" << ScalarCallCost << ")"
                      << " for " << *CI << "\n");

    return CommonCost + VecCallCost - ScalarCallCost;
  }
  case Instruction::ShuffleVector: {
    assert(E->isAltShuffle() &&
           ((Instruction::isBinaryOp(E->getOpcode()) &&
             Instruction::isBinaryOp(E->getAltOpcode())) ||
            (Instruction::isCast(E->getOpcode()) &&
             Instruction::isCast(E->getAltOpcode()))) &&
           "Invalid Shuffle Vector Operand");
    InstructionCost ScalarCost = 0;
    if (NeedToShuffleReuses) {
      for (unsigned Idx : E->ReuseShuffleIndices) {
        Instruction *I = cast<Instruction>(VL[Idx]);
        CommonCost -= TTI->getInstructionCost(I, CostKind);
      }
      for (Value *V : VL) {
        Instruction *I = cast<Instruction>(V);
        CommonCost += TTI->getInstructionCost(I, CostKind);
      }
    }
    for (Value *V : VL) {
      Instruction *I = cast<Instruction>(V);
      assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
      ScalarCost += TTI->getInstructionCost(I, CostKind);
    }
    // VecCost is equal to the sum of the cost of creating the two vectors
    // and the cost of creating the shuffle.
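    // E.g., an alternating bundle {a0 + b0, a1 - b1, a2 + b2, a3 - b3} is
    // costed as one vector add, one vector sub, and one blending shuffle
    // that picks the add lanes and the sub lanes from the two results.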
    InstructionCost VecCost = 0;
    if (Instruction::isBinaryOp(E->getOpcode())) {
      VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind);
      VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy,
                                             CostKind);
    } else {
      Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType();
      Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType();
      auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size());
      auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size());
      VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty,
                                      TTI::CastContextHint::None, CostKind);
      VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty,
                                       TTI::CastContextHint::None, CostKind);
    }

    SmallVector<int> Mask;
    buildSuffleEntryMask(
        E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices,
        [E](Instruction *I) {
          assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
          return I->getOpcode() == E->getAltOpcode();
        },
        Mask);
    CommonCost =
        TTI->getShuffleCost(TargetTransformInfo::SK_Select, FinalVecTy, Mask);
    LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost));
    return CommonCost + VecCost - ScalarCost;
  }
  default:
    llvm_unreachable("Unknown instruction");
  }
}

bool BoUpSLP::isFullyVectorizableTinyTree(bool ForReduction) const {
  LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
                    << VectorizableTree.size() << " is fully vectorizable.\n");

  auto &&AreVectorizableGathers = [this](const TreeEntry *TE, unsigned Limit) {
    SmallVector<int> Mask;
    return TE->State == TreeEntry::NeedToGather &&
           !any_of(TE->Scalars,
                   [this](Value *V) { return EphValues.contains(V); }) &&
           (allConstant(TE->Scalars) || isSplat(TE->Scalars) ||
            TE->Scalars.size() < Limit ||
            (TE->getOpcode() == Instruction::ExtractElement &&
             isFixedVectorShuffle(TE->Scalars, Mask)) ||
            (TE->State == TreeEntry::NeedToGather &&
             TE->getOpcode() == Instruction::Load && !TE->isAltShuffle()));
  };

  // We only handle trees of heights 1 and 2.
  if (VectorizableTree.size() == 1 &&
      (VectorizableTree[0]->State == TreeEntry::Vectorize ||
       (ForReduction &&
        AreVectorizableGathers(VectorizableTree[0].get(),
                               VectorizableTree[0]->Scalars.size()) &&
        (VectorizableTree[0]->Scalars.size() > 2 ||
         VectorizableTree[0]->ReuseShuffleIndices.size() > 2))))
    return true;

  if (VectorizableTree.size() != 2)
    return false;

  // Handle splat and all-constants stores. Also try to vectorize tiny trees
  // with a second gather node if it has fewer scalar operands than the
  // initial tree element (it may be profitable to shuffle the second gather)
  // or if its operands are extractelements, which form a shuffle.
  SmallVector<int> Mask;
  if (VectorizableTree[0]->State == TreeEntry::Vectorize &&
      AreVectorizableGathers(VectorizableTree[1].get(),
                             VectorizableTree[0]->Scalars.size()))
    return true;

  // Gathering cost would be too much for tiny trees.
  if (VectorizableTree[0]->State == TreeEntry::NeedToGather ||
      (VectorizableTree[1]->State == TreeEntry::NeedToGather &&
       VectorizableTree[0]->State != TreeEntry::ScatterVectorize))
    return false;

  return true;
}

static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts,
                                       TargetTransformInfo *TTI,
                                       bool MustMatchOrInst) {
  // Look past the root to find a source value. Arbitrarily follow the
  // path through operand 0 of any 'or'. Also, peek through optional
  // shift-left-by-multiple-of-8-bits.
  Value *ZextLoad = Root;
  const APInt *ShAmtC;
  bool FoundOr = false;
  while (!isa<ConstantExpr>(ZextLoad) &&
         (match(ZextLoad, m_Or(m_Value(), m_Value())) ||
          (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) &&
           ShAmtC->urem(8) == 0))) {
    auto *BinOp = cast<BinaryOperator>(ZextLoad);
    ZextLoad = BinOp->getOperand(0);
    if (BinOp->getOpcode() == Instruction::Or)
      FoundOr = true;
  }
  // Check if the input is an extended load of the required or/shift
  // expression.
  Value *LoadPtr;
  if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root ||
      !match(ZextLoad, m_ZExt(m_Load(m_Value(LoadPtr)))))
    return false;

  // Require that the total load bit width is a legal integer type.
  // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target.
  // But <16 x i8> --> i128 is not, so the backend probably can't reduce it.
  Type *SrcTy = LoadPtr->getType()->getPointerElementType();
  unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts;
  if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth)))
    return false;

  // Everything matched - assume that we can fold the whole sequence using
  // load combining.
  LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at "
                    << *(cast<Instruction>(Root)) << "\n");

  return true;
}

bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const {
  if (RdxKind != RecurKind::Or)
    return false;

  unsigned NumElts = VectorizableTree[0]->Scalars.size();
  Value *FirstReduced = VectorizableTree[0]->Scalars[0];
  return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI,
                                    /*MustMatchOrInst=*/false);
}

bool BoUpSLP::isLoadCombineCandidate() const {
  // Peek through a final sequence of stores and check if all operations are
  // likely to be load-combined.
  unsigned NumElts = VectorizableTree[0]->Scalars.size();
  for (Value *Scalar : VectorizableTree[0]->Scalars) {
    Value *X;
    if (!match(Scalar, m_Store(m_Value(X), m_Value())) ||
        !isLoadCombineCandidateImpl(X, NumElts, TTI, /*MustMatchOrInst=*/true))
      return false;
  }
  return true;
}

bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const {
  // No need to vectorize inserts of gathered values.
  if (VectorizableTree.size() == 2 &&
      isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) &&
      VectorizableTree[1]->State == TreeEntry::NeedToGather)
    return true;

  // We can vectorize the tree if its size is greater than or equal to the
  // minimum size specified by the MinTreeSize command line option.
  if (VectorizableTree.size() >= MinTreeSize)
    return false;

  // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
  // can vectorize it if we can prove it fully vectorizable.
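  // With the default MinTreeSize of 3 this covers the common two-node case:
  // a vectorizable root fed by a single gather node that is cheap to
  // materialize (all constants, a splat, or reusable extractelements).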
  if (isFullyVectorizableTinyTree(ForReduction))
    return false;

  assert((VectorizableTree.empty() ? ExternalUses.empty() : true) &&
         "We shouldn't have any external users");

  // Otherwise, we can't vectorize the tree. It is both tiny and not fully
  // vectorizable.
  return true;
}

InstructionCost BoUpSLP::getSpillCost() const {
  // Walk from the bottom of the tree to the top, tracking which values are
  // live. When we see a call instruction that is not part of our tree,
  // query TTI to see if there is a cost to keeping values live over it
  // (for example, if spills and fills are required).
  unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
  InstructionCost Cost = 0;

  SmallPtrSet<Instruction *, 4> LiveValues;
  Instruction *PrevInst = nullptr;

  // The entries in VectorizableTree are not necessarily ordered by their
  // position in basic blocks. Collect them and order them by dominance so
  // later instructions are guaranteed to be visited first. For instructions
  // in different basic blocks, we only scan to the beginning of the block, so
  // their order does not matter, as long as all instructions in a basic block
  // are grouped together. Using dominance ensures a deterministic order.
  SmallVector<Instruction *, 16> OrderedScalars;
  for (const auto &TEPtr : VectorizableTree) {
    Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]);
    if (!Inst)
      continue;
    OrderedScalars.push_back(Inst);
  }
  llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) {
    auto *NodeA = DT->getNode(A->getParent());
    auto *NodeB = DT->getNode(B->getParent());
    assert(NodeA && "Should only process reachable instructions");
    assert(NodeB && "Should only process reachable instructions");
    assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
           "Different nodes should have different DFS numbers");
    if (NodeA != NodeB)
      return NodeA->getDFSNumIn() < NodeB->getDFSNumIn();
    return B->comesBefore(A);
  });

  for (Instruction *Inst : OrderedScalars) {
    if (!PrevInst) {
      PrevInst = Inst;
      continue;
    }

    // Update LiveValues.
    LiveValues.erase(PrevInst);
    for (auto &J : PrevInst->operands()) {
      if (isa<Instruction>(&*J) && getTreeEntry(&*J))
        LiveValues.insert(cast<Instruction>(&*J));
    }

    LLVM_DEBUG({
      dbgs() << "SLP: #LV: " << LiveValues.size();
      for (auto *X : LiveValues)
        dbgs() << " " << X->getName();
      dbgs() << ", Looking at ";
      Inst->dump();
    });

    // Now find the sequence of instructions between PrevInst and Inst.
    unsigned NumCalls = 0;
    BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
                                 PrevInstIt =
                                     PrevInst->getIterator().getReverse();
    while (InstIt != PrevInstIt) {
      if (PrevInstIt == PrevInst->getParent()->rend()) {
        PrevInstIt = Inst->getParent()->rbegin();
        continue;
      }

      // Debug information does not impact spill cost.
      if ((isa<CallInst>(&*PrevInstIt) &&
           !isa<DbgInfoIntrinsic>(&*PrevInstIt)) &&
          &*PrevInstIt != PrevInst)
        NumCalls++;

      ++PrevInstIt;
    }

    if (NumCalls) {
      SmallVector<Type *, 4> V;
      for (auto *II : LiveValues) {
        auto *ScalarTy = II->getType();
        if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy))
          ScalarTy = VectorTy->getElementType();
        V.push_back(FixedVectorType::get(ScalarTy, BundleWidth));
      }
      Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
    }

    PrevInst = Inst;
  }

  return Cost;
}

InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
  InstructionCost Cost = 0;
  LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
                    << VectorizableTree.size() << ".\n");

  unsigned BundleWidth = VectorizableTree[0]->Scalars.size();

  for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
    TreeEntry &TE = *VectorizableTree[I].get();

    InstructionCost C = getEntryCost(&TE, VectorizedVals);
    Cost += C;
    LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
                      << " for bundle that starts with " << *TE.Scalars[0]
                      << ".\n"
                      << "SLP: Current total cost = " << Cost << "\n");
  }

  SmallPtrSet<Value *, 16> ExtractCostCalculated;
  InstructionCost ExtractCost = 0;
  SmallVector<unsigned> VF;
  SmallVector<SmallVector<int>> ShuffleMask;
  SmallVector<Value *> FirstUsers;
  SmallVector<APInt> DemandedElts;
  for (ExternalUser &EU : ExternalUses) {
    // We only add extract cost once for the same scalar.
    if (!ExtractCostCalculated.insert(EU.Scalar).second)
      continue;

    // Uses by ephemeral values are free (because the ephemeral value will be
    // removed prior to code generation, and so the extraction will be
    // removed as well).
    if (EphValues.count(EU.User))
      continue;

    // No extract cost for vector "scalar".
    if (isa<FixedVectorType>(EU.Scalar->getType()))
      continue;

    // Already counted the cost for external uses when we tried to adjust the
    // cost for extractelements, no need to add it again.
    if (isa<ExtractElementInst>(EU.Scalar))
      continue;

    // If the found user is an insertelement, do not calculate extract cost
    // but try to detect it as a final shuffled/identity match.
    if (isa_and_nonnull<InsertElementInst>(EU.User)) {
      if (auto *FTy = dyn_cast<FixedVectorType>(EU.User->getType())) {
        Optional<int> InsertIdx = getInsertIndex(EU.User, 0);
        if (!InsertIdx || *InsertIdx == UndefMaskElem)
          continue;
        Value *VU = EU.User;
        auto *It = find_if(FirstUsers, [VU](Value *V) {
          // Checks if 2 insertelements are from the same buildvector.
          if (VU->getType() != V->getType())
            return false;
          auto *IE1 = cast<InsertElementInst>(VU);
          auto *IE2 = cast<InsertElementInst>(V);
          // Go through the chain of insertelement instructions trying to find
          // either VU as the original vector for IE2 or V as the original
          // vector for IE1.
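          // E.g., %i0 = insertelement <4 x i32> poison, i32 %a, i32 0
          //       %i1 = insertelement <4 x i32> %i0, i32 %b, i32 1
          // Walking operand 0 from %i1 reaches %i0, so the two inserts
          // belong to the same buildvector and share one shuffle mask.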
          do {
            if (IE1 == VU || IE2 == V)
              return true;
            if (IE1)
              IE1 = dyn_cast<InsertElementInst>(IE1->getOperand(0));
            if (IE2)
              IE2 = dyn_cast<InsertElementInst>(IE2->getOperand(0));
          } while (IE1 || IE2);
          return false;
        });
        int VecId = -1;
        if (It == FirstUsers.end()) {
          VF.push_back(FTy->getNumElements());
          ShuffleMask.emplace_back(VF.back(), UndefMaskElem);
          FirstUsers.push_back(EU.User);
          DemandedElts.push_back(APInt::getZero(VF.back()));
          VecId = FirstUsers.size() - 1;
        } else {
          VecId = std::distance(FirstUsers.begin(), It);
        }
        int Idx = *InsertIdx;
        ShuffleMask[VecId][Idx] = EU.Lane;
        DemandedElts[VecId].setBit(Idx);
      }
    }

    // If we plan to rewrite the tree in a smaller type, we will need to sign
    // extend the extracted value back to the original type. Here, we account
    // for the extract and the added cost of the sign extend if needed.
    auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth);
    auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
    if (MinBWs.count(ScalarRoot)) {
      auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
      auto Extend =
          MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
      VecTy = FixedVectorType::get(MinTy, BundleWidth);
      ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
                                                   VecTy, EU.Lane);
    } else {
      ExtractCost +=
          TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);
    }
  }

  InstructionCost SpillCost = getSpillCost();
  Cost += SpillCost + ExtractCost;
  for (int I = 0, E = FirstUsers.size(); I < E; ++I) {
    // For the very first element - simple shuffle of the source vector.
    int Limit = ShuffleMask[I].size() * 2;
    if (I == 0 &&
        all_of(ShuffleMask[I], [Limit](int Idx) { return Idx < Limit; }) &&
        !ShuffleVectorInst::isIdentityMask(ShuffleMask[I])) {
      InstructionCost C = TTI->getShuffleCost(
          TTI::SK_PermuteSingleSrc,
          cast<FixedVectorType>(FirstUsers[I]->getType()), ShuffleMask[I]);
      LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
                        << " for final shuffle of insertelement external users "
                        << *VectorizableTree.front()->Scalars.front() << ".\n"
                        << "SLP: Current total cost = " << Cost << "\n");
      Cost += C;
      continue;
    }
    // Other elements - permutation of 2 vectors (the initial one and the
    // next Ith incoming vector).
    unsigned VF = ShuffleMask[I].size();
    for (unsigned Idx = 0; Idx < VF; ++Idx) {
      int &Mask = ShuffleMask[I][Idx];
      Mask = Mask == UndefMaskElem ?
                 Idx : VF + Mask;
    }
    InstructionCost C = TTI->getShuffleCost(
        TTI::SK_PermuteTwoSrc, cast<FixedVectorType>(FirstUsers[I]->getType()),
        ShuffleMask[I]);
    LLVM_DEBUG(
        dbgs()
        << "SLP: Adding cost " << C
        << " for final shuffle of vector node and external insertelement users "
        << *VectorizableTree.front()->Scalars.front() << ".\n"
        << "SLP: Current total cost = " << Cost << "\n");
    Cost += C;
    InstructionCost InsertCost = TTI->getScalarizationOverhead(
        cast<FixedVectorType>(FirstUsers[I]->getType()), DemandedElts[I],
        /*Insert*/ true,
        /*Extract*/ false);
    Cost -= InsertCost;
    LLVM_DEBUG(dbgs() << "SLP: subtracting the cost " << InsertCost
                      << " for insertelements gather.\n"
                      << "SLP: Current total cost = " << Cost << "\n");
  }

#ifndef NDEBUG
  SmallString<256> Str;
  {
    raw_svector_ostream OS(Str);
    OS << "SLP: Spill Cost = " << SpillCost << ".\n"
       << "SLP: Extract Cost = " << ExtractCost << ".\n"
       << "SLP: Total Cost = " << Cost << ".\n";
  }
  LLVM_DEBUG(dbgs() << Str);
  if (ViewSLPTree)
    ViewGraph(this, "SLP" + F->getName(), false, Str);
#endif

  return Cost;
}

Optional<TargetTransformInfo::ShuffleKind>
BoUpSLP::isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
                               SmallVectorImpl<const TreeEntry *> &Entries) {
  // TODO: currently checking only for Scalars in the tree entry, need to
  // count reused elements too for better cost estimation.
  Mask.assign(TE->Scalars.size(), UndefMaskElem);
  Entries.clear();
  // Build a map from values to tree entries.
  DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>> ValueToTEs;
  for (const std::unique_ptr<TreeEntry> &EntryPtr : VectorizableTree) {
    if (EntryPtr.get() == TE)
      break;
    if (EntryPtr->State != TreeEntry::NeedToGather)
      continue;
    for (Value *V : EntryPtr->Scalars)
      ValueToTEs.try_emplace(V).first->getSecond().insert(EntryPtr.get());
  }
  // Find all tree entries used by the gathered values. If no common entries
  // are found, this is not a shuffle.
  // Here we build a set of tree nodes for each gathered value and try to find
  // the intersection between these sets. If we have at least one common
  // tree node for each gathered value - we have just a permutation of the
  // single vector. If we have 2 different sets, we're in a situation where we
  // have a permutation of 2 input vectors.
  SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs;
  DenseMap<Value *, int> UsedValuesEntry;
  for (Value *V : TE->Scalars) {
    if (isa<UndefValue>(V))
      continue;
    // Build a list of tree entries where V is used.
    SmallPtrSet<const TreeEntry *, 4> VToTEs;
    auto It = ValueToTEs.find(V);
    if (It != ValueToTEs.end())
      VToTEs = It->second;
    if (const TreeEntry *VTE = getTreeEntry(V))
      VToTEs.insert(VTE);
    if (VToTEs.empty())
      return None;
    if (UsedTEs.empty()) {
      // The first iteration, just insert the list of nodes to vector.
      UsedTEs.push_back(VToTEs);
    } else {
      // Need to check if there are any previously used tree nodes which use
      // V. If there are no such nodes, consider that we have another input
      // vector.
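      // E.g., if scalar %a was seen in gather entries {T1, T2} and the
      // current scalar %b only in {T2, T3}, the intersection narrows the
      // first set to {T2} and this is still a single-source permutation;
      // a scalar found only in some unrelated {T4} opens a second set.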
      SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs);
      unsigned Idx = 0;
      for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) {
        // Do we have a non-empty intersection of previously listed tree
        // entries and tree entries using current V?
        set_intersect(VToTEs, Set);
        if (!VToTEs.empty()) {
          // Yes, write the new subset and continue analysis for the next
          // scalar.
          Set.swap(VToTEs);
          break;
        }
        VToTEs = SavedVToTEs;
        ++Idx;
      }
      // No non-empty intersection found - need to add a second set of
      // possible source vectors.
      if (Idx == UsedTEs.size()) {
        // If the number of input vectors is greater than 2 - not a
        // permutation, fall back to the regular gather.
        if (UsedTEs.size() == 2)
          return None;
        UsedTEs.push_back(SavedVToTEs);
        Idx = UsedTEs.size() - 1;
      }
      UsedValuesEntry.try_emplace(V, Idx);
    }
  }

  unsigned VF = 0;
  if (UsedTEs.size() == 1) {
    // Try to find the perfect match in another gather node at first.
    auto It = find_if(UsedTEs.front(), [TE](const TreeEntry *EntryPtr) {
      return EntryPtr->isSame(TE->Scalars);
    });
    if (It != UsedTEs.front().end()) {
      Entries.push_back(*It);
      std::iota(Mask.begin(), Mask.end(), 0);
      return TargetTransformInfo::SK_PermuteSingleSrc;
    }
    // No perfect match, just shuffle, so choose the first tree node.
    Entries.push_back(*UsedTEs.front().begin());
  } else {
    // Try to find nodes with the same vector factor.
    assert(UsedTEs.size() == 2 && "Expected at max 2 permuted entries.");
    // FIXME: Shall be replaced by GetVF function once non-power-2 patch is
    // landed.
    auto &&GetVF = [](const TreeEntry *TE) {
      if (!TE->ReuseShuffleIndices.empty())
        return TE->ReuseShuffleIndices.size();
      return TE->Scalars.size();
    };
    DenseMap<int, const TreeEntry *> VFToTE;
    for (const TreeEntry *TE : UsedTEs.front())
      VFToTE.try_emplace(GetVF(TE), TE);
    for (const TreeEntry *TE : UsedTEs.back()) {
      auto It = VFToTE.find(GetVF(TE));
      if (It != VFToTE.end()) {
        VF = It->first;
        Entries.push_back(It->second);
        Entries.push_back(TE);
        break;
      }
    }
    // No 2 source vectors with the same vector factor - give up and do a
    // regular gather.
    if (Entries.empty())
      return None;
  }

  // Build a shuffle mask for better cost estimation and vector emission.
  for (int I = 0, E = TE->Scalars.size(); I < E; ++I) {
    Value *V = TE->Scalars[I];
    if (isa<UndefValue>(V))
      continue;
    unsigned Idx = UsedValuesEntry.lookup(V);
    const TreeEntry *VTE = Entries[Idx];
    int FoundLane = VTE->findLaneForValue(V);
    Mask[I] = Idx * VF + FoundLane;
    // Extra check required by isSingleSourceMaskImpl function (called by
    // ShuffleVectorInst::isSingleSourceMask).
    if (Mask[I] >= 2 * E)
      return None;
  }
  switch (Entries.size()) {
  case 1:
    return TargetTransformInfo::SK_PermuteSingleSrc;
  case 2:
    return TargetTransformInfo::SK_PermuteTwoSrc;
  default:
    break;
  }
  return None;
}

InstructionCost
BoUpSLP::getGatherCost(FixedVectorType *Ty,
                       const DenseSet<unsigned> &ShuffledIndices) const {
  unsigned NumElts = Ty->getNumElements();
  APInt DemandedElts = APInt::getZero(NumElts);
  for (unsigned I = 0; I < NumElts; ++I)
    if (!ShuffledIndices.count(I))
      DemandedElts.setBit(I);
  InstructionCost Cost =
      TTI->getScalarizationOverhead(Ty, DemandedElts, /*Insert*/ true,
                                    /*Extract*/ false);
  if (!ShuffledIndices.empty())
    Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty);
  return Cost;
}

InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  // Check if the same elements are inserted several times and count them as
  // shuffle candidates.
  DenseSet<unsigned> ShuffledElements;
  DenseSet<Value *> UniqueElements;
  // Iterate in reverse order so that the insert elements with the higher
  // cost are considered first.
  for (unsigned I = VL.size(); I > 0; --I) {
    unsigned Idx = I - 1;
    if (isConstant(VL[Idx]))
      continue;
    if (!UniqueElements.insert(VL[Idx]).second)
      ShuffledElements.insert(Idx);
  }
  return getGatherCost(VecTy, ShuffledElements);
}

// Perform operand reordering on the instructions in VL and return the
// reordered operands in Left and Right.
void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                             SmallVectorImpl<Value *> &Left,
                                             SmallVectorImpl<Value *> &Right,
                                             const DataLayout &DL,
                                             ScalarEvolution &SE,
                                             const BoUpSLP &R) {
  if (VL.empty())
    return;
  VLOperands Ops(VL, DL, SE, R);
  // Reorder the operands in place.
  Ops.reorder();
  Left = Ops.getVL(0);
  Right = Ops.getVL(1);
}

void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) {
  // Get the basic block this bundle is in. All instructions in the bundle
  // should be in this block.
  auto *Front = E->getMainOp();
  auto *BB = Front->getParent();
  assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool {
    auto *I = cast<Instruction>(V);
    return !E->isOpcodeOrAlt(I) || I->getParent() == BB;
  }));

  // The last instruction in the bundle in program order.
  Instruction *LastInst = nullptr;

  // Find the last instruction. The common case should be that BB has been
  // scheduled, and the last instruction is VL.back(). So we start with
  // VL.back() and iterate over schedule data until we reach the end of the
  // bundle. The end of the bundle is marked by null ScheduleData.
  if (BlocksSchedules.count(BB)) {
    auto *Bundle =
        BlocksSchedules[BB]->getScheduleData(E->isOneOf(E->Scalars.back()));
    if (Bundle && Bundle->isPartOfBundle())
      for (; Bundle; Bundle = Bundle->NextInBundle)
        if (Bundle->OpValue == Bundle->Inst)
          LastInst = Bundle->Inst;
  }

  // LastInst can still be null at this point if there's either not an entry
  // for BB in BlocksSchedules or there's no ScheduleData available for
  // VL.back(). This can be the case if buildTree_rec aborts for various
  // reasons (e.g., the maximum recursion depth is reached, the maximum region
  // size is reached, etc.). ScheduleData is initialized in the scheduling
  // "dry-run".
  //
  // If this happens, we can still find the last instruction by brute force.
  // We iterate forwards from Front (inclusive) until we either see all
  // instructions in the bundle or reach the end of the block. If Front is the
  // last instruction in program order, LastInst will be set to Front, and we
  // will visit all the remaining instructions in the block.
  //
  // One of the reasons we exit early from buildTree_rec is to place an upper
  // bound on compile-time. Thus, taking an additional compile-time hit here
  // is not ideal. However, this should be exceedingly rare since it requires
  // that we both exit early from buildTree_rec and that the bundle be
  // out-of-order (causing us to iterate all the way to the end of the block).
  if (!LastInst) {
    SmallPtrSet<Value *, 16> Bundle(E->Scalars.begin(), E->Scalars.end());
    for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
      if (Bundle.erase(&I) && E->isOpcodeOrAlt(&I))
        LastInst = &I;
      if (Bundle.empty())
        break;
    }
  }
  assert(LastInst && "Failed to find last instruction in bundle");

  // Set the insertion point after the last instruction in the bundle. Set the
  // debug location to Front.
  Builder.SetInsertPoint(BB, ++LastInst->getIterator());
  Builder.SetCurrentDebugLocation(Front->getDebugLoc());
}

Value *BoUpSLP::gather(ArrayRef<Value *> VL) {
  // List of instructions/lanes from the current block and/or the blocks which
  // are part of the current loop. These instructions will be inserted at the
  // end to make it possible to optimize loops and hoist invariant
  // instructions out of the loop's body with better chances for success.
  SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts;
  SmallSet<int, 4> PostponedIndices;
  Loop *L = LI->getLoopFor(Builder.GetInsertBlock());
  auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) {
    SmallPtrSet<BasicBlock *, 4> Visited;
    while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second)
      InsertBB = InsertBB->getSinglePredecessor();
    return InsertBB && InsertBB == InstBB;
  };
  for (int I = 0, E = VL.size(); I < E; ++I) {
    if (auto *Inst = dyn_cast<Instruction>(VL[I]))
      if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) ||
           getTreeEntry(Inst) || (L && (L->contains(Inst)))) &&
          PostponedIndices.insert(I).second)
        PostponedInsts.emplace_back(Inst, I);
  }

  auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) {
    Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos));
    auto *InsElt = dyn_cast<InsertElementInst>(Vec);
    if (!InsElt)
      return Vec;
    GatherSeq.insert(InsElt);
    CSEBlocks.insert(InsElt->getParent());
    // Add to our 'need-to-extract' list.
    if (TreeEntry *Entry = getTreeEntry(V)) {
      // Find which lane we need to extract.
      unsigned FoundLane = Entry->findLaneForValue(V);
      ExternalUses.emplace_back(V, InsElt, FoundLane);
    }
    return Vec;
  };
  Value *Val0 =
      isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0];
  FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size());
  Value *Vec = PoisonValue::get(VecTy);
  SmallVector<int> NonConsts;
  // Insert constant values first.
  for (int I = 0, E = VL.size(); I < E; ++I) {
    if (PostponedIndices.contains(I))
      continue;
    if (!isConstant(VL[I])) {
      NonConsts.push_back(I);
      continue;
    }
    Vec = CreateInsertElement(Vec, VL[I], I);
  }
  // Insert non-constant values.
  for (int I : NonConsts)
    Vec = CreateInsertElement(Vec, VL[I], I);
  // Append instructions, which are/may be part of the loop, at the end to
  // make it possible to hoist non-loop-based instructions.
  for (const std::pair<Value *, unsigned> &Pair : PostponedInsts)
    Vec = CreateInsertElement(Vec, Pair.first, Pair.second);

  return Vec;
}

namespace {
/// Merges shuffle masks and emits final shuffle instruction, if required.
class ShuffleInstructionBuilder {
  IRBuilderBase &Builder;
  const unsigned VF = 0;
  bool IsFinalized = false;
  SmallVector<int, 4> Mask;

public:
  ShuffleInstructionBuilder(IRBuilderBase &Builder, unsigned VF)
      : Builder(Builder), VF(VF) {}

  /// Adds a mask, inverting it before applying.
  void addInversedMask(ArrayRef<unsigned> SubMask) {
    if (SubMask.empty())
      return;
    SmallVector<int, 4> NewMask;
    inversePermutation(SubMask, NewMask);
    addMask(NewMask);
  }

  /// Adds masks, merging them into a single one.
  void addMask(ArrayRef<unsigned> SubMask) {
    SmallVector<int, 4> NewMask(SubMask.begin(), SubMask.end());
    addMask(NewMask);
  }

  void addMask(ArrayRef<int> SubMask) { ::addMask(Mask, SubMask); }

  Value *finalize(Value *V) {
    IsFinalized = true;
    unsigned ValueVF = cast<FixedVectorType>(V->getType())->getNumElements();
    if (VF == ValueVF && Mask.empty())
      return V;
    SmallVector<int, 4> NormalizedMask(VF, UndefMaskElem);
    std::iota(NormalizedMask.begin(), NormalizedMask.end(), 0);
    addMask(NormalizedMask);

    if (VF == ValueVF && ShuffleVectorInst::isIdentityMask(Mask))
      return V;
    return Builder.CreateShuffleVector(V, Mask, "shuffle");
  }

  ~ShuffleInstructionBuilder() {
    assert((IsFinalized || Mask.empty()) &&
           "Shuffle construction must be finalized.");
  }
};
} // namespace

Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  unsigned VF = VL.size();
  InstructionsState S = getSameOpcode(VL);
  if (S.getOpcode()) {
    if (TreeEntry *E = getTreeEntry(S.OpValue))
      if (E->isSame(VL)) {
        Value *V = vectorizeTree(E);
        if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) {
          if (!E->ReuseShuffleIndices.empty()) {
            // Reshuffle to get only unique values.
            // If some of the scalars are duplicated in the vectorization tree
            // entry, we do not vectorize them but instead generate a mask for
            // the reuses. But if there are several users of the same entry,
            // they may have different vectorization factors. This is
            // especially important for PHI nodes. In this case, we need to
            // adapt the resulting instruction for the user vectorization
            // factor and have to reshuffle it again to take only unique
            // elements of the vector. Without this code the function
            // incorrectly returns a reduced vector instruction with the same
            // elements, not with the unique ones.

            // block:
            // %phi = phi <2 x > { .., %entry} {%shuffle, %block}
            // %2 = shuffle <2 x > %phi, poison, <4 x > <1, 1, 0, 0>
            // ... (use %2)
            // %shuffle = shuffle <2 x> %2, poison, <2 x> {2, 0}
            // br %block
            SmallVector<int> UniqueIdxs(VF, UndefMaskElem);
            SmallSet<int, 4> UsedIdxs;
            int Pos = 0;
            int Sz = VL.size();
            for (int Idx : E->ReuseShuffleIndices) {
              if (Idx != Sz && Idx != UndefMaskElem &&
                  UsedIdxs.insert(Idx).second)
                UniqueIdxs[Idx] = Pos;
              ++Pos;
            }
            assert(VF >= UsedIdxs.size() && "Expected vectorization factor "
                                            "less than original vector size.");
            UniqueIdxs.append(VF - UsedIdxs.size(), UndefMaskElem);
            V = Builder.CreateShuffleVector(V, UniqueIdxs, "shrink.shuffle");
          } else {
            assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() &&
                   "Expected vectorization factor less "
                   "than original vector size.");
            SmallVector<int> UniformMask(VF, 0);
            std::iota(UniformMask.begin(), UniformMask.end(), 0);
            V = Builder.CreateShuffleVector(V, UniformMask, "shrink.shuffle");
          }
        }
        return V;
      }
  }

  // Check that every instruction appears once in this bundle.
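  // E.g., for VL = {%a, %b, %a, undef} the unique values are {%a, %b},
  // ReuseShuffleIndicies becomes {0, 1, 0, UndefMaskElem}, and the unique
  // list is padded with poison up to the power-of-two VF before gathering.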
  SmallVector<int> ReuseShuffleIndicies;
  SmallVector<Value *> UniqueValues;
  if (VL.size() > 2) {
    DenseMap<Value *, unsigned> UniquePositions;
    unsigned NumValues =
        std::distance(VL.begin(), find_if(reverse(VL), [](Value *V) {
                                    return !isa<UndefValue>(V);
                                  }).base());
    VF = std::max<unsigned>(VF, PowerOf2Ceil(NumValues));
    int UniqueVals = 0;
    for (Value *V : VL.drop_back(VL.size() - VF)) {
      if (isa<UndefValue>(V)) {
        ReuseShuffleIndicies.emplace_back(UndefMaskElem);
        continue;
      }
      if (isConstant(V)) {
        ReuseShuffleIndicies.emplace_back(UniqueValues.size());
        UniqueValues.emplace_back(V);
        continue;
      }
      auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
      ReuseShuffleIndicies.emplace_back(Res.first->second);
      if (Res.second) {
        UniqueValues.emplace_back(V);
        ++UniqueVals;
      }
    }
    if (UniqueVals == 1 && UniqueValues.size() == 1) {
      // Emit pure splat vector.
      ReuseShuffleIndicies.append(VF - ReuseShuffleIndicies.size(),
                                  UndefMaskElem);
    } else if (UniqueValues.size() >= VF - 1 || UniqueValues.size() <= 1) {
      ReuseShuffleIndicies.clear();
      UniqueValues.clear();
      UniqueValues.append(VL.begin(), std::next(VL.begin(), NumValues));
    }
    UniqueValues.append(VF - UniqueValues.size(),
                        PoisonValue::get(VL[0]->getType()));
    VL = UniqueValues;
  }

  ShuffleInstructionBuilder ShuffleBuilder(Builder, VF);
  Value *Vec = gather(VL);
  if (!ReuseShuffleIndicies.empty()) {
    ShuffleBuilder.addMask(ReuseShuffleIndicies);
    Vec = ShuffleBuilder.finalize(Vec);
    if (auto *I = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(I);
      CSEBlocks.insert(I->getParent());
    }
  }
  return Vec;
}

Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
  unsigned VF = E->Scalars.size();
  if (NeedToShuffleReuses)
    VF = E->ReuseShuffleIndices.size();
  ShuffleInstructionBuilder ShuffleBuilder(Builder, VF);
  if (E->State == TreeEntry::NeedToGather) {
    if (E->getMainOp())
      setInsertPointAfterBundle(E);
    Value *Vec;
    SmallVector<int> Mask;
    SmallVector<const TreeEntry *> Entries;
    Optional<TargetTransformInfo::ShuffleKind> Shuffle =
        isGatherShuffledEntry(E, Mask, Entries);
    if (Shuffle.hasValue()) {
      assert((Entries.size() == 1 || Entries.size() == 2) &&
             "Expected shuffle of 1 or 2 entries.");
      Vec = Builder.CreateShuffleVector(Entries.front()->VectorizedValue,
                                        Entries.back()->VectorizedValue, Mask);
    } else {
      Vec = gather(E->Scalars);
    }
    if (NeedToShuffleReuses) {
      ShuffleBuilder.addMask(E->ReuseShuffleIndices);
      Vec = ShuffleBuilder.finalize(Vec);
      if (auto *I = dyn_cast<Instruction>(Vec)) {
        GatherSeq.insert(I);
        CSEBlocks.insert(I->getParent());
      }
    }
    E->VectorizedValue = Vec;
    return Vec;
  }

  assert((E->State == TreeEntry::Vectorize ||
          E->State == TreeEntry::ScatterVectorize) &&
         "Unhandled state");
  unsigned ShuffleOrOp =
      E->isAltShuffle() ?
(unsigned)Instruction::ShuffleVector : E->getOpcode(); 5939 Instruction *VL0 = E->getMainOp(); 5940 Type *ScalarTy = VL0->getType(); 5941 if (auto *Store = dyn_cast<StoreInst>(VL0)) 5942 ScalarTy = Store->getValueOperand()->getType(); 5943 else if (auto *IE = dyn_cast<InsertElementInst>(VL0)) 5944 ScalarTy = IE->getOperand(1)->getType(); 5945 auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size()); 5946 switch (ShuffleOrOp) { 5947 case Instruction::PHI: { 5948 assert( 5949 (E->ReorderIndices.empty() || E != VectorizableTree.front().get()) && 5950 "PHI reordering is free."); 5951 auto *PH = cast<PHINode>(VL0); 5952 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI()); 5953 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 5954 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 5955 Value *V = NewPhi; 5956 ShuffleBuilder.addInversedMask(E->ReorderIndices); 5957 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 5958 V = ShuffleBuilder.finalize(V); 5959 5960 E->VectorizedValue = V; 5961 5962 // PHINodes may have multiple entries from the same block. We want to 5963 // visit every block once. 5964 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 5965 5966 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 5967 ValueList Operands; 5968 BasicBlock *IBB = PH->getIncomingBlock(i); 5969 5970 if (!VisitedBBs.insert(IBB).second) { 5971 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 5972 continue; 5973 } 5974 5975 Builder.SetInsertPoint(IBB->getTerminator()); 5976 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 5977 Value *Vec = vectorizeTree(E->getOperand(i)); 5978 NewPhi->addIncoming(Vec, IBB); 5979 } 5980 5981 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 5982 "Invalid number of incoming values"); 5983 return V; 5984 } 5985 5986 case Instruction::ExtractElement: { 5987 Value *V = E->getSingleOperand(0); 5988 Builder.SetInsertPoint(VL0); 5989 ShuffleBuilder.addInversedMask(E->ReorderIndices); 5990 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 5991 V = ShuffleBuilder.finalize(V); 5992 E->VectorizedValue = V; 5993 return V; 5994 } 5995 case Instruction::ExtractValue: { 5996 auto *LI = cast<LoadInst>(E->getSingleOperand(0)); 5997 Builder.SetInsertPoint(LI); 5998 auto *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace()); 5999 Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy); 6000 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign()); 6001 Value *NewV = propagateMetadata(V, E->Scalars); 6002 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6003 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6004 NewV = ShuffleBuilder.finalize(NewV); 6005 E->VectorizedValue = NewV; 6006 return NewV; 6007 } 6008 case Instruction::InsertElement: { 6009 assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique"); 6010 Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back())); 6011 Value *V = vectorizeTree(E->getOperand(1)); 6012 6013 // Create InsertVector shuffle if necessary 6014 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 6015 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0)); 6016 })); 6017 const unsigned NumElts = 6018 cast<FixedVectorType>(FirstInsert->getType())->getNumElements(); 6019 const unsigned NumScalars = E->Scalars.size(); 6020 6021 unsigned Offset = *getInsertIndex(VL0, 0); 6022 assert(Offset < NumElts && "Failed to find vector index offset"); 6023 6024 // Create shuffle to resize vector 6025 SmallVector<int> Mask; 
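    // A sketch of the resize mask with assumed sizes (NumScalars = 2,
    // NumElts = 4) and no reordering: Mask becomes <0, 1, undef, undef>,
    // i.e. the two vectorized scalars are widened to the destination vector
    // width before the insert-position shuffle below is applied.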
6026 if (!E->ReorderIndices.empty()) { 6027 inversePermutation(E->ReorderIndices, Mask); 6028 Mask.append(NumElts - NumScalars, UndefMaskElem); 6029 } else { 6030 Mask.assign(NumElts, UndefMaskElem); 6031 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0); 6032 } 6033 // Create InsertVector shuffle if necessary 6034 bool IsIdentity = true; 6035 SmallVector<int> PrevMask(NumElts, UndefMaskElem); 6036 Mask.swap(PrevMask); 6037 for (unsigned I = 0; I < NumScalars; ++I) { 6038 Value *Scalar = E->Scalars[PrevMask[I]]; 6039 Optional<int> InsertIdx = getInsertIndex(Scalar, 0); 6040 if (!InsertIdx || *InsertIdx == UndefMaskElem) 6041 continue; 6042 IsIdentity &= *InsertIdx - Offset == I; 6043 Mask[*InsertIdx - Offset] = I; 6044 } 6045 if (!IsIdentity || NumElts != NumScalars) 6046 V = Builder.CreateShuffleVector(V, Mask); 6047 6048 if ((!IsIdentity || Offset != 0 || 6049 !isa<UndefValue>(FirstInsert->getOperand(0))) && 6050 NumElts != NumScalars) { 6051 SmallVector<int> InsertMask(NumElts); 6052 std::iota(InsertMask.begin(), InsertMask.end(), 0); 6053 for (unsigned I = 0; I < NumElts; I++) { 6054 if (Mask[I] != UndefMaskElem) 6055 InsertMask[Offset + I] = NumElts + I; 6056 } 6057 6058 V = Builder.CreateShuffleVector( 6059 FirstInsert->getOperand(0), V, InsertMask, 6060 cast<Instruction>(E->Scalars.back())->getName()); 6061 } 6062 6063 ++NumVectorInstructions; 6064 E->VectorizedValue = V; 6065 return V; 6066 } 6067 case Instruction::ZExt: 6068 case Instruction::SExt: 6069 case Instruction::FPToUI: 6070 case Instruction::FPToSI: 6071 case Instruction::FPExt: 6072 case Instruction::PtrToInt: 6073 case Instruction::IntToPtr: 6074 case Instruction::SIToFP: 6075 case Instruction::UIToFP: 6076 case Instruction::Trunc: 6077 case Instruction::FPTrunc: 6078 case Instruction::BitCast: { 6079 setInsertPointAfterBundle(E); 6080 6081 Value *InVec = vectorizeTree(E->getOperand(0)); 6082 6083 if (E->VectorizedValue) { 6084 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6085 return E->VectorizedValue; 6086 } 6087 6088 auto *CI = cast<CastInst>(VL0); 6089 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy); 6090 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6091 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6092 V = ShuffleBuilder.finalize(V); 6093 6094 E->VectorizedValue = V; 6095 ++NumVectorInstructions; 6096 return V; 6097 } 6098 case Instruction::FCmp: 6099 case Instruction::ICmp: { 6100 setInsertPointAfterBundle(E); 6101 6102 Value *L = vectorizeTree(E->getOperand(0)); 6103 Value *R = vectorizeTree(E->getOperand(1)); 6104 6105 if (E->VectorizedValue) { 6106 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6107 return E->VectorizedValue; 6108 } 6109 6110 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 6111 Value *V = Builder.CreateCmp(P0, L, R); 6112 propagateIRFlags(V, E->Scalars, VL0); 6113 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6114 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6115 V = ShuffleBuilder.finalize(V); 6116 6117 E->VectorizedValue = V; 6118 ++NumVectorInstructions; 6119 return V; 6120 } 6121 case Instruction::Select: { 6122 setInsertPointAfterBundle(E); 6123 6124 Value *Cond = vectorizeTree(E->getOperand(0)); 6125 Value *True = vectorizeTree(E->getOperand(1)); 6126 Value *False = vectorizeTree(E->getOperand(2)); 6127 6128 if (E->VectorizedValue) { 6129 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6130 return E->VectorizedValue; 6131 } 6132 6133 Value *V = Builder.CreateSelect(Cond, 
True, False); 6134 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6135 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6136 V = ShuffleBuilder.finalize(V); 6137 6138 E->VectorizedValue = V; 6139 ++NumVectorInstructions; 6140 return V; 6141 } 6142 case Instruction::FNeg: { 6143 setInsertPointAfterBundle(E); 6144 6145 Value *Op = vectorizeTree(E->getOperand(0)); 6146 6147 if (E->VectorizedValue) { 6148 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6149 return E->VectorizedValue; 6150 } 6151 6152 Value *V = Builder.CreateUnOp( 6153 static_cast<Instruction::UnaryOps>(E->getOpcode()), Op); 6154 propagateIRFlags(V, E->Scalars, VL0); 6155 if (auto *I = dyn_cast<Instruction>(V)) 6156 V = propagateMetadata(I, E->Scalars); 6157 6158 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6159 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6160 V = ShuffleBuilder.finalize(V); 6161 6162 E->VectorizedValue = V; 6163 ++NumVectorInstructions; 6164 6165 return V; 6166 } 6167 case Instruction::Add: 6168 case Instruction::FAdd: 6169 case Instruction::Sub: 6170 case Instruction::FSub: 6171 case Instruction::Mul: 6172 case Instruction::FMul: 6173 case Instruction::UDiv: 6174 case Instruction::SDiv: 6175 case Instruction::FDiv: 6176 case Instruction::URem: 6177 case Instruction::SRem: 6178 case Instruction::FRem: 6179 case Instruction::Shl: 6180 case Instruction::LShr: 6181 case Instruction::AShr: 6182 case Instruction::And: 6183 case Instruction::Or: 6184 case Instruction::Xor: { 6185 setInsertPointAfterBundle(E); 6186 6187 Value *LHS = vectorizeTree(E->getOperand(0)); 6188 Value *RHS = vectorizeTree(E->getOperand(1)); 6189 6190 if (E->VectorizedValue) { 6191 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6192 return E->VectorizedValue; 6193 } 6194 6195 Value *V = Builder.CreateBinOp( 6196 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, 6197 RHS); 6198 propagateIRFlags(V, E->Scalars, VL0); 6199 if (auto *I = dyn_cast<Instruction>(V)) 6200 V = propagateMetadata(I, E->Scalars); 6201 6202 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6203 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6204 V = ShuffleBuilder.finalize(V); 6205 6206 E->VectorizedValue = V; 6207 ++NumVectorInstructions; 6208 6209 return V; 6210 } 6211 case Instruction::Load: { 6212 // Loads are inserted at the head of the tree because we don't want to 6213 // sink them all the way down past store instructions. 6214 setInsertPointAfterBundle(E); 6215 6216 LoadInst *LI = cast<LoadInst>(VL0); 6217 Instruction *NewLI; 6218 unsigned AS = LI->getPointerAddressSpace(); 6219 Value *PO = LI->getPointerOperand(); 6220 if (E->State == TreeEntry::Vectorize) { 6221 6222 Value *VecPtr = Builder.CreateBitCast(PO, VecTy->getPointerTo(AS)); 6223 6224 // The pointer operand uses an in-tree scalar so we add the new BitCast 6225 // to ExternalUses list to make sure that an extract will be generated 6226 // in the future. 6227 if (TreeEntry *Entry = getTreeEntry(PO)) { 6228 // Find which lane we need to extract. 6229 unsigned FoundLane = Entry->findLaneForValue(PO); 6230 ExternalUses.emplace_back(PO, cast<User>(VecPtr), FoundLane); 6231 } 6232 6233 NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign()); 6234 } else { 6235 assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state"); 6236 Value *VecPtr = vectorizeTree(E->getOperand(0)); 6237 // Use the minimum alignment of the gathered loads. 
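      // E.g. (illustrative alignments only): if the scalar loads are aligned
      // to 16, 8 and 4 bytes, commonAlignment() folds them to 4, which is a
      // conservatively safe alignment for the masked gather emitted below.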
6238 Align CommonAlignment = LI->getAlign(); 6239 for (Value *V : E->Scalars) 6240 CommonAlignment = 6241 commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign()); 6242 NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment); 6243 } 6244 Value *V = propagateMetadata(NewLI, E->Scalars); 6245 6246 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6247 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6248 V = ShuffleBuilder.finalize(V); 6249 E->VectorizedValue = V; 6250 ++NumVectorInstructions; 6251 return V; 6252 } 6253 case Instruction::Store: { 6254 auto *SI = cast<StoreInst>(VL0); 6255 unsigned AS = SI->getPointerAddressSpace(); 6256 6257 setInsertPointAfterBundle(E); 6258 6259 Value *VecValue = vectorizeTree(E->getOperand(0)); 6260 ShuffleBuilder.addMask(E->ReorderIndices); 6261 VecValue = ShuffleBuilder.finalize(VecValue); 6262 6263 Value *ScalarPtr = SI->getPointerOperand(); 6264 Value *VecPtr = Builder.CreateBitCast( 6265 ScalarPtr, VecValue->getType()->getPointerTo(AS)); 6266 StoreInst *ST = Builder.CreateAlignedStore(VecValue, VecPtr, 6267 SI->getAlign()); 6268 6269 // The pointer operand uses an in-tree scalar, so add the new BitCast to 6270 // ExternalUses to make sure that an extract will be generated in the 6271 // future. 6272 if (TreeEntry *Entry = getTreeEntry(ScalarPtr)) { 6273 // Find which lane we need to extract. 6274 unsigned FoundLane = Entry->findLaneForValue(ScalarPtr); 6275 ExternalUses.push_back( 6276 ExternalUser(ScalarPtr, cast<User>(VecPtr), FoundLane)); 6277 } 6278 6279 Value *V = propagateMetadata(ST, E->Scalars); 6280 6281 E->VectorizedValue = V; 6282 ++NumVectorInstructions; 6283 return V; 6284 } 6285 case Instruction::GetElementPtr: { 6286 setInsertPointAfterBundle(E); 6287 6288 Value *Op0 = vectorizeTree(E->getOperand(0)); 6289 6290 std::vector<Value *> OpVecs; 6291 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e; 6292 ++j) { 6293 ValueList &VL = E->getOperand(j); 6294 // Need to cast all elements to the same type before vectorization to 6295 // avoid crash. 6296 Type *VL0Ty = VL0->getOperand(j)->getType(); 6297 Type *Ty = llvm::all_of( 6298 VL, [VL0Ty](Value *V) { return VL0Ty == V->getType(); }) 6299 ? 
VL0Ty 6300 : DL->getIndexType(cast<GetElementPtrInst>(VL0) 6301 ->getPointerOperandType() 6302 ->getScalarType()); 6303 for (Value *&V : VL) { 6304 auto *CI = cast<ConstantInt>(V); 6305 V = ConstantExpr::getIntegerCast(CI, Ty, 6306 CI->getValue().isSignBitSet()); 6307 } 6308 Value *OpVec = vectorizeTree(VL); 6309 OpVecs.push_back(OpVec); 6310 } 6311 6312 Value *V = Builder.CreateGEP( 6313 cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs); 6314 if (Instruction *I = dyn_cast<Instruction>(V)) 6315 V = propagateMetadata(I, E->Scalars); 6316 6317 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6318 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6319 V = ShuffleBuilder.finalize(V); 6320 6321 E->VectorizedValue = V; 6322 ++NumVectorInstructions; 6323 6324 return V; 6325 } 6326 case Instruction::Call: { 6327 CallInst *CI = cast<CallInst>(VL0); 6328 setInsertPointAfterBundle(E); 6329 6330 Intrinsic::ID IID = Intrinsic::not_intrinsic; 6331 if (Function *FI = CI->getCalledFunction()) 6332 IID = FI->getIntrinsicID(); 6333 6334 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6335 6336 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 6337 bool UseIntrinsic = ID != Intrinsic::not_intrinsic && 6338 VecCallCosts.first <= VecCallCosts.second; 6339 6340 Value *ScalarArg = nullptr; 6341 std::vector<Value *> OpVecs; 6342 SmallVector<Type *, 2> TysForDecl = 6343 {FixedVectorType::get(CI->getType(), E->Scalars.size())}; 6344 for (int j = 0, e = CI->arg_size(); j < e; ++j) { 6345 ValueList OpVL; 6346 // Some intrinsics have scalar arguments. This argument should not be 6347 // vectorized. 6348 if (UseIntrinsic && hasVectorInstrinsicScalarOpd(IID, j)) { 6349 CallInst *CEI = cast<CallInst>(VL0); 6350 ScalarArg = CEI->getArgOperand(j); 6351 OpVecs.push_back(CEI->getArgOperand(j)); 6352 if (hasVectorInstrinsicOverloadedScalarOpd(IID, j)) 6353 TysForDecl.push_back(ScalarArg->getType()); 6354 continue; 6355 } 6356 6357 Value *OpVec = vectorizeTree(E->getOperand(j)); 6358 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); 6359 OpVecs.push_back(OpVec); 6360 } 6361 6362 Function *CF; 6363 if (!UseIntrinsic) { 6364 VFShape Shape = 6365 VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>( 6366 VecTy->getNumElements())), 6367 false /*HasGlobalPred*/); 6368 CF = VFDatabase(*CI).getVectorizedFunction(Shape); 6369 } else { 6370 CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl); 6371 } 6372 6373 SmallVector<OperandBundleDef, 1> OpBundles; 6374 CI->getOperandBundlesAsDefs(OpBundles); 6375 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); 6376 6377 // The scalar argument uses an in-tree scalar so we add the new vectorized 6378 // call to ExternalUses list to make sure that an extract will be 6379 // generated in the future. 6380 if (ScalarArg) { 6381 if (TreeEntry *Entry = getTreeEntry(ScalarArg)) { 6382 // Find which lane we need to extract. 
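        // E.g. (hypothetical): if ScalarArg is also lane 1 of a vectorized
        // entry {%x, %y}, FoundLane is 1 and the ExternalUses entry recorded
        // below later forces an extractelement of lane 1 to feed this call.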
6383 unsigned FoundLane = Entry->findLaneForValue(ScalarArg); 6384 ExternalUses.push_back( 6385 ExternalUser(ScalarArg, cast<User>(V), FoundLane)); 6386 } 6387 } 6388 6389 propagateIRFlags(V, E->Scalars, VL0); 6390 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6391 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6392 V = ShuffleBuilder.finalize(V); 6393 6394 E->VectorizedValue = V; 6395 ++NumVectorInstructions; 6396 return V; 6397 } 6398 case Instruction::ShuffleVector: { 6399 assert(E->isAltShuffle() && 6400 ((Instruction::isBinaryOp(E->getOpcode()) && 6401 Instruction::isBinaryOp(E->getAltOpcode())) || 6402 (Instruction::isCast(E->getOpcode()) && 6403 Instruction::isCast(E->getAltOpcode()))) && 6404 "Invalid Shuffle Vector Operand"); 6405 6406 Value *LHS = nullptr, *RHS = nullptr; 6407 if (Instruction::isBinaryOp(E->getOpcode())) { 6408 setInsertPointAfterBundle(E); 6409 LHS = vectorizeTree(E->getOperand(0)); 6410 RHS = vectorizeTree(E->getOperand(1)); 6411 } else { 6412 setInsertPointAfterBundle(E); 6413 LHS = vectorizeTree(E->getOperand(0)); 6414 } 6415 6416 if (E->VectorizedValue) { 6417 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6418 return E->VectorizedValue; 6419 } 6420 6421 Value *V0, *V1; 6422 if (Instruction::isBinaryOp(E->getOpcode())) { 6423 V0 = Builder.CreateBinOp( 6424 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS); 6425 V1 = Builder.CreateBinOp( 6426 static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS); 6427 } else { 6428 V0 = Builder.CreateCast( 6429 static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy); 6430 V1 = Builder.CreateCast( 6431 static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy); 6432 } 6433 6434 // Create shuffle to take alternate operations from the vector. 6435 // Also, gather up main and alt scalar ops to propagate IR flags to 6436 // each vector operation. 6437 ValueList OpScalars, AltScalars; 6438 SmallVector<int> Mask; 6439 buildSuffleEntryMask( 6440 E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices, 6441 [E](Instruction *I) { 6442 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 6443 return I->getOpcode() == E->getAltOpcode(); 6444 }, 6445 Mask, &OpScalars, &AltScalars); 6446 6447 propagateIRFlags(V0, OpScalars); 6448 propagateIRFlags(V1, AltScalars); 6449 6450 Value *V = Builder.CreateShuffleVector(V0, V1, Mask); 6451 if (Instruction *I = dyn_cast<Instruction>(V)) 6452 V = propagateMetadata(I, E->Scalars); 6453 V = ShuffleBuilder.finalize(V); 6454 6455 E->VectorizedValue = V; 6456 ++NumVectorInstructions; 6457 6458 return V; 6459 } 6460 default: 6461 llvm_unreachable("unknown inst"); 6462 } 6463 return nullptr; 6464 } 6465 6466 Value *BoUpSLP::vectorizeTree() { 6467 ExtraValueToDebugLocsMap ExternallyUsedValues; 6468 return vectorizeTree(ExternallyUsedValues); 6469 } 6470 6471 Value * 6472 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { 6473 // All blocks must be scheduled before any instructions are inserted. 6474 for (auto &BSIter : BlocksSchedules) { 6475 scheduleBlock(BSIter.second.get()); 6476 } 6477 6478 Builder.SetInsertPoint(&F->getEntryBlock().front()); 6479 auto *VectorRoot = vectorizeTree(VectorizableTree[0].get()); 6480 6481 // If the vectorized tree can be rewritten in a smaller type, we truncate the 6482 // vectorized root. InstCombine will then rewrite the entire expression. We 6483 // sign extend the extracted values below. 
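  // For illustration (assumed widths, not derived from this function): if
  // MinBWs says the tree needs only 8 bits but the root produces <4 x i32>,
  // the code below emits
  //   %root.trunc = trunc <4 x i32> %root to <4 x i8>
  // and each external use is then rebuilt via extractelement plus sext/zext
  // back to the original scalar type.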
6484 auto *ScalarRoot = VectorizableTree[0]->Scalars[0]; 6485 if (MinBWs.count(ScalarRoot)) { 6486 if (auto *I = dyn_cast<Instruction>(VectorRoot)) { 6487 // If current instr is a phi and not the last phi, insert it after the 6488 // last phi node. 6489 if (isa<PHINode>(I)) 6490 Builder.SetInsertPoint(&*I->getParent()->getFirstInsertionPt()); 6491 else 6492 Builder.SetInsertPoint(&*++BasicBlock::iterator(I)); 6493 } 6494 auto BundleWidth = VectorizableTree[0]->Scalars.size(); 6495 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 6496 auto *VecTy = FixedVectorType::get(MinTy, BundleWidth); 6497 auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy); 6498 VectorizableTree[0]->VectorizedValue = Trunc; 6499 } 6500 6501 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() 6502 << " values .\n"); 6503 6504 // Extract all of the elements with the external uses. 6505 for (const auto &ExternalUse : ExternalUses) { 6506 Value *Scalar = ExternalUse.Scalar; 6507 llvm::User *User = ExternalUse.User; 6508 6509 // Skip users that we already RAUW. This happens when one instruction 6510 // has multiple uses of the same value. 6511 if (User && !is_contained(Scalar->users(), User)) 6512 continue; 6513 TreeEntry *E = getTreeEntry(Scalar); 6514 assert(E && "Invalid scalar"); 6515 assert(E->State != TreeEntry::NeedToGather && 6516 "Extracting from a gather list"); 6517 6518 Value *Vec = E->VectorizedValue; 6519 assert(Vec && "Can't find vectorizable value"); 6520 6521 Value *Lane = Builder.getInt32(ExternalUse.Lane); 6522 auto ExtractAndExtendIfNeeded = [&](Value *Vec) { 6523 if (Scalar->getType() != Vec->getType()) { 6524 Value *Ex; 6525 // "Reuse" the existing extract to improve final codegen. 6526 if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) { 6527 Ex = Builder.CreateExtractElement(ES->getOperand(0), 6528 ES->getOperand(1)); 6529 } else { 6530 Ex = Builder.CreateExtractElement(Vec, Lane); 6531 } 6532 // If necessary, sign-extend or zero-extend ScalarRoot 6533 // to the larger type. 6534 if (!MinBWs.count(ScalarRoot)) 6535 return Ex; 6536 if (MinBWs[ScalarRoot].second) 6537 return Builder.CreateSExt(Ex, Scalar->getType()); 6538 return Builder.CreateZExt(Ex, Scalar->getType()); 6539 } 6540 assert(isa<FixedVectorType>(Scalar->getType()) && 6541 isa<InsertElementInst>(Scalar) && 6542 "In-tree scalar of vector type is not insertelement?"); 6543 return Vec; 6544 }; 6545 // If User == nullptr, the Scalar is used as extra arg. Generate 6546 // ExtractElement instruction and update the record for this scalar in 6547 // ExternallyUsedValues. 6548 if (!User) { 6549 assert(ExternallyUsedValues.count(Scalar) && 6550 "Scalar with nullptr as an external user must be registered in " 6551 "ExternallyUsedValues map"); 6552 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 6553 Builder.SetInsertPoint(VecI->getParent(), 6554 std::next(VecI->getIterator())); 6555 } else { 6556 Builder.SetInsertPoint(&F->getEntryBlock().front()); 6557 } 6558 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 6559 CSEBlocks.insert(cast<Instruction>(Scalar)->getParent()); 6560 auto &NewInstLocs = ExternallyUsedValues[NewInst]; 6561 auto It = ExternallyUsedValues.find(Scalar); 6562 assert(It != ExternallyUsedValues.end() && 6563 "Externally used scalar is not found in ExternallyUsedValues"); 6564 NewInstLocs.append(It->second); 6565 ExternallyUsedValues.erase(Scalar); 6566 // Required to update internally referenced instructions. 
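      // E.g. (hypothetical): if %s was registered as an extra argument of a
      // horizontal reduction and is also read by another tree entry, the
      // replaceAllUsesWith below redirects that remaining in-tree reference
      // to the freshly extracted value.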
6567 Scalar->replaceAllUsesWith(NewInst); 6568 continue; 6569 } 6570 6571 // Generate extracts for out-of-tree users. 6572 // Find the insertion point for the extractelement lane. 6573 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 6574 if (PHINode *PH = dyn_cast<PHINode>(User)) { 6575 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { 6576 if (PH->getIncomingValue(i) == Scalar) { 6577 Instruction *IncomingTerminator = 6578 PH->getIncomingBlock(i)->getTerminator(); 6579 if (isa<CatchSwitchInst>(IncomingTerminator)) { 6580 Builder.SetInsertPoint(VecI->getParent(), 6581 std::next(VecI->getIterator())); 6582 } else { 6583 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); 6584 } 6585 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 6586 CSEBlocks.insert(PH->getIncomingBlock(i)); 6587 PH->setOperand(i, NewInst); 6588 } 6589 } 6590 } else { 6591 Builder.SetInsertPoint(cast<Instruction>(User)); 6592 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 6593 CSEBlocks.insert(cast<Instruction>(User)->getParent()); 6594 User->replaceUsesOfWith(Scalar, NewInst); 6595 } 6596 } else { 6597 Builder.SetInsertPoint(&F->getEntryBlock().front()); 6598 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 6599 CSEBlocks.insert(&F->getEntryBlock()); 6600 User->replaceUsesOfWith(Scalar, NewInst); 6601 } 6602 6603 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 6604 } 6605 6606 // For each vectorized value: 6607 for (auto &TEPtr : VectorizableTree) { 6608 TreeEntry *Entry = TEPtr.get(); 6609 6610 // No need to handle users of gathered values. 6611 if (Entry->State == TreeEntry::NeedToGather) 6612 continue; 6613 6614 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 6615 6616 // For each lane: 6617 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 6618 Value *Scalar = Entry->Scalars[Lane]; 6619 6620 #ifndef NDEBUG 6621 Type *Ty = Scalar->getType(); 6622 if (!Ty->isVoidTy()) { 6623 for (User *U : Scalar->users()) { 6624 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 6625 6626 // It is legal to delete users in the ignorelist. 6627 assert((getTreeEntry(U) || is_contained(UserIgnoreList, U) || 6628 (isa_and_nonnull<Instruction>(U) && 6629 isDeleted(cast<Instruction>(U)))) && 6630 "Deleting out-of-tree value"); 6631 } 6632 } 6633 #endif 6634 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 6635 eraseInstruction(cast<Instruction>(Scalar)); 6636 } 6637 } 6638 6639 Builder.ClearInsertionPoint(); 6640 InstrElementSize.clear(); 6641 6642 return VectorizableTree[0]->VectorizedValue; 6643 } 6644 6645 void BoUpSLP::optimizeGatherSequence() { 6646 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size() 6647 << " gather sequences instructions.\n"); 6648 // LICM InsertElementInst sequences. 6649 for (Instruction *I : GatherSeq) { 6650 if (isDeleted(I)) 6651 continue; 6652 6653 // Check if this block is inside a loop. 6654 Loop *L = LI->getLoopFor(I->getParent()); 6655 if (!L) 6656 continue; 6657 6658 // Check if it has a preheader. 6659 BasicBlock *PreHeader = L->getLoopPreheader(); 6660 if (!PreHeader) 6661 continue; 6662 6663 // If the vector or the element that we insert into it are 6664 // instructions that are defined in this basic block then we can't 6665 // hoist this instruction. 
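    // A sketch of the hoisting performed here (illustrative IR only):
    //   loop:
    //     %g = insertelement <2 x i32> %inv.vec, i32 %inv, i32 0
    // is moved to the preheader when both %inv.vec and %inv are defined
    // outside the loop, so the gather is not re-executed on every iteration.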
6666 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); 6667 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); 6668 if (Op0 && L->contains(Op0)) 6669 continue; 6670 if (Op1 && L->contains(Op1)) 6671 continue; 6672 6673 // We can hoist this instruction. Move it to the pre-header. 6674 I->moveBefore(PreHeader->getTerminator()); 6675 } 6676 6677 // Make a list of all reachable blocks in our CSE queue. 6678 SmallVector<const DomTreeNode *, 8> CSEWorkList; 6679 CSEWorkList.reserve(CSEBlocks.size()); 6680 for (BasicBlock *BB : CSEBlocks) 6681 if (DomTreeNode *N = DT->getNode(BB)) { 6682 assert(DT->isReachableFromEntry(N)); 6683 CSEWorkList.push_back(N); 6684 } 6685 6686 // Sort blocks by domination. This ensures we visit a block after all blocks 6687 // dominating it are visited. 6688 llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) { 6689 assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) && 6690 "Different nodes should have different DFS numbers"); 6691 return A->getDFSNumIn() < B->getDFSNumIn(); 6692 }); 6693 6694 // Perform O(N^2) search over the gather sequences and merge identical 6695 // instructions. TODO: We can further optimize this scan if we split the 6696 // instructions into different buckets based on the insert lane. 6697 SmallVector<Instruction *, 16> Visited; 6698 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 6699 assert(*I && 6700 (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 6701 "Worklist not sorted properly!"); 6702 BasicBlock *BB = (*I)->getBlock(); 6703 // For all instructions in blocks containing gather sequences: 6704 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 6705 if (isDeleted(&In)) 6706 continue; 6707 if (!isa<InsertElementInst>(&In) && !isa<ExtractElementInst>(&In) && 6708 !isa<ShuffleVectorInst>(&In)) 6709 continue; 6710 6711 // Check if we can replace this instruction with any of the 6712 // visited instructions. 6713 bool Replaced = false; 6714 for (Instruction *v : Visited) { 6715 if (In.isIdenticalTo(v) && 6716 DT->dominates(v->getParent(), In.getParent())) { 6717 In.replaceAllUsesWith(v); 6718 eraseInstruction(&In); 6719 Replaced = true; 6720 break; 6721 } 6722 } 6723 if (!Replaced) { 6724 assert(!is_contained(Visited, &In)); 6725 Visited.push_back(&In); 6726 } 6727 } 6728 } 6729 CSEBlocks.clear(); 6730 GatherSeq.clear(); 6731 } 6732 6733 // Groups the instructions to a bundle (which is then a single scheduling entity) 6734 // and schedules instructions until the bundle gets ready. 6735 Optional<BoUpSLP::ScheduleData *> 6736 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 6737 const InstructionsState &S) { 6738 // No need to schedule PHIs, insertelement, extractelement and extractvalue 6739 // instructions. 6740 if (isa<PHINode>(S.OpValue) || isVectorLikeInstWithConstOps(S.OpValue)) 6741 return nullptr; 6742 6743 // Initialize the instruction bundle. 6744 Instruction *OldScheduleEnd = ScheduleEnd; 6745 ScheduleData *PrevInBundle = nullptr; 6746 ScheduleData *Bundle = nullptr; 6747 bool ReSchedule = false; 6748 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n"); 6749 6750 auto &&TryScheduleBundle = [this, OldScheduleEnd, SLP](bool ReSchedule, 6751 ScheduleData *Bundle) { 6752 // The scheduling region got new instructions at the lower end (or it is a 6753 // new region for the first bundle). This makes it necessary to 6754 // recalculate all dependencies. 
    // It is seldom that this needs to be done a second time after adding the
    // initial bundle to the region.
    if (ScheduleEnd != OldScheduleEnd) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode())
        doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); });
      ReSchedule = true;
    }
    if (ReSchedule) {
      resetSchedule();
      initialFillReadyList(ReadyInsts);
    }
    if (Bundle) {
      LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle
                        << " in block " << BB->getName() << "\n");
      calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP);
    }

    // Now try to schedule the new bundle or (if no bundle) just calculate
    // dependencies. As soon as the bundle is "ready" it means that there are
    // no cyclic dependencies and we can schedule it. Note that it's important
    // that we don't "schedule" the bundle yet (see cancelScheduling).
    while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) &&
           !ReadyInsts.empty()) {
      ScheduleData *Picked = ReadyInsts.pop_back_val();
      if (Picked->isSchedulingEntity() && Picked->isReady())
        schedule(Picked, ReadyInsts);
    }
  };

  // Make sure that the scheduling region contains all
  // instructions of the bundle.
  for (Value *V : VL) {
    if (!extendSchedulingRegion(V, S)) {
      // If the scheduling region got new instructions at the lower end (or it
      // is a new region for the first bundle), it is necessary to recalculate
      // all dependencies. Otherwise the compiler may crash trying to
      // incorrectly calculate dependencies and emit instructions in the wrong
      // order at the actual scheduling.
      TryScheduleBundle(/*ReSchedule=*/false, nullptr);
      return None;
    }
  }

  for (Value *V : VL) {
    ScheduleData *BundleMember = getScheduleData(V);
    assert(BundleMember &&
           "no ScheduleData for bundle member (maybe not in same basic block)");
    if (BundleMember->IsScheduled) {
      // A bundle member was scheduled as a single instruction before and now
      // needs to be scheduled as part of the bundle. We just get rid of the
      // existing schedule.
      LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
                        << " was already scheduled\n");
      ReSchedule = true;
    }
    assert(BundleMember->isSchedulingEntity() &&
           "bundle member already part of other bundle");
    if (PrevInBundle) {
      PrevInBundle->NextInBundle = BundleMember;
    } else {
      Bundle = BundleMember;
    }
    BundleMember->UnscheduledDepsInBundle = 0;
    Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;

    // Group the instructions into a bundle.
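    // E.g. for a three-wide bundle {i0, i1, i2} this loop leaves
    // FirstInBundle(i0..i2) == i0's ScheduleData and NextInBundle chaining
    // i0 -> i1 -> i2, so i0 acts as the single scheduling entity.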
6822 BundleMember->FirstInBundle = Bundle; 6823 PrevInBundle = BundleMember; 6824 } 6825 assert(Bundle && "Failed to find schedule bundle"); 6826 TryScheduleBundle(ReSchedule, Bundle); 6827 if (!Bundle->isReady()) { 6828 cancelScheduling(VL, S.OpValue); 6829 return None; 6830 } 6831 return Bundle; 6832 } 6833 6834 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL, 6835 Value *OpValue) { 6836 if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue)) 6837 return; 6838 6839 ScheduleData *Bundle = getScheduleData(OpValue); 6840 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); 6841 assert(!Bundle->IsScheduled && 6842 "Can't cancel bundle which is already scheduled"); 6843 assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() && 6844 "tried to unbundle something which is not a bundle"); 6845 6846 // Un-bundle: make single instructions out of the bundle. 6847 ScheduleData *BundleMember = Bundle; 6848 while (BundleMember) { 6849 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); 6850 BundleMember->FirstInBundle = BundleMember; 6851 ScheduleData *Next = BundleMember->NextInBundle; 6852 BundleMember->NextInBundle = nullptr; 6853 BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps; 6854 if (BundleMember->UnscheduledDepsInBundle == 0) { 6855 ReadyInsts.insert(BundleMember); 6856 } 6857 BundleMember = Next; 6858 } 6859 } 6860 6861 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() { 6862 // Allocate a new ScheduleData for the instruction. 6863 if (ChunkPos >= ChunkSize) { 6864 ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize)); 6865 ChunkPos = 0; 6866 } 6867 return &(ScheduleDataChunks.back()[ChunkPos++]); 6868 } 6869 6870 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, 6871 const InstructionsState &S) { 6872 if (getScheduleData(V, isOneOf(S, V))) 6873 return true; 6874 Instruction *I = dyn_cast<Instruction>(V); 6875 assert(I && "bundle member must be an instruction"); 6876 assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) && 6877 "phi nodes/insertelements/extractelements/extractvalues don't need to " 6878 "be scheduled"); 6879 auto &&CheckSheduleForI = [this, &S](Instruction *I) -> bool { 6880 ScheduleData *ISD = getScheduleData(I); 6881 if (!ISD) 6882 return false; 6883 assert(isInSchedulingRegion(ISD) && 6884 "ScheduleData not in scheduling region"); 6885 ScheduleData *SD = allocateScheduleDataChunks(); 6886 SD->Inst = I; 6887 SD->init(SchedulingRegionID, S.OpValue); 6888 ExtraScheduleDataMap[I][S.OpValue] = SD; 6889 return true; 6890 }; 6891 if (CheckSheduleForI(I)) 6892 return true; 6893 if (!ScheduleStart) { 6894 // It's the first instruction in the new region. 6895 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 6896 ScheduleStart = I; 6897 ScheduleEnd = I->getNextNode(); 6898 if (isOneOf(S, I) != I) 6899 CheckSheduleForI(I); 6900 assert(ScheduleEnd && "tried to vectorize a terminator?"); 6901 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 6902 return true; 6903 } 6904 // Search up and down at the same time, because we don't know if the new 6905 // instruction is above or below the existing scheduling region. 
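  // For illustration (made-up positions):
  //
  //   i0, i1, [i2, i3, i4], i5, i6    <- current region is [i2, i4]
  //
  // For a new instruction i6 the downward iterator reaches it first and the
  // region grows to [i2, i6]; for i0 the upward iterator wins and the region
  // grows to [i0, i4].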
6906 BasicBlock::reverse_iterator UpIter = 6907 ++ScheduleStart->getIterator().getReverse(); 6908 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 6909 BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); 6910 BasicBlock::iterator LowerEnd = BB->end(); 6911 while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I && 6912 &*DownIter != I) { 6913 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 6914 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 6915 return false; 6916 } 6917 6918 ++UpIter; 6919 ++DownIter; 6920 } 6921 if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) { 6922 assert(I->getParent() == ScheduleStart->getParent() && 6923 "Instruction is in wrong basic block."); 6924 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 6925 ScheduleStart = I; 6926 if (isOneOf(S, I) != I) 6927 CheckSheduleForI(I); 6928 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I 6929 << "\n"); 6930 return true; 6931 } 6932 assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) && 6933 "Expected to reach top of the basic block or instruction down the " 6934 "lower end."); 6935 assert(I->getParent() == ScheduleEnd->getParent() && 6936 "Instruction is in wrong basic block."); 6937 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 6938 nullptr); 6939 ScheduleEnd = I->getNextNode(); 6940 if (isOneOf(S, I) != I) 6941 CheckSheduleForI(I); 6942 assert(ScheduleEnd && "tried to vectorize a terminator?"); 6943 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); 6944 return true; 6945 } 6946 6947 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 6948 Instruction *ToI, 6949 ScheduleData *PrevLoadStore, 6950 ScheduleData *NextLoadStore) { 6951 ScheduleData *CurrentLoadStore = PrevLoadStore; 6952 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 6953 ScheduleData *SD = ScheduleDataMap[I]; 6954 if (!SD) { 6955 SD = allocateScheduleDataChunks(); 6956 ScheduleDataMap[I] = SD; 6957 SD->Inst = I; 6958 } 6959 assert(!isInSchedulingRegion(SD) && 6960 "new ScheduleData already in scheduling region"); 6961 SD->init(SchedulingRegionID, I); 6962 6963 if (I->mayReadOrWriteMemory() && 6964 (!isa<IntrinsicInst>(I) || 6965 (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect && 6966 cast<IntrinsicInst>(I)->getIntrinsicID() != 6967 Intrinsic::pseudoprobe))) { 6968 // Update the linked list of memory accessing instructions. 
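      // E.g. for the straight-line sequence "store; add; load" only the
      // store and the load are linked (store->NextLoadStore == load), so the
      // memory dependency scan in calculateDependencies can step over the
      // arithmetic in between.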
6969 if (CurrentLoadStore) { 6970 CurrentLoadStore->NextLoadStore = SD; 6971 } else { 6972 FirstLoadStoreInRegion = SD; 6973 } 6974 CurrentLoadStore = SD; 6975 } 6976 } 6977 if (NextLoadStore) { 6978 if (CurrentLoadStore) 6979 CurrentLoadStore->NextLoadStore = NextLoadStore; 6980 } else { 6981 LastLoadStoreInRegion = CurrentLoadStore; 6982 } 6983 } 6984 6985 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, 6986 bool InsertInReadyList, 6987 BoUpSLP *SLP) { 6988 assert(SD->isSchedulingEntity()); 6989 6990 SmallVector<ScheduleData *, 10> WorkList; 6991 WorkList.push_back(SD); 6992 6993 while (!WorkList.empty()) { 6994 ScheduleData *SD = WorkList.pop_back_val(); 6995 6996 ScheduleData *BundleMember = SD; 6997 while (BundleMember) { 6998 assert(isInSchedulingRegion(BundleMember)); 6999 if (!BundleMember->hasValidDependencies()) { 7000 7001 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember 7002 << "\n"); 7003 BundleMember->Dependencies = 0; 7004 BundleMember->resetUnscheduledDeps(); 7005 7006 // Handle def-use chain dependencies. 7007 if (BundleMember->OpValue != BundleMember->Inst) { 7008 ScheduleData *UseSD = getScheduleData(BundleMember->Inst); 7009 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 7010 BundleMember->Dependencies++; 7011 ScheduleData *DestBundle = UseSD->FirstInBundle; 7012 if (!DestBundle->IsScheduled) 7013 BundleMember->incrementUnscheduledDeps(1); 7014 if (!DestBundle->hasValidDependencies()) 7015 WorkList.push_back(DestBundle); 7016 } 7017 } else { 7018 for (User *U : BundleMember->Inst->users()) { 7019 if (isa<Instruction>(U)) { 7020 ScheduleData *UseSD = getScheduleData(U); 7021 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 7022 BundleMember->Dependencies++; 7023 ScheduleData *DestBundle = UseSD->FirstInBundle; 7024 if (!DestBundle->IsScheduled) 7025 BundleMember->incrementUnscheduledDeps(1); 7026 if (!DestBundle->hasValidDependencies()) 7027 WorkList.push_back(DestBundle); 7028 } 7029 } else { 7030 // I'm not sure if this can ever happen. But we need to be safe. 7031 // This lets the instruction/bundle never be scheduled and 7032 // eventually disable vectorization. 7033 BundleMember->Dependencies++; 7034 BundleMember->incrementUnscheduledDeps(1); 7035 } 7036 } 7037 } 7038 7039 // Handle the memory dependencies. 7040 ScheduleData *DepDest = BundleMember->NextLoadStore; 7041 if (DepDest) { 7042 Instruction *SrcInst = BundleMember->Inst; 7043 MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA); 7044 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory(); 7045 unsigned numAliased = 0; 7046 unsigned DistToSrc = 1; 7047 7048 while (DepDest) { 7049 assert(isInSchedulingRegion(DepDest)); 7050 7051 // We have two limits to reduce the complexity: 7052 // 1) AliasedCheckLimit: It's a small limit to reduce calls to 7053 // SLP->isAliased (which is the expensive part in this loop). 7054 // 2) MaxMemDepDistance: It's for very large blocks and it aborts 7055 // the whole loop (even if the loop is fast, it's quadratic). 7056 // It's important for the loop break condition (see below) to 7057 // check this limit even between two read-only instructions. 7058 if (DistToSrc >= MaxMemDepDistance || 7059 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) && 7060 (numAliased >= AliasedCheckLimit || 7061 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) { 7062 7063 // We increment the counter only if the locations are aliased 7064 // (instead of counting all alias checks). 
This gives a better 7065 // balance between reduced runtime and accurate dependencies. 7066 numAliased++; 7067 7068 DepDest->MemoryDependencies.push_back(BundleMember); 7069 BundleMember->Dependencies++; 7070 ScheduleData *DestBundle = DepDest->FirstInBundle; 7071 if (!DestBundle->IsScheduled) { 7072 BundleMember->incrementUnscheduledDeps(1); 7073 } 7074 if (!DestBundle->hasValidDependencies()) { 7075 WorkList.push_back(DestBundle); 7076 } 7077 } 7078 DepDest = DepDest->NextLoadStore; 7079 7080 // Example, explaining the loop break condition: Let's assume our 7081 // starting instruction is i0 and MaxMemDepDistance = 3. 7082 // 7083 // +--------v--v--v 7084 // i0,i1,i2,i3,i4,i5,i6,i7,i8 7085 // +--------^--^--^ 7086 // 7087 // MaxMemDepDistance let us stop alias-checking at i3 and we add 7088 // dependencies from i0 to i3,i4,.. (even if they are not aliased). 7089 // Previously we already added dependencies from i3 to i6,i7,i8 7090 // (because of MaxMemDepDistance). As we added a dependency from 7091 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 7092 // and we can abort this loop at i6. 7093 if (DistToSrc >= 2 * MaxMemDepDistance) 7094 break; 7095 DistToSrc++; 7096 } 7097 } 7098 } 7099 BundleMember = BundleMember->NextInBundle; 7100 } 7101 if (InsertInReadyList && SD->isReady()) { 7102 ReadyInsts.push_back(SD); 7103 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst 7104 << "\n"); 7105 } 7106 } 7107 } 7108 7109 void BoUpSLP::BlockScheduling::resetSchedule() { 7110 assert(ScheduleStart && 7111 "tried to reset schedule on block which has not been scheduled"); 7112 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 7113 doForAllOpcodes(I, [&](ScheduleData *SD) { 7114 assert(isInSchedulingRegion(SD) && 7115 "ScheduleData not in scheduling region"); 7116 SD->IsScheduled = false; 7117 SD->resetUnscheduledDeps(); 7118 }); 7119 } 7120 ReadyInsts.clear(); 7121 } 7122 7123 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 7124 if (!BS->ScheduleStart) 7125 return; 7126 7127 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 7128 7129 BS->resetSchedule(); 7130 7131 // For the real scheduling we use a more sophisticated ready-list: it is 7132 // sorted by the original instruction location. This lets the final schedule 7133 // be as close as possible to the original instruction order. 7134 struct ScheduleDataCompare { 7135 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const { 7136 return SD2->SchedulingPriority < SD1->SchedulingPriority; 7137 } 7138 }; 7139 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 7140 7141 // Ensure that all dependency data is updated and fill the ready-list with 7142 // initial instructions. 7143 int Idx = 0; 7144 int NumToSchedule = 0; 7145 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 7146 I = I->getNextNode()) { 7147 BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) { 7148 assert((isVectorLikeInstWithConstOps(SD->Inst) || 7149 SD->isPartOfBundle() == (getTreeEntry(SD->Inst) != nullptr)) && 7150 "scheduler and vectorizer bundle mismatch"); 7151 SD->FirstInBundle->SchedulingPriority = Idx++; 7152 if (SD->isSchedulingEntity()) { 7153 BS->calculateDependencies(SD, false, this); 7154 NumToSchedule++; 7155 } 7156 }); 7157 } 7158 BS->initialFillReadyList(ReadyInsts); 7159 7160 Instruction *LastScheduledInst = BS->ScheduleEnd; 7161 7162 // Do the "real" scheduling. 
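  // E.g. (hypothetical priorities): for ready entities with
  // SchedulingPriority 5 and 9, the comparator above yields 9 first; since
  // each picked instruction is re-inserted just above the previously placed
  // one, the block is rebuilt bottom-up in near-original order.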
7163 while (!ReadyInsts.empty()) { 7164 ScheduleData *picked = *ReadyInsts.begin(); 7165 ReadyInsts.erase(ReadyInsts.begin()); 7166 7167 // Move the scheduled instruction(s) to their dedicated places, if not 7168 // there yet. 7169 ScheduleData *BundleMember = picked; 7170 while (BundleMember) { 7171 Instruction *pickedInst = BundleMember->Inst; 7172 if (pickedInst->getNextNode() != LastScheduledInst) { 7173 BS->BB->getInstList().remove(pickedInst); 7174 BS->BB->getInstList().insert(LastScheduledInst->getIterator(), 7175 pickedInst); 7176 } 7177 LastScheduledInst = pickedInst; 7178 BundleMember = BundleMember->NextInBundle; 7179 } 7180 7181 BS->schedule(picked, ReadyInsts); 7182 NumToSchedule--; 7183 } 7184 assert(NumToSchedule == 0 && "could not schedule all instructions"); 7185 7186 // Avoid duplicate scheduling of the block. 7187 BS->ScheduleStart = nullptr; 7188 } 7189 7190 unsigned BoUpSLP::getVectorElementSize(Value *V) { 7191 // If V is a store, just return the width of the stored value (or value 7192 // truncated just before storing) without traversing the expression tree. 7193 // This is the common case. 7194 if (auto *Store = dyn_cast<StoreInst>(V)) { 7195 if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand())) 7196 return DL->getTypeSizeInBits(Trunc->getSrcTy()); 7197 return DL->getTypeSizeInBits(Store->getValueOperand()->getType()); 7198 } 7199 7200 if (auto *IEI = dyn_cast<InsertElementInst>(V)) 7201 return getVectorElementSize(IEI->getOperand(1)); 7202 7203 auto E = InstrElementSize.find(V); 7204 if (E != InstrElementSize.end()) 7205 return E->second; 7206 7207 // If V is not a store, we can traverse the expression tree to find loads 7208 // that feed it. The type of the loaded value may indicate a more suitable 7209 // width than V's type. We want to base the vector element size on the width 7210 // of memory operations where possible. 7211 SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist; 7212 SmallPtrSet<Instruction *, 16> Visited; 7213 if (auto *I = dyn_cast<Instruction>(V)) { 7214 Worklist.emplace_back(I, I->getParent()); 7215 Visited.insert(I); 7216 } 7217 7218 // Traverse the expression tree in bottom-up order looking for loads. If we 7219 // encounter an instruction we don't yet handle, we give up. 7220 auto Width = 0u; 7221 while (!Worklist.empty()) { 7222 Instruction *I; 7223 BasicBlock *Parent; 7224 std::tie(I, Parent) = Worklist.pop_back_val(); 7225 7226 // We should only be looking at scalar instructions here. If the current 7227 // instruction has a vector type, skip. 7228 auto *Ty = I->getType(); 7229 if (isa<VectorType>(Ty)) 7230 continue; 7231 7232 // If the current instruction is a load, update MaxWidth to reflect the 7233 // width of the loaded value. 7234 if (isa<LoadInst>(I) || isa<ExtractElementInst>(I) || 7235 isa<ExtractValueInst>(I)) 7236 Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty)); 7237 7238 // Otherwise, we need to visit the operands of the instruction. We only 7239 // handle the interesting cases from buildTree here. If an operand is an 7240 // instruction we haven't yet visited and from the same basic block as the 7241 // user or the use is a PHI node, we add it to the worklist. 
7242 else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 7243 isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I) || 7244 isa<UnaryOperator>(I)) { 7245 for (Use &U : I->operands()) 7246 if (auto *J = dyn_cast<Instruction>(U.get())) 7247 if (Visited.insert(J).second && 7248 (isa<PHINode>(I) || J->getParent() == Parent)) 7249 Worklist.emplace_back(J, J->getParent()); 7250 } else { 7251 break; 7252 } 7253 } 7254 7255 // If we didn't encounter a memory access in the expression tree, or if we 7256 // gave up for some reason, just return the width of V. Otherwise, return the 7257 // maximum width we found. 7258 if (!Width) { 7259 if (auto *CI = dyn_cast<CmpInst>(V)) 7260 V = CI->getOperand(0); 7261 Width = DL->getTypeSizeInBits(V->getType()); 7262 } 7263 7264 for (Instruction *I : Visited) 7265 InstrElementSize[I] = Width; 7266 7267 return Width; 7268 } 7269 7270 // Determine if a value V in a vectorizable expression Expr can be demoted to a 7271 // smaller type with a truncation. We collect the values that will be demoted 7272 // in ToDemote and additional roots that require investigating in Roots. 7273 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr, 7274 SmallVectorImpl<Value *> &ToDemote, 7275 SmallVectorImpl<Value *> &Roots) { 7276 // We can always demote constants. 7277 if (isa<Constant>(V)) { 7278 ToDemote.push_back(V); 7279 return true; 7280 } 7281 7282 // If the value is not an instruction in the expression with only one use, it 7283 // cannot be demoted. 7284 auto *I = dyn_cast<Instruction>(V); 7285 if (!I || !I->hasOneUse() || !Expr.count(I)) 7286 return false; 7287 7288 switch (I->getOpcode()) { 7289 7290 // We can always demote truncations and extensions. Since truncations can 7291 // seed additional demotion, we save the truncated value. 7292 case Instruction::Trunc: 7293 Roots.push_back(I->getOperand(0)); 7294 break; 7295 case Instruction::ZExt: 7296 case Instruction::SExt: 7297 if (isa<ExtractElementInst>(I->getOperand(0)) || 7298 isa<InsertElementInst>(I->getOperand(0))) 7299 return false; 7300 break; 7301 7302 // We can demote certain binary operations if we can demote both of their 7303 // operands. 7304 case Instruction::Add: 7305 case Instruction::Sub: 7306 case Instruction::Mul: 7307 case Instruction::And: 7308 case Instruction::Or: 7309 case Instruction::Xor: 7310 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) || 7311 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots)) 7312 return false; 7313 break; 7314 7315 // We can demote selects if we can demote their true and false values. 7316 case Instruction::Select: { 7317 SelectInst *SI = cast<SelectInst>(I); 7318 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) || 7319 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots)) 7320 return false; 7321 break; 7322 } 7323 7324 // We can demote phis if we can demote all their incoming operands. Note that 7325 // we don't need to worry about cycles since we ensure single use above. 7326 case Instruction::PHI: { 7327 PHINode *PN = cast<PHINode>(I); 7328 for (Value *IncValue : PN->incoming_values()) 7329 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots)) 7330 return false; 7331 break; 7332 } 7333 7334 // Otherwise, conservatively give up. 7335 default: 7336 return false; 7337 } 7338 7339 // Record the value that we can demote. 
7340 ToDemote.push_back(V); 7341 return true; 7342 } 7343 7344 void BoUpSLP::computeMinimumValueSizes() { 7345 // If there are no external uses, the expression tree must be rooted by a 7346 // store. We can't demote in-memory values, so there is nothing to do here. 7347 if (ExternalUses.empty()) 7348 return; 7349 7350 // We only attempt to truncate integer expressions. 7351 auto &TreeRoot = VectorizableTree[0]->Scalars; 7352 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 7353 if (!TreeRootIT) 7354 return; 7355 7356 // If the expression is not rooted by a store, these roots should have 7357 // external uses. We will rely on InstCombine to rewrite the expression in 7358 // the narrower type. However, InstCombine only rewrites single-use values. 7359 // This means that if a tree entry other than a root is used externally, it 7360 // must have multiple uses and InstCombine will not rewrite it. The code 7361 // below ensures that only the roots are used externally. 7362 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end()); 7363 for (auto &EU : ExternalUses) 7364 if (!Expr.erase(EU.Scalar)) 7365 return; 7366 if (!Expr.empty()) 7367 return; 7368 7369 // Collect the scalar values of the vectorizable expression. We will use this 7370 // context to determine which values can be demoted. If we see a truncation, 7371 // we mark it as seeding another demotion. 7372 for (auto &EntryPtr : VectorizableTree) 7373 Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end()); 7374 7375 // Ensure the roots of the vectorizable tree don't form a cycle. They must 7376 // have a single external user that is not in the vectorizable tree. 7377 for (auto *Root : TreeRoot) 7378 if (!Root->hasOneUse() || Expr.count(*Root->user_begin())) 7379 return; 7380 7381 // Conservatively determine if we can actually truncate the roots of the 7382 // expression. Collect the values that can be demoted in ToDemote and 7383 // additional roots that require investigating in Roots. 7384 SmallVector<Value *, 32> ToDemote; 7385 SmallVector<Value *, 4> Roots; 7386 for (auto *Root : TreeRoot) 7387 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots)) 7388 return; 7389 7390 // The maximum bit width required to represent all the values that can be 7391 // demoted without loss of precision. It would be safe to truncate the roots 7392 // of the expression to this width. 7393 auto MaxBitWidth = 8u; 7394 7395 // We first check if all the bits of the roots are demanded. If they're not, 7396 // we can truncate the roots to this narrower type. 7397 for (auto *Root : TreeRoot) { 7398 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 7399 MaxBitWidth = std::max<unsigned>( 7400 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth); 7401 } 7402 7403 // True if the roots can be zero-extended back to their original type, rather 7404 // than sign-extended. We know that if the leading bits are not demanded, we 7405 // can safely zero-extend. So we initialize IsKnownPositive to True. 7406 bool IsKnownPositive = true; 7407 7408 // If all the bits of the roots are demanded, we can try a little harder to 7409 // compute a narrower type. This can happen, for example, if the roots are 7410 // getelementptr indices. InstCombine promotes these indices to the pointer 7411 // width. Thus, all their bits are technically demanded even though the 7412 // address computation might be vectorized in a smaller type. 7413 // 7414 // We start by looking at each entry that can be demoted. 
We compute the
  // maximum bit width required to store the scalar by using ValueTracking to
  // compute the number of high-order bits we can truncate.
  if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
      llvm::all_of(TreeRoot, [](Value *R) {
        assert(R->hasOneUse() && "Root should have only one use!");
        return isa<GetElementPtrInst>(R->user_back());
      })) {
    MaxBitWidth = 8u;

    // Determine if the sign bit of all the roots is known to be zero. If not,
    // IsKnownPositive is set to False.
    IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
      KnownBits Known = computeKnownBits(R, *DL);
      return Known.isNonNegative();
    });

    // Determine the maximum number of bits required to store the scalar
    // values.
    for (auto *Scalar : ToDemote) {
      auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
      auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
      MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
    }

    // If we can't prove that the sign bit is zero, we must add one to the
    // maximum bit width to account for the unknown sign bit. This preserves
    // the existing sign bit so we can safely sign-extend the root back to the
    // original type. Otherwise, if we know the sign bit is zero, we will
    // zero-extend the root instead.
    //
    // FIXME: This is somewhat suboptimal, as there will be cases where adding
    //        one to the maximum bit width will yield a larger-than-necessary
    //        type. In general, we need to add an extra bit only if we can't
    //        prove that the upper bit of the original type is equal to the
    //        upper bit of the proposed smaller type. If these two bits are
    //        the same (either zero or one) we know that sign-extending from
    //        the smaller type will result in the same value. Here, since we
    //        can't yet prove this, we are just making the proposed smaller
    //        type larger to ensure correctness.
    if (!IsKnownPositive)
      ++MaxBitWidth;
  }

  // Round MaxBitWidth up to the next power-of-two.
  if (!isPowerOf2_64(MaxBitWidth))
    MaxBitWidth = NextPowerOf2(MaxBitWidth);

  // If the maximum bit width we compute is less than the width of the roots'
  // type, we can proceed with the narrowing. Otherwise, do nothing.
  if (MaxBitWidth >= TreeRootIT->getBitWidth())
    return;

  // If we can truncate the root, we must collect additional values that might
  // be demoted as a result. That is, those seeded by truncations we will
  // modify.
  while (!Roots.empty())
    collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);

  // Finally, map the values we can demote to the maximum bit width we
  // computed.
  for (auto *Scalar : ToDemote)
    MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
}

namespace {

/// The SLPVectorizer Pass.
7481 struct SLPVectorizer : public FunctionPass { 7482 SLPVectorizerPass Impl; 7483 7484 /// Pass identification, replacement for typeid 7485 static char ID; 7486 7487 explicit SLPVectorizer() : FunctionPass(ID) { 7488 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry()); 7489 } 7490 7491 bool doInitialization(Module &M) override { return false; } 7492 7493 bool runOnFunction(Function &F) override { 7494 if (skipFunction(F)) 7495 return false; 7496 7497 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 7498 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 7499 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 7500 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 7501 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 7502 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 7503 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 7504 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 7505 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 7506 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 7507 7508 return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 7509 } 7510 7511 void getAnalysisUsage(AnalysisUsage &AU) const override { 7512 FunctionPass::getAnalysisUsage(AU); 7513 AU.addRequired<AssumptionCacheTracker>(); 7514 AU.addRequired<ScalarEvolutionWrapperPass>(); 7515 AU.addRequired<AAResultsWrapperPass>(); 7516 AU.addRequired<TargetTransformInfoWrapperPass>(); 7517 AU.addRequired<LoopInfoWrapperPass>(); 7518 AU.addRequired<DominatorTreeWrapperPass>(); 7519 AU.addRequired<DemandedBitsWrapperPass>(); 7520 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 7521 AU.addRequired<InjectTLIMappingsLegacy>(); 7522 AU.addPreserved<LoopInfoWrapperPass>(); 7523 AU.addPreserved<DominatorTreeWrapperPass>(); 7524 AU.addPreserved<AAResultsWrapperPass>(); 7525 AU.addPreserved<GlobalsAAWrapperPass>(); 7526 AU.setPreservesCFG(); 7527 } 7528 }; 7529 7530 } // end anonymous namespace 7531 7532 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) { 7533 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); 7534 auto *TTI = &AM.getResult<TargetIRAnalysis>(F); 7535 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); 7536 auto *AA = &AM.getResult<AAManager>(F); 7537 auto *LI = &AM.getResult<LoopAnalysis>(F); 7538 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 7539 auto *AC = &AM.getResult<AssumptionAnalysis>(F); 7540 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F); 7541 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 7542 7543 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 7544 if (!Changed) 7545 return PreservedAnalyses::all(); 7546 7547 PreservedAnalyses PA; 7548 PA.preserveSet<CFGAnalyses>(); 7549 return PA; 7550 } 7551 7552 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, 7553 TargetTransformInfo *TTI_, 7554 TargetLibraryInfo *TLI_, AAResults *AA_, 7555 LoopInfo *LI_, DominatorTree *DT_, 7556 AssumptionCache *AC_, DemandedBits *DB_, 7557 OptimizationRemarkEmitter *ORE_) { 7558 if (!RunSLPVectorization) 7559 return false; 7560 SE = SE_; 7561 TTI = TTI_; 7562 TLI = TLI_; 7563 AA = AA_; 7564 LI = LI_; 7565 DT = DT_; 7566 AC = AC_; 7567 DB = DB_; 7568 DL = &F.getParent()->getDataLayout(); 7569 7570 Stores.clear(); 7571 GEPs.clear(); 7572 bool Changed = false; 7573 7574 // If the target claims to have no vector registers, don't attempt 7575 //
vectorization. 7576 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) 7577 return false; 7578 7579 // Don't vectorize when the attribute NoImplicitFloat is used. 7580 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 7581 return false; 7582 7583 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 7584 7585 // Use the bottom-up SLP vectorizer to construct chains that start with 7586 // store instructions. 7587 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_); 7588 7589 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 7590 // delete instructions. 7591 7592 // Update DFS numbers now so that we can use them for ordering. 7593 DT->updateDFSNumbers(); 7594 7595 // Scan the blocks in the function in post order. 7596 for (auto BB : post_order(&F.getEntryBlock())) { 7597 collectSeedInstructions(BB); 7598 7599 // Vectorize trees that end at stores. 7600 if (!Stores.empty()) { 7601 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() 7602 << " underlying objects.\n"); 7603 Changed |= vectorizeStoreChains(R); 7604 } 7605 7606 // Vectorize trees that end at reductions. 7607 Changed |= vectorizeChainsInBlock(BB, R); 7608 7609 // Vectorize the index computations of getelementptr instructions. This 7610 // is primarily intended to catch gather-like idioms ending at 7611 // non-consecutive loads. 7612 if (!GEPs.empty()) { 7613 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() 7614 << " underlying objects.\n"); 7615 Changed |= vectorizeGEPIndices(BB, R); 7616 } 7617 } 7618 7619 if (Changed) { 7620 R.optimizeGatherSequence(); 7621 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 7622 } 7623 return Changed; 7624 } 7625 7626 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, 7627 unsigned Idx) { 7628 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size() 7629 << "\n"); 7630 const unsigned Sz = R.getVectorElementSize(Chain[0]); 7631 const unsigned MinVF = R.getMinVecRegSize() / Sz; 7632 unsigned VF = Chain.size(); 7633 7634 if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF) 7635 return false; 7636 7637 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx 7638 << "\n"); 7639 7640 R.buildTree(Chain); 7641 if (R.isTreeTinyAndNotFullyVectorizable()) 7642 return false; 7643 if (R.isLoadCombineCandidate()) 7644 return false; 7645 R.reorderTopToBottom(); 7646 R.reorderBottomToTop(); 7647 R.buildExternalUses(); 7648 7649 R.computeMinimumValueSizes(); 7650 7651 InstructionCost Cost = R.getTreeCost(); 7652 7653 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF = " << VF << "\n"); 7654 if (Cost < -SLPCostThreshold) { 7655 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n"); 7656 7657 using namespace ore; 7658 7659 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized", 7660 cast<StoreInst>(Chain[0])) 7661 << "Stores SLP vectorized with cost " << NV("Cost", Cost) 7662 << " and with tree size " 7663 << NV("TreeSize", R.getTreeSize())); 7664 7665 R.vectorizeTree(); 7666 return true; 7667 } 7668 7669 return false; 7670 } 7671 7672 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores, 7673 BoUpSLP &R) { 7674 // We may run into multiple chains that merge into a single chain. We mark the 7675 // stores that we vectorized so that we don't visit the same store twice.
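  // As a small illustrative example (indices made up), stores to
  //   p[0], p[2], p[1], p[3]
  // may reach this point in that order; the pointer-difference search below
  // still links them into the single consecutive chain
  // p[0] -> p[1] -> p[2] -> p[3], which vectorizeStoreChain can then emit as
  // one wide store.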
7676 BoUpSLP::ValueSet VectorizedStores; 7677 bool Changed = false; 7678 7679 int E = Stores.size(); 7680 SmallBitVector Tails(E, false); 7681 int MaxIter = MaxStoreLookup.getValue(); 7682 SmallVector<std::pair<int, int>, 16> ConsecutiveChain( 7683 E, std::make_pair(E, INT_MAX)); 7684 SmallVector<SmallBitVector, 4> CheckedPairs(E, SmallBitVector(E, false)); 7685 int IterCnt; 7686 auto &&FindConsecutiveAccess = [this, &Stores, &Tails, &IterCnt, MaxIter, 7687 &CheckedPairs, 7688 &ConsecutiveChain](int K, int Idx) { 7689 if (IterCnt >= MaxIter) 7690 return true; 7691 if (CheckedPairs[Idx].test(K)) 7692 return ConsecutiveChain[K].second == 1 && 7693 ConsecutiveChain[K].first == Idx; 7694 ++IterCnt; 7695 CheckedPairs[Idx].set(K); 7696 CheckedPairs[K].set(Idx); 7697 Optional<int> Diff = getPointersDiff( 7698 Stores[K]->getValueOperand()->getType(), Stores[K]->getPointerOperand(), 7699 Stores[Idx]->getValueOperand()->getType(), 7700 Stores[Idx]->getPointerOperand(), *DL, *SE, /*StrictCheck=*/true); 7701 if (!Diff || *Diff == 0) 7702 return false; 7703 int Val = *Diff; 7704 if (Val < 0) { 7705 if (ConsecutiveChain[Idx].second > -Val) { 7706 Tails.set(K); 7707 ConsecutiveChain[Idx] = std::make_pair(K, -Val); 7708 } 7709 return false; 7710 } 7711 if (ConsecutiveChain[K].second <= Val) 7712 return false; 7713 7714 Tails.set(Idx); 7715 ConsecutiveChain[K] = std::make_pair(Idx, Val); 7716 return Val == 1; 7717 }; 7718 // Do a quadratic search on all of the given stores in reverse order and find 7719 // all of the pairs of stores that follow each other. 7720 for (int Idx = E - 1; Idx >= 0; --Idx) { 7721 // If a store has multiple consecutive store candidates, search according 7722 // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ... 7723 // This is because pairing with the immediately succeeding or preceding 7724 // candidate usually creates the best chance to find an SLP vectorization opportunity. 7725 const int MaxLookDepth = std::max(E - Idx, Idx + 1); 7726 IterCnt = 0; 7727 for (int Offset = 1, F = MaxLookDepth; Offset < F; ++Offset) 7728 if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) || 7729 (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx))) 7730 break; 7731 } 7732 7733 // Tracks if we tried to vectorize stores starting from the given tail 7734 // already. 7735 SmallBitVector TriedTails(E, false); 7736 // For stores that start but don't end a link in the chain: 7737 for (int Cnt = E; Cnt > 0; --Cnt) { 7738 int I = Cnt - 1; 7739 if (ConsecutiveChain[I].first == E || Tails.test(I)) 7740 continue; 7741 // We found a store instr that starts a chain. Now follow the chain and try 7742 // to vectorize it. 7743 BoUpSLP::ValueList Operands; 7744 // Collect the chain into a list. 7745 while (I != E && !VectorizedStores.count(Stores[I])) { 7746 Operands.push_back(Stores[I]); 7747 Tails.set(I); 7748 if (ConsecutiveChain[I].second != 1) { 7749 // Mark the new end in the chain and go back, if required. It might be 7750 // required if the original stores come in reversed order, for example. 7751 if (ConsecutiveChain[I].first != E && 7752 Tails.test(ConsecutiveChain[I].first) && !TriedTails.test(I) && 7753 !VectorizedStores.count(Stores[ConsecutiveChain[I].first])) { 7754 TriedTails.set(I); 7755 Tails.reset(ConsecutiveChain[I].first); 7756 if (Cnt < ConsecutiveChain[I].first + 2) 7757 Cnt = ConsecutiveChain[I].first + 2; 7758 } 7759 break; 7760 } 7761 // Move to the next value in the chain.
7762 I = ConsecutiveChain[I].first; 7763 } 7764 assert(!Operands.empty() && "Expected non-empty list of stores."); 7765 7766 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 7767 unsigned EltSize = R.getVectorElementSize(Operands[0]); 7768 unsigned MaxElts = llvm::PowerOf2Floor(MaxVecRegSize / EltSize); 7769 7770 unsigned MinVF = R.getMinVF(EltSize); 7771 unsigned MaxVF = std::min(R.getMaximumVF(EltSize, Instruction::Store), 7772 MaxElts); 7773 7774 // FIXME: Is division-by-2 the correct step? Should we assert that the 7775 // register size is a power-of-2? 7776 unsigned StartIdx = 0; 7777 for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) { 7778 for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) { 7779 ArrayRef<Value *> Slice = makeArrayRef(Operands).slice(Cnt, Size); 7780 if (!VectorizedStores.count(Slice.front()) && 7781 !VectorizedStores.count(Slice.back()) && 7782 vectorizeStoreChain(Slice, R, Cnt)) { 7783 // Mark the vectorized stores so that we don't vectorize them again. 7784 VectorizedStores.insert(Slice.begin(), Slice.end()); 7785 Changed = true; 7786 // If we vectorized the initial block, no need to try to vectorize it 7787 // again. 7788 if (Cnt == StartIdx) 7789 StartIdx += Size; 7790 Cnt += Size; 7791 continue; 7792 } 7793 ++Cnt; 7794 } 7795 // Check if the whole array was vectorized already - exit. 7796 if (StartIdx >= Operands.size()) 7797 break; 7798 } 7799 } 7800 7801 return Changed; 7802 } 7803 7804 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) { 7805 // Initialize the collections. We will make a single pass over the block. 7806 Stores.clear(); 7807 GEPs.clear(); 7808 7809 // Visit the store and getelementptr instructions in BB and organize them in 7810 // Stores and GEPs according to the underlying objects of their pointer 7811 // operands. 7812 for (Instruction &I : *BB) { 7813 // Ignore store instructions that are volatile or have a pointer operand 7814 // that doesn't point to a scalar type. 7815 if (auto *SI = dyn_cast<StoreInst>(&I)) { 7816 if (!SI->isSimple()) 7817 continue; 7818 if (!isValidElementType(SI->getValueOperand()->getType())) 7819 continue; 7820 Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI); 7821 } 7822 7823 // Ignore getelementptr instructions that have more than one index, a 7824 // constant index, or a pointer operand that doesn't point to a scalar 7825 // type. 7826 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 7827 auto Idx = GEP->idx_begin()->get(); 7828 if (GEP->getNumIndices() > 1 || isa<Constant>(Idx)) 7829 continue; 7830 if (!isValidElementType(Idx->getType())) 7831 continue; 7832 if (GEP->getType()->isVectorTy()) 7833 continue; 7834 GEPs[GEP->getPointerOperand()].push_back(GEP); 7835 } 7836 } 7837 } 7838 7839 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) { 7840 if (!A || !B) 7841 return false; 7842 Value *VL[] = {A, B}; 7843 return tryToVectorizeList(VL, R); 7844 } 7845 7846 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 7847 bool LimitForRegisterSize) { 7848 if (VL.size() < 2) 7849 return false; 7850 7851 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " 7852 << VL.size() << ".\n"); 7853 7854 // Check that all of the parts are instructions of the same type; 7855 // we permit an alternate opcode via InstructionsState.
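  // For example (a sketch), the pair
  //   %x = add i32 %a0, %b0
  //   %y = sub i32 %a1, %b1
  // can still form one bundle: getSameOpcode records add as the main and sub
  // as the alternate opcode, and codegen later blends the two vectorized
  // results with a shufflevector.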
7856 InstructionsState S = getSameOpcode(VL); 7857 if (!S.getOpcode()) 7858 return false; 7859 7860 Instruction *I0 = cast<Instruction>(S.OpValue); 7861 // Make sure invalid types (including vector type) are rejected before 7862 // determining vectorization factor for scalar instructions. 7863 for (Value *V : VL) { 7864 Type *Ty = V->getType(); 7865 if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) { 7866 // NOTE: the following will give the user an internal LLVM type name, 7867 // which may not be useful. 7868 R.getORE()->emit([&]() { 7869 std::string type_str; 7870 llvm::raw_string_ostream rso(type_str); 7871 Ty->print(rso); 7872 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0) 7873 << "Cannot SLP vectorize list: type " 7874 << rso.str() + " is unsupported by vectorizer"; 7875 }); 7876 return false; 7877 } 7878 } 7879 7880 unsigned Sz = R.getVectorElementSize(I0); 7881 unsigned MinVF = R.getMinVF(Sz); 7882 unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF); 7883 MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF); 7884 if (MaxVF < 2) { 7885 R.getORE()->emit([&]() { 7886 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0) 7887 << "Cannot SLP vectorize list: vectorization factor " 7888 << "less than 2 is not supported"; 7889 }); 7890 return false; 7891 } 7892 7893 bool Changed = false; 7894 bool CandidateFound = false; 7895 InstructionCost MinCost = SLPCostThreshold.getValue(); 7896 Type *ScalarTy = VL[0]->getType(); 7897 if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 7898 ScalarTy = IE->getOperand(1)->getType(); 7899 7900 unsigned NextInst = 0, MaxInst = VL.size(); 7901 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) { 7902 // No actual vectorization should happen if the number of parts is the same 7903 // as the provided vectorization factor (i.e. the scalar type is used for 7904 // vector code during codegen). 7905 auto *VecTy = FixedVectorType::get(ScalarTy, VF); 7906 if (TTI->getNumberOfParts(VecTy) == VF) 7907 continue; 7908 for (unsigned I = NextInst; I < MaxInst; ++I) { 7909 unsigned OpsWidth = 0; 7910 7911 if (I + VF > MaxInst) 7912 OpsWidth = MaxInst - I; 7913 else 7914 OpsWidth = VF; 7915 7916 if (!isPowerOf2_32(OpsWidth)) 7917 continue; 7918 7919 if ((LimitForRegisterSize && OpsWidth < MaxVF) || 7920 (VF > MinVF && OpsWidth <= VF / 2) || (VF == MinVF && OpsWidth < 2)) 7921 break; 7922 7923 ArrayRef<Value *> Ops = VL.slice(I, OpsWidth); 7924 // Check that a previous iteration of this loop did not delete the Value. 7925 if (llvm::any_of(Ops, [&R](Value *V) { 7926 auto *I = dyn_cast<Instruction>(V); 7927 return I && R.isDeleted(I); 7928 })) 7929 continue; 7930 7931 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations " 7932 << "\n"); 7933 7934 R.buildTree(Ops); 7935 if (R.isTreeTinyAndNotFullyVectorizable()) 7936 continue; 7937 R.reorderTopToBottom(); 7938 R.reorderBottomToTop(); 7939 R.buildExternalUses(); 7940 7941 R.computeMinimumValueSizes(); 7942 InstructionCost Cost = R.getTreeCost(); 7943 CandidateFound = true; 7944 MinCost = std::min(MinCost, Cost); 7945 7946 if (Cost < -SLPCostThreshold) { 7947 LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); 7948 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList", 7949 cast<Instruction>(Ops[0])) 7950 << "SLP vectorized with cost " << ore::NV("Cost", Cost) 7951 << " and with tree size " 7952 << ore::NV("TreeSize", R.getTreeSize())); 7953 7954 R.vectorizeTree(); 7955 // Move to the next bundle.
7956 I += VF - 1; 7957 NextInst = I + 1; 7958 Changed = true; 7959 } 7960 } 7961 } 7962 7963 if (!Changed && CandidateFound) { 7964 R.getORE()->emit([&]() { 7965 return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0) 7966 << "List vectorization was possible but not beneficial with cost " 7967 << ore::NV("Cost", MinCost) << " >= " 7968 << ore::NV("Threshold", -SLPCostThreshold); 7969 }); 7970 } else if (!Changed) { 7971 R.getORE()->emit([&]() { 7972 return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0) 7973 << "Cannot SLP vectorize list: vectorization was impossible" 7974 << " with available vectorization factors"; 7975 }); 7976 } 7977 return Changed; 7978 } 7979 7980 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) { 7981 if (!I) 7982 return false; 7983 7984 if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I)) 7985 return false; 7986 7987 Value *P = I->getParent(); 7988 7989 // Vectorize in current basic block only. 7990 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); 7991 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); 7992 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P) 7993 return false; 7994 7995 // Try to vectorize V. 7996 if (tryToVectorizePair(Op0, Op1, R)) 7997 return true; 7998 7999 auto *A = dyn_cast<BinaryOperator>(Op0); 8000 auto *B = dyn_cast<BinaryOperator>(Op1); 8001 // Try to skip B. 8002 if (B && B->hasOneUse()) { 8003 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); 8004 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); 8005 if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R)) 8006 return true; 8007 if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R)) 8008 return true; 8009 } 8010 8011 // Try to skip A. 8012 if (A && A->hasOneUse()) { 8013 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 8014 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 8015 if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R)) 8016 return true; 8017 if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R)) 8018 return true; 8019 } 8020 return false; 8021 } 8022 8023 namespace { 8024 8025 /// Model horizontal reductions. 8026 /// 8027 /// A horizontal reduction is a tree of reduction instructions that has values 8028 /// that can be put into a vector as its leaves. For example: 8029 /// 8030 /// mul mul mul mul 8031 /// \ / \ / 8032 /// + + 8033 /// \ / 8034 /// + 8035 /// This tree has "mul" as its leaf values and "+" as its reduction 8036 /// instructions. A reduction can feed into a store or a binary operation 8037 /// feeding a phi. 8038 /// ... 8039 /// \ / 8040 /// + 8041 /// | 8042 /// phi += 8043 /// 8044 /// Or: 8045 /// ... 8046 /// \ / 8047 /// + 8048 /// | 8049 /// *p = 8050 /// 8051 class HorizontalReduction { 8052 using ReductionOpsType = SmallVector<Value *, 16>; 8053 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>; 8054 ReductionOpsListType ReductionOps; 8055 SmallVector<Value *, 32> ReducedVals; 8056 // Use map vector to make stable output. 8057 MapVector<Instruction *, Value *> ExtraArgs; 8058 WeakTrackingVH ReductionRoot; 8059 /// The type of reduction operation.
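  /// For example, RecurKind::Add for an integer add tree, or RecurKind::FMin
  /// for a float min tree whose operations carry the no-NaNs flag.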
8060 RecurKind RdxKind; 8061 8062 const unsigned INVALID_OPERAND_INDEX = std::numeric_limits<unsigned>::max(); 8063 8064 static bool isCmpSelMinMax(Instruction *I) { 8065 return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) && 8066 RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I)); 8067 } 8068 8069 // And/or are potentially poison-safe logical patterns like: 8070 // select x, y, false 8071 // select x, true, y 8072 static bool isBoolLogicOp(Instruction *I) { 8073 return match(I, m_LogicalAnd(m_Value(), m_Value())) || 8074 match(I, m_LogicalOr(m_Value(), m_Value())); 8075 } 8076 8077 /// Checks if instruction is associative and can be vectorized. 8078 static bool isVectorizable(RecurKind Kind, Instruction *I) { 8079 if (Kind == RecurKind::None) 8080 return false; 8081 8082 // Integer ops that map to select instructions or intrinsics are fine. 8083 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) || 8084 isBoolLogicOp(I)) 8085 return true; 8086 8087 if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) { 8088 // FP min/max are associative except for NaN and -0.0. We do not 8089 // have to rule out -0.0 here because the intrinsic semantics do not 8090 // specify a fixed result for it. 8091 return I->getFastMathFlags().noNaNs(); 8092 } 8093 8094 return I->isAssociative(); 8095 } 8096 8097 static Value *getRdxOperand(Instruction *I, unsigned Index) { 8098 // Poison-safe 'or' takes the form: select X, true, Y 8099 // To make that work with the normal operand processing, we skip the 8100 // true value operand. 8101 // TODO: Change the code and data structures to handle this without a hack. 8102 if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1) 8103 return I->getOperand(2); 8104 return I->getOperand(Index); 8105 } 8106 8107 /// Checks if the ParentStackElem.first should be marked as a reduction 8108 /// operation with an extra argument or as extra argument itself. 8109 void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem, 8110 Value *ExtraArg) { 8111 if (ExtraArgs.count(ParentStackElem.first)) { 8112 ExtraArgs[ParentStackElem.first] = nullptr; 8113 // We ran into something like: 8114 // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg. 8115 // The whole ParentStackElem.first should be considered as an extra value 8116 // in this case. 8117 // Do not perform analysis of remaining operands of ParentStackElem.first 8118 // instruction, this whole instruction is an extra argument. 8119 ParentStackElem.second = INVALID_OPERAND_INDEX; 8120 } else { 8121 // We ran into something like: 8122 // ParentStackElem.first += ... + ExtraArg + ... 8123 ExtraArgs[ParentStackElem.first] = ExtraArg; 8124 } 8125 } 8126 8127 /// Creates reduction operation with the current opcode. 
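  /// For instance (illustrative IR only), RecurKind::SMax with \p UseSelect
  /// set emits the compare/select idiom
  ///   %cmp = icmp sgt i32 %lhs, %rhs
  ///   %op  = select i1 %cmp, i32 %lhs, i32 %rhs
  /// whereas with \p UseSelect unset it emits a single call to the llvm.smax
  /// intrinsic.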
8128 static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS, 8129 Value *RHS, const Twine &Name, bool UseSelect) { 8130 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind); 8131 switch (Kind) { 8132 case RecurKind::Or: 8133 if (UseSelect && 8134 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 8135 return Builder.CreateSelect(LHS, Builder.getTrue(), RHS, Name); 8136 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 8137 Name); 8138 case RecurKind::And: 8139 if (UseSelect && 8140 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 8141 return Builder.CreateSelect(LHS, RHS, Builder.getFalse(), Name); 8142 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 8143 Name); 8144 case RecurKind::Add: 8145 case RecurKind::Mul: 8146 case RecurKind::Xor: 8147 case RecurKind::FAdd: 8148 case RecurKind::FMul: 8149 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 8150 Name); 8151 case RecurKind::FMax: 8152 return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS); 8153 case RecurKind::FMin: 8154 return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS); 8155 case RecurKind::SMax: 8156 if (UseSelect) { 8157 Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name); 8158 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 8159 } 8160 return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS); 8161 case RecurKind::SMin: 8162 if (UseSelect) { 8163 Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name); 8164 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 8165 } 8166 return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS); 8167 case RecurKind::UMax: 8168 if (UseSelect) { 8169 Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name); 8170 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 8171 } 8172 return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS); 8173 case RecurKind::UMin: 8174 if (UseSelect) { 8175 Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name); 8176 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 8177 } 8178 return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS); 8179 default: 8180 llvm_unreachable("Unknown reduction operation."); 8181 } 8182 } 8183 8184 /// Creates reduction operation with the current opcode with the IR flags 8185 /// from \p ReductionOps. 8186 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 8187 Value *RHS, const Twine &Name, 8188 const ReductionOpsListType &ReductionOps) { 8189 bool UseSelect = ReductionOps.size() == 2 || 8190 // Logical or/and. 8191 (ReductionOps.size() == 1 && 8192 isa<SelectInst>(ReductionOps.front().front())); 8193 assert((!UseSelect || ReductionOps.size() != 2 || 8194 isa<SelectInst>(ReductionOps[1][0])) && 8195 "Expected cmp + select pairs for reduction"); 8196 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect); 8197 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 8198 if (auto *Sel = dyn_cast<SelectInst>(Op)) { 8199 propagateIRFlags(Sel->getCondition(), ReductionOps[0]); 8200 propagateIRFlags(Op, ReductionOps[1]); 8201 return Op; 8202 } 8203 } 8204 propagateIRFlags(Op, ReductionOps[0]); 8205 return Op; 8206 } 8207 8208 /// Creates reduction operation with the current opcode with the IR flags 8209 /// from \p I. 
8210 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 8211 Value *RHS, const Twine &Name, Instruction *I) { 8212 auto *SelI = dyn_cast<SelectInst>(I); 8213 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, SelI != nullptr); 8214 if (SelI && RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 8215 if (auto *Sel = dyn_cast<SelectInst>(Op)) 8216 propagateIRFlags(Sel->getCondition(), SelI->getCondition()); 8217 } 8218 propagateIRFlags(Op, I); 8219 return Op; 8220 } 8221 8222 static RecurKind getRdxKind(Instruction *I) { 8223 assert(I && "Expected instruction for reduction matching"); 8224 TargetTransformInfo::ReductionFlags RdxFlags; 8225 if (match(I, m_Add(m_Value(), m_Value()))) 8226 return RecurKind::Add; 8227 if (match(I, m_Mul(m_Value(), m_Value()))) 8228 return RecurKind::Mul; 8229 if (match(I, m_And(m_Value(), m_Value())) || 8230 match(I, m_LogicalAnd(m_Value(), m_Value()))) 8231 return RecurKind::And; 8232 if (match(I, m_Or(m_Value(), m_Value())) || 8233 match(I, m_LogicalOr(m_Value(), m_Value()))) 8234 return RecurKind::Or; 8235 if (match(I, m_Xor(m_Value(), m_Value()))) 8236 return RecurKind::Xor; 8237 if (match(I, m_FAdd(m_Value(), m_Value()))) 8238 return RecurKind::FAdd; 8239 if (match(I, m_FMul(m_Value(), m_Value()))) 8240 return RecurKind::FMul; 8241 8242 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value()))) 8243 return RecurKind::FMax; 8244 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value()))) 8245 return RecurKind::FMin; 8246 8247 // This matches either cmp+select or intrinsics. SLP is expected to handle 8248 // either form. 8249 // TODO: If we are canonicalizing to intrinsics, we can remove several 8250 // special-case paths that deal with selects. 8251 if (match(I, m_SMax(m_Value(), m_Value()))) 8252 return RecurKind::SMax; 8253 if (match(I, m_SMin(m_Value(), m_Value()))) 8254 return RecurKind::SMin; 8255 if (match(I, m_UMax(m_Value(), m_Value()))) 8256 return RecurKind::UMax; 8257 if (match(I, m_UMin(m_Value(), m_Value()))) 8258 return RecurKind::UMin; 8259 8260 if (auto *Select = dyn_cast<SelectInst>(I)) { 8261 // Try harder: look for min/max pattern based on instructions producing 8262 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2). 8263 // During the intermediate stages of SLP, it's very common to have 8264 // pattern like this (since optimizeGatherSequence is run only once 8265 // at the end): 8266 // %1 = extractelement <2 x i32> %a, i32 0 8267 // %2 = extractelement <2 x i32> %a, i32 1 8268 // %cond = icmp sgt i32 %1, %2 8269 // %3 = extractelement <2 x i32> %a, i32 0 8270 // %4 = extractelement <2 x i32> %a, i32 1 8271 // %select = select i1 %cond, i32 %3, i32 %4 8272 CmpInst::Predicate Pred; 8273 Instruction *L1; 8274 Instruction *L2; 8275 8276 Value *LHS = Select->getTrueValue(); 8277 Value *RHS = Select->getFalseValue(); 8278 Value *Cond = Select->getCondition(); 8279 8280 // TODO: Support inverse predicates. 
8281 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) { 8282 if (!isa<ExtractElementInst>(RHS) || 8283 !L2->isIdenticalTo(cast<Instruction>(RHS))) 8284 return RecurKind::None; 8285 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) { 8286 if (!isa<ExtractElementInst>(LHS) || 8287 !L1->isIdenticalTo(cast<Instruction>(LHS))) 8288 return RecurKind::None; 8289 } else { 8290 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS)) 8291 return RecurKind::None; 8292 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) || 8293 !L1->isIdenticalTo(cast<Instruction>(LHS)) || 8294 !L2->isIdenticalTo(cast<Instruction>(RHS))) 8295 return RecurKind::None; 8296 } 8297 8298 TargetTransformInfo::ReductionFlags RdxFlags; 8299 switch (Pred) { 8300 default: 8301 return RecurKind::None; 8302 case CmpInst::ICMP_SGT: 8303 case CmpInst::ICMP_SGE: 8304 return RecurKind::SMax; 8305 case CmpInst::ICMP_SLT: 8306 case CmpInst::ICMP_SLE: 8307 return RecurKind::SMin; 8308 case CmpInst::ICMP_UGT: 8309 case CmpInst::ICMP_UGE: 8310 return RecurKind::UMax; 8311 case CmpInst::ICMP_ULT: 8312 case CmpInst::ICMP_ULE: 8313 return RecurKind::UMin; 8314 } 8315 } 8316 return RecurKind::None; 8317 } 8318 8319 /// Get the index of the first operand. 8320 static unsigned getFirstOperandIndex(Instruction *I) { 8321 return isCmpSelMinMax(I) ? 1 : 0; 8322 } 8323 8324 /// Total number of operands in the reduction operation. 8325 static unsigned getNumberOfOperands(Instruction *I) { 8326 return isCmpSelMinMax(I) ? 3 : 2; 8327 } 8328 8329 /// Checks if the instruction is in basic block \p BB. 8330 /// For a cmp+sel min/max reduction check that both ops are in \p BB. 8331 static bool hasSameParent(Instruction *I, BasicBlock *BB) { 8332 if (isCmpSelMinMax(I) || (isBoolLogicOp(I) && isa<SelectInst>(I))) { 8333 auto *Sel = cast<SelectInst>(I); 8334 auto *Cmp = dyn_cast<Instruction>(Sel->getCondition()); 8335 return Sel->getParent() == BB && Cmp && Cmp->getParent() == BB; 8336 } 8337 return I->getParent() == BB; 8338 } 8339 8340 /// Expected number of uses for reduction operations/reduced values. 8341 static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) { 8342 if (IsCmpSelMinMax) { 8343 // SelectInst must be used twice while the condition op must have single 8344 // use only. 8345 if (auto *Sel = dyn_cast<SelectInst>(I)) 8346 return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse(); 8347 return I->hasNUses(2); 8348 } 8349 8350 // Arithmetic reduction operation must be used once only. 8351 return I->hasOneUse(); 8352 } 8353 8354 /// Initializes the list of reduction operations. 8355 void initReductionOps(Instruction *I) { 8356 if (isCmpSelMinMax(I)) 8357 ReductionOps.assign(2, ReductionOpsType()); 8358 else 8359 ReductionOps.assign(1, ReductionOpsType()); 8360 } 8361 8362 /// Add all reduction operations for the reduction instruction \p I. 
8363 void addReductionOps(Instruction *I) { 8364 if (isCmpSelMinMax(I)) { 8365 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition()); 8366 ReductionOps[1].emplace_back(I); 8367 } else { 8368 ReductionOps[0].emplace_back(I); 8369 } 8370 } 8371 8372 static Value *getLHS(RecurKind Kind, Instruction *I) { 8373 if (Kind == RecurKind::None) 8374 return nullptr; 8375 return I->getOperand(getFirstOperandIndex(I)); 8376 } 8377 static Value *getRHS(RecurKind Kind, Instruction *I) { 8378 if (Kind == RecurKind::None) 8379 return nullptr; 8380 return I->getOperand(getFirstOperandIndex(I) + 1); 8381 } 8382 8383 public: 8384 HorizontalReduction() = default; 8385 8386 /// Try to find a reduction tree. 8387 bool matchAssociativeReduction(PHINode *Phi, Instruction *Inst) { 8388 assert((!Phi || is_contained(Phi->operands(), Inst)) && 8389 "Phi needs to use the binary operator"); 8390 assert((isa<BinaryOperator>(Inst) || isa<SelectInst>(Inst) || 8391 isa<IntrinsicInst>(Inst)) && 8392 "Expected binop, select, or intrinsic for reduction matching"); 8393 RdxKind = getRdxKind(Inst); 8394 8395 // We could have an initial reduction that is not an add. 8396 // r *= v1 + v2 + v3 + v4 8397 // In such a case start looking for a tree rooted in the first '+'. 8398 if (Phi) { 8399 if (getLHS(RdxKind, Inst) == Phi) { 8400 Phi = nullptr; 8401 Inst = dyn_cast<Instruction>(getRHS(RdxKind, Inst)); 8402 if (!Inst) 8403 return false; 8404 RdxKind = getRdxKind(Inst); 8405 } else if (getRHS(RdxKind, Inst) == Phi) { 8406 Phi = nullptr; 8407 Inst = dyn_cast<Instruction>(getLHS(RdxKind, Inst)); 8408 if (!Inst) 8409 return false; 8410 RdxKind = getRdxKind(Inst); 8411 } 8412 } 8413 8414 if (!isVectorizable(RdxKind, Inst)) 8415 return false; 8416 8417 // Analyze "regular" integer/FP types for reductions - no target-specific 8418 // types or pointers. 8419 Type *Ty = Inst->getType(); 8420 if (!isValidElementType(Ty) || Ty->isPointerTy()) 8421 return false; 8422 8423 // Though the ultimate reduction may have multiple uses, its condition must 8424 // have only a single use. 8425 if (auto *Sel = dyn_cast<SelectInst>(Inst)) 8426 if (!Sel->getCondition()->hasOneUse()) 8427 return false; 8428 8429 ReductionRoot = Inst; 8430 8431 // The opcode for leaf values that we perform a reduction on. 8432 // For example: load(x) + load(y) + load(z) + fptoui(w) 8433 // The leaf opcode for 'w' does not match, so we don't include it as a 8434 // potential candidate for the reduction. 8435 unsigned LeafOpcode = 0; 8436 8437 // Post-order traverse the reduction tree starting at Inst. We only handle 8438 // true trees containing binary operators or selects. 8439 SmallVector<std::pair<Instruction *, unsigned>, 32> Stack; 8440 Stack.push_back(std::make_pair(Inst, getFirstOperandIndex(Inst))); 8441 initReductionOps(Inst); 8442 while (!Stack.empty()) { 8443 Instruction *TreeN = Stack.back().first; 8444 unsigned EdgeToVisit = Stack.back().second++; 8445 const RecurKind TreeRdxKind = getRdxKind(TreeN); 8446 bool IsReducedValue = TreeRdxKind != RdxKind; 8447 8448 // Postorder visit. 8449 if (IsReducedValue || EdgeToVisit >= getNumberOfOperands(TreeN)) { 8450 if (IsReducedValue) 8451 ReducedVals.push_back(TreeN); 8452 else { 8453 auto ExtraArgsIter = ExtraArgs.find(TreeN); 8454 if (ExtraArgsIter != ExtraArgs.end() && !ExtraArgsIter->second) { 8455 // Check if TreeN is an extra argument of its parent operation. 8456 if (Stack.size() <= 1) { 8457 // TreeN can't be an extra argument as it is a root reduction 8458 // operation.
8459 return false; 8460 } 8461 // Yes, TreeN is an extra argument, do not add it to a list of 8462 // reduction operations. 8463 // Stack[Stack.size() - 2] always points to the parent operation. 8464 markExtraArg(Stack[Stack.size() - 2], TreeN); 8465 ExtraArgs.erase(TreeN); 8466 } else 8467 addReductionOps(TreeN); 8468 } 8469 // Retract. 8470 Stack.pop_back(); 8471 continue; 8472 } 8473 8474 // Visit operands. 8475 Value *EdgeVal = getRdxOperand(TreeN, EdgeToVisit); 8476 auto *EdgeInst = dyn_cast<Instruction>(EdgeVal); 8477 if (!EdgeInst) { 8478 // Edge value is not a reduction instruction or a leaf instruction. 8479 // (It may be a constant, function argument, or something else.) 8480 markExtraArg(Stack.back(), EdgeVal); 8481 continue; 8482 } 8483 RecurKind EdgeRdxKind = getRdxKind(EdgeInst); 8484 // Continue analysis if the next operand is a reduction operation or 8485 // (possibly) a leaf value. If the leaf value opcode is not set, 8486 // the first met operation != reduction operation is considered as the 8487 // leaf opcode. 8488 // Only handle trees in the current basic block. 8489 // Each tree node needs to have minimal number of users except for the 8490 // ultimate reduction. 8491 const bool IsRdxInst = EdgeRdxKind == RdxKind; 8492 if (EdgeInst != Phi && EdgeInst != Inst && 8493 hasSameParent(EdgeInst, Inst->getParent()) && 8494 hasRequiredNumberOfUses(isCmpSelMinMax(Inst), EdgeInst) && 8495 (!LeafOpcode || LeafOpcode == EdgeInst->getOpcode() || IsRdxInst)) { 8496 if (IsRdxInst) { 8497 // We need to be able to reassociate the reduction operations. 8498 if (!isVectorizable(EdgeRdxKind, EdgeInst)) { 8499 // I is an extra argument for TreeN (its parent operation). 8500 markExtraArg(Stack.back(), EdgeInst); 8501 continue; 8502 } 8503 } else if (!LeafOpcode) { 8504 LeafOpcode = EdgeInst->getOpcode(); 8505 } 8506 Stack.push_back( 8507 std::make_pair(EdgeInst, getFirstOperandIndex(EdgeInst))); 8508 continue; 8509 } 8510 // I is an extra argument for TreeN (its parent operation). 8511 markExtraArg(Stack.back(), EdgeInst); 8512 } 8513 return true; 8514 } 8515 8516 /// Attempt to vectorize the tree found by matchAssociativeReduction. 8517 Value *tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) { 8518 // If there are a sufficient number of reduction values, reduce 8519 // to a nearby power-of-2. We can safely generate oversized 8520 // vectors and rely on the backend to split them to legal sizes. 8521 unsigned NumReducedVals = ReducedVals.size(); 8522 if (NumReducedVals < 4) 8523 return nullptr; 8524 8525 // Intersect the fast-math-flags from all reduction operations. 8526 FastMathFlags RdxFMF; 8527 RdxFMF.set(); 8528 for (ReductionOpsType &RdxOp : ReductionOps) { 8529 for (Value *RdxVal : RdxOp) { 8530 if (auto *FPMO = dyn_cast<FPMathOperator>(RdxVal)) 8531 RdxFMF &= FPMO->getFastMathFlags(); 8532 } 8533 } 8534 8535 IRBuilder<> Builder(cast<Instruction>(ReductionRoot)); 8536 Builder.setFastMathFlags(RdxFMF); 8537 8538 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues; 8539 // The same extra argument may be used several times, so log each attempt 8540 // to use it. 8541 for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) { 8542 assert(Pair.first && "DebugLoc must be set."); 8543 ExternallyUsedValues[Pair.second].push_back(Pair.first); 8544 } 8545 8546 // The compare instruction of a min/max is the insertion point for new 8547 // instructions and may be replaced with a new compare instruction. 
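    // For a min/max rooted at, e.g. (illustrative IR):
    //   %cond = icmp slt i32 %a, %b
    //   %root = select i1 %cond, i32 %a, i32 %b
    // the lambda below returns %cond, so that new vector instructions are
    // placed ahead of the whole cmp+select idiom.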
8548 auto getCmpForMinMaxReduction = [](Instruction *RdxRootInst) { 8549 assert(isa<SelectInst>(RdxRootInst) && 8550 "Expected min/max reduction to have select root instruction"); 8551 Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition(); 8552 assert(isa<Instruction>(ScalarCond) && 8553 "Expected min/max reduction to have compare condition"); 8554 return cast<Instruction>(ScalarCond); 8555 }; 8556 8557 // The reduction root is used as the insertion point for new instructions, 8558 // so set it as externally used to prevent it from being deleted. 8559 ExternallyUsedValues[ReductionRoot]; 8560 SmallVector<Value *, 16> IgnoreList; 8561 for (ReductionOpsType &RdxOp : ReductionOps) 8562 IgnoreList.append(RdxOp.begin(), RdxOp.end()); 8563 8564 unsigned ReduxWidth = PowerOf2Floor(NumReducedVals); 8565 if (NumReducedVals > ReduxWidth) { 8566 // In the loop below, we are building a tree based on a window of 8567 // 'ReduxWidth' values. 8568 // If the operands of those values have common traits (compare predicate, 8569 // constant operand, etc), then we want to group those together to 8570 // minimize the cost of the reduction. 8571 8572 // TODO: This should be extended to count common operands for 8573 // compares and binops. 8574 8575 // Step 1: Count the number of times each compare predicate occurs. 8576 SmallDenseMap<unsigned, unsigned> PredCountMap; 8577 for (Value *RdxVal : ReducedVals) { 8578 CmpInst::Predicate Pred; 8579 if (match(RdxVal, m_Cmp(Pred, m_Value(), m_Value()))) 8580 ++PredCountMap[Pred]; 8581 } 8582 // Step 2: Sort the values so the most common predicates come first. 8583 stable_sort(ReducedVals, [&PredCountMap](Value *A, Value *B) { 8584 CmpInst::Predicate PredA, PredB; 8585 if (match(A, m_Cmp(PredA, m_Value(), m_Value())) && 8586 match(B, m_Cmp(PredB, m_Value(), m_Value()))) { 8587 return PredCountMap[PredA] > PredCountMap[PredB]; 8588 } 8589 return false; 8590 }); 8591 } 8592 8593 Value *VectorizedTree = nullptr; 8594 unsigned i = 0; 8595 while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) { 8596 ArrayRef<Value *> VL(&ReducedVals[i], ReduxWidth); 8597 V.buildTree(VL, IgnoreList); 8598 if (V.isTreeTinyAndNotFullyVectorizable(/*ForReduction=*/true)) 8599 break; 8600 if (V.isLoadCombineReductionCandidate(RdxKind)) 8601 break; 8602 V.reorderTopToBottom(); 8603 V.reorderBottomToTop(/*IgnoreReorder=*/true); 8604 V.buildExternalUses(ExternallyUsedValues); 8605 8606 // For a poison-safe boolean logic reduction, do not replace select 8607 // instructions with logic ops. All reduced values will be frozen (see 8608 // below) to prevent leaking poison. 8609 if (isa<SelectInst>(ReductionRoot) && 8610 isBoolLogicOp(cast<Instruction>(ReductionRoot)) && 8611 NumReducedVals != ReduxWidth) 8612 break; 8613 8614 V.computeMinimumValueSizes(); 8615 8616 // Estimate cost. 
8617 InstructionCost TreeCost = 8618 V.getTreeCost(makeArrayRef(&ReducedVals[i], ReduxWidth)); 8619 InstructionCost ReductionCost = 8620 getReductionCost(TTI, ReducedVals[i], ReduxWidth, RdxFMF); 8621 InstructionCost Cost = TreeCost + ReductionCost; 8622 if (!Cost.isValid()) { 8623 LLVM_DEBUG(dbgs() << "Encountered invalid baseline cost.\n"); 8624 return nullptr; 8625 } 8626 if (Cost >= -SLPCostThreshold) { 8627 V.getORE()->emit([&]() { 8628 return OptimizationRemarkMissed(SV_NAME, "HorSLPNotBeneficial", 8629 cast<Instruction>(VL[0])) 8630 << "Vectorizing horizontal reduction is possible " 8631 << "but not beneficial with cost " << ore::NV("Cost", Cost) 8632 << " and threshold " 8633 << ore::NV("Threshold", -SLPCostThreshold); 8634 }); 8635 break; 8636 } 8637 8638 LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" 8639 << Cost << ". (HorRdx)\n"); 8640 V.getORE()->emit([&]() { 8641 return OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction", 8642 cast<Instruction>(VL[0])) 8643 << "Vectorized horizontal reduction with cost " 8644 << ore::NV("Cost", Cost) << " and with tree size " 8645 << ore::NV("TreeSize", V.getTreeSize()); 8646 }); 8647 8648 // Vectorize a tree. 8649 DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc(); 8650 Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues); 8651 8652 // Emit a reduction. If the root is a select (min/max idiom), the insert 8653 // point is the compare condition of that select. 8654 Instruction *RdxRootInst = cast<Instruction>(ReductionRoot); 8655 if (isCmpSelMinMax(RdxRootInst)) 8656 Builder.SetInsertPoint(getCmpForMinMaxReduction(RdxRootInst)); 8657 else 8658 Builder.SetInsertPoint(RdxRootInst); 8659 8660 // To prevent poison from leaking across what used to be sequential, safe, 8661 // scalar boolean logic operations, the reduction operand must be frozen. 8662 if (isa<SelectInst>(RdxRootInst) && isBoolLogicOp(RdxRootInst)) 8663 VectorizedRoot = Builder.CreateFreeze(VectorizedRoot); 8664 8665 Value *ReducedSubTree = 8666 emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI); 8667 8668 if (!VectorizedTree) { 8669 // Initialize the final value in the reduction. 8670 VectorizedTree = ReducedSubTree; 8671 } else { 8672 // Update the final value in the reduction. 8673 Builder.SetCurrentDebugLocation(Loc); 8674 VectorizedTree = createOp(Builder, RdxKind, VectorizedTree, 8675 ReducedSubTree, "op.rdx", ReductionOps); 8676 } 8677 i += ReduxWidth; 8678 ReduxWidth = PowerOf2Floor(NumReducedVals - i); 8679 } 8680 8681 if (VectorizedTree) { 8682 // Finish the reduction. 8683 for (; i < NumReducedVals; ++i) { 8684 auto *I = cast<Instruction>(ReducedVals[i]); 8685 Builder.SetCurrentDebugLocation(I->getDebugLoc()); 8686 VectorizedTree = 8687 createOp(Builder, RdxKind, VectorizedTree, I, "", ReductionOps); 8688 } 8689 for (auto &Pair : ExternallyUsedValues) { 8690 // Add each externally used value to the final reduction. 8691 for (auto *I : Pair.second) { 8692 Builder.SetCurrentDebugLocation(I->getDebugLoc()); 8693 VectorizedTree = createOp(Builder, RdxKind, VectorizedTree, 8694 Pair.first, "op.extra", I); 8695 } 8696 } 8697 8698 ReductionRoot->replaceAllUsesWith(VectorizedTree); 8699 8700 // Mark all scalar reduction ops for deletion; they are replaced by the 8701 // vector reductions. 8702 V.eraseInstructions(IgnoreList); 8703 } 8704 return VectorizedTree; 8705 } 8706 8707 unsigned numReductionValues() const { return ReducedVals.size(); } 8708 8709 private: 8710 /// Calculate the cost of a reduction.
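  /// The model below computes VectorCost - ScalarCost, where ScalarCost
  /// covers the ReduxWidth - 1 scalar reduction operations that the vector
  /// reduction replaces; a negative result therefore means the vector form
  /// is expected to be cheaper.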
8711 InstructionCost getReductionCost(TargetTransformInfo *TTI, 8712 Value *FirstReducedVal, unsigned ReduxWidth, 8713 FastMathFlags FMF) { 8714 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 8715 Type *ScalarTy = FirstReducedVal->getType(); 8716 FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth); 8717 InstructionCost VectorCost, ScalarCost; 8718 switch (RdxKind) { 8719 case RecurKind::Add: 8720 case RecurKind::Mul: 8721 case RecurKind::Or: 8722 case RecurKind::And: 8723 case RecurKind::Xor: 8724 case RecurKind::FAdd: 8725 case RecurKind::FMul: { 8726 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind); 8727 VectorCost = 8728 TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF, CostKind); 8729 ScalarCost = TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy, CostKind); 8730 break; 8731 } 8732 case RecurKind::FMax: 8733 case RecurKind::FMin: { 8734 auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy); 8735 auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy)); 8736 VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy, 8737 /*unsigned=*/false, CostKind); 8738 CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind); 8739 ScalarCost = TTI->getCmpSelInstrCost(Instruction::FCmp, ScalarTy, 8740 SclCondTy, RdxPred, CostKind) + 8741 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 8742 SclCondTy, RdxPred, CostKind); 8743 break; 8744 } 8745 case RecurKind::SMax: 8746 case RecurKind::SMin: 8747 case RecurKind::UMax: 8748 case RecurKind::UMin: { 8749 auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy); 8750 auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy)); 8751 bool IsUnsigned = 8752 RdxKind == RecurKind::UMax || RdxKind == RecurKind::UMin; 8753 VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy, IsUnsigned, 8754 CostKind); 8755 CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind); 8756 ScalarCost = TTI->getCmpSelInstrCost(Instruction::ICmp, ScalarTy, 8757 SclCondTy, RdxPred, CostKind) + 8758 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 8759 SclCondTy, RdxPred, CostKind); 8760 break; 8761 } 8762 default: 8763 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 8764 } 8765 8766 // Scalar cost is repeated for N-1 elements. 8767 ScalarCost *= (ReduxWidth - 1); 8768 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost 8769 << " for reduction that starts with " << *FirstReducedVal 8770 << " (It is a splitting reduction)\n"); 8771 return VectorCost - ScalarCost; 8772 } 8773 8774 /// Emit a horizontal reduction of the vectorized value. 
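  /// For example (a sketch), an add reduction of a <4 x i32> value is
  /// typically emitted as a single intrinsic call,
  ///   %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v)
  /// whose final shuffle/add expansion is left to the target.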
8775 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 8776 unsigned ReduxWidth, const TargetTransformInfo *TTI) { 8777 assert(VectorizedValue && "Need to have a vectorized tree node"); 8778 assert(isPowerOf2_32(ReduxWidth) && 8779 "We only handle power-of-two reductions for now"); 8780 8781 ++NumVectorInstructions; 8782 return createSimpleTargetReduction(Builder, TTI, VectorizedValue, RdxKind, 8783 ReductionOps.back()); 8784 } 8785 }; 8786 8787 } // end anonymous namespace 8788 8789 static Optional<unsigned> getAggregateSize(Instruction *InsertInst) { 8790 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) 8791 return cast<FixedVectorType>(IE->getType())->getNumElements(); 8792 8793 unsigned AggregateSize = 1; 8794 auto *IV = cast<InsertValueInst>(InsertInst); 8795 Type *CurrentType = IV->getType(); 8796 do { 8797 if (auto *ST = dyn_cast<StructType>(CurrentType)) { 8798 for (auto *Elt : ST->elements()) 8799 if (Elt != ST->getElementType(0)) // check homogeneity 8800 return None; 8801 AggregateSize *= ST->getNumElements(); 8802 CurrentType = ST->getElementType(0); 8803 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) { 8804 AggregateSize *= AT->getNumElements(); 8805 CurrentType = AT->getElementType(); 8806 } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) { 8807 AggregateSize *= VT->getNumElements(); 8808 return AggregateSize; 8809 } else if (CurrentType->isSingleValueType()) { 8810 return AggregateSize; 8811 } else { 8812 return None; 8813 } 8814 } while (true); 8815 } 8816 8817 static bool findBuildAggregate_rec(Instruction *LastInsertInst, 8818 TargetTransformInfo *TTI, 8819 SmallVectorImpl<Value *> &BuildVectorOpds, 8820 SmallVectorImpl<Value *> &InsertElts, 8821 unsigned OperandOffset) { 8822 do { 8823 Value *InsertedOperand = LastInsertInst->getOperand(1); 8824 Optional<int> OperandIndex = getInsertIndex(LastInsertInst, OperandOffset); 8825 if (!OperandIndex) 8826 return false; 8827 if (isa<InsertElementInst>(InsertedOperand) || 8828 isa<InsertValueInst>(InsertedOperand)) { 8829 if (!findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI, 8830 BuildVectorOpds, InsertElts, *OperandIndex)) 8831 return false; 8832 } else { 8833 BuildVectorOpds[*OperandIndex] = InsertedOperand; 8834 InsertElts[*OperandIndex] = LastInsertInst; 8835 } 8836 LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0)); 8837 } while (LastInsertInst != nullptr && 8838 (isa<InsertValueInst>(LastInsertInst) || 8839 isa<InsertElementInst>(LastInsertInst)) && 8840 LastInsertInst->hasOneUse()); 8841 return true; 8842 } 8843 8844 /// Recognize construction of vectors like 8845 /// %ra = insertelement <4 x float> poison, float %s0, i32 0 8846 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 8847 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 8848 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 8849 /// starting from the last insertelement or insertvalue instruction. 8850 /// 8851 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>}, 8852 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on. 8853 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples. 8854 /// 8855 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type. 8856 /// 8857 /// \return true if it matches. 
8858 static bool findBuildAggregate(Instruction *LastInsertInst, 8859 TargetTransformInfo *TTI, 8860 SmallVectorImpl<Value *> &BuildVectorOpds, 8861 SmallVectorImpl<Value *> &InsertElts) { 8862 8863 assert((isa<InsertElementInst>(LastInsertInst) || 8864 isa<InsertValueInst>(LastInsertInst)) && 8865 "Expected insertelement or insertvalue instruction!"); 8866 8867 assert((BuildVectorOpds.empty() && InsertElts.empty()) && 8868 "Expected empty result vectors!"); 8869 8870 Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst); 8871 if (!AggregateSize) 8872 return false; 8873 BuildVectorOpds.resize(*AggregateSize); 8874 InsertElts.resize(*AggregateSize); 8875 8876 if (findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 8877 0)) { 8878 llvm::erase_value(BuildVectorOpds, nullptr); 8879 llvm::erase_value(InsertElts, nullptr); 8880 if (BuildVectorOpds.size() >= 2) 8881 return true; 8882 } 8883 8884 return false; 8885 } 8886 8887 /// Try and get a reduction value from a phi node. 8888 /// 8889 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 8890 /// if they come from either \p ParentBB or a containing loop latch. 8891 /// 8892 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 8893 /// if not possible. 8894 static Value *getReductionValue(const DominatorTree *DT, PHINode *P, 8895 BasicBlock *ParentBB, LoopInfo *LI) { 8896 // There are situations where the reduction value is not dominated by the 8897 // reduction phi. Vectorizing such cases has been reported to cause 8898 // miscompiles. See PR25787. 8899 auto DominatedReduxValue = [&](Value *R) { 8900 return isa<Instruction>(R) && 8901 DT->dominates(P->getParent(), cast<Instruction>(R)->getParent()); 8902 }; 8903 8904 Value *Rdx = nullptr; 8905 8906 // Return the incoming value if it comes from the same BB as the phi node. 8907 if (P->getIncomingBlock(0) == ParentBB) { 8908 Rdx = P->getIncomingValue(0); 8909 } else if (P->getIncomingBlock(1) == ParentBB) { 8910 Rdx = P->getIncomingValue(1); 8911 } 8912 8913 if (Rdx && DominatedReduxValue(Rdx)) 8914 return Rdx; 8915 8916 // Otherwise, check whether we have a loop latch to look at. 8917 Loop *BBL = LI->getLoopFor(ParentBB); 8918 if (!BBL) 8919 return nullptr; 8920 BasicBlock *BBLatch = BBL->getLoopLatch(); 8921 if (!BBLatch) 8922 return nullptr; 8923 8924 // There is a loop latch, return the incoming value if it comes from 8925 // that. This reduction pattern occasionally turns up. 
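  // E.g. (source-level sketch) in a loop such as
  //   for (...) sum += a[i];
  // the phi for 'sum' sits in the loop header while the add feeding it sits
  // in the latch, so the candidate is found through the latch rather than
  // through ParentBB.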
8926 if (P->getIncomingBlock(0) == BBLatch) { 8927 Rdx = P->getIncomingValue(0); 8928 } else if (P->getIncomingBlock(1) == BBLatch) { 8929 Rdx = P->getIncomingValue(1); 8930 } 8931 8932 if (Rdx && DominatedReduxValue(Rdx)) 8933 return Rdx; 8934 8935 return nullptr; 8936 } 8937 8938 static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) { 8939 if (match(I, m_BinOp(m_Value(V0), m_Value(V1)))) 8940 return true; 8941 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1)))) 8942 return true; 8943 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1)))) 8944 return true; 8945 if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1)))) 8946 return true; 8947 if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1)))) 8948 return true; 8949 if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1)))) 8950 return true; 8951 if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1)))) 8952 return true; 8953 return false; 8954 } 8955 8956 /// Attempt to reduce a horizontal reduction. 8957 /// If it is legal to match a horizontal reduction feeding the phi node \a P 8958 /// with reduction operators \a Root (or one of its operands) in a basic block 8959 /// \a BB, then check if it can be done. If a horizontal reduction is not found 8960 /// and the root instruction is a binary operation, vectorization of the 8961 /// operands is attempted. 8962 /// \returns true if a horizontal reduction was matched and reduced or operands 8963 /// of one of the binary instructions were vectorized. 8964 /// \returns false if a horizontal reduction was not matched (or not possible) 8965 /// or no vectorization of any binary operation feeding the \a Root instruction 8966 /// was performed. 8967 static bool tryToVectorizeHorReductionOrInstOperands( 8968 PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R, 8969 TargetTransformInfo *TTI, 8970 const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) { 8971 if (!ShouldVectorizeHor) 8972 return false; 8973 8974 if (!Root) 8975 return false; 8976 8977 if (Root->getParent() != BB || isa<PHINode>(Root)) 8978 return false; 8979 // Start analysis starting from Root instruction. If horizontal reduction is 8980 // found, try to vectorize it. If it is not a horizontal reduction or 8981 // vectorization is not possible or not effective, and currently analyzed 8982 // instruction is a binary operation, try to vectorize the operands, using 8983 // pre-order DFS traversal order. If the operands were not vectorized, repeat 8984 // the same procedure considering each operand as a possible root of the 8985 // horizontal reduction. 8986 // Interrupt the process if the Root instruction itself was vectorized or all 8987 // sub-trees not higher than RecursionMaxDepth were analyzed/vectorized. 8988 // Skip the analysis of CmpInsts. The compiler implements a postanalysis of 8989 // the CmpInsts so we can skip extra attempts in 8990 // tryToVectorizeHorReductionOrInstOperands and save compile time.
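  // As a small illustrative example, given
  //   %m0 = mul i32 %a, %b
  //   %m1 = mul i32 %c, %d
  //   %s  = add i32 %m0, %m1
  //   %r  = add i32 %s, %e
  // the add rooted at %r is first tried as a horizontal reduction over
  // {%m0, %m1, %e}; if that fails or is unprofitable, the traversal descends
  // and the mul operands are postponed as candidates for plain list
  // vectorization.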
  std::queue<std::pair<Instruction *, unsigned>> Worklist;
  Worklist.emplace(Root, 0);
  SmallPtrSet<Value *, 8> VisitedInstrs;
  SmallVector<WeakTrackingVH> PostponedInsts;
  bool Res = false;
  auto &&TryToReduce = [TTI, &P, &R](Instruction *Inst, Value *&B0,
                                     Value *&B1) -> Value * {
    bool IsBinop = matchRdxBop(Inst, B0, B1);
    bool IsSelect = match(Inst, m_Select(m_Value(), m_Value(), m_Value()));
    if (IsBinop || IsSelect) {
      HorizontalReduction HorRdx;
      if (HorRdx.matchAssociativeReduction(P, Inst))
        return HorRdx.tryToReduce(R, TTI);
    }
    return nullptr;
  };
  while (!Worklist.empty()) {
    Instruction *Inst;
    unsigned Level;
    std::tie(Inst, Level) = Worklist.front();
    Worklist.pop();
    // Do not try to analyze an instruction that has already been vectorized.
    // This may happen when we vectorize instruction operands on a previous
    // iteration while the worklist was populated before that happened.
    if (R.isDeleted(Inst))
      continue;
    Value *B0 = nullptr, *B1 = nullptr;
    if (Value *V = TryToReduce(Inst, B0, B1)) {
      Res = true;
      // Set P to nullptr to avoid re-analysis of the phi node in the
      // matchAssociativeReduction function unless this is the root node.
      P = nullptr;
      if (auto *I = dyn_cast<Instruction>(V)) {
        // Try to find another reduction.
        Worklist.emplace(I, Level);
        continue;
      }
    } else {
      bool IsBinop = B0 && B1;
      if (P && IsBinop) {
        Inst = dyn_cast<Instruction>(B0);
        if (Inst == P)
          Inst = dyn_cast<Instruction>(B1);
        if (!Inst) {
          // Set P to nullptr to avoid re-analysis of the phi node in the
          // matchAssociativeReduction function unless this is the root node.
          P = nullptr;
          continue;
        }
      }
      // Set P to nullptr to avoid re-analysis of the phi node in the
      // matchAssociativeReduction function unless this is the root node.
      P = nullptr;
      // Do not try to vectorize CmpInst operands; this is done separately.
      // The final attempt at binop argument vectorization should happen after
      // the loop, to try to find reductions.
      if (!isa<CmpInst>(Inst))
        PostponedInsts.push_back(Inst);
    }

    // Try to vectorize operands.
    // Continue the analysis only for instructions from the same basic block,
    // to save compile time.
    if (++Level < RecursionMaxDepth)
      for (auto *Op : Inst->operand_values())
        if (VisitedInstrs.insert(Op).second)
          if (auto *I = dyn_cast<Instruction>(Op))
            // Do not try to vectorize CmpInst operands; this is done
            // separately.
            if (!isa<PHINode>(I) && !isa<CmpInst>(I) && !R.isDeleted(I) &&
                I->getParent() == BB)
              Worklist.emplace(I, Level);
  }
  // Try to vectorize binops for which no reductions were found.
  for (Value *V : PostponedInsts)
    if (auto *Inst = dyn_cast<Instruction>(V))
      if (!R.isDeleted(Inst))
        Res |= Vectorize(Inst, R);
  return Res;
}

bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
                                                 BasicBlock *BB, BoUpSLP &R,
                                                 TargetTransformInfo *TTI) {
  auto *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return false;

  if (!isa<BinaryOperator>(I))
    P = nullptr;
  // Try to match and vectorize a horizontal reduction rooted at I.
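  // The fallback callback below retries plain SLP tree vectorization on the
  // instructions for which no reduction was matched.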
  auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool {
    return tryToVectorize(I, R);
  };
  return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI,
                                                  ExtraVectorization);
}

bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
                                                 BasicBlock *BB, BoUpSLP &R) {
  const DataLayout &DL = BB->getModule()->getDataLayout();
  if (!R.canMapToVector(IVI->getType(), DL))
    return false;

  SmallVector<Value *, 16> BuildVectorOpds;
  SmallVector<Value *, 16> BuildVectorInsts;
  if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts))
    return false;

  LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
  // The aggregate value is unlikely to be processed in a vector register; we
  // need to extract the scalars into scalar registers, so vectorize the
  // build-vector operands rather than the inserts themselves.
  return tryToVectorizeList(BuildVectorOpds, R);
}

bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
                                                   BasicBlock *BB, BoUpSLP &R) {
  SmallVector<Value *, 16> BuildVectorInsts;
  SmallVector<Value *, 16> BuildVectorOpds;
  SmallVector<int> Mask;
  if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) ||
      (llvm::all_of(BuildVectorOpds,
                    [](Value *V) { return isa<ExtractElementInst>(V); }) &&
       isFixedVectorShuffle(BuildVectorOpds, Mask)))
    return false;

  LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
  return tryToVectorizeList(BuildVectorInsts, R);
}

bool SLPVectorizerPass::vectorizeSimpleInstructions(
    SmallVectorImpl<Instruction *> &Instructions, BasicBlock *BB, BoUpSLP &R,
    bool AtTerminator) {
  bool OpsChanged = false;
  SmallVector<Instruction *, 4> PostponedCmps;
  for (auto *I : reverse(Instructions)) {
    if (R.isDeleted(I))
      continue;
    if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I))
      OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
    else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I))
      OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
    else if (isa<CmpInst>(I))
      PostponedCmps.push_back(I);
  }
  if (AtTerminator) {
    // Try to find reductions first.
    for (Instruction *I : PostponedCmps) {
      if (R.isDeleted(I))
        continue;
      for (Value *Op : I->operands())
        OpsChanged |= vectorizeRootInstruction(nullptr, Op, BB, R, TTI);
    }
    // Try to vectorize operands as vector bundles.
    for (Instruction *I : PostponedCmps) {
      if (R.isDeleted(I))
        continue;
      OpsChanged |= tryToVectorize(I, R);
    }
    Instructions.clear();
  } else {
    // Insert in reverse order since the PostponedCmps vector was filled in
    // reverse order.
    Instructions.assign(PostponedCmps.rbegin(), PostponedCmps.rend());
  }
  return OpsChanged;
}

template <typename T>
static bool
tryToVectorizeSequence(SmallVectorImpl<T *> &Incoming,
                       function_ref<unsigned(T *)> Limit,
                       function_ref<bool(T *, T *)> Comparator,
                       function_ref<bool(T *, T *)> AreCompatible,
                       function_ref<bool(ArrayRef<T *>, bool)> TryToVectorize,
                       bool LimitForRegisterSize) {
  bool Changed = false;
  // Sort by type, parent, operands.
  stable_sort(Incoming, Comparator);

  // Try to vectorize elements based on their type.
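  // After the stable sort, values that AreCompatible considers equivalent form
  // contiguous runs, so a single forward scan is enough to carve out each
  // candidate bundle.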
  SmallVector<T *> Candidates;
  for (auto *IncIt = Incoming.begin(), *E = Incoming.end(); IncIt != E;) {
    // Look for the next elements with the same type, parent and operand
    // kinds.
    auto *SameTypeIt = IncIt;
    while (SameTypeIt != E && AreCompatible(*SameTypeIt, *IncIt))
      ++SameTypeIt;

    // Try to vectorize them.
    unsigned NumElts = (SameTypeIt - IncIt);
    LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at nodes ("
                      << NumElts << ")\n");
    // The vectorization is a three-stage attempt:
    // 1. Try to vectorize instructions with the same/alternate opcodes,
    // limited to the maximal register size, first.
    // 2. Try to vectorize the remaining instructions with the same type, if
    // possible. This may give better results than vectorizing only
    // instructions with the same/alternate opcodes.
    // 3. Make a final attempt to vectorize all instructions with the
    // same/alternate ops only; this may yield some extra final vectorization.
    if (NumElts > 1 &&
        TryToVectorize(makeArrayRef(IncIt, NumElts), LimitForRegisterSize)) {
      // Success; start over because instructions might have been changed.
      Changed = true;
    } else if (NumElts < Limit(*IncIt) &&
               (Candidates.empty() ||
                Candidates.front()->getType() == (*IncIt)->getType())) {
      Candidates.append(IncIt, std::next(IncIt, NumElts));
    }
    // Final attempt to vectorize instructions with the same types.
    if (Candidates.size() > 1 &&
        (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType())) {
      if (TryToVectorize(Candidates, /*LimitForRegisterSize=*/false)) {
        // Success; start over because instructions might have been changed.
        Changed = true;
      } else if (LimitForRegisterSize) {
        // Try to vectorize using small vectors.
        for (auto *It = Candidates.begin(), *End = Candidates.end();
             It != End;) {
          auto *SameTypeIt = It;
          while (SameTypeIt != End && AreCompatible(*SameTypeIt, *It))
            ++SameTypeIt;
          unsigned NumElts = (SameTypeIt - It);
          if (NumElts > 1 && TryToVectorize(makeArrayRef(It, NumElts),
                                            /*LimitForRegisterSize=*/false))
            Changed = true;
          It = SameTypeIt;
        }
      }
      Candidates.clear();
    }

    // Start over at the next instruction of a different type (or the end).
    IncIt = SameTypeIt;
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallPtrSet<Value *, 16> VisitedInstrs;
  // Maps phi nodes to the non-phi nodes found in the use tree for each phi
  // node. This helps to identify the chains that can be vectorized in a
  // better way.
  DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes;
  auto PHICompare = [this, &PHIToOpcodes](Value *V1, Value *V2) {
    assert(isValidElementType(V1->getType()) &&
           isValidElementType(V2->getType()) &&
           "Expected vectorizable types only.");
    // It is fine to compare type IDs here, since we expect only vectorizable
    // types, like ints, floats and pointers; we don't care about other types.
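    // Hypothetical example: two phis of different types are ordered here
    // purely by TypeID; only equal-typed phis fall through to the
    // operand-by-operand comparison below.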
    if (V1->getType()->getTypeID() < V2->getType()->getTypeID())
      return true;
    if (V1->getType()->getTypeID() > V2->getType()->getTypeID())
      return false;
    ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
    ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
    if (Opcodes1.size() < Opcodes2.size())
      return true;
    if (Opcodes1.size() > Opcodes2.size())
      return false;
    for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
      // Undefs are compatible with any other value.
      if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I]))
        continue;
      if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
        if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
          DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent());
          DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent());
          if (!NodeI1)
            return NodeI2 != nullptr;
          if (!NodeI2)
            return false;
          assert((NodeI1 == NodeI2) ==
                     (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
                 "Different nodes should have different DFS numbers");
          if (NodeI1 != NodeI2)
            return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
          InstructionsState S = getSameOpcode({I1, I2});
          if (S.getOpcode())
            continue;
          return I1->getOpcode() < I2->getOpcode();
        }
      if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I]))
        continue;
      if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID())
        return true;
      if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID())
        return false;
    }
    return false;
  };
  auto AreCompatiblePHIs = [&PHIToOpcodes](Value *V1, Value *V2) {
    if (V1 == V2)
      return true;
    if (V1->getType() != V2->getType())
      return false;
    ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
    ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
    if (Opcodes1.size() != Opcodes2.size())
      return false;
    for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
      // Undefs are compatible with any other value.
      if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I]))
        continue;
      if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
        if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
          if (I1->getParent() != I2->getParent())
            return false;
          InstructionsState S = getSameOpcode({I1, I2});
          if (S.getOpcode())
            continue;
          return false;
        }
      if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I]))
        continue;
      if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID())
        return false;
    }
    return true;
  };
  auto Limit = [&R](Value *V) {
    unsigned EltSize = R.getVectorElementSize(V);
    return std::max(2U, R.getMaxVecRegSize() / EltSize);
  };

  bool HaveVectorizedPhiNodes = false;
  do {
    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (Instruction &I : *BB) {
      PHINode *P = dyn_cast<PHINode>(&I);
      if (!P)
        break;

      // No need to analyze deleted, vectorized and non-vectorizable
      // instructions.
      if (!VisitedInstrs.count(P) && !R.isDeleted(P) &&
          isValidElementType(P->getType()))
        Incoming.push_back(P);
    }

    // Find the corresponding non-phi nodes for better matching when trying to
    // build the tree.
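    // E.g. (illustrative): for %p = phi [ %q, %bb ] where %q is itself a phi,
    // %q is traversed in turn and only its non-phi incoming values end up in
    // the Opcodes list for %p.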
    for (Value *V : Incoming) {
      SmallVectorImpl<Value *> &Opcodes =
          PHIToOpcodes.try_emplace(V).first->getSecond();
      if (!Opcodes.empty())
        continue;
      SmallVector<Value *, 4> Nodes(1, V);
      SmallPtrSet<Value *, 4> Visited;
      while (!Nodes.empty()) {
        auto *PHI = cast<PHINode>(Nodes.pop_back_val());
        if (!Visited.insert(PHI).second)
          continue;
        for (Value *In : PHI->incoming_values()) {
          if (auto *PHI1 = dyn_cast<PHINode>(In)) {
            Nodes.push_back(PHI1);
            continue;
          }
          Opcodes.emplace_back(In);
        }
      }
    }

    HaveVectorizedPhiNodes = tryToVectorizeSequence<Value>(
        Incoming, Limit, PHICompare, AreCompatiblePHIs,
        [this, &R](ArrayRef<Value *> Candidates, bool LimitForRegisterSize) {
          return tryToVectorizeList(Candidates, R, LimitForRegisterSize);
        },
        /*LimitForRegisterSize=*/true);
    Changed |= HaveVectorizedPhiNodes;
    VisitedInstrs.insert(Incoming.begin(), Incoming.end());
  } while (HaveVectorizedPhiNodes);

  VisitedInstrs.clear();

  SmallVector<Instruction *, 8> PostProcessInstructions;
  SmallDenseSet<Instruction *, 4> KeyNodes;
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // Skip instructions with a scalable type; the number of elements is
    // unknown at compile time for scalable types.
    if (isa<ScalableVectorType>(it->getType()))
      continue;

    // Skip instructions marked for deletion.
    if (R.isDeleted(&*it))
      continue;
    // We may go through BB multiple times, so skip the instructions we have
    // already checked.
    if (!VisitedInstrs.insert(&*it).second) {
      if (it->use_empty() && KeyNodes.contains(&*it) &&
          vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
                                      it->isTerminator())) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() == 2) {
        // Try to match and vectorize a horizontal reduction.
        if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB,
                                     R, TTI)) {
          Changed = true;
          it = BB->begin();
          e = BB->end();
          continue;
        }
      }
      // Try to vectorize the incoming values of the PHI, to catch reductions
      // that feed into PHIs.
      for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) {
        // Skip if the incoming block is the current BB for now. Also, bypass
        // unreachable IR for efficiency and to avoid crashing.
        // TODO: Collect the skipped incoming values and try to vectorize them
        // after processing BB.
        if (BB == P->getIncomingBlock(I) ||
            !DT->isReachableFromEntry(P->getIncomingBlock(I)))
          continue;

        Changed |= vectorizeRootInstruction(nullptr, P->getIncomingValue(I),
                                            P->getIncomingBlock(I), R, TTI);
      }
      continue;
    }

    // We ran into an instruction without users, such as a terminator, a
    // store, or a function call whose return value is ignored. Unused
    // instructions are skipped based on their type: only void-typed
    // instructions qualify below, except for CallInst and InvokeInst, which
    // are handled even when their result is unused.
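    // For example, a store or a call to a void function is a candidate key
    // node here: it produces no value, yet its operands may feed a horizontal
    // reduction worth vectorizing.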
    if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
                            isa<InvokeInst>(it))) {
      KeyNodes.insert(&*it);
      bool OpsChanged = false;
      if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
        for (auto *V : it->operand_values()) {
          // Try to match and vectorize a horizontal reduction.
          OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
        }
      }
      // Start vectorizing the post-process list of instructions from the
      // top-tree instructions, to try to vectorize as many instructions as
      // possible.
      OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
                                                it->isTerminator());
      if (OpsChanged) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
    }

    if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
        isa<InsertValueInst>(it))
      PostProcessInstructions.push_back(&*it);
  }

  return Changed;
}

bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
  auto Changed = false;
  for (auto &Entry : GEPs) {
    // If the getelementptr list has fewer than two elements, there's nothing
    // to do.
    if (Entry.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
                      << Entry.second.size() << ".\n");

    // Process the GEP list in chunks suitable for the target's supported
    // vector size. If a vector register can't hold 1 element, we are done. We
    // are trying to vectorize the index computations, so the maximum number of
    // elements is based on the size of the index expression, rather than the
    // size of the GEP itself (the target's pointer size).
    unsigned MaxVecRegSize = R.getMaxVecRegSize();
    unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin());
    if (MaxVecRegSize < EltSize)
      continue;

    unsigned MaxElts = MaxVecRegSize / EltSize;
    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
      auto Len = std::min<unsigned>(BE - BI, MaxElts);
      ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);

      // Initialize a set of candidate getelementptrs. Note that we use a
      // SetVector here to preserve program order. If the index computations
      // are vectorizable and begin with loads, we want to minimize the chance
      // of having to reorder them later.
      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

      // Some of the candidates may have already been vectorized after we
      // initially collected them. If so, they are marked as deleted, so remove
      // them from the set of candidates.
      Candidates.remove_if(
          [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); });

      // Remove from the set of candidates all pairs of getelementptrs with
      // constant differences. Such getelementptrs are likely not good
      // candidates for vectorization in a bottom-up phase since one can be
      // computed from the other. We also ensure all candidate getelementptr
      // indices are unique.
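      // Hypothetical example: for %g1 = gep %base, %i and %g2 = gep %base, %j
      // with %j = add %i, 4, SCEV folds the difference to a constant, so both
      // getelementptrs are removed from Candidates.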
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1;
           ++I) {
        auto *GEPI = GEPList[I];
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = GEPList[J];
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPI);
            Candidates.remove(GEPJ);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPJ);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;

      // Add the single, non-constant index of each candidate to the bundle. We
      // ensured the indices met these constraints when we originally collected
      // the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try to vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      //   ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Sort by type, base pointer, and value operand. Value operands must be
  // compatible (have the same opcode and the same parent); otherwise it is
  // definitely not profitable to try to vectorize them.
  auto &&StoreSorter = [this](StoreInst *V1, StoreInst *V2) {
    if (V1->getPointerOperandType()->getTypeID() <
        V2->getPointerOperandType()->getTypeID())
      return true;
    if (V1->getPointerOperandType()->getTypeID() >
        V2->getPointerOperandType()->getTypeID())
      return false;
    // UndefValues are compatible with all other values.
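    // Returning false for either undef operand means an undef never orders
    // before its peer; undefs compare equal to everything for sorting
    // purposes.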
    if (isa<UndefValue>(V1->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return false;
    if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent());
        DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent());
        assert(NodeI1 && "Should only process reachable instructions");
        assert(NodeI2 && "Should only process reachable instructions");
        assert((NodeI1 == NodeI2) ==
                   (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
               "Different nodes should have different DFS numbers");
        if (NodeI1 != NodeI2)
          return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
        InstructionsState S = getSameOpcode({I1, I2});
        if (S.getOpcode())
          return false;
        return I1->getOpcode() < I2->getOpcode();
      }
    if (isa<Constant>(V1->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return false;
    return V1->getValueOperand()->getValueID() <
           V2->getValueOperand()->getValueID();
  };

  auto &&AreCompatibleStores = [](StoreInst *V1, StoreInst *V2) {
    if (V1 == V2)
      return true;
    if (V1->getPointerOperandType() != V2->getPointerOperandType())
      return false;
    // Undefs are compatible with any other value.
    if (isa<UndefValue>(V1->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return true;
    if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        if (I1->getParent() != I2->getParent())
          return false;
        InstructionsState S = getSameOpcode({I1, I2});
        return S.getOpcode() > 0;
      }
    if (isa<Constant>(V1->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return true;
    return V1->getValueOperand()->getValueID() ==
           V2->getValueOperand()->getValueID();
  };
  auto Limit = [&R, this](StoreInst *SI) {
    unsigned EltSize = DL->getTypeSizeInBits(SI->getValueOperand()->getType());
    return R.getMinVF(EltSize);
  };

  // Attempt to sort and vectorize each of the store-groups.
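  // An illustrative (hypothetical) store group that a single Stores entry may
  // hold, all sharing one base pointer:
  //
  //   store i32 %a, i32* %p
  //   store i32 %b, i32* %p1    ; %p1 = getelementptr i32, i32* %p, i64 1
  //   store i32 %c, i32* %p2    ; %p2 = getelementptr i32, i32* %p, i64 2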
  for (auto &Pair : Stores) {
    if (Pair.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                      << Pair.second.size() << ".\n");

    if (!isValidElementType(Pair.second.front()->getValueOperand()->getType()))
      continue;

    Changed |= tryToVectorizeSequence<StoreInst>(
        Pair.second, Limit, StoreSorter, AreCompatibleStores,
        [this, &R](ArrayRef<StoreInst *> Candidates, bool) {
          return vectorizeStores(Candidates, R);
        },
        /*LimitForRegisterSize=*/false);
  }
  return Changed;
}

char SLPVectorizer::ID = 0;

static const char lv_name[] = "SLP Vectorizer";

INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); }