//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
// "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
                                  cl::desc("Run the SLP vectorization passes"));

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
    ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                       cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
    MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
                           cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned>
    MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden,
                cl::desc("Maximum SLP vectorization factor (0=unlimited)"));

static cl::opt<int>
    MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden,
                   cl::desc("Maximum depth of the lookup for consecutive stores."));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
    ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
                             cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

// The maximum depth that the look-ahead score heuristic will explore.
// The higher this value, the higher the compilation time overhead.
static cl::opt<int> LookAheadMaxDepth(
    "slp-max-look-ahead-depth", cl::init(2), cl::Hidden,
    cl::desc("The maximum look-ahead depth for operand reordering scores"));

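// Illustrative note (not part of the pass logic): since the knobs above are
// plain cl::opt flags, they can be experimented with from the command line,
// e.g. `opt -passes=slp-vectorizer -slp-threshold=-10 ...` or
// `clang -mllvm -slp-threshold=-10 ...`; the exact pass-pipeline spelling
// depends on the pass manager in use.
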
// The Look-ahead heuristic goes through the users of the bundle to calculate
// the users' cost in getExternalUsesCost(). To avoid compilation time increase
// we limit the number of users visited to this value.
static cl::opt<unsigned> LookAheadUsersBudget(
    "slp-look-ahead-users-budget", cl::init(2), cl::Hidden,
    cl::desc("The maximum number of users to visit while visiting the "
             "predecessors. This prevents compilation time increase."));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_fp128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns True if the value is a constant (but not globals/constant
/// expressions).
static bool isConstant(Value *V) {
  return isa<Constant>(V) && !isa<ConstantExpr>(V) && !isa<GlobalValue>(V);
}

/// Checks if \p V is one of vector-like instructions, i.e. undef,
/// insertelement/extractelement with constant indices for fixed vector type or
/// extractvalue instruction.
static bool isVectorLikeInstWithConstOps(Value *V) {
  if (!isa<InsertElementInst, ExtractElementInst>(V) &&
      !isa<ExtractValueInst, UndefValue>(V))
    return false;
  auto *I = dyn_cast<Instruction>(V);
  if (!I || isa<ExtractValueInst>(I))
    return true;
  if (!isa<FixedVectorType>(I->getOperand(0)->getType()))
    return false;
  if (isa<ExtractElementInst>(I))
    return isConstant(I->getOperand(1));
  assert(isa<InsertElementInst>(V) && "Expected only insertelement.");
  return isConstant(I->getOperand(2));
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  if (all_of(VL, isVectorLikeInstWithConstOps))
    return true;

  BasicBlock *BB = I0->getParent();
  for (int I = 1, E = VL.size(); I < E; I++) {
    auto *II = dyn_cast<Instruction>(VL[I]);
    if (!II)
      return false;

    if (BB != II->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants (but not
/// globals/constant expressions).
static bool allConstant(ArrayRef<Value *> VL) {
  // Constant expressions and globals can't be vectorized like normal
  // integer/FP constants.
  return all_of(VL, isConstant);
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns True if \p I is commutative, handles CmpInst and BinaryOperator.
static bool isCommutative(Instruction *I) {
  if (auto *Cmp = dyn_cast<CmpInst>(I))
    return Cmp->isCommutative();
  if (auto *BO = dyn_cast<BinaryOperator>(I))
    return BO->isCommutative();
  // TODO: This should check for generic Instruction::isCommutative(), but
  // we need to confirm that the caller code correctly handles Intrinsics
  // for example (does not have 2 operands).
  return false;
}

/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> poison, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> poison, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul
/// Mask will return the Shuffle Mask equivalent to the extracted elements.
/// TODO: Can we split off and reuse the shuffle mask detection from
/// TargetTransformInfo::getInstructionThroughput?
static Optional<TargetTransformInfo::ShuffleKind>
isShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
  auto *EI0 = cast<ExtractElementInst>(VL[0]);
  unsigned Size =
      cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, Select, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    auto *EI = cast<ExtractElementInst>(VL[I]);
    auto *Vec = EI->getVectorOperand();
    // All vector operands must have the same number of vector elements.
    if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
      return None;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size)) {
      Mask.push_back(UndefMaskElem);
      continue;
    }
    unsigned IntIdx = Idx->getValue().getZExtValue();
    Mask.push_back(IntIdx);
    // We can extractelement from undef or poison vector.
    if (isa<UndefValue>(Vec))
      continue;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
    if (!Vec1 || Vec1 == Vec)
      Vec1 = Vec;
    else if (!Vec2 || Vec2 == Vec)
      Vec2 = Vec;
    else
      return None;
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector, otherwise
  // we have a permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
  bool isAltShuffle() const { return getOpcode() != getAltOpcode(); }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace

/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}

/// \returns true if \p Opcode is allowed as part of the main/alternate
/// instruction for SLP vectorization.
///
/// An example of an unsupported opcode is SDIV, which can potentially cause UB
/// if the "shuffled out" lane would result in division by zero.
static bool isValidForAlternation(unsigned Opcode) {
  if (Instruction::isIntDivRem(Opcode))
    return false;

  return true;
}

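// Illustrative example (hypothetical IR, not taken from this file): for a
// bundle VL = {fadd, fsub, fadd, fsub} the analysis below reports the first
// fadd as MainOp and the first fsub as AltOp. Such a bundle can still be
// vectorized as one wide fadd, one wide fsub and a shufflevector blending the
// even and odd lanes, which is what the "alternate shuffle" machinery models.
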
/// \returns analysis of the Instructions in \p VL described in
/// InstructionsState, i.e. the opcode with which we suppose the whole list
/// could be vectorized even if its structure is diverse.
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex = 0) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
  // TODO - generalize to support all operators (types, calls etc.).
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
    if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
      if (InstOpcode == Opcode || InstOpcode == AltOpcode)
        continue;
      if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
          isValidForAlternation(Opcode)) {
        AltOpcode = InstOpcode;
        AltIndex = Cnt;
        continue;
      }
    } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
      Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
      Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        if (InstOpcode == Opcode || InstOpcode == AltOpcode)
          continue;
        if (Opcode == AltOpcode) {
          assert(isValidForAlternation(Opcode) &&
                 isValidForAlternation(InstOpcode) &&
                 "Cast isn't safe for alternation, logic needs to be updated!");
          AltOpcode = InstOpcode;
          AltIndex = Cnt;
          continue;
        }
      }
    } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
      continue;
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }

  return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
                           cast<Instruction>(VL[AltIndex]));
}

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static Optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return None;
    return CI->getZExtValue();
  }
  ExtractValueInst *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return None;
  return *EI->idx_begin();
}

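// Illustrative example (hypothetical values): getExtractIndex returns 2 for
// `extractelement <4 x i32> %v, i32 2` and 1 for
// `extractvalue {i32, i32} %agg, 1`, but None for a non-constant
// extractelement index or a multi-index extractvalue, since those cannot be
// mapped to a single lane.
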
/// \returns True if an in-tree use also needs an extract. This refers to a
/// possible scalar operand in a vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
      if (hasVectorInstrinsicScalarOpd(ID, i))
        return (CI->getArgOperand(i) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AAResults *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

/// Shuffles \p Mask in accordance with the given \p SubMask.
static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask) {
  if (SubMask.empty())
    return;
  if (Mask.empty()) {
    Mask.append(SubMask.begin(), SubMask.end());
    return;
  }
  SmallVector<int> NewMask(SubMask.size(), UndefMaskElem);
  int TermValue = std::min(Mask.size(), SubMask.size());
  for (int I = 0, E = SubMask.size(); I < E; ++I) {
    if (SubMask[I] >= TermValue || SubMask[I] == UndefMaskElem ||
        Mask[SubMask[I]] >= TermValue)
      continue;
    NewMask[I] = Mask[SubMask[I]];
  }
  Mask.swap(NewMask);
}

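// Illustrative example (hypothetical masks): composing Mask = {1, 0, 2} with
// SubMask = {2, 1, 0} in addMask above yields {2, 0, 1}, i.e. the new element
// at position I is Mask[SubMask[I]]; undef or out-of-range submask elements
// are left as UndefMaskElem.
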
/// \p Order may have elements assigned the special value (size) which is out
/// of bounds. Such indices only appear in places that correspond to undef
/// values (see canReuseExtract for details) and are used to keep undef values
/// from affecting the ordering of the operands.
/// The first loop below simply finds all unused indices and the next loop nest
/// then assigns these indices to the undef-value positions.
/// In the example below, Order has two undef positions which are assigned the
/// values 3 and 7 respectively:
/// before: 6 9 5 4 9 2 1 0
/// after:  6 3 5 4 7 2 1 0
/// The ordering is fixed up in place.
static void fixupOrderingIndices(SmallVectorImpl<unsigned> &Order) {
  const unsigned Sz = Order.size();
  SmallBitVector UsedIndices(Sz);
  SmallVector<int> MaskedIndices;
  for (unsigned I = 0; I < Sz; ++I) {
    if (Order[I] < Sz)
      UsedIndices.set(Order[I]);
    else
      MaskedIndices.push_back(I);
  }
  if (MaskedIndices.empty())
    return;
  SmallVector<int> AvailableIndices(MaskedIndices.size());
  unsigned Cnt = 0;
  int Idx = UsedIndices.find_first_unset();
  do {
    AvailableIndices[Cnt] = Idx;
    Idx = UsedIndices.find_next_unset(Idx);
    ++Cnt;
  } while (Idx > 0);
  assert(Cnt == MaskedIndices.size() && "Non-synced masked/available indices.");
  for (int I = 0, E = MaskedIndices.size(); I < E; ++I)
    Order[MaskedIndices[I]] = AvailableIndices[I];
}

namespace llvm {

static void inversePermutation(ArrayRef<unsigned> Indices,
                               SmallVectorImpl<int> &Mask) {
  Mask.clear();
  const unsigned E = Indices.size();
  Mask.resize(E, UndefMaskElem);
  for (unsigned I = 0; I < E; ++I)
    Mask[Indices[I]] = I;
}

/// \returns the insertion index of an InsertElement or InsertValue
/// instruction, using \p Offset as the base offset for the index.
static Optional<int> getInsertIndex(Value *InsertInst, unsigned Offset) {
  int Index = Offset;
  if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
    if (auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2))) {
      auto *VT = cast<FixedVectorType>(IE->getType());
      if (CI->getValue().uge(VT->getNumElements()))
        return UndefMaskElem;
      Index *= VT->getNumElements();
      Index += CI->getZExtValue();
      return Index;
    }
    if (isa<UndefValue>(IE->getOperand(2)))
      return UndefMaskElem;
    return None;
  }

  auto *IV = cast<InsertValueInst>(InsertInst);
  Type *CurrentType = IV->getType();
  for (unsigned I : IV->indices()) {
    if (auto *ST = dyn_cast<StructType>(CurrentType)) {
      Index *= ST->getNumElements();
      CurrentType = ST->getElementType(I);
    } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
      Index *= AT->getNumElements();
      CurrentType = AT->getElementType();
    } else {
      return None;
    }
    Index += I;
  }
  return Index;
}

/// Reorders the list of scalars in accordance with the given \p Mask, which is
/// typically derived from the original order of the scalars (see
/// inversePermutation above): after the shuffle, element \p I of the original
/// list ends up at position Mask[I].
static void reorderScalars(SmallVectorImpl<Value *> &Scalars,
                           ArrayRef<int> Mask) {
  assert(!Mask.empty() && "Expected non-empty mask.");
  SmallVector<Value *> Prev(Scalars.size(),
                            UndefValue::get(Scalars.front()->getType()));
  Prev.swap(Scalars);
  for (unsigned I = 0, E = Prev.size(); I < E; ++I)
    if (Mask[I] != UndefMaskElem)
      Scalars[Mask[I]] = Prev[I];
}

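// Illustrative example (hypothetical values): for Indices = {2, 0, 1},
// inversePermutation produces Mask = {1, 2, 0} (it sets Mask[Indices[I]] = I),
// and reorderScalars applied with that Mask turns {a, b, c} into {c, a, b},
// since each element I moves to position Mask[I].
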
namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  using OrdersType = SmallVector<unsigned, 4>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC),
        DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize =
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize();

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractvalue instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  InstructionCost getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Builds external uses of the vectorized scalars, i.e. the list of
  /// vectorized scalars to be extracted, their lanes and their scalar users.
  /// \p ExternallyUsedValues contains an additional list of external uses to
  /// handle vectorization of reductions.
  void
  buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {});

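  // A rough sketch of how this interface is typically driven by the
  // vectorization entry points later in this file (for orientation only;
  // `R` and `Roots` are placeholder names, and reordering and other analyses
  // are elided):
  //   R.buildTree(Roots);
  //   R.buildExternalUses();
  //   R.computeMinimumValueSizes();
  //   InstructionCost Cost = R.getTreeCost();
  //   if (Cost < -SLPCostThreshold)
  //     R.vectorizeTree();
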
  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
    InstrElementSize.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// Reorders the current graph to the most profitable order starting from the
  /// root node to the leaf nodes. The best order is chosen only from the nodes
  /// of the same size (vectorization factor). Smaller nodes are considered
  /// parts of a subgraph with a smaller VF and they are reordered
  /// independently. We can do this because we still need to extend smaller
  /// nodes to the wider VF and we can merge the reordering shuffles with the
  /// widening shuffles.
  void reorderTopToBottom();

  /// Reorders the current graph to the most profitable order starting from
  /// leaves to the root. This allows rotating small subgraphs and reduces the
  /// number of reshuffles if the leaf nodes use the same order. In that case
  /// we can merge the orders and just shuffle the user node instead of
  /// shuffling its operands. Moreover, even if the leaf nodes have different
  /// orders, it allows the reordering to be sunk in the graph closer to the
  /// root node, where it can be merged later during analysis.
  void reorderBottomToTop();

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  unsigned getMinVF(unsigned Sz) const {
    return std::max(2U, getMinVecRegSize() / Sz);
  }

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
    unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
      MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
    return MaxVF ? MaxVF : UINT_MAX;
  }

  /// Check if homogeneous aggregate is isomorphic to some VectorType.
  /// Accepts homogeneous multidimensional aggregate of scalars/vectors like
  /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
  /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable() const;

  /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
  /// can be load combined in the backend.
  /// Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  /// may not be necessary.
  bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;

  /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  /// may not be necessary.
  bool isLoadCombineCandidate() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }

  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
  };

  /// A helper data structure to hold the operands of a vector of instructions.
  /// This supports a fixed vector length for all operand vectors.
  class VLOperands {
    /// For each operand we need (i) the value, and (ii) the opcode that it
    /// would be attached to if the expression was in a left-linearized form.
    /// This is required to avoid illegal operand reordering.
    /// For example:
    /// \verbatim
    ///                         0 Op1
    ///                         |/
    /// Op1 Op2   Linearized    + Op2
    ///   \ /     ---------->   |/
    ///    -                    -
    ///
    /// Op1 - Op2            (0 + Op1) - Op2
    /// \endverbatim
    ///
    /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
    ///
    /// Another way to think of this is to track all the operations across the
    /// path from the operand all the way to the root of the tree and to
    /// calculate the operation that corresponds to this path. For example, the
    /// path from Op2 to the root crosses the RHS of the '-', therefore the
    /// corresponding operation is a '-' (which matches the one in the
    /// linearized tree, as shown above).
    ///
    /// For lack of a better term, we refer to this operation as Accumulated
    /// Path Operation (APO).
    struct OperandData {
      OperandData() = default;
      OperandData(Value *V, bool APO, bool IsUsed)
          : V(V), APO(APO), IsUsed(IsUsed) {}
      /// The operand value.
      Value *V = nullptr;
      /// TreeEntries only allow a single opcode, or an alternate sequence of
      /// them (e.g., +, -). Therefore, we can safely use a boolean value for
      /// the APO.
      /// It is set to 'true' if 'V' is attached to an inverse operation
      /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise
      /// (e.g., Add/Mul).
      bool APO = false;
      /// Helper data for the reordering function.
      bool IsUsed = false;
    };

    /// During operand reordering, we are trying to select, for each lane, the
    /// operand that matches best with the operand at the neighboring lane. Our
    /// selection is based on the type of value we are looking for. For
    /// example, if the neighboring lane has a load, we need to look for a load
    /// that is accessing a consecutive address. These strategies are
    /// summarized in the 'ReorderingMode' enumerator.
    enum class ReorderingMode {
      Load,     ///< Matching loads to consecutive memory addresses
      Opcode,   ///< Matching instructions based on opcode (same or alternate)
      Constant, ///< Matching constants
      Splat,    ///< Matching the same instruction multiple times (broadcast)
      Failed,   ///< We failed to create a vectorizable group
    };

    using OperandDataVec = SmallVector<OperandData, 2>;

    /// A vector of operand vectors.
    SmallVector<OperandDataVec, 4> OpsVec;

    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;

    /// \returns the operand data at \p OpIdx and \p Lane.
    OperandData &getData(unsigned OpIdx, unsigned Lane) {
      return OpsVec[OpIdx][Lane];
    }

    /// \returns the operand data at \p OpIdx and \p Lane. Const version.
    const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
      return OpsVec[OpIdx][Lane];
    }

    /// Clears the used flag for all entries.
    void clearUsed() {
      for (unsigned OpIdx = 0, NumOperands = getNumOperands();
           OpIdx != NumOperands; ++OpIdx)
        for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
             ++Lane)
          OpsVec[OpIdx][Lane].IsUsed = false;
    }

    /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
    void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
      std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
    }

    // The hard-coded scores listed here are not very important. When computing
    // the scores of matching one sub-tree with another, we are basically
    // counting the number of values that are matching. So even if all scores
    // are set to 1, we would still get a decent matching result.
    // However, sometimes we have to break ties. For example, we may have to
    // choose between matching loads vs matching opcodes. This is what these
    // scores are helping us with: they provide the order of preference.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 3;
    /// ExtractElementInst from same vector and consecutive indexes.
    static const int ScoreConsecutiveExtracts = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g., add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// User external to the vectorized code.
    static const int ExternalUseCost = 1;
    /// The user is internal but in a different lane.
    static const int UserInDiffLaneCost = ExternalUseCost;

    /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
    static int getShallowScore(Value *V1, Value *V2, const DataLayout &DL,
                               ScalarEvolution &SE) {
      auto *LI1 = dyn_cast<LoadInst>(V1);
      auto *LI2 = dyn_cast<LoadInst>(V2);
      if (LI1 && LI2) {
        if (LI1->getParent() != LI2->getParent())
          return VLOperands::ScoreFail;

        Optional<int> Dist = getPointersDiff(
            LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
            LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
        return (Dist && *Dist == 1) ? VLOperands::ScoreConsecutiveLoads
                                    : VLOperands::ScoreFail;
      }

      auto *C1 = dyn_cast<Constant>(V1);
      auto *C2 = dyn_cast<Constant>(V2);
      if (C1 && C2)
        return VLOperands::ScoreConstants;

      // Extracts from consecutive indexes of the same vector get a better
      // score because the extracts could be optimized away.
      Value *EV;
      ConstantInt *Ex1Idx, *Ex2Idx;
      if (match(V1, m_ExtractElt(m_Value(EV), m_ConstantInt(Ex1Idx))) &&
          match(V2, m_ExtractElt(m_Deferred(EV), m_ConstantInt(Ex2Idx))) &&
          Ex1Idx->getZExtValue() + 1 == Ex2Idx->getZExtValue())
        return VLOperands::ScoreConsecutiveExtracts;

      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (I1 && I2) {
        if (I1 == I2)
          return VLOperands::ScoreSplat;
        InstructionsState S = getSameOpcode({I1, I2});
        // Note: Only consider instructions with <= 2 operands to avoid
        // complexity explosion.
        if (S.getOpcode() && S.MainOp->getNumOperands() <= 2)
          return S.isAltShuffle() ? VLOperands::ScoreAltOpcodes
                                  : VLOperands::ScoreSameOpcode;
      }

      if (isa<UndefValue>(V2))
        return VLOperands::ScoreUndef;

      return VLOperands::ScoreFail;
    }

    /// Holds the values and their lanes that are taking part in the look-ahead
    /// score calculation. This is used in the external uses cost calculation.
    SmallDenseMap<Value *, int> InLookAheadValues;

    /// \Returns the additional cost due to uses of \p LHS and \p RHS that are
    /// either external to the vectorized code, or require shuffling.
    int getExternalUsesCost(const std::pair<Value *, int> &LHS,
                            const std::pair<Value *, int> &RHS) {
      int Cost = 0;
      std::array<std::pair<Value *, int>, 2> Values = {{LHS, RHS}};
      for (int Idx = 0, IdxE = Values.size(); Idx != IdxE; ++Idx) {
        Value *V = Values[Idx].first;
        if (isa<Constant>(V)) {
          // Since this is a function pass, it doesn't make semantic sense to
          // walk the users of a subclass of Constant. The users could be in
          // another function, or even another module that happens to be in
          // the same LLVMContext.
          continue;
        }

        // Calculate the absolute lane, using the minimum relative lane of LHS
        // and RHS as base and Idx as the offset.
        int Ln = std::min(LHS.second, RHS.second) + Idx;
        assert(Ln >= 0 && "Bad lane calculation");
        unsigned UsersBudget = LookAheadUsersBudget;
        for (User *U : V->users()) {
          if (const TreeEntry *UserTE = R.getTreeEntry(U)) {
            // The user is in the VectorizableTree. Check if we need to insert.
            auto It = llvm::find(UserTE->Scalars, U);
            assert(It != UserTE->Scalars.end() && "U is in UserTE");
            int UserLn = std::distance(UserTE->Scalars.begin(), It);
            assert(UserLn >= 0 && "Bad lane");
            if (UserLn != Ln)
              Cost += UserInDiffLaneCost;
          } else {
            // Check if the user is in the look-ahead code.
            auto It2 = InLookAheadValues.find(U);
            if (It2 != InLookAheadValues.end()) {
              // The user is in the look-ahead code. Check the lane.
              if (It2->second != Ln)
                Cost += UserInDiffLaneCost;
            } else {
              // The user is neither in SLP tree nor in the look-ahead code.
              Cost += ExternalUseCost;
            }
          }
          // Limit the number of visited uses to cap compilation time.
          if (--UsersBudget == 0)
            break;
        }
      }
      return Cost;
    }

    /// Go through the operands of \p LHS and \p RHS recursively until \p
    /// MaxLevel, and return the cumulative score. For example:
    /// \verbatim
    ///  A[0]  B[0]  A[1]  B[1]  C[0] D[0]  B[1] A[1]
    ///     \ /      \ /        \ /        \ /
    ///      +        +          +          +
    ///      G1       G2         G3         G4
    /// \endverbatim
    /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
    /// each level recursively, accumulating the score. It starts from matching
    /// the additions at level 0, then moves on to the loads (level 1). The
    /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
    /// {B[0],B[1]} match with VLOperands::ScoreConsecutiveLoads, while
    /// {A[0],C[0]} has a score of VLOperands::ScoreFail.
    /// Please note that the order of the operands does not matter, as we
    /// evaluate the score of all profitable combinations of operands. In
    /// other words the score of G1 and G4 is the same as G1 and G2. This
    /// heuristic is based on ideas described in:
    ///   Look-ahead SLP: Auto-vectorization in the presence of commutative
    ///   operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
    ///   Luís F. W. Góes
    int getScoreAtLevelRec(const std::pair<Value *, int> &LHS,
                           const std::pair<Value *, int> &RHS, int CurrLevel,
                           int MaxLevel) {

      Value *V1 = LHS.first;
      Value *V2 = RHS.first;
      // Get the shallow score of V1 and V2.
      int ShallowScoreAtThisLevel =
          std::max((int)ScoreFail, getShallowScore(V1, V2, DL, SE) -
                                       getExternalUsesCost(LHS, RHS));
      int Lane1 = LHS.second;
      int Lane2 = RHS.second;

      // If reached MaxLevel,
      // or if V1 and V2 are not instructions,
      // or if they are SPLAT,
      // or if they are not consecutive, return the current cost early.
      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
          ShallowScoreAtThisLevel == VLOperands::ScoreFail ||
          (isa<LoadInst>(I1) && isa<LoadInst>(I2) && ShallowScoreAtThisLevel))
        return ShallowScoreAtThisLevel;
      assert(I1 && I2 && "Should have early exited.");

      // Keep track of in-tree values for determining the external-use cost.
      InLookAheadValues[V1] = Lane1;
      InLookAheadValues[V2] = Lane2;

      // Contains the I2 operand indexes that got matched with I1 operands.
      SmallSet<unsigned, 4> Op2Used;

      // Recursion towards the operands of I1 and I2. We are trying all
      // possible operand pairs, and keeping track of the best score.
      for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
           OpIdx1 != NumOperands1; ++OpIdx1) {
        // Try to pair the operand at OpIdx1 with the best operand of I2.
        int MaxTmpScore = 0;
        unsigned MaxOpIdx2 = 0;
        bool FoundBest = false;
        // If I2 is commutative try all combinations.
        unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
        unsigned ToIdx = isCommutative(I2)
                             ? I2->getNumOperands()
                             : std::min(I2->getNumOperands(), OpIdx1 + 1);
        assert(FromIdx <= ToIdx && "Bad index");
        for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
          // Skip operands already paired with OpIdx1.
          if (Op2Used.count(OpIdx2))
            continue;
          // Recursively calculate the cost at each level.
          int TmpScore = getScoreAtLevelRec({I1->getOperand(OpIdx1), Lane1},
                                            {I2->getOperand(OpIdx2), Lane2},
                                            CurrLevel + 1, MaxLevel);
          // Look for the best score.
          if (TmpScore > VLOperands::ScoreFail && TmpScore > MaxTmpScore) {
            MaxTmpScore = TmpScore;
            MaxOpIdx2 = OpIdx2;
            FoundBest = true;
          }
        }
        if (FoundBest) {
          // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
          Op2Used.insert(MaxOpIdx2);
          ShallowScoreAtThisLevel += MaxTmpScore;
        }
      }
      return ShallowScoreAtThisLevel;
    }

    /// \Returns the look-ahead score, which tells us how much the sub-trees
    /// rooted at \p LHS and \p RHS match: the more they match, the higher the
    /// score. This helps break ties in an informed way when we cannot decide
    /// on the order of the operands by just considering the immediate
    /// predecessors.
    int getLookAheadScore(const std::pair<Value *, int> &LHS,
                          const std::pair<Value *, int> &RHS) {
      InLookAheadValues.clear();
      return getScoreAtLevelRec(LHS, RHS, 1, LookAheadMaxDepth);
    }

    // Search all operands in Ops[*][Lane] for the one that matches best with
    // Ops[OpIdx][LastLane] and return its operand index.
    // If no good match can be found, return None.
    Optional<unsigned>
    getBestOperand(unsigned OpIdx, int Lane, int LastLane,
                   ArrayRef<ReorderingMode> ReorderingModes) {
      unsigned NumOperands = getNumOperands();

      // The operand of the previous lane at OpIdx.
      Value *OpLastLane = getData(OpIdx, LastLane).V;

      // Our strategy mode for OpIdx.
      ReorderingMode RMode = ReorderingModes[OpIdx];

      // The linearized opcode of the operand at OpIdx, Lane.
      bool OpIdxAPO = getData(OpIdx, Lane).APO;

      // The best operand index and its score.
      // Sometimes we have more than one option (e.g., Opcode and Undefs), so
      // we are using the score to differentiate between the two.
      struct BestOpData {
        Optional<unsigned> Idx = None;
        unsigned Score = 0;
      } BestOp;

      // Iterate through all unused operands and look for the best.
      for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
        // Get the operand at Idx and Lane.
        OperandData &OpData = getData(Idx, Lane);
        Value *Op = OpData.V;
        bool OpAPO = OpData.APO;

        // Skip already selected operands.
        if (OpData.IsUsed)
          continue;

        // Skip if we are trying to move the operand to a position with a
        // different opcode in the linearized tree form. This would break the
        // semantics.
        if (OpAPO != OpIdxAPO)
          continue;

        // Look for an operand that matches the current mode.
        switch (RMode) {
        case ReorderingMode::Load:
        case ReorderingMode::Constant:
        case ReorderingMode::Opcode: {
          bool LeftToRight = Lane > LastLane;
          Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
          Value *OpRight = (LeftToRight) ? Op : OpLastLane;
          unsigned Score =
              getLookAheadScore({OpLeft, LastLane}, {OpRight, Lane});
          if (Score > BestOp.Score) {
            BestOp.Idx = Idx;
            BestOp.Score = Score;
          }
          break;
        }
        case ReorderingMode::Splat:
          if (Op == OpLastLane)
            BestOp.Idx = Idx;
          break;
        case ReorderingMode::Failed:
          return None;
        }
      }

      if (BestOp.Idx) {
        getData(BestOp.Idx.getValue(), Lane).IsUsed = true;
        return BestOp.Idx;
      }
      // If we could not find a good match return None.
      return None;
    }

    /// Helper for reorderOperandVecs. \Returns the lane that we should start
    /// reordering from. This is the one which has the least number of operands
    /// that can freely move about.
    unsigned getBestLaneToStartReordering() const {
      unsigned BestLane = 0;
      unsigned Min = UINT_MAX;
      for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
           ++Lane) {
        unsigned NumFreeOps = getMaxNumOperandsThatCanBeReordered(Lane);
        if (NumFreeOps < Min) {
          Min = NumFreeOps;
          BestLane = Lane;
        }
      }
      return BestLane;
    }

    /// \Returns the maximum number of operands that are allowed to be
    /// reordered for \p Lane. This is used as a heuristic for selecting the
    /// first lane to start operand reordering.
    unsigned getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
      unsigned CntTrue = 0;
      unsigned NumOperands = getNumOperands();
      // Operands with the same APO can be reordered. We therefore need to
      // count how many of them we have for each APO, like this: Cnt[APO] = x.
      // Since we only have two APOs, namely true and false, we can avoid using
      // a map. Instead we can simply count the number of operands that
      // correspond to one of them (in this case the 'true' APO), and calculate
      // the other by subtracting it from the total number of operands.
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx)
        if (getData(OpIdx, Lane).APO)
          ++CntTrue;
      unsigned CntFalse = NumOperands - CntTrue;
      return std::max(CntTrue, CntFalse);
    }

    /// Go through the instructions in VL and append their operands.
    void appendOperandsOfVL(ArrayRef<Value *> VL) {
      assert(!VL.empty() && "Bad VL");
      assert((empty() || VL.size() == getNumLanes()) &&
             "Expected same number of lanes");
      assert(isa<Instruction>(VL[0]) && "Expected instruction");
      unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
      OpsVec.resize(NumOperands);
      unsigned NumLanes = VL.size();
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
        OpsVec[OpIdx].resize(NumLanes);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
          // Our tree has just 3 nodes: the root and two operands.
          // It is therefore trivial to get the APO. We only need to check the
          // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
          // RHS operand. The LHS operand of both add and sub is never attached
          // to an inverse operation in the linearized form, therefore its APO
          // is false. The APO of the RHS operand is true only if VL[Lane] is
          // an inverse operation.
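          // For example (illustrative): in a bundle of two 'sub' instructions,
          // the LHS operands (OpIdx == 0) get APO == false, while the RHS
          // operands get APO == true, because 'sub' is not commutative.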

          // Since operand reordering is performed on groups of commutative
          // operations or alternating sequences (e.g., +, -), we can safely
          // tell the inverse operations by checking commutativity.
          bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
          bool APO = (OpIdx == 0) ? false : IsInverseOperation;
          OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
                                 APO, false};
        }
      }
    }

    /// \returns the number of operands.
    unsigned getNumOperands() const { return OpsVec.size(); }

    /// \returns the number of lanes.
    unsigned getNumLanes() const { return OpsVec[0].size(); }

    /// \returns the operand value at \p OpIdx and \p Lane.
    Value *getValue(unsigned OpIdx, unsigned Lane) const {
      return getData(OpIdx, Lane).V;
    }

    /// \returns true if the data structure is empty.
    bool empty() const { return OpsVec.empty(); }

    /// Clears the data.
    void clear() { OpsVec.clear(); }

    /// \Returns true if there are enough operands identical to \p Op to fill
    /// the whole vector.
    /// Note: This modifies the 'IsUsed' flag, so a clearUsed() must follow.
    bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
      bool OpAPO = getData(OpIdx, Lane).APO;
      for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
        if (Ln == Lane)
          continue;
        // This is set to true if we found a candidate for broadcast at Lane.
        bool FoundCandidate = false;
        for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
          OperandData &Data = getData(OpI, Ln);
          if (Data.APO != OpAPO || Data.IsUsed)
            continue;
          if (Data.V == Op) {
            FoundCandidate = true;
            Data.IsUsed = true;
            break;
          }
        }
        if (!FoundCandidate)
          return false;
      }
      return true;
    }

  public:
    /// Initialize with all the operands of the instruction vector \p RootVL.
    VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
               ScalarEvolution &SE, const BoUpSLP &R)
        : DL(DL), SE(SE), R(R) {
      // Append all the operands of RootVL.
      appendOperandsOfVL(RootVL);
    }

    /// \Returns a value vector with the operands across all lanes for the
    /// operand at \p OpIdx.
    ValueList getVL(unsigned OpIdx) const {
      ValueList OpVL(OpsVec[OpIdx].size());
      assert(OpsVec[OpIdx].size() == getNumLanes() &&
             "Expected same num of lanes across all operands");
      for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
        OpVL[Lane] = OpsVec[OpIdx][Lane].V;
      return OpVL;
    }

    // Performs operand reordering for 2 or more operands.
    // The original operands are in OrigOps[OpIdx][Lane].
    // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'.
    void reorder() {
      unsigned NumOperands = getNumOperands();
      unsigned NumLanes = getNumLanes();
      // Each operand has its own mode. We are using this mode to help us
      // select the instructions for each lane, so that they match best with
      // the ones we have selected so far.
      SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);

      // This is a greedy single-pass algorithm. We are going over each lane
      // once and deciding on the best order right away with no back-tracking.
      // However, in order to increase its effectiveness, we start with the
      // lane that has operands that can move the least.
For example, given the 1419 // following lanes: 1420 // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd 1421 // Lane 1 : A[1] = C[1] - B[1] // Visited 1st 1422 // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd 1423 // Lane 3 : A[3] = C[3] - B[3] // Visited 4th 1424 // we will start at Lane 1, since the operands of the subtraction cannot 1425 // be reordered. Then we will visit the rest of the lanes in a circular 1426 // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3. 1427 1428 // Find the first lane that we will start our search from. 1429 unsigned FirstLane = getBestLaneToStartReordering(); 1430 1431 // Initialize the modes. 1432 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1433 Value *OpLane0 = getValue(OpIdx, FirstLane); 1434 // Keep track if we have instructions with all the same opcode on one 1435 // side. 1436 if (isa<LoadInst>(OpLane0)) 1437 ReorderingModes[OpIdx] = ReorderingMode::Load; 1438 else if (isa<Instruction>(OpLane0)) { 1439 // Check if OpLane0 should be broadcast. 1440 if (shouldBroadcast(OpLane0, OpIdx, FirstLane)) 1441 ReorderingModes[OpIdx] = ReorderingMode::Splat; 1442 else 1443 ReorderingModes[OpIdx] = ReorderingMode::Opcode; 1444 } 1445 else if (isa<Constant>(OpLane0)) 1446 ReorderingModes[OpIdx] = ReorderingMode::Constant; 1447 else if (isa<Argument>(OpLane0)) 1448 // Our best hope is a Splat. It may save some cost in some cases. 1449 ReorderingModes[OpIdx] = ReorderingMode::Splat; 1450 else 1451 // NOTE: This should be unreachable. 1452 ReorderingModes[OpIdx] = ReorderingMode::Failed; 1453 } 1454 1455 // If the initial strategy fails for any of the operand indexes, then we 1456 // perform reordering again in a second pass. This helps avoid assigning 1457 // high priority to the failed strategy, and should improve reordering for 1458 // the non-failed operand indexes. 1459 for (int Pass = 0; Pass != 2; ++Pass) { 1460 // Skip the second pass if the first pass did not fail. 1461 bool StrategyFailed = false; 1462 // Mark all operand data as free to use. 1463 clearUsed(); 1464 // We keep the original operand order for the FirstLane, so reorder the 1465 // rest of the lanes. We are visiting the nodes in a circular fashion, 1466 // using FirstLane as the center point and increasing the radius 1467 // distance. 1468 for (unsigned Distance = 1; Distance != NumLanes; ++Distance) { 1469 // Visit the lane on the right and then the lane on the left. 1470 for (int Direction : {+1, -1}) { 1471 int Lane = FirstLane + Direction * Distance; 1472 if (Lane < 0 || Lane >= (int)NumLanes) 1473 continue; 1474 int LastLane = Lane - Direction; 1475 assert(LastLane >= 0 && LastLane < (int)NumLanes && 1476 "Out of bounds"); 1477 // Look for a good match for each operand. 1478 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1479 // Search for the operand that matches SortedOps[OpIdx][Lane-1]. 1480 Optional<unsigned> BestIdx = 1481 getBestOperand(OpIdx, Lane, LastLane, ReorderingModes); 1482 // By not selecting a value, we allow the operands that follow to 1483 // select a better matching value. We will get a non-null value in 1484 // the next run of getBestOperand(). 1485 if (BestIdx) { 1486 // Swap the current operand with the one returned by 1487 // getBestOperand(). 1488 swap(OpIdx, BestIdx.getValue(), Lane); 1489 } else { 1490 // We failed to find a best operand, set mode to 'Failed'. 1491 ReorderingModes[OpIdx] = ReorderingMode::Failed; 1492 // Enable the second pass. 
1493 StrategyFailed = true; 1494 } 1495 } 1496 } 1497 } 1498 // Skip second pass if the strategy did not fail. 1499 if (!StrategyFailed) 1500 break; 1501 } 1502 } 1503 1504 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1505 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) { 1506 switch (RMode) { 1507 case ReorderingMode::Load: 1508 return "Load"; 1509 case ReorderingMode::Opcode: 1510 return "Opcode"; 1511 case ReorderingMode::Constant: 1512 return "Constant"; 1513 case ReorderingMode::Splat: 1514 return "Splat"; 1515 case ReorderingMode::Failed: 1516 return "Failed"; 1517 } 1518 llvm_unreachable("Unimplemented Reordering Type"); 1519 } 1520 1521 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode, 1522 raw_ostream &OS) { 1523 return OS << getModeStr(RMode); 1524 } 1525 1526 /// Debug print. 1527 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) { 1528 printMode(RMode, dbgs()); 1529 } 1530 1531 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) { 1532 return printMode(RMode, OS); 1533 } 1534 1535 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const { 1536 const unsigned Indent = 2; 1537 unsigned Cnt = 0; 1538 for (const OperandDataVec &OpDataVec : OpsVec) { 1539 OS << "Operand " << Cnt++ << "\n"; 1540 for (const OperandData &OpData : OpDataVec) { 1541 OS.indent(Indent) << "{"; 1542 if (Value *V = OpData.V) 1543 OS << *V; 1544 else 1545 OS << "null"; 1546 OS << ", APO:" << OpData.APO << "}\n"; 1547 } 1548 OS << "\n"; 1549 } 1550 return OS; 1551 } 1552 1553 /// Debug print. 1554 LLVM_DUMP_METHOD void dump() const { print(dbgs()); } 1555 #endif 1556 }; 1557 1558 /// Checks if the instruction is marked for deletion. 1559 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); } 1560 1561 /// Marks values operands for later deletion by replacing them with Undefs. 1562 void eraseInstructions(ArrayRef<Value *> AV); 1563 1564 ~BoUpSLP(); 1565 1566 private: 1567 /// Checks if all users of \p I are the part of the vectorization tree. 1568 bool areAllUsersVectorized(Instruction *I, 1569 ArrayRef<Value *> VectorizedVals) const; 1570 1571 /// \returns the cost of the vectorizable entry. 1572 InstructionCost getEntryCost(const TreeEntry *E, 1573 ArrayRef<Value *> VectorizedVals); 1574 1575 /// This is the recursive part of buildTree. 1576 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, 1577 const EdgeInfo &EI); 1578 1579 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can 1580 /// be vectorized to use the original vector (or aggregate "bitcast" to a 1581 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise 1582 /// returns false, setting \p CurrentOrder to either an empty vector or a 1583 /// non-identity permutation that allows to reuse extract instructions. 1584 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 1585 SmallVectorImpl<unsigned> &CurrentOrder) const; 1586 1587 /// Vectorize a single entry in the tree. 1588 Value *vectorizeTree(TreeEntry *E); 1589 1590 /// Vectorize a single entry in the tree, starting in \p VL. 1591 Value *vectorizeTree(ArrayRef<Value *> VL); 1592 1593 /// \returns the scalarization cost for this type. Scalarization in this 1594 /// context means the creation of vectors from a group of scalars. 
1595 InstructionCost 1596 getGatherCost(FixedVectorType *Ty, 1597 const DenseSet<unsigned> &ShuffledIndices) const; 1598 1599 /// Checks if the gathered \p VL can be represented as shuffle(s) of previous 1600 /// tree entries. 1601 /// \returns ShuffleKind, if gathered values can be represented as shuffles of 1602 /// previous tree entries. \p Mask is filled with the shuffle mask. 1603 Optional<TargetTransformInfo::ShuffleKind> 1604 isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask, 1605 SmallVectorImpl<const TreeEntry *> &Entries); 1606 1607 /// \returns the scalarization cost for this list of values. Assuming that 1608 /// this subtree gets vectorized, we may need to extract the values from the 1609 /// roots. This method calculates the cost of extracting the values. 1610 InstructionCost getGatherCost(ArrayRef<Value *> VL) const; 1611 1612 /// Set the Builder insert point to one after the last instruction in 1613 /// the bundle 1614 void setInsertPointAfterBundle(const TreeEntry *E); 1615 1616 /// \returns a vector from a collection of scalars in \p VL. 1617 Value *gather(ArrayRef<Value *> VL); 1618 1619 /// \returns whether the VectorizableTree is fully vectorizable and will 1620 /// be beneficial even the tree height is tiny. 1621 bool isFullyVectorizableTinyTree() const; 1622 1623 /// Reorder commutative or alt operands to get better probability of 1624 /// generating vectorized code. 1625 static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, 1626 SmallVectorImpl<Value *> &Left, 1627 SmallVectorImpl<Value *> &Right, 1628 const DataLayout &DL, 1629 ScalarEvolution &SE, 1630 const BoUpSLP &R); 1631 struct TreeEntry { 1632 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>; 1633 TreeEntry(VecTreeTy &Container) : Container(Container) {} 1634 1635 /// \returns true if the scalars in VL are equal to this entry. 1636 bool isSame(ArrayRef<Value *> VL) const { 1637 auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) { 1638 if (Mask.size() != VL.size() && VL.size() == Scalars.size()) 1639 return std::equal(VL.begin(), VL.end(), Scalars.begin()); 1640 return VL.size() == Mask.size() && 1641 std::equal( 1642 VL.begin(), VL.end(), Mask.begin(), 1643 [Scalars](Value *V, int Idx) { return V == Scalars[Idx]; }); 1644 }; 1645 if (!ReorderIndices.empty()) { 1646 // TODO: implement matching if the nodes are just reordered, still can 1647 // treat the vector as the same if the list of scalars matches VL 1648 // directly, without reordering. 1649 SmallVector<int> Mask; 1650 inversePermutation(ReorderIndices, Mask); 1651 if (VL.size() == Scalars.size()) 1652 return IsSame(Scalars, Mask); 1653 if (VL.size() == ReuseShuffleIndices.size()) { 1654 ::addMask(Mask, ReuseShuffleIndices); 1655 return IsSame(Scalars, Mask); 1656 } 1657 return false; 1658 } 1659 return IsSame(Scalars, ReuseShuffleIndices); 1660 } 1661 1662 /// A vector of scalars. 1663 ValueList Scalars; 1664 1665 /// The Scalars are vectorized into this value. It is initialized to Null. 1666 Value *VectorizedValue = nullptr; 1667 1668 /// Do we need to gather this sequence or vectorize it 1669 /// (either with vector instruction or with scatter/gather 1670 /// intrinsics for store/load)? 1671 enum EntryState { Vectorize, ScatterVectorize, NeedToGather }; 1672 EntryState State; 1673 1674 /// Does this sequence require some shuffling? 1675 SmallVector<int, 4> ReuseShuffleIndices; 1676 1677 /// Does this entry require reordering? 
1678 SmallVector<unsigned, 4> ReorderIndices; 1679 1680 /// Points back to the VectorizableTree. 1681 /// 1682 /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has 1683 /// to be a pointer and needs to be able to initialize the child iterator. 1684 /// Thus we need a reference back to the container to translate the indices 1685 /// to entries. 1686 VecTreeTy &Container; 1687 1688 /// The TreeEntry index containing the user of this entry. We can actually 1689 /// have multiple users so the data structure is not truly a tree. 1690 SmallVector<EdgeInfo, 1> UserTreeIndices; 1691 1692 /// The index of this treeEntry in VectorizableTree. 1693 int Idx = -1; 1694 1695 private: 1696 /// The operands of each instruction in each lane Operands[op_index][lane]. 1697 /// Note: This helps avoid the replication of the code that performs the 1698 /// reordering of operands during buildTree_rec() and vectorizeTree(). 1699 SmallVector<ValueList, 2> Operands; 1700 1701 /// The main/alternate instruction. 1702 Instruction *MainOp = nullptr; 1703 Instruction *AltOp = nullptr; 1704 1705 public: 1706 /// Set this bundle's \p OpIdx'th operand to \p OpVL. 1707 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) { 1708 if (Operands.size() < OpIdx + 1) 1709 Operands.resize(OpIdx + 1); 1710 assert(Operands[OpIdx].empty() && "Already resized?"); 1711 Operands[OpIdx].resize(Scalars.size()); 1712 for (unsigned Lane = 0, E = Scalars.size(); Lane != E; ++Lane) 1713 Operands[OpIdx][Lane] = OpVL[Lane]; 1714 } 1715 1716 /// Set the operands of this bundle in their original order. 1717 void setOperandsInOrder() { 1718 assert(Operands.empty() && "Already initialized?"); 1719 auto *I0 = cast<Instruction>(Scalars[0]); 1720 Operands.resize(I0->getNumOperands()); 1721 unsigned NumLanes = Scalars.size(); 1722 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands(); 1723 OpIdx != NumOperands; ++OpIdx) { 1724 Operands[OpIdx].resize(NumLanes); 1725 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { 1726 auto *I = cast<Instruction>(Scalars[Lane]); 1727 assert(I->getNumOperands() == NumOperands && 1728 "Expected same number of operands"); 1729 Operands[OpIdx][Lane] = I->getOperand(OpIdx); 1730 } 1731 } 1732 } 1733 1734 /// Reorders operands of the node to the given mask \p Mask. 1735 void reorderOperands(ArrayRef<int> Mask) { 1736 for (ValueList &Operand : Operands) 1737 reorderScalars(Operand, Mask); 1738 } 1739 1740 /// \returns the \p OpIdx operand of this TreeEntry. 1741 ValueList &getOperand(unsigned OpIdx) { 1742 assert(OpIdx < Operands.size() && "Off bounds"); 1743 return Operands[OpIdx]; 1744 } 1745 1746 /// \returns the number of operands. 1747 unsigned getNumOperands() const { return Operands.size(); } 1748 1749 /// \return the single \p OpIdx operand. 1750 Value *getSingleOperand(unsigned OpIdx) const { 1751 assert(OpIdx < Operands.size() && "Off bounds"); 1752 assert(!Operands[OpIdx].empty() && "No operand available"); 1753 return Operands[OpIdx][0]; 1754 } 1755 1756 /// Some of the instructions in the list have alternate opcodes. 1757 bool isAltShuffle() const { 1758 return getOpcode() != getAltOpcode(); 1759 } 1760 1761 bool isOpcodeOrAlt(Instruction *I) const { 1762 unsigned CheckedOpcode = I->getOpcode(); 1763 return (getOpcode() == CheckedOpcode || 1764 getAltOpcode() == CheckedOpcode); 1765 } 1766 1767 /// Chooses the correct key for scheduling data. If \p Op has the same (or 1768 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is 1769 /// \p OpValue. 
1770 Value *isOneOf(Value *Op) const { 1771 auto *I = dyn_cast<Instruction>(Op); 1772 if (I && isOpcodeOrAlt(I)) 1773 return Op; 1774 return MainOp; 1775 } 1776 1777 void setOperations(const InstructionsState &S) { 1778 MainOp = S.MainOp; 1779 AltOp = S.AltOp; 1780 } 1781 1782 Instruction *getMainOp() const { 1783 return MainOp; 1784 } 1785 1786 Instruction *getAltOp() const { 1787 return AltOp; 1788 } 1789 1790 /// The main/alternate opcodes for the list of instructions. 1791 unsigned getOpcode() const { 1792 return MainOp ? MainOp->getOpcode() : 0; 1793 } 1794 1795 unsigned getAltOpcode() const { 1796 return AltOp ? AltOp->getOpcode() : 0; 1797 } 1798 1799 /// When ReuseReorderShuffleIndices is empty it just returns position of \p 1800 /// V within vector of Scalars. Otherwise, try to remap on its reuse index. 1801 int findLaneForValue(Value *V) const { 1802 unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V)); 1803 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 1804 if (!ReorderIndices.empty()) 1805 FoundLane = ReorderIndices[FoundLane]; 1806 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 1807 if (!ReuseShuffleIndices.empty()) { 1808 FoundLane = std::distance(ReuseShuffleIndices.begin(), 1809 find(ReuseShuffleIndices, FoundLane)); 1810 } 1811 return FoundLane; 1812 } 1813 1814 #ifndef NDEBUG 1815 /// Debug printer. 1816 LLVM_DUMP_METHOD void dump() const { 1817 dbgs() << Idx << ".\n"; 1818 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) { 1819 dbgs() << "Operand " << OpI << ":\n"; 1820 for (const Value *V : Operands[OpI]) 1821 dbgs().indent(2) << *V << "\n"; 1822 } 1823 dbgs() << "Scalars: \n"; 1824 for (Value *V : Scalars) 1825 dbgs().indent(2) << *V << "\n"; 1826 dbgs() << "State: "; 1827 switch (State) { 1828 case Vectorize: 1829 dbgs() << "Vectorize\n"; 1830 break; 1831 case ScatterVectorize: 1832 dbgs() << "ScatterVectorize\n"; 1833 break; 1834 case NeedToGather: 1835 dbgs() << "NeedToGather\n"; 1836 break; 1837 } 1838 dbgs() << "MainOp: "; 1839 if (MainOp) 1840 dbgs() << *MainOp << "\n"; 1841 else 1842 dbgs() << "NULL\n"; 1843 dbgs() << "AltOp: "; 1844 if (AltOp) 1845 dbgs() << *AltOp << "\n"; 1846 else 1847 dbgs() << "NULL\n"; 1848 dbgs() << "VectorizedValue: "; 1849 if (VectorizedValue) 1850 dbgs() << *VectorizedValue << "\n"; 1851 else 1852 dbgs() << "NULL\n"; 1853 dbgs() << "ReuseShuffleIndices: "; 1854 if (ReuseShuffleIndices.empty()) 1855 dbgs() << "Empty"; 1856 else 1857 for (unsigned ReuseIdx : ReuseShuffleIndices) 1858 dbgs() << ReuseIdx << ", "; 1859 dbgs() << "\n"; 1860 dbgs() << "ReorderIndices: "; 1861 for (unsigned ReorderIdx : ReorderIndices) 1862 dbgs() << ReorderIdx << ", "; 1863 dbgs() << "\n"; 1864 dbgs() << "UserTreeIndices: "; 1865 for (const auto &EInfo : UserTreeIndices) 1866 dbgs() << EInfo << ", "; 1867 dbgs() << "\n"; 1868 } 1869 #endif 1870 }; 1871 1872 #ifndef NDEBUG 1873 void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost, 1874 InstructionCost VecCost, 1875 InstructionCost ScalarCost) const { 1876 dbgs() << "SLP: Calculated costs for Tree:\n"; E->dump(); 1877 dbgs() << "SLP: Costs:\n"; 1878 dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n"; 1879 dbgs() << "SLP: VectorCost = " << VecCost << "\n"; 1880 dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n"; 1881 dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = " << 1882 ReuseShuffleCost + VecCost - ScalarCost << "\n"; 1883 } 1884 #endif 1885 1886 /// Create a new VectorizableTree entry. 
1887 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle, 1888 const InstructionsState &S, 1889 const EdgeInfo &UserTreeIdx, 1890 ArrayRef<int> ReuseShuffleIndices = None, 1891 ArrayRef<unsigned> ReorderIndices = None) { 1892 TreeEntry::EntryState EntryState = 1893 Bundle ? TreeEntry::Vectorize : TreeEntry::NeedToGather; 1894 return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx, 1895 ReuseShuffleIndices, ReorderIndices); 1896 } 1897 1898 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, 1899 TreeEntry::EntryState EntryState, 1900 Optional<ScheduleData *> Bundle, 1901 const InstructionsState &S, 1902 const EdgeInfo &UserTreeIdx, 1903 ArrayRef<int> ReuseShuffleIndices = None, 1904 ArrayRef<unsigned> ReorderIndices = None) { 1905 assert(((!Bundle && EntryState == TreeEntry::NeedToGather) || 1906 (Bundle && EntryState != TreeEntry::NeedToGather)) && 1907 "Need to vectorize gather entry?"); 1908 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree)); 1909 TreeEntry *Last = VectorizableTree.back().get(); 1910 Last->Idx = VectorizableTree.size() - 1; 1911 Last->State = EntryState; 1912 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(), 1913 ReuseShuffleIndices.end()); 1914 if (ReorderIndices.empty()) { 1915 Last->Scalars.assign(VL.begin(), VL.end()); 1916 Last->setOperations(S); 1917 } else { 1918 // Reorder scalars and build final mask. 1919 Last->Scalars.assign(VL.size(), nullptr); 1920 transform(ReorderIndices, Last->Scalars.begin(), 1921 [VL](unsigned Idx) -> Value * { 1922 if (Idx >= VL.size()) 1923 return UndefValue::get(VL.front()->getType()); 1924 return VL[Idx]; 1925 }); 1926 InstructionsState S = getSameOpcode(Last->Scalars); 1927 Last->setOperations(S); 1928 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end()); 1929 } 1930 if (Last->State != TreeEntry::NeedToGather) { 1931 for (Value *V : VL) { 1932 assert(!getTreeEntry(V) && "Scalar already in tree!"); 1933 ScalarToTreeEntry[V] = Last; 1934 } 1935 // Update the scheduler bundle to point to this TreeEntry. 1936 unsigned Lane = 0; 1937 for (ScheduleData *BundleMember = Bundle.getValue(); BundleMember; 1938 BundleMember = BundleMember->NextInBundle) { 1939 BundleMember->TE = Last; 1940 BundleMember->Lane = Lane; 1941 ++Lane; 1942 } 1943 assert((!Bundle.getValue() || Lane == VL.size()) && 1944 "Bundle and VL out of sync"); 1945 } else { 1946 MustGather.insert(VL.begin(), VL.end()); 1947 } 1948 1949 if (UserTreeIdx.UserTE) 1950 Last->UserTreeIndices.push_back(UserTreeIdx); 1951 1952 return Last; 1953 } 1954 1955 /// -- Vectorization State -- 1956 /// Holds all of the tree entries. 1957 TreeEntry::VecTreeTy VectorizableTree; 1958 1959 #ifndef NDEBUG 1960 /// Debug printer. 1961 LLVM_DUMP_METHOD void dumpVectorizableTree() const { 1962 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) { 1963 VectorizableTree[Id]->dump(); 1964 dbgs() << "\n"; 1965 } 1966 } 1967 #endif 1968 1969 TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); } 1970 1971 const TreeEntry *getTreeEntry(Value *V) const { 1972 return ScalarToTreeEntry.lookup(V); 1973 } 1974 1975 /// Maps a specific scalar to its tree entry. 1976 SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry; 1977 1978 /// Maps a value to the proposed vectorizable size. 1979 SmallDenseMap<Value *, unsigned> InstrElementSize; 1980 1981 /// A list of scalars that we found that we need to keep as scalars. 
1982 ValueSet MustGather; 1983 1984 /// This POD struct describes one external user in the vectorized tree. 1985 struct ExternalUser { 1986 ExternalUser(Value *S, llvm::User *U, int L) 1987 : Scalar(S), User(U), Lane(L) {} 1988 1989 // Which scalar in our function. 1990 Value *Scalar; 1991 1992 // Which user that uses the scalar. 1993 llvm::User *User; 1994 1995 // Which lane does the scalar belong to. 1996 int Lane; 1997 }; 1998 using UserList = SmallVector<ExternalUser, 16>; 1999 2000 /// Checks if two instructions may access the same memory. 2001 /// 2002 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it 2003 /// is invariant in the calling loop. 2004 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1, 2005 Instruction *Inst2) { 2006 // First check if the result is already in the cache. 2007 AliasCacheKey key = std::make_pair(Inst1, Inst2); 2008 Optional<bool> &result = AliasCache[key]; 2009 if (result.hasValue()) { 2010 return result.getValue(); 2011 } 2012 bool aliased = true; 2013 if (Loc1.Ptr && isSimple(Inst1)) 2014 aliased = isModOrRefSet(AA->getModRefInfo(Inst2, Loc1)); 2015 // Store the result in the cache. 2016 result = aliased; 2017 return aliased; 2018 } 2019 2020 using AliasCacheKey = std::pair<Instruction *, Instruction *>; 2021 2022 /// Cache for alias results. 2023 /// TODO: consider moving this to the AliasAnalysis itself. 2024 DenseMap<AliasCacheKey, Optional<bool>> AliasCache; 2025 2026 /// Removes an instruction from its block and eventually deletes it. 2027 /// It's like Instruction::eraseFromParent() except that the actual deletion 2028 /// is delayed until BoUpSLP is destructed. 2029 /// This is required to ensure that there are no incorrect collisions in the 2030 /// AliasCache, which can happen if a new instruction is allocated at the 2031 /// same address as a previously deleted instruction. 2032 void eraseInstruction(Instruction *I, bool ReplaceOpsWithUndef = false) { 2033 auto It = DeletedInstructions.try_emplace(I, ReplaceOpsWithUndef).first; 2034 It->getSecond() = It->getSecond() && ReplaceOpsWithUndef; 2035 } 2036 2037 /// Temporary store for deleted instructions. Instructions will be deleted 2038 /// eventually when the BoUpSLP is destructed. 2039 DenseMap<Instruction *, bool> DeletedInstructions; 2040 2041 /// A list of values that need to extracted out of the tree. 2042 /// This list holds pairs of (Internal Scalar : External User). External User 2043 /// can be nullptr, it means that this Internal Scalar will be used later, 2044 /// after vectorization. 2045 UserList ExternalUses; 2046 2047 /// Values used only by @llvm.assume calls. 2048 SmallPtrSet<const Value *, 32> EphValues; 2049 2050 /// Holds all of the instructions that we gathered. 2051 SetVector<Instruction *> GatherSeq; 2052 2053 /// A list of blocks that we are going to CSE. 2054 SetVector<BasicBlock *> CSEBlocks; 2055 2056 /// Contains all scheduling relevant data for an instruction. 2057 /// A ScheduleData either represents a single instruction or a member of an 2058 /// instruction bundle (= a group of instructions which is combined into a 2059 /// vector instruction). 2060 struct ScheduleData { 2061 // The initial value for the dependency counters. It means that the 2062 // dependencies are not calculated yet. 
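    // (Right after init()/clearDependencies(), both Dependencies and
    // UnscheduledDeps hold InvalidDeps until calculateDependencies() has
    // filled them in.)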
2063 enum { InvalidDeps = -1 }; 2064 2065 ScheduleData() = default; 2066 2067 void init(int BlockSchedulingRegionID, Value *OpVal) { 2068 FirstInBundle = this; 2069 NextInBundle = nullptr; 2070 NextLoadStore = nullptr; 2071 IsScheduled = false; 2072 SchedulingRegionID = BlockSchedulingRegionID; 2073 UnscheduledDepsInBundle = UnscheduledDeps; 2074 clearDependencies(); 2075 OpValue = OpVal; 2076 TE = nullptr; 2077 Lane = -1; 2078 } 2079 2080 /// Returns true if the dependency information has been calculated. 2081 bool hasValidDependencies() const { return Dependencies != InvalidDeps; } 2082 2083 /// Returns true for single instructions and for bundle representatives 2084 /// (= the head of a bundle). 2085 bool isSchedulingEntity() const { return FirstInBundle == this; } 2086 2087 /// Returns true if it represents an instruction bundle and not only a 2088 /// single instruction. 2089 bool isPartOfBundle() const { 2090 return NextInBundle != nullptr || FirstInBundle != this; 2091 } 2092 2093 /// Returns true if it is ready for scheduling, i.e. it has no more 2094 /// unscheduled depending instructions/bundles. 2095 bool isReady() const { 2096 assert(isSchedulingEntity() && 2097 "can't consider non-scheduling entity for ready list"); 2098 return UnscheduledDepsInBundle == 0 && !IsScheduled; 2099 } 2100 2101 /// Modifies the number of unscheduled dependencies, also updating it for 2102 /// the whole bundle. 2103 int incrementUnscheduledDeps(int Incr) { 2104 UnscheduledDeps += Incr; 2105 return FirstInBundle->UnscheduledDepsInBundle += Incr; 2106 } 2107 2108 /// Sets the number of unscheduled dependencies to the number of 2109 /// dependencies. 2110 void resetUnscheduledDeps() { 2111 incrementUnscheduledDeps(Dependencies - UnscheduledDeps); 2112 } 2113 2114 /// Clears all dependency information. 2115 void clearDependencies() { 2116 Dependencies = InvalidDeps; 2117 resetUnscheduledDeps(); 2118 MemoryDependencies.clear(); 2119 } 2120 2121 void dump(raw_ostream &os) const { 2122 if (!isSchedulingEntity()) { 2123 os << "/ " << *Inst; 2124 } else if (NextInBundle) { 2125 os << '[' << *Inst; 2126 ScheduleData *SD = NextInBundle; 2127 while (SD) { 2128 os << ';' << *SD->Inst; 2129 SD = SD->NextInBundle; 2130 } 2131 os << ']'; 2132 } else { 2133 os << *Inst; 2134 } 2135 } 2136 2137 Instruction *Inst = nullptr; 2138 2139 /// Points to the head in an instruction bundle (and always to this for 2140 /// single instructions). 2141 ScheduleData *FirstInBundle = nullptr; 2142 2143 /// Single linked list of all instructions in a bundle. Null if it is a 2144 /// single instruction. 2145 ScheduleData *NextInBundle = nullptr; 2146 2147 /// Single linked list of all memory instructions (e.g. load, store, call) 2148 /// in the block - until the end of the scheduling region. 2149 ScheduleData *NextLoadStore = nullptr; 2150 2151 /// The dependent memory instructions. 2152 /// This list is derived on demand in calculateDependencies(). 2153 SmallVector<ScheduleData *, 4> MemoryDependencies; 2154 2155 /// This ScheduleData is in the current scheduling region if this matches 2156 /// the current SchedulingRegionID of BlockScheduling. 2157 int SchedulingRegionID = 0; 2158 2159 /// Used for getting a "good" final ordering of instructions. 2160 int SchedulingPriority = 0; 2161 2162 /// The number of dependencies. Constitutes of the number of users of the 2163 /// instruction plus the number of dependent memory instructions (if any). 2164 /// This value is calculated on demand. 
2165 /// If InvalidDeps, the number of dependencies is not calculated yet. 2166 int Dependencies = InvalidDeps; 2167 2168 /// The number of dependencies minus the number of dependencies of scheduled 2169 /// instructions. As soon as this is zero, the instruction/bundle gets ready 2170 /// for scheduling. 2171 /// Note that this is negative as long as Dependencies is not calculated. 2172 int UnscheduledDeps = InvalidDeps; 2173 2174 /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for 2175 /// single instructions. 2176 int UnscheduledDepsInBundle = InvalidDeps; 2177 2178 /// True if this instruction is scheduled (or considered as scheduled in the 2179 /// dry-run). 2180 bool IsScheduled = false; 2181 2182 /// Opcode of the current instruction in the schedule data. 2183 Value *OpValue = nullptr; 2184 2185 /// The TreeEntry that this instruction corresponds to. 2186 TreeEntry *TE = nullptr; 2187 2188 /// The lane of this node in the TreeEntry. 2189 int Lane = -1; 2190 }; 2191 2192 #ifndef NDEBUG 2193 friend inline raw_ostream &operator<<(raw_ostream &os, 2194 const BoUpSLP::ScheduleData &SD) { 2195 SD.dump(os); 2196 return os; 2197 } 2198 #endif 2199 2200 friend struct GraphTraits<BoUpSLP *>; 2201 friend struct DOTGraphTraits<BoUpSLP *>; 2202 2203 /// Contains all scheduling data for a basic block. 2204 struct BlockScheduling { 2205 BlockScheduling(BasicBlock *BB) 2206 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {} 2207 2208 void clear() { 2209 ReadyInsts.clear(); 2210 ScheduleStart = nullptr; 2211 ScheduleEnd = nullptr; 2212 FirstLoadStoreInRegion = nullptr; 2213 LastLoadStoreInRegion = nullptr; 2214 2215 // Reduce the maximum schedule region size by the size of the 2216 // previous scheduling run. 2217 ScheduleRegionSizeLimit -= ScheduleRegionSize; 2218 if (ScheduleRegionSizeLimit < MinScheduleRegionSize) 2219 ScheduleRegionSizeLimit = MinScheduleRegionSize; 2220 ScheduleRegionSize = 0; 2221 2222 // Make a new scheduling region, i.e. all existing ScheduleData is not 2223 // in the new region yet. 2224 ++SchedulingRegionID; 2225 } 2226 2227 ScheduleData *getScheduleData(Value *V) { 2228 ScheduleData *SD = ScheduleDataMap[V]; 2229 if (SD && SD->SchedulingRegionID == SchedulingRegionID) 2230 return SD; 2231 return nullptr; 2232 } 2233 2234 ScheduleData *getScheduleData(Value *V, Value *Key) { 2235 if (V == Key) 2236 return getScheduleData(V); 2237 auto I = ExtraScheduleDataMap.find(V); 2238 if (I != ExtraScheduleDataMap.end()) { 2239 ScheduleData *SD = I->second[Key]; 2240 if (SD && SD->SchedulingRegionID == SchedulingRegionID) 2241 return SD; 2242 } 2243 return nullptr; 2244 } 2245 2246 bool isInSchedulingRegion(ScheduleData *SD) const { 2247 return SD->SchedulingRegionID == SchedulingRegionID; 2248 } 2249 2250 /// Marks an instruction as scheduled and puts all dependent ready 2251 /// instructions into the ready-list. 2252 template <typename ReadyListType> 2253 void schedule(ScheduleData *SD, ReadyListType &ReadyList) { 2254 SD->IsScheduled = true; 2255 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); 2256 2257 ScheduleData *BundleMember = SD; 2258 while (BundleMember) { 2259 if (BundleMember->Inst != BundleMember->OpValue) { 2260 BundleMember = BundleMember->NextInBundle; 2261 continue; 2262 } 2263 // Handle the def-use chain dependencies. 2264 2265 // Decrement the unscheduled counter and insert to ready list if ready. 
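        // Illustrative example (hypothetical IR, not from the source): once
        // the bundle holding
        //   %sum = add i32 %a, %b
        // is scheduled, the ScheduleData of the definitions of %a and %b each
        // lose one unscheduled dependency; a definition whose counter drops to
        // zero has its bundle head inserted into the ready list below.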
2266         auto &&DecrUnsched = [this, &ReadyList](Instruction *I) {
2267           doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
2268             if (OpDef && OpDef->hasValidDependencies() &&
2269                 OpDef->incrementUnscheduledDeps(-1) == 0) {
2270               // There are no more unscheduled dependencies after
2271               // decrementing, so we can put the dependent instruction
2272               // into the ready list.
2273               ScheduleData *DepBundle = OpDef->FirstInBundle;
2274               assert(!DepBundle->IsScheduled &&
2275                      "already scheduled bundle gets ready");
2276               ReadyList.insert(DepBundle);
2277               LLVM_DEBUG(dbgs()
2278                          << "SLP: gets ready (def): " << *DepBundle << "\n");
2279             }
2280           });
2281         };
2282 
2283         // If BundleMember is a vector bundle, its operands may have been
2284         // reordered during buildTree(). We therefore need to get its operands
2285         // through the TreeEntry.
2286         if (TreeEntry *TE = BundleMember->TE) {
2287           int Lane = BundleMember->Lane;
2288           assert(Lane >= 0 && "Lane not set");
2289 
2290           // Since the vectorization tree is being built recursively, this
2291           // assertion ensures that the tree entry has all operands set before
2292           // reaching this code. A couple of exceptions known at the moment are
2293           // extracts where their second (immediate) operand is not added. Since
2294           // immediates do not affect scheduler behavior this is considered
2295           // okay.
2296           auto *In = TE->getMainOp();
2297           assert(In &&
2298                  (isa<ExtractValueInst>(In) || isa<ExtractElementInst>(In) ||
2299                   In->getNumOperands() == TE->getNumOperands()) &&
2300                  "Missed TreeEntry operands?");
2301           (void)In; // fake use to avoid build failure when assertions disabled
2302 
2303           for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands();
2304                OpIdx != NumOperands; ++OpIdx)
2305             if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane]))
2306               DecrUnsched(I);
2307         } else {
2308           // If BundleMember is a stand-alone instruction, no operand reordering
2309           // has taken place, so we directly access its operands.
2310           for (Use &U : BundleMember->Inst->operands())
2311             if (auto *I = dyn_cast<Instruction>(U.get()))
2312               DecrUnsched(I);
2313         }
2314         // Handle the memory dependencies.
2315         for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
2316           if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
2317             // There are no more unscheduled dependencies after decrementing,
2318             // so we can put the dependent instruction into the ready list.
2319             ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
2320             assert(!DepBundle->IsScheduled &&
2321                    "already scheduled bundle gets ready");
2322             ReadyList.insert(DepBundle);
2323             LLVM_DEBUG(dbgs()
2324                        << "SLP: gets ready (mem): " << *DepBundle << "\n");
2325           }
2326         }
2327         BundleMember = BundleMember->NextInBundle;
2328       }
2329     }
2330 
2331     void doForAllOpcodes(Value *V,
2332                          function_ref<void(ScheduleData *SD)> Action) {
2333       if (ScheduleData *SD = getScheduleData(V))
2334         Action(SD);
2335       auto I = ExtraScheduleDataMap.find(V);
2336       if (I != ExtraScheduleDataMap.end())
2337         for (auto &P : I->second)
2338           if (P.second->SchedulingRegionID == SchedulingRegionID)
2339             Action(P.second);
2340     }
2341 
2342     /// Put all instructions into the ReadyList which are ready for scheduling.
2343 template <typename ReadyListType> 2344 void initialFillReadyList(ReadyListType &ReadyList) { 2345 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 2346 doForAllOpcodes(I, [&](ScheduleData *SD) { 2347 if (SD->isSchedulingEntity() && SD->isReady()) { 2348 ReadyList.insert(SD); 2349 LLVM_DEBUG(dbgs() 2350 << "SLP: initially in ready list: " << *I << "\n"); 2351 } 2352 }); 2353 } 2354 } 2355 2356 /// Checks if a bundle of instructions can be scheduled, i.e. has no 2357 /// cyclic dependencies. This is only a dry-run, no instructions are 2358 /// actually moved at this stage. 2359 /// \returns the scheduling bundle. The returned Optional value is non-None 2360 /// if \p VL is allowed to be scheduled. 2361 Optional<ScheduleData *> 2362 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 2363 const InstructionsState &S); 2364 2365 /// Un-bundles a group of instructions. 2366 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue); 2367 2368 /// Allocates schedule data chunk. 2369 ScheduleData *allocateScheduleDataChunks(); 2370 2371 /// Extends the scheduling region so that V is inside the region. 2372 /// \returns true if the region size is within the limit. 2373 bool extendSchedulingRegion(Value *V, const InstructionsState &S); 2374 2375 /// Initialize the ScheduleData structures for new instructions in the 2376 /// scheduling region. 2377 void initScheduleData(Instruction *FromI, Instruction *ToI, 2378 ScheduleData *PrevLoadStore, 2379 ScheduleData *NextLoadStore); 2380 2381 /// Updates the dependency information of a bundle and of all instructions/ 2382 /// bundles which depend on the original bundle. 2383 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList, 2384 BoUpSLP *SLP); 2385 2386 /// Sets all instruction in the scheduling region to un-scheduled. 2387 void resetSchedule(); 2388 2389 BasicBlock *BB; 2390 2391 /// Simple memory allocation for ScheduleData. 2392 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks; 2393 2394 /// The size of a ScheduleData array in ScheduleDataChunks. 2395 int ChunkSize; 2396 2397 /// The allocator position in the current chunk, which is the last entry 2398 /// of ScheduleDataChunks. 2399 int ChunkPos; 2400 2401 /// Attaches ScheduleData to Instruction. 2402 /// Note that the mapping survives during all vectorization iterations, i.e. 2403 /// ScheduleData structures are recycled. 2404 DenseMap<Value *, ScheduleData *> ScheduleDataMap; 2405 2406 /// Attaches ScheduleData to Instruction with the leading key. 2407 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>> 2408 ExtraScheduleDataMap; 2409 2410 struct ReadyList : SmallVector<ScheduleData *, 8> { 2411 void insert(ScheduleData *SD) { push_back(SD); } 2412 }; 2413 2414 /// The ready-list for scheduling (only used for the dry-run). 2415 ReadyList ReadyInsts; 2416 2417 /// The first instruction of the scheduling region. 2418 Instruction *ScheduleStart = nullptr; 2419 2420 /// The first instruction _after_ the scheduling region. 2421 Instruction *ScheduleEnd = nullptr; 2422 2423 /// The first memory accessing instruction in the scheduling region 2424 /// (can be null). 2425 ScheduleData *FirstLoadStoreInRegion = nullptr; 2426 2427 /// The last memory accessing instruction in the scheduling region 2428 /// (can be null). 2429 ScheduleData *LastLoadStoreInRegion = nullptr; 2430 2431 /// The current size of the scheduling region. 2432 int ScheduleRegionSize = 0; 2433 2434 /// The maximum size allowed for the scheduling region. 
2435 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget; 2436 2437 /// The ID of the scheduling region. For a new vectorization iteration this 2438 /// is incremented which "removes" all ScheduleData from the region. 2439 // Make sure that the initial SchedulingRegionID is greater than the 2440 // initial SchedulingRegionID in ScheduleData (which is 0). 2441 int SchedulingRegionID = 1; 2442 }; 2443 2444 /// Attaches the BlockScheduling structures to basic blocks. 2445 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules; 2446 2447 /// Performs the "real" scheduling. Done before vectorization is actually 2448 /// performed in a basic block. 2449 void scheduleBlock(BlockScheduling *BS); 2450 2451 /// List of users to ignore during scheduling and that don't need extracting. 2452 ArrayRef<Value *> UserIgnoreList; 2453 2454 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of 2455 /// sorted SmallVectors of unsigned. 2456 struct OrdersTypeDenseMapInfo { 2457 static OrdersType getEmptyKey() { 2458 OrdersType V; 2459 V.push_back(~1U); 2460 return V; 2461 } 2462 2463 static OrdersType getTombstoneKey() { 2464 OrdersType V; 2465 V.push_back(~2U); 2466 return V; 2467 } 2468 2469 static unsigned getHashValue(const OrdersType &V) { 2470 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); 2471 } 2472 2473 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) { 2474 return LHS == RHS; 2475 } 2476 }; 2477 2478 // Analysis and block reference. 2479 Function *F; 2480 ScalarEvolution *SE; 2481 TargetTransformInfo *TTI; 2482 TargetLibraryInfo *TLI; 2483 AAResults *AA; 2484 LoopInfo *LI; 2485 DominatorTree *DT; 2486 AssumptionCache *AC; 2487 DemandedBits *DB; 2488 const DataLayout *DL; 2489 OptimizationRemarkEmitter *ORE; 2490 2491 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. 2492 unsigned MinVecRegSize; // Set by cl::opt (default: 128). 2493 2494 /// Instruction builder to construct the vectorized tree. 2495 IRBuilder<> Builder; 2496 2497 /// A map of scalar integer values to the smallest bit width with which they 2498 /// can legally be represented. The values map to (width, signed) pairs, 2499 /// where "width" indicates the minimum bit width and "signed" is True if the 2500 /// value must be signed-extended, rather than zero-extended, back to its 2501 /// original width. 2502 MapVector<Value *, std::pair<uint64_t, bool>> MinBWs; 2503 }; 2504 2505 } // end namespace slpvectorizer 2506 2507 template <> struct GraphTraits<BoUpSLP *> { 2508 using TreeEntry = BoUpSLP::TreeEntry; 2509 2510 /// NodeRef has to be a pointer per the GraphWriter. 2511 using NodeRef = TreeEntry *; 2512 2513 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy; 2514 2515 /// Add the VectorizableTree to the index iterator to be able to return 2516 /// TreeEntry pointers. 
2517 struct ChildIteratorType 2518 : public iterator_adaptor_base< 2519 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> { 2520 ContainerTy &VectorizableTree; 2521 2522 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W, 2523 ContainerTy &VT) 2524 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {} 2525 2526 NodeRef operator*() { return I->UserTE; } 2527 }; 2528 2529 static NodeRef getEntryNode(BoUpSLP &R) { 2530 return R.VectorizableTree[0].get(); 2531 } 2532 2533 static ChildIteratorType child_begin(NodeRef N) { 2534 return {N->UserTreeIndices.begin(), N->Container}; 2535 } 2536 2537 static ChildIteratorType child_end(NodeRef N) { 2538 return {N->UserTreeIndices.end(), N->Container}; 2539 } 2540 2541 /// For the node iterator we just need to turn the TreeEntry iterator into a 2542 /// TreeEntry* iterator so that it dereferences to NodeRef. 2543 class nodes_iterator { 2544 using ItTy = ContainerTy::iterator; 2545 ItTy It; 2546 2547 public: 2548 nodes_iterator(const ItTy &It2) : It(It2) {} 2549 NodeRef operator*() { return It->get(); } 2550 nodes_iterator operator++() { 2551 ++It; 2552 return *this; 2553 } 2554 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; } 2555 }; 2556 2557 static nodes_iterator nodes_begin(BoUpSLP *R) { 2558 return nodes_iterator(R->VectorizableTree.begin()); 2559 } 2560 2561 static nodes_iterator nodes_end(BoUpSLP *R) { 2562 return nodes_iterator(R->VectorizableTree.end()); 2563 } 2564 2565 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); } 2566 }; 2567 2568 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits { 2569 using TreeEntry = BoUpSLP::TreeEntry; 2570 2571 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {} 2572 2573 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) { 2574 std::string Str; 2575 raw_string_ostream OS(Str); 2576 if (isSplat(Entry->Scalars)) { 2577 OS << "<splat> " << *Entry->Scalars[0]; 2578 return Str; 2579 } 2580 for (auto V : Entry->Scalars) { 2581 OS << *V; 2582 if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) { 2583 return EU.Scalar == V; 2584 })) 2585 OS << " <extract>"; 2586 OS << "\n"; 2587 } 2588 return Str; 2589 } 2590 2591 static std::string getNodeAttributes(const TreeEntry *Entry, 2592 const BoUpSLP *) { 2593 if (Entry->State == TreeEntry::NeedToGather) 2594 return "color=red"; 2595 return ""; 2596 } 2597 }; 2598 2599 } // end namespace llvm 2600 2601 BoUpSLP::~BoUpSLP() { 2602 for (const auto &Pair : DeletedInstructions) { 2603 // Replace operands of ignored instructions with Undefs in case if they were 2604 // marked for deletion. 2605 if (Pair.getSecond()) { 2606 Value *Undef = UndefValue::get(Pair.getFirst()->getType()); 2607 Pair.getFirst()->replaceAllUsesWith(Undef); 2608 } 2609 Pair.getFirst()->dropAllReferences(); 2610 } 2611 for (const auto &Pair : DeletedInstructions) { 2612 assert(Pair.getFirst()->use_empty() && 2613 "trying to erase instruction with users."); 2614 Pair.getFirst()->eraseFromParent(); 2615 } 2616 #ifdef EXPENSIVE_CHECKS 2617 // If we could guarantee that this call is not extremely slow, we could 2618 // remove the ifdef limitation (see PR47712). 
2619   assert(!verifyFunction(*F, &dbgs()));
2620 #endif
2621 }
2622 
2623 void BoUpSLP::eraseInstructions(ArrayRef<Value *> AV) {
2624   for (auto *V : AV) {
2625     if (auto *I = dyn_cast<Instruction>(V))
2626       eraseInstruction(I, /*ReplaceOpsWithUndef=*/true);
2627   };
2628 }
2629 
2630 /// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses
2631 /// contains the original mask for the scalars reused in the node. The
2632 /// procedure transforms this mask in accordance with the given \p Mask.
2633 static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) {
2634   assert(!Mask.empty() && Reuses.size() == Mask.size() &&
2635          "Expected non-empty mask.");
2636   SmallVector<int> Prev(Reuses.begin(), Reuses.end());
2637   Prev.swap(Reuses);
2638   for (unsigned I = 0, E = Prev.size(); I < E; ++I)
2639     if (Mask[I] != UndefMaskElem)
2640       Reuses[Mask[I]] = Prev[I];
2641 }
2642 
2643 /// Reorders the given \p Order according to the given \p Mask. \p Order is
2644 /// the original order of the scalars. The procedure transforms the provided
2645 /// order in accordance with the given \p Mask. If the resulting \p Order is
2646 /// just an identity order, \p Order is cleared.
2647 static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) {
2648   assert(!Mask.empty() && "Expected non-empty mask.");
2649   SmallVector<int> MaskOrder;
2650   if (Order.empty()) {
2651     MaskOrder.resize(Mask.size());
2652     std::iota(MaskOrder.begin(), MaskOrder.end(), 0);
2653   } else {
2654     inversePermutation(Order, MaskOrder);
2655   }
2656   reorderReuses(MaskOrder, Mask);
2657   if (ShuffleVectorInst::isIdentityMask(MaskOrder)) {
2658     Order.clear();
2659     return;
2660   }
2661   Order.assign(Mask.size(), Mask.size());
2662   for (unsigned I = 0, E = Mask.size(); I < E; ++I)
2663     if (MaskOrder[I] != UndefMaskElem)
2664       Order[MaskOrder[I]] = I;
2665   fixupOrderingIndices(Order);
2666 }
2667 
2668 void BoUpSLP::reorderTopToBottom() {
2669   // Maps VF to the graph nodes.
2670   DenseMap<unsigned, SmallPtrSet<TreeEntry *, 4>> VFToOrderedEntries;
2671   // ExtractElement gather nodes which can be vectorized and need to handle
2672   // their ordering.
2673   DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
2674   // Find all reorderable nodes with the given VF.
2675   // Currently these are vectorized loads, extracts + some gathering of extracts.
2676   for_each(VectorizableTree, [this, &VFToOrderedEntries, &GathersToOrders](
2677                                  const std::unique_ptr<TreeEntry> &TE) {
2678     // No need to reorder if the node requires a reuses shuffle; it still has
2679     // to be shuffled anyway.
2680     if (!TE->ReuseShuffleIndices.empty())
2681       return;
2682     if (TE->State == TreeEntry::Vectorize &&
2683         isa<LoadInst, ExtractElementInst, ExtractValueInst, StoreInst,
2684             InsertElementInst>(TE->getMainOp()) &&
2685         !TE->isAltShuffle()) {
2686       VFToOrderedEntries[TE->Scalars.size()].insert(TE.get());
2687     } else if (TE->State == TreeEntry::NeedToGather &&
2688                TE->getOpcode() == Instruction::ExtractElement &&
2689                !TE->isAltShuffle() &&
2690                isa<FixedVectorType>(cast<ExtractElementInst>(TE->getMainOp())
2691                                         ->getVectorOperandType()) &&
2692                allSameType(TE->Scalars) && allSameBlock(TE->Scalars)) {
2693       // Check that gather of extractelements can be represented as
2694       // just a shuffle of a single vector.
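      // For instance (illustrative): a gather of
      //   extractelement %v, 1 / extractelement %v, 0 /
      //   extractelement %v, 3 / extractelement %v, 2
      // that reads a single source vector %v can be modelled as one
      // shufflevector of %v with mask <1, 0, 3, 2>; CurrentOrder captures
      // that permutation.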
2695       OrdersType CurrentOrder;
2696       bool Reuse = canReuseExtract(TE->Scalars, TE->getMainOp(), CurrentOrder);
2697       if (Reuse || !CurrentOrder.empty()) {
2698         VFToOrderedEntries[TE->Scalars.size()].insert(TE.get());
2699         GathersToOrders.try_emplace(TE.get(), CurrentOrder);
2700       }
2701     }
2702   });
2703 
2704   // Reorder the graph nodes according to their vectorization factor.
2705   for (unsigned VF = VectorizableTree.front()->Scalars.size(); VF > 1;
2706        VF /= 2) {
2707     auto It = VFToOrderedEntries.find(VF);
2708     if (It == VFToOrderedEntries.end())
2709       continue;
2710     // Try to find the most profitable order. We are simply looking for the
2711     // most frequently used order and reorder the scalar elements in the nodes
2712     // according to that order.
2713     const SmallPtrSetImpl<TreeEntry *> &OrderedEntries = It->getSecond();
2714     // All operands are reordered and used only in this node - propagate the
2715     // most used order to the user node.
2716     DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo> OrdersUses;
2717     SmallPtrSet<const TreeEntry *, 4> VisitedOps;
2718     for (const TreeEntry *OpTE : OrderedEntries) {
2719       // No need to reorder these nodes; they still have to be shuffled anyway,
2720       // the reordering shuffle would just be merged into the reuse shuffle.
2721       if (!OpTE->ReuseShuffleIndices.empty())
2722         continue;
2723       // Count the number of uses of each order.
2724       const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & {
2725         if (OpTE->State == TreeEntry::NeedToGather)
2726           return GathersToOrders.find(OpTE)->second;
2727         return OpTE->ReorderIndices;
2728       }();
2729       // Stores actually store the mask, not the order, need to invert.
2730       if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() &&
2731           OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
2732         SmallVector<int> Mask;
2733         inversePermutation(Order, Mask);
2734         unsigned E = Order.size();
2735         OrdersType CurrentOrder(E, E);
2736         transform(Mask, CurrentOrder.begin(), [E](int Idx) {
2737           return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx);
2738         });
2739         fixupOrderingIndices(CurrentOrder);
2740         ++OrdersUses.try_emplace(CurrentOrder).first->getSecond();
2741       } else {
2742         ++OrdersUses.try_emplace(Order).first->getSecond();
2743       }
2744     }
2745     // Set order of the user node.
2746     if (OrdersUses.empty())
2747       continue;
2748     // Choose the most used order.
2749     ArrayRef<unsigned> BestOrder = OrdersUses.begin()->first;
2750     unsigned Cnt = OrdersUses.begin()->second;
2751     for (const auto &Pair : llvm::drop_begin(OrdersUses)) {
2752       if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) {
2753         BestOrder = Pair.first;
2754         Cnt = Pair.second;
2755       }
2756     }
2757     // Set order of the user node.
2758     if (BestOrder.empty())
2759       continue;
2760     SmallVector<int> Mask;
2761     inversePermutation(BestOrder, Mask);
2762     SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem);
2763     unsigned E = BestOrder.size();
2764     transform(BestOrder, MaskOrder.begin(), [E](unsigned I) {
2765       return I < E ? static_cast<int>(I) : UndefMaskElem;
2766     });
2767     // Do an actual reordering, if profitable.
2768     for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
2769       // Just do the reordering for the nodes with the given VF.
2770       if (TE->Scalars.size() != VF) {
2771         if (TE->ReuseShuffleIndices.size() == VF) {
2772           // Need to reorder the reuses masks of the operands with smaller VF to
2773           // be able to find the match between the graph nodes and scalar
2774           // operands of the given node during vectorization/cost estimation.
2775           assert(all_of(TE->UserTreeIndices,
2776                         [VF, &TE](const EdgeInfo &EI) {
2777                           return EI.UserTE->Scalars.size() == VF ||
2778                                  EI.UserTE->Scalars.size() ==
2779                                      TE->Scalars.size();
2780                         }) &&
2781                  "All users must be of VF size.");
2782           // Update ordering of the operands with the smaller VF than the given
2783           // one.
2784           reorderReuses(TE->ReuseShuffleIndices, Mask);
2785         }
2786         continue;
2787       }
2788       if (TE->State == TreeEntry::Vectorize &&
2789           isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst,
2790               InsertElementInst>(TE->getMainOp()) &&
2791           !TE->isAltShuffle()) {
2792         // Build correct orders for extract{element,value}, loads and
2793         // stores.
2794         reorderOrder(TE->ReorderIndices, Mask);
2795         if (isa<InsertElementInst, StoreInst>(TE->getMainOp()))
2796           TE->reorderOperands(Mask);
2797       } else {
2798         // Reorder the node and its operands.
2799         TE->reorderOperands(Mask);
2800         assert(TE->ReorderIndices.empty() &&
2801                "Expected empty reorder sequence.");
2802         reorderScalars(TE->Scalars, Mask);
2803       }
2804       if (!TE->ReuseShuffleIndices.empty()) {
2805         // Apply reversed order to keep the original ordering of the reused
2806         // elements to avoid extra reorder indices shuffling.
2807         OrdersType CurrentOrder;
2808         reorderOrder(CurrentOrder, MaskOrder);
2809         SmallVector<int> NewReuses;
2810         inversePermutation(CurrentOrder, NewReuses);
2811         addMask(NewReuses, TE->ReuseShuffleIndices);
2812         TE->ReuseShuffleIndices.swap(NewReuses);
2813       }
2814     }
2815   }
2816 }
2817 
2818 void BoUpSLP::reorderBottomToTop() {
2819   SetVector<TreeEntry *> OrderedEntries;
2820   DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
2821   // Find all reorderable leaf nodes with the given VF.
2822   // Currently these are vectorized loads, extracts without alternate operands
2823   // + some gathering of extracts.
2824   SmallVector<TreeEntry *> NonVectorized;
2825   for_each(VectorizableTree, [this, &OrderedEntries, &GathersToOrders,
2826                               &NonVectorized](
2827                                  const std::unique_ptr<TreeEntry> &TE) {
2828     // No need to reorder if the node requires a reuses shuffle; it still has
2829     // to be shuffled anyway.
2830     if (!TE->ReuseShuffleIndices.empty())
2831       return;
2832     if (TE->State == TreeEntry::Vectorize &&
2833         isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE->getMainOp()) &&
2834         !TE->isAltShuffle()) {
2835       OrderedEntries.insert(TE.get());
2836     } else if (TE->State == TreeEntry::NeedToGather &&
2837                TE->getOpcode() == Instruction::ExtractElement &&
2838                !TE->isAltShuffle() &&
2839                isa<FixedVectorType>(cast<ExtractElementInst>(TE->getMainOp())
2840                                         ->getVectorOperandType()) &&
2841                allSameType(TE->Scalars) && allSameBlock(TE->Scalars)) {
2842       // Check that gather of extractelements can be represented as
2843       // just a shuffle of a single vector with a single user only.
2844       OrdersType CurrentOrder;
2845       bool Reuse = canReuseExtract(TE->Scalars, TE->getMainOp(), CurrentOrder);
2846       if ((Reuse || !CurrentOrder.empty()) &&
2847           !any_of(
2848               VectorizableTree, [&TE](const std::unique_ptr<TreeEntry> &Entry) {
2849                 return Entry->State == TreeEntry::NeedToGather &&
2850                        Entry.get() != TE.get() && Entry->isSame(TE->Scalars);
2851               })) {
2852         OrderedEntries.insert(TE.get());
2853         GathersToOrders.try_emplace(TE.get(), CurrentOrder);
2854       }
2855     }
2856     if (TE->State != TreeEntry::Vectorize)
2857       NonVectorized.push_back(TE.get());
2858   });
2859 
2860   // Checks if the operands of the users are reorderable and have only a single
2861   // use.
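  // (An operand of the user is acceptable if it is already vectorized for this
  // user, or if its scalars do not already form another tree entry; at most
  // one non-vectorized (gather) node may hold the same scalars, and that
  // gather is collected into GatherOps so it can be reordered as well.)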
2862 auto &&CheckOperands = 2863 [this, &NonVectorized](const auto &Data, 2864 SmallVectorImpl<TreeEntry *> &GatherOps) { 2865 for (unsigned I = 0, E = Data.first->getNumOperands(); I < E; ++I) { 2866 if (any_of(Data.second, 2867 [I](const std::pair<unsigned, TreeEntry *> &OpData) { 2868 return OpData.first == I && 2869 OpData.second->State == TreeEntry::Vectorize; 2870 })) 2871 continue; 2872 ArrayRef<Value *> VL = Data.first->getOperand(I); 2873 const TreeEntry *TE = nullptr; 2874 const auto *It = find_if(VL, [this, &TE](Value *V) { 2875 TE = getTreeEntry(V); 2876 return TE; 2877 }); 2878 if (It != VL.end() && TE->isSame(VL)) 2879 return false; 2880 TreeEntry *Gather = nullptr; 2881 if (count_if(NonVectorized, [VL, &Gather](TreeEntry *TE) { 2882 assert(TE->State != TreeEntry::Vectorize && 2883 "Only non-vectorized nodes are expected."); 2884 if (TE->isSame(VL)) { 2885 Gather = TE; 2886 return true; 2887 } 2888 return false; 2889 }) > 1) 2890 return false; 2891 if (Gather) 2892 GatherOps.push_back(Gather); 2893 } 2894 return true; 2895 }; 2896 // 1. Propagate order to the graph nodes, which use only reordered nodes. 2897 // I.e., if the node has operands, that are reordered, try to make at least 2898 // one operand order in the natural order and reorder others + reorder the 2899 // user node itself. 2900 SmallPtrSet<const TreeEntry *, 4> Visited; 2901 while (!OrderedEntries.empty()) { 2902 // 1. Filter out only reordered nodes. 2903 // 2. If the entry has multiple uses - skip it and jump to the next node. 2904 MapVector<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users; 2905 SmallVector<TreeEntry *> Filtered; 2906 for (TreeEntry *TE : OrderedEntries) { 2907 if (!(TE->State == TreeEntry::Vectorize || 2908 (TE->State == TreeEntry::NeedToGather && 2909 TE->getOpcode() == Instruction::ExtractElement)) || 2910 TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() || 2911 !all_of(drop_begin(TE->UserTreeIndices), 2912 [TE](const EdgeInfo &EI) { 2913 return EI.UserTE == TE->UserTreeIndices.front().UserTE; 2914 }) || 2915 !Visited.insert(TE).second) { 2916 Filtered.push_back(TE); 2917 continue; 2918 } 2919 // Build a map between user nodes and their operands order to speedup 2920 // search. The graph currently does not provide this dependency directly. 2921 for (EdgeInfo &EI : TE->UserTreeIndices) { 2922 TreeEntry *UserTE = EI.UserTE; 2923 auto It = Users.find(UserTE); 2924 if (It == Users.end()) 2925 It = Users.insert({UserTE, {}}).first; 2926 It->second.emplace_back(EI.EdgeIdx, TE); 2927 } 2928 } 2929 // Erase filtered entries. 2930 for_each(Filtered, 2931 [&OrderedEntries](TreeEntry *TE) { OrderedEntries.remove(TE); }); 2932 for (const auto &Data : Users) { 2933 // Check that operands are used only in the User node. 2934 SmallVector<TreeEntry *> GatherOps; 2935 if (!CheckOperands(Data, GatherOps)) { 2936 for_each(Data.second, 2937 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { 2938 OrderedEntries.remove(Op.second); 2939 }); 2940 continue; 2941 } 2942 // All operands are reordered and used only in this node - propagate the 2943 // most used order to the user node. 
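      // Illustrative example: if two operand entries prefer the order
      // {1, 0, 3, 2} and one has no preferred order (identity), the
      // non-identity order receives the higher use count below and is
      // propagated to the user node (assuming each operand entry has a single
      // user).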
2944 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo> OrdersUses; 2945 SmallPtrSet<const TreeEntry *, 4> VisitedOps; 2946 for (const auto &Op : Data.second) { 2947 TreeEntry *OpTE = Op.second; 2948 if (!OpTE->ReuseShuffleIndices.empty()) 2949 continue; 2950 const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & { 2951 if (OpTE->State == TreeEntry::NeedToGather) 2952 return GathersToOrders.find(OpTE)->second; 2953 return OpTE->ReorderIndices; 2954 }(); 2955 // Stores actually store the mask, not the order, need to invert. 2956 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && 2957 OpTE->getOpcode() == Instruction::Store && !Order.empty()) { 2958 SmallVector<int> Mask; 2959 inversePermutation(Order, Mask); 2960 unsigned E = Order.size(); 2961 OrdersType CurrentOrder(E, E); 2962 transform(Mask, CurrentOrder.begin(), [E](int Idx) { 2963 return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx); 2964 }); 2965 fixupOrderingIndices(CurrentOrder); 2966 ++OrdersUses.try_emplace(CurrentOrder).first->getSecond(); 2967 } else { 2968 ++OrdersUses.try_emplace(Order).first->getSecond(); 2969 } 2970 if (VisitedOps.insert(OpTE).second) 2971 OrdersUses.try_emplace({}, 0).first->getSecond() += 2972 OpTE->UserTreeIndices.size(); 2973 --OrdersUses[{}]; 2974 } 2975 // If no orders - skip current nodes and jump to the next one, if any. 2976 if (OrdersUses.empty()) { 2977 for_each(Data.second, 2978 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { 2979 OrderedEntries.remove(Op.second); 2980 }); 2981 continue; 2982 } 2983 // Choose the best order. 2984 ArrayRef<unsigned> BestOrder = OrdersUses.begin()->first; 2985 unsigned Cnt = OrdersUses.begin()->second; 2986 for (const auto &Pair : llvm::drop_begin(OrdersUses)) { 2987 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 2988 BestOrder = Pair.first; 2989 Cnt = Pair.second; 2990 } 2991 } 2992 // Set order of the user node (reordering of operands and user nodes). 2993 if (BestOrder.empty()) { 2994 for_each(Data.second, 2995 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { 2996 OrderedEntries.remove(Op.second); 2997 }); 2998 continue; 2999 } 3000 // Erase operands from OrderedEntries list and adjust their orders. 3001 VisitedOps.clear(); 3002 SmallVector<int> Mask; 3003 inversePermutation(BestOrder, Mask); 3004 SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem); 3005 unsigned E = BestOrder.size(); 3006 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 3007 return I < E ? static_cast<int>(I) : UndefMaskElem; 3008 }); 3009 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) { 3010 TreeEntry *TE = Op.second; 3011 OrderedEntries.remove(TE); 3012 if (!VisitedOps.insert(TE).second) 3013 continue; 3014 if (!TE->ReuseShuffleIndices.empty() && TE->ReorderIndices.empty()) { 3015 // Just reorder reuses indices. 3016 reorderReuses(TE->ReuseShuffleIndices, Mask); 3017 continue; 3018 } 3019 // Gathers are processed separately. 3020 if (TE->State != TreeEntry::Vectorize) 3021 continue; 3022 assert((BestOrder.size() == TE->ReorderIndices.size() || 3023 TE->ReorderIndices.empty()) && 3024 "Non-matching sizes of user/operand entries."); 3025 reorderOrder(TE->ReorderIndices, Mask); 3026 } 3027 // For gathers just need to reorder its scalars. 
3028 for (TreeEntry *Gather : GatherOps) { 3029 if (!Gather->ReuseShuffleIndices.empty()) 3030 continue; 3031 assert(Gather->ReorderIndices.empty() && 3032 "Unexpected reordering of gathers."); 3033 reorderScalars(Gather->Scalars, Mask); 3034 OrderedEntries.remove(Gather); 3035 } 3036 // Reorder operands of the user node and set the ordering for the user 3037 // node itself. 3038 if (Data.first->State != TreeEntry::Vectorize || 3039 !isa<ExtractElementInst, ExtractValueInst, LoadInst>( 3040 Data.first->getMainOp()) || 3041 Data.first->isAltShuffle()) 3042 Data.first->reorderOperands(Mask); 3043 if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) || 3044 Data.first->isAltShuffle()) { 3045 reorderScalars(Data.first->Scalars, Mask); 3046 reorderOrder(Data.first->ReorderIndices, MaskOrder); 3047 if (Data.first->ReuseShuffleIndices.empty() && 3048 !Data.first->ReorderIndices.empty() && 3049 !Data.first->isAltShuffle()) { 3050 // Insert user node to the list to try to sink reordering deeper in 3051 // the graph. 3052 OrderedEntries.insert(Data.first); 3053 } 3054 } else { 3055 reorderOrder(Data.first->ReorderIndices, Mask); 3056 } 3057 } 3058 } 3059 } 3060 3061 void BoUpSLP::buildExternalUses( 3062 const ExtraValueToDebugLocsMap &ExternallyUsedValues) { 3063 // Collect the values that we need to extract from the tree. 3064 for (auto &TEPtr : VectorizableTree) { 3065 TreeEntry *Entry = TEPtr.get(); 3066 3067 // No need to handle users of gathered values. 3068 if (Entry->State == TreeEntry::NeedToGather) 3069 continue; 3070 3071 // For each lane: 3072 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 3073 Value *Scalar = Entry->Scalars[Lane]; 3074 int FoundLane = Entry->findLaneForValue(Scalar); 3075 3076 // Check if the scalar is externally used as an extra arg. 3077 auto ExtI = ExternallyUsedValues.find(Scalar); 3078 if (ExtI != ExternallyUsedValues.end()) { 3079 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " 3080 << Lane << " from " << *Scalar << ".\n"); 3081 ExternalUses.emplace_back(Scalar, nullptr, FoundLane); 3082 } 3083 for (User *U : Scalar->users()) { 3084 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); 3085 3086 Instruction *UserInst = dyn_cast<Instruction>(U); 3087 if (!UserInst) 3088 continue; 3089 3090 if (isDeleted(UserInst)) 3091 continue; 3092 3093 // Skip in-tree scalars that become vectors 3094 if (TreeEntry *UseEntry = getTreeEntry(U)) { 3095 Value *UseScalar = UseEntry->Scalars[0]; 3096 // Some in-tree scalars will remain as scalar in vectorized 3097 // instructions. If that is the case, the one in Lane 0 will 3098 // be used. 3099 if (UseScalar != U || 3100 UseEntry->State == TreeEntry::ScatterVectorize || 3101 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) { 3102 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U 3103 << ".\n"); 3104 assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state"); 3105 continue; 3106 } 3107 } 3108 3109 // Ignore users in the user ignore list. 
3110 if (is_contained(UserIgnoreList, UserInst)) 3111 continue; 3112 3113 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " 3114 << Lane << " from " << *Scalar << ".\n"); 3115 ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane)); 3116 } 3117 } 3118 } 3119 } 3120 3121 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 3122 ArrayRef<Value *> UserIgnoreLst) { 3123 deleteTree(); 3124 UserIgnoreList = UserIgnoreLst; 3125 if (!allSameType(Roots)) 3126 return; 3127 buildTree_rec(Roots, 0, EdgeInfo()); 3128 } 3129 3130 namespace { 3131 /// Tracks the state we can represent the loads in the given sequence. 3132 enum class LoadsState { Gather, Vectorize, ScatterVectorize }; 3133 } // anonymous namespace 3134 3135 /// Checks if the given array of loads can be represented as a vectorized, 3136 /// scatter or just simple gather. 3137 static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0, 3138 const TargetTransformInfo &TTI, 3139 const DataLayout &DL, ScalarEvolution &SE, 3140 SmallVectorImpl<unsigned> &Order, 3141 SmallVectorImpl<Value *> &PointerOps) { 3142 // Check that a vectorized load would load the same memory as a scalar 3143 // load. For example, we don't want to vectorize loads that are smaller 3144 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 3145 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 3146 // from such a struct, we read/write packed bits disagreeing with the 3147 // unvectorized version. 3148 Type *ScalarTy = VL0->getType(); 3149 3150 if (DL.getTypeSizeInBits(ScalarTy) != DL.getTypeAllocSizeInBits(ScalarTy)) 3151 return LoadsState::Gather; 3152 3153 // Make sure all loads in the bundle are simple - we can't vectorize 3154 // atomic or volatile loads. 3155 PointerOps.clear(); 3156 PointerOps.resize(VL.size()); 3157 auto *POIter = PointerOps.begin(); 3158 for (Value *V : VL) { 3159 auto *L = cast<LoadInst>(V); 3160 if (!L->isSimple()) 3161 return LoadsState::Gather; 3162 *POIter = L->getPointerOperand(); 3163 ++POIter; 3164 } 3165 3166 Order.clear(); 3167 // Check the order of pointer operands. 3168 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, DL, SE, Order)) { 3169 Value *Ptr0; 3170 Value *PtrN; 3171 if (Order.empty()) { 3172 Ptr0 = PointerOps.front(); 3173 PtrN = PointerOps.back(); 3174 } else { 3175 Ptr0 = PointerOps[Order.front()]; 3176 PtrN = PointerOps[Order.back()]; 3177 } 3178 Optional<int> Diff = 3179 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE); 3180 // Check that the sorted loads are consecutive. 
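    // The element distance between the first and the last pointer must be
    // exactly VL.size() - 1, i.e. every lane is covered once with no gaps.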
3181 if (static_cast<unsigned>(*Diff) == VL.size() - 1) 3182 return LoadsState::Vectorize; 3183 Align CommonAlignment = cast<LoadInst>(VL0)->getAlign(); 3184 for (Value *V : VL) 3185 CommonAlignment = 3186 commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign()); 3187 if (TTI.isLegalMaskedGather(FixedVectorType::get(ScalarTy, VL.size()), 3188 CommonAlignment)) 3189 return LoadsState::ScatterVectorize; 3190 } 3191 3192 return LoadsState::Gather; 3193 } 3194 3195 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, 3196 const EdgeInfo &UserTreeIdx) { 3197 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!"); 3198 3199 InstructionsState S = getSameOpcode(VL); 3200 if (Depth == RecursionMaxDepth) { 3201 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); 3202 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3203 return; 3204 } 3205 3206 // Don't handle scalable vectors 3207 if (S.getOpcode() == Instruction::ExtractElement && 3208 isa<ScalableVectorType>( 3209 cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) { 3210 LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n"); 3211 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3212 return; 3213 } 3214 3215 // Don't handle vectors. 3216 if (S.OpValue->getType()->isVectorTy() && 3217 !isa<InsertElementInst>(S.OpValue)) { 3218 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); 3219 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3220 return; 3221 } 3222 3223 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 3224 if (SI->getValueOperand()->getType()->isVectorTy()) { 3225 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); 3226 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3227 return; 3228 } 3229 3230 // If all of the operands are identical or constant we have a simple solution. 3231 // If we deal with insert/extract instructions, they all must have constant 3232 // indices, otherwise we should gather them, not try to vectorize. 3233 if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode() || 3234 (isa<InsertElementInst, ExtractValueInst, ExtractElementInst>(S.MainOp) && 3235 !all_of(VL, isVectorLikeInstWithConstOps))) { 3236 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n"); 3237 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3238 return; 3239 } 3240 3241 // We now know that this is a vector of instructions of the same type from 3242 // the same block. 3243 3244 // Don't vectorize ephemeral values. 3245 for (Value *V : VL) { 3246 if (EphValues.count(V)) { 3247 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 3248 << ") is ephemeral.\n"); 3249 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3250 return; 3251 } 3252 } 3253 3254 // Check if this is a duplicate of another entry. 3255 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 3256 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); 3257 if (!E->isSame(VL)) { 3258 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); 3259 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3260 return; 3261 } 3262 // Record the reuse of the tree node. FIXME, currently this is only used to 3263 // properly draw the graph rather than for the actual vectorization. 
3264 E->UserTreeIndices.push_back(UserTreeIdx); 3265 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue 3266 << ".\n"); 3267 return; 3268 } 3269 3270 // Check that none of the instructions in the bundle are already in the tree. 3271 for (Value *V : VL) { 3272 auto *I = dyn_cast<Instruction>(V); 3273 if (!I) 3274 continue; 3275 if (getTreeEntry(I)) { 3276 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 3277 << ") is already in tree.\n"); 3278 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3279 return; 3280 } 3281 } 3282 3283 // If any of the scalars is marked as a value that needs to stay scalar, then 3284 // we need to gather the scalars. 3285 // The reduction nodes (stored in UserIgnoreList) also should stay scalar. 3286 for (Value *V : VL) { 3287 if (MustGather.count(V) || is_contained(UserIgnoreList, V)) { 3288 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); 3289 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3290 return; 3291 } 3292 } 3293 3294 // Check that all of the users of the scalars that we want to vectorize are 3295 // schedulable. 3296 auto *VL0 = cast<Instruction>(S.OpValue); 3297 BasicBlock *BB = VL0->getParent(); 3298 3299 if (!DT->isReachableFromEntry(BB)) { 3300 // Don't go into unreachable blocks. They may contain instructions with 3301 // dependency cycles which confuse the final scheduling. 3302 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); 3303 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3304 return; 3305 } 3306 3307 // Check that every instruction appears once in this bundle. 3308 SmallVector<int> ReuseShuffleIndicies; 3309 SmallVector<Value *, 4> UniqueValues; 3310 DenseMap<Value *, unsigned> UniquePositions; 3311 for (Value *V : VL) { 3312 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 3313 ReuseShuffleIndicies.emplace_back(Res.first->second); 3314 if (Res.second) 3315 UniqueValues.emplace_back(V); 3316 } 3317 size_t NumUniqueScalarValues = UniqueValues.size(); 3318 if (NumUniqueScalarValues == VL.size()) { 3319 ReuseShuffleIndicies.clear(); 3320 } else { 3321 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); 3322 if (NumUniqueScalarValues <= 1 || 3323 !llvm::isPowerOf2_32(NumUniqueScalarValues)) { 3324 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 3325 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3326 return; 3327 } 3328 VL = UniqueValues; 3329 } 3330 3331 auto &BSRef = BlocksSchedules[BB]; 3332 if (!BSRef) 3333 BSRef = std::make_unique<BlockScheduling>(BB); 3334 3335 BlockScheduling &BS = *BSRef.get(); 3336 3337 Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S); 3338 if (!Bundle) { 3339 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); 3340 assert((!BS.getScheduleData(VL0) || 3341 !BS.getScheduleData(VL0)->isPartOfBundle()) && 3342 "tryScheduleBundle should cancelScheduling on failure"); 3343 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3344 ReuseShuffleIndicies); 3345 return; 3346 } 3347 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 3348 3349 unsigned ShuffleOrOp = S.isAltShuffle() ? 3350 (unsigned) Instruction::ShuffleVector : S.getOpcode(); 3351 switch (ShuffleOrOp) { 3352 case Instruction::PHI: { 3353 auto *PH = cast<PHINode>(VL0); 3354 3355 // Check for terminator values (e.g. invoke). 
3356 for (Value *V : VL) 3357 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { 3358 Instruction *Term = dyn_cast<Instruction>( 3359 cast<PHINode>(V)->getIncomingValueForBlock( 3360 PH->getIncomingBlock(I))); 3361 if (Term && Term->isTerminator()) { 3362 LLVM_DEBUG(dbgs() 3363 << "SLP: Need to swizzle PHINodes (terminator use).\n"); 3364 BS.cancelScheduling(VL, VL0); 3365 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3366 ReuseShuffleIndicies); 3367 return; 3368 } 3369 } 3370 3371 TreeEntry *TE = 3372 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies); 3373 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 3374 3375 // Keeps the reordered operands to avoid code duplication. 3376 SmallVector<ValueList, 2> OperandsVec; 3377 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { 3378 if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) { 3379 ValueList Operands(VL.size(), PoisonValue::get(PH->getType())); 3380 TE->setOperand(I, Operands); 3381 OperandsVec.push_back(Operands); 3382 continue; 3383 } 3384 ValueList Operands; 3385 // Prepare the operand vector. 3386 for (Value *V : VL) 3387 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock( 3388 PH->getIncomingBlock(I))); 3389 TE->setOperand(I, Operands); 3390 OperandsVec.push_back(Operands); 3391 } 3392 for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx) 3393 buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx}); 3394 return; 3395 } 3396 case Instruction::ExtractValue: 3397 case Instruction::ExtractElement: { 3398 OrdersType CurrentOrder; 3399 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder); 3400 if (Reuse) { 3401 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n"); 3402 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3403 ReuseShuffleIndicies); 3404 // This is a special case, as it does not gather, but at the same time 3405 // we are not extending buildTree_rec() towards the operands. 3406 ValueList Op0; 3407 Op0.assign(VL.size(), VL0->getOperand(0)); 3408 VectorizableTree.back()->setOperand(0, Op0); 3409 return; 3410 } 3411 if (!CurrentOrder.empty()) { 3412 LLVM_DEBUG({ 3413 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence " 3414 "with order"; 3415 for (unsigned Idx : CurrentOrder) 3416 dbgs() << " " << Idx; 3417 dbgs() << "\n"; 3418 }); 3419 fixupOrderingIndices(CurrentOrder); 3420 // Insert new order with initial value 0, if it does not exist, 3421 // otherwise return the iterator to the existing one. 3422 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3423 ReuseShuffleIndicies, CurrentOrder); 3424 // This is a special case, as it does not gather, but at the same time 3425 // we are not extending buildTree_rec() towards the operands. 3426 ValueList Op0; 3427 Op0.assign(VL.size(), VL0->getOperand(0)); 3428 VectorizableTree.back()->setOperand(0, Op0); 3429 return; 3430 } 3431 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n"); 3432 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3433 ReuseShuffleIndicies); 3434 BS.cancelScheduling(VL, VL0); 3435 return; 3436 } 3437 case Instruction::InsertElement: { 3438 assert(ReuseShuffleIndicies.empty() && "All inserts should be unique"); 3439 3440 // Check that we have a buildvector and not a shuffle of 2 or more 3441 // different vectors. 
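      // SourceVectors collects the vector operands (operand 0) of the
      // insertelement instructions, and MinIdx tracks the smallest constant
      // insertion index seen in the bundle.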
      ValueSet SourceVectors;
      int MinIdx = std::numeric_limits<int>::max();
      for (Value *V : VL) {
        SourceVectors.insert(cast<Instruction>(V)->getOperand(0));
        Optional<int> Idx = getInsertIndex(V, 0);
        if (!Idx || *Idx == UndefMaskElem)
          continue;
        MinIdx = std::min(MinIdx, *Idx);
      }

      if (count_if(VL, [&SourceVectors](Value *V) {
            return !SourceVectors.contains(V);
          }) >= 2) {
        // Found 2nd source vector - cancel.
        LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with "
                             "different source vectors.\n");
        newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx);
        BS.cancelScheduling(VL, VL0);
        return;
      }

      auto OrdCompare = [](const std::pair<int, int> &P1,
                           const std::pair<int, int> &P2) {
        return P1.first > P2.first;
      };
      PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>,
                    decltype(OrdCompare)>
          Indices(OrdCompare);
      for (int I = 0, E = VL.size(); I < E; ++I) {
        Optional<int> Idx = getInsertIndex(VL[I], 0);
        if (!Idx || *Idx == UndefMaskElem)
          continue;
        Indices.emplace(*Idx, I);
      }
      OrdersType CurrentOrder(VL.size(), VL.size());
      bool IsIdentity = true;
      for (int I = 0, E = VL.size(); I < E; ++I) {
        CurrentOrder[Indices.top().second] = I;
        IsIdentity &= Indices.top().second == I;
        Indices.pop();
      }
      if (IsIdentity)
        CurrentOrder.clear();
      TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                                   None, CurrentOrder);
      LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n");

      constexpr int NumOps = 2;
      ValueList VectorOperands[NumOps];
      for (int I = 0; I < NumOps; ++I) {
        for (Value *V : VL)
          VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I));

        TE->setOperand(I, VectorOperands[I]);
      }
      buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1});
      return;
    }
    case Instruction::Load: {
      // Check that a vectorized load would load the same memory as a scalar
      // load. For example, we don't want to vectorize loads that are smaller
      // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
      // treats loading/storing it as an i8 struct. If we vectorize loads/stores
      // from such a struct, we read/write packed bits disagreeing with the
      // unvectorized version.
      SmallVector<Value *> PointerOps;
      OrdersType CurrentOrder;
      TreeEntry *TE = nullptr;
      switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, CurrentOrder,
                                PointerOps)) {
      case LoadsState::Vectorize:
        if (CurrentOrder.empty()) {
          // Original loads are consecutive and do not require reordering.
          TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                            ReuseShuffleIndicies);
          LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
        } else {
          fixupOrderingIndices(CurrentOrder);
          // Need to reorder.
          TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
                            ReuseShuffleIndicies, CurrentOrder);
          LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
        }
        TE->setOperandsInOrder();
        break;
      case LoadsState::ScatterVectorize:
        // Vectorizing non-consecutive loads with `llvm.masked.gather`.
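        // The bundle's pointer operands become operand 0 of the new tree
        // entry and are vectorized recursively below, since the gather
        // intrinsic consumes a vector of pointers.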
3529 TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S, 3530 UserTreeIdx, ReuseShuffleIndicies); 3531 TE->setOperandsInOrder(); 3532 buildTree_rec(PointerOps, Depth + 1, {TE, 0}); 3533 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); 3534 break; 3535 case LoadsState::Gather: 3536 BS.cancelScheduling(VL, VL0); 3537 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3538 ReuseShuffleIndicies); 3539 #ifndef NDEBUG 3540 Type *ScalarTy = VL0->getType(); 3541 if (DL->getTypeSizeInBits(ScalarTy) != 3542 DL->getTypeAllocSizeInBits(ScalarTy)) 3543 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); 3544 else if (any_of(VL, [](Value *V) { 3545 return !cast<LoadInst>(V)->isSimple(); 3546 })) 3547 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); 3548 else 3549 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); 3550 #endif // NDEBUG 3551 break; 3552 } 3553 return; 3554 } 3555 case Instruction::ZExt: 3556 case Instruction::SExt: 3557 case Instruction::FPToUI: 3558 case Instruction::FPToSI: 3559 case Instruction::FPExt: 3560 case Instruction::PtrToInt: 3561 case Instruction::IntToPtr: 3562 case Instruction::SIToFP: 3563 case Instruction::UIToFP: 3564 case Instruction::Trunc: 3565 case Instruction::FPTrunc: 3566 case Instruction::BitCast: { 3567 Type *SrcTy = VL0->getOperand(0)->getType(); 3568 for (Value *V : VL) { 3569 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType(); 3570 if (Ty != SrcTy || !isValidElementType(Ty)) { 3571 BS.cancelScheduling(VL, VL0); 3572 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3573 ReuseShuffleIndicies); 3574 LLVM_DEBUG(dbgs() 3575 << "SLP: Gathering casts with different src types.\n"); 3576 return; 3577 } 3578 } 3579 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3580 ReuseShuffleIndicies); 3581 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); 3582 3583 TE->setOperandsInOrder(); 3584 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 3585 ValueList Operands; 3586 // Prepare the operand vector. 3587 for (Value *V : VL) 3588 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 3589 3590 buildTree_rec(Operands, Depth + 1, {TE, i}); 3591 } 3592 return; 3593 } 3594 case Instruction::ICmp: 3595 case Instruction::FCmp: { 3596 // Check that all of the compares have the same predicate. 3597 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 3598 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0); 3599 Type *ComparedTy = VL0->getOperand(0)->getType(); 3600 for (Value *V : VL) { 3601 CmpInst *Cmp = cast<CmpInst>(V); 3602 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) || 3603 Cmp->getOperand(0)->getType() != ComparedTy) { 3604 BS.cancelScheduling(VL, VL0); 3605 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3606 ReuseShuffleIndicies); 3607 LLVM_DEBUG(dbgs() 3608 << "SLP: Gathering cmp with different predicate.\n"); 3609 return; 3610 } 3611 } 3612 3613 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3614 ReuseShuffleIndicies); 3615 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 3616 3617 ValueList Left, Right; 3618 if (cast<CmpInst>(VL0)->isCommutative()) { 3619 // Commutative predicate - collect + sort operands of the instructions 3620 // so that each side is more likely to have the same opcode. 
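        // For a commutative predicate (e.g. eq/ne) the swapped predicate is
        // the same predicate, so operands can be swapped per lane without
        // changing the result.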
3621 assert(P0 == SwapP0 && "Commutative Predicate mismatch"); 3622 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 3623 } else { 3624 // Collect operands - commute if it uses the swapped predicate. 3625 for (Value *V : VL) { 3626 auto *Cmp = cast<CmpInst>(V); 3627 Value *LHS = Cmp->getOperand(0); 3628 Value *RHS = Cmp->getOperand(1); 3629 if (Cmp->getPredicate() != P0) 3630 std::swap(LHS, RHS); 3631 Left.push_back(LHS); 3632 Right.push_back(RHS); 3633 } 3634 } 3635 TE->setOperand(0, Left); 3636 TE->setOperand(1, Right); 3637 buildTree_rec(Left, Depth + 1, {TE, 0}); 3638 buildTree_rec(Right, Depth + 1, {TE, 1}); 3639 return; 3640 } 3641 case Instruction::Select: 3642 case Instruction::FNeg: 3643 case Instruction::Add: 3644 case Instruction::FAdd: 3645 case Instruction::Sub: 3646 case Instruction::FSub: 3647 case Instruction::Mul: 3648 case Instruction::FMul: 3649 case Instruction::UDiv: 3650 case Instruction::SDiv: 3651 case Instruction::FDiv: 3652 case Instruction::URem: 3653 case Instruction::SRem: 3654 case Instruction::FRem: 3655 case Instruction::Shl: 3656 case Instruction::LShr: 3657 case Instruction::AShr: 3658 case Instruction::And: 3659 case Instruction::Or: 3660 case Instruction::Xor: { 3661 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3662 ReuseShuffleIndicies); 3663 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n"); 3664 3665 // Sort operands of the instructions so that each side is more likely to 3666 // have the same opcode. 3667 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 3668 ValueList Left, Right; 3669 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 3670 TE->setOperand(0, Left); 3671 TE->setOperand(1, Right); 3672 buildTree_rec(Left, Depth + 1, {TE, 0}); 3673 buildTree_rec(Right, Depth + 1, {TE, 1}); 3674 return; 3675 } 3676 3677 TE->setOperandsInOrder(); 3678 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 3679 ValueList Operands; 3680 // Prepare the operand vector. 3681 for (Value *V : VL) 3682 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 3683 3684 buildTree_rec(Operands, Depth + 1, {TE, i}); 3685 } 3686 return; 3687 } 3688 case Instruction::GetElementPtr: { 3689 // We don't combine GEPs with complicated (nested) indexing. 3690 for (Value *V : VL) { 3691 if (cast<Instruction>(V)->getNumOperands() != 2) { 3692 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 3693 BS.cancelScheduling(VL, VL0); 3694 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3695 ReuseShuffleIndicies); 3696 return; 3697 } 3698 } 3699 3700 // We can't combine several GEPs into one vector if they operate on 3701 // different types. 3702 Type *Ty0 = VL0->getOperand(0)->getType(); 3703 for (Value *V : VL) { 3704 Type *CurTy = cast<Instruction>(V)->getOperand(0)->getType(); 3705 if (Ty0 != CurTy) { 3706 LLVM_DEBUG(dbgs() 3707 << "SLP: not-vectorizable GEP (different types).\n"); 3708 BS.cancelScheduling(VL, VL0); 3709 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3710 ReuseShuffleIndicies); 3711 return; 3712 } 3713 } 3714 3715 // We don't combine GEPs with non-constant indexes. 
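      // Every GEP index must be a ConstantInt that either has the same type
      // as the first GEP's index or is no wider than the pointer index type
      // of its address space; otherwise the bundle is gathered.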
3716 Type *Ty1 = VL0->getOperand(1)->getType(); 3717 for (Value *V : VL) { 3718 auto Op = cast<Instruction>(V)->getOperand(1); 3719 if (!isa<ConstantInt>(Op) || 3720 (Op->getType() != Ty1 && 3721 Op->getType()->getScalarSizeInBits() > 3722 DL->getIndexSizeInBits( 3723 V->getType()->getPointerAddressSpace()))) { 3724 LLVM_DEBUG(dbgs() 3725 << "SLP: not-vectorizable GEP (non-constant indexes).\n"); 3726 BS.cancelScheduling(VL, VL0); 3727 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3728 ReuseShuffleIndicies); 3729 return; 3730 } 3731 } 3732 3733 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3734 ReuseShuffleIndicies); 3735 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n"); 3736 TE->setOperandsInOrder(); 3737 for (unsigned i = 0, e = 2; i < e; ++i) { 3738 ValueList Operands; 3739 // Prepare the operand vector. 3740 for (Value *V : VL) 3741 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 3742 3743 buildTree_rec(Operands, Depth + 1, {TE, i}); 3744 } 3745 return; 3746 } 3747 case Instruction::Store: { 3748 // Check if the stores are consecutive or if we need to swizzle them. 3749 llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType(); 3750 // Avoid types that are padded when being allocated as scalars, while 3751 // being packed together in a vector (such as i1). 3752 if (DL->getTypeSizeInBits(ScalarTy) != 3753 DL->getTypeAllocSizeInBits(ScalarTy)) { 3754 BS.cancelScheduling(VL, VL0); 3755 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3756 ReuseShuffleIndicies); 3757 LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n"); 3758 return; 3759 } 3760 // Make sure all stores in the bundle are simple - we can't vectorize 3761 // atomic or volatile stores. 3762 SmallVector<Value *, 4> PointerOps(VL.size()); 3763 ValueList Operands(VL.size()); 3764 auto POIter = PointerOps.begin(); 3765 auto OIter = Operands.begin(); 3766 for (Value *V : VL) { 3767 auto *SI = cast<StoreInst>(V); 3768 if (!SI->isSimple()) { 3769 BS.cancelScheduling(VL, VL0); 3770 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3771 ReuseShuffleIndicies); 3772 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n"); 3773 return; 3774 } 3775 *POIter = SI->getPointerOperand(); 3776 *OIter = SI->getValueOperand(); 3777 ++POIter; 3778 ++OIter; 3779 } 3780 3781 OrdersType CurrentOrder; 3782 // Check the order of pointer operands. 3783 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) { 3784 Value *Ptr0; 3785 Value *PtrN; 3786 if (CurrentOrder.empty()) { 3787 Ptr0 = PointerOps.front(); 3788 PtrN = PointerOps.back(); 3789 } else { 3790 Ptr0 = PointerOps[CurrentOrder.front()]; 3791 PtrN = PointerOps[CurrentOrder.back()]; 3792 } 3793 Optional<int> Dist = 3794 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE); 3795 // Check that the sorted pointer operands are consecutive. 3796 if (static_cast<unsigned>(*Dist) == VL.size() - 1) { 3797 if (CurrentOrder.empty()) { 3798 // Original stores are consecutive and does not require reordering. 
3799 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, 3800 UserTreeIdx, ReuseShuffleIndicies); 3801 TE->setOperandsInOrder(); 3802 buildTree_rec(Operands, Depth + 1, {TE, 0}); 3803 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n"); 3804 } else { 3805 fixupOrderingIndices(CurrentOrder); 3806 TreeEntry *TE = 3807 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3808 ReuseShuffleIndicies, CurrentOrder); 3809 TE->setOperandsInOrder(); 3810 buildTree_rec(Operands, Depth + 1, {TE, 0}); 3811 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n"); 3812 } 3813 return; 3814 } 3815 } 3816 3817 BS.cancelScheduling(VL, VL0); 3818 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3819 ReuseShuffleIndicies); 3820 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); 3821 return; 3822 } 3823 case Instruction::Call: { 3824 // Check if the calls are all to the same vectorizable intrinsic or 3825 // library function. 3826 CallInst *CI = cast<CallInst>(VL0); 3827 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3828 3829 VFShape Shape = VFShape::get( 3830 *CI, ElementCount::getFixed(static_cast<unsigned int>(VL.size())), 3831 false /*HasGlobalPred*/); 3832 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3833 3834 if (!VecFunc && !isTriviallyVectorizable(ID)) { 3835 BS.cancelScheduling(VL, VL0); 3836 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3837 ReuseShuffleIndicies); 3838 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n"); 3839 return; 3840 } 3841 Function *F = CI->getCalledFunction(); 3842 unsigned NumArgs = CI->getNumArgOperands(); 3843 SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr); 3844 for (unsigned j = 0; j != NumArgs; ++j) 3845 if (hasVectorInstrinsicScalarOpd(ID, j)) 3846 ScalarArgs[j] = CI->getArgOperand(j); 3847 for (Value *V : VL) { 3848 CallInst *CI2 = dyn_cast<CallInst>(V); 3849 if (!CI2 || CI2->getCalledFunction() != F || 3850 getVectorIntrinsicIDForCall(CI2, TLI) != ID || 3851 (VecFunc && 3852 VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) || 3853 !CI->hasIdenticalOperandBundleSchema(*CI2)) { 3854 BS.cancelScheduling(VL, VL0); 3855 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3856 ReuseShuffleIndicies); 3857 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V 3858 << "\n"); 3859 return; 3860 } 3861 // Some intrinsics have scalar arguments and should be same in order for 3862 // them to be vectorized. 3863 for (unsigned j = 0; j != NumArgs; ++j) { 3864 if (hasVectorInstrinsicScalarOpd(ID, j)) { 3865 Value *A1J = CI2->getArgOperand(j); 3866 if (ScalarArgs[j] != A1J) { 3867 BS.cancelScheduling(VL, VL0); 3868 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3869 ReuseShuffleIndicies); 3870 LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI 3871 << " argument " << ScalarArgs[j] << "!=" << A1J 3872 << "\n"); 3873 return; 3874 } 3875 } 3876 } 3877 // Verify that the bundle operands are identical between the two calls. 
3878 if (CI->hasOperandBundles() && 3879 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(), 3880 CI->op_begin() + CI->getBundleOperandsEndIndex(), 3881 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) { 3882 BS.cancelScheduling(VL, VL0); 3883 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3884 ReuseShuffleIndicies); 3885 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" 3886 << *CI << "!=" << *V << '\n'); 3887 return; 3888 } 3889 } 3890 3891 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3892 ReuseShuffleIndicies); 3893 TE->setOperandsInOrder(); 3894 for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) { 3895 ValueList Operands; 3896 // Prepare the operand vector. 3897 for (Value *V : VL) { 3898 auto *CI2 = cast<CallInst>(V); 3899 Operands.push_back(CI2->getArgOperand(i)); 3900 } 3901 buildTree_rec(Operands, Depth + 1, {TE, i}); 3902 } 3903 return; 3904 } 3905 case Instruction::ShuffleVector: { 3906 // If this is not an alternate sequence of opcode like add-sub 3907 // then do not vectorize this instruction. 3908 if (!S.isAltShuffle()) { 3909 BS.cancelScheduling(VL, VL0); 3910 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3911 ReuseShuffleIndicies); 3912 LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n"); 3913 return; 3914 } 3915 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3916 ReuseShuffleIndicies); 3917 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n"); 3918 3919 // Reorder operands if reordering would enable vectorization. 3920 if (isa<BinaryOperator>(VL0)) { 3921 ValueList Left, Right; 3922 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 3923 TE->setOperand(0, Left); 3924 TE->setOperand(1, Right); 3925 buildTree_rec(Left, Depth + 1, {TE, 0}); 3926 buildTree_rec(Right, Depth + 1, {TE, 1}); 3927 return; 3928 } 3929 3930 TE->setOperandsInOrder(); 3931 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 3932 ValueList Operands; 3933 // Prepare the operand vector. 3934 for (Value *V : VL) 3935 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 3936 3937 buildTree_rec(Operands, Depth + 1, {TE, i}); 3938 } 3939 return; 3940 } 3941 default: 3942 BS.cancelScheduling(VL, VL0); 3943 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3944 ReuseShuffleIndicies); 3945 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); 3946 return; 3947 } 3948 } 3949 3950 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const { 3951 unsigned N = 1; 3952 Type *EltTy = T; 3953 3954 while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) || 3955 isa<VectorType>(EltTy)) { 3956 if (auto *ST = dyn_cast<StructType>(EltTy)) { 3957 // Check that struct is homogeneous. 
3958 for (const auto *Ty : ST->elements()) 3959 if (Ty != *ST->element_begin()) 3960 return 0; 3961 N *= ST->getNumElements(); 3962 EltTy = *ST->element_begin(); 3963 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) { 3964 N *= AT->getNumElements(); 3965 EltTy = AT->getElementType(); 3966 } else { 3967 auto *VT = cast<FixedVectorType>(EltTy); 3968 N *= VT->getNumElements(); 3969 EltTy = VT->getElementType(); 3970 } 3971 } 3972 3973 if (!isValidElementType(EltTy)) 3974 return 0; 3975 uint64_t VTSize = DL.getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N)); 3976 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T)) 3977 return 0; 3978 return N; 3979 } 3980 3981 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 3982 SmallVectorImpl<unsigned> &CurrentOrder) const { 3983 Instruction *E0 = cast<Instruction>(OpValue); 3984 assert(E0->getOpcode() == Instruction::ExtractElement || 3985 E0->getOpcode() == Instruction::ExtractValue); 3986 assert(E0->getOpcode() == getSameOpcode(VL).getOpcode() && "Invalid opcode"); 3987 // Check if all of the extracts come from the same vector and from the 3988 // correct offset. 3989 Value *Vec = E0->getOperand(0); 3990 3991 CurrentOrder.clear(); 3992 3993 // We have to extract from a vector/aggregate with the same number of elements. 3994 unsigned NElts; 3995 if (E0->getOpcode() == Instruction::ExtractValue) { 3996 const DataLayout &DL = E0->getModule()->getDataLayout(); 3997 NElts = canMapToVector(Vec->getType(), DL); 3998 if (!NElts) 3999 return false; 4000 // Check if load can be rewritten as load of vector. 4001 LoadInst *LI = dyn_cast<LoadInst>(Vec); 4002 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 4003 return false; 4004 } else { 4005 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); 4006 } 4007 4008 if (NElts != VL.size()) 4009 return false; 4010 4011 // Check that all of the indices extract from the correct offset. 4012 bool ShouldKeepOrder = true; 4013 unsigned E = VL.size(); 4014 // Assign to all items the initial value E + 1 so we can check if the extract 4015 // instruction index was used already. 4016 // Also, later we can check that all the indices are used and we have a 4017 // consecutive access in the extract instructions, by checking that no 4018 // element of CurrentOrder still has value E + 1. 
4019 CurrentOrder.assign(E, E + 1); 4020 unsigned I = 0; 4021 for (; I < E; ++I) { 4022 auto *Inst = cast<Instruction>(VL[I]); 4023 if (Inst->getOperand(0) != Vec) 4024 break; 4025 Optional<unsigned> Idx = getExtractIndex(Inst); 4026 if (!Idx) 4027 break; 4028 const unsigned ExtIdx = *Idx; 4029 if (ExtIdx != I) { 4030 if (ExtIdx >= E || CurrentOrder[ExtIdx] != E + 1) 4031 break; 4032 ShouldKeepOrder = false; 4033 CurrentOrder[ExtIdx] = I; 4034 } else { 4035 if (CurrentOrder[I] != E + 1) 4036 break; 4037 CurrentOrder[I] = I; 4038 } 4039 } 4040 if (I < E) { 4041 CurrentOrder.clear(); 4042 return false; 4043 } 4044 4045 return ShouldKeepOrder; 4046 } 4047 4048 bool BoUpSLP::areAllUsersVectorized(Instruction *I, 4049 ArrayRef<Value *> VectorizedVals) const { 4050 return (I->hasOneUse() && is_contained(VectorizedVals, I)) || 4051 llvm::all_of(I->users(), [this](User *U) { 4052 return ScalarToTreeEntry.count(U) > 0; 4053 }); 4054 } 4055 4056 static std::pair<InstructionCost, InstructionCost> 4057 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy, 4058 TargetTransformInfo *TTI, TargetLibraryInfo *TLI) { 4059 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4060 4061 // Calculate the cost of the scalar and vector calls. 4062 SmallVector<Type *, 4> VecTys; 4063 for (Use &Arg : CI->args()) 4064 VecTys.push_back( 4065 FixedVectorType::get(Arg->getType(), VecTy->getNumElements())); 4066 FastMathFlags FMF; 4067 if (auto *FPCI = dyn_cast<FPMathOperator>(CI)) 4068 FMF = FPCI->getFastMathFlags(); 4069 SmallVector<const Value *> Arguments(CI->args()); 4070 IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF, 4071 dyn_cast<IntrinsicInst>(CI)); 4072 auto IntrinsicCost = 4073 TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput); 4074 4075 auto Shape = VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>( 4076 VecTy->getNumElements())), 4077 false /*HasGlobalPred*/); 4078 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 4079 auto LibCost = IntrinsicCost; 4080 if (!CI->isNoBuiltin() && VecFunc) { 4081 // Calculate the cost of the vector library call. 4082 // If the corresponding vector call is cheaper, return its cost. 4083 LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys, 4084 TTI::TCK_RecipThroughput); 4085 } 4086 return {IntrinsicCost, LibCost}; 4087 } 4088 4089 /// Compute the cost of creating a vector of type \p VecTy containing the 4090 /// extracted values from \p VL. 4091 static InstructionCost 4092 computeExtractCost(ArrayRef<Value *> VL, FixedVectorType *VecTy, 4093 TargetTransformInfo::ShuffleKind ShuffleKind, 4094 ArrayRef<int> Mask, TargetTransformInfo &TTI) { 4095 unsigned NumOfParts = TTI.getNumberOfParts(VecTy); 4096 4097 if (ShuffleKind != TargetTransformInfo::SK_PermuteSingleSrc || !NumOfParts || 4098 VecTy->getNumElements() < NumOfParts) 4099 return TTI.getShuffleCost(ShuffleKind, VecTy, Mask); 4100 4101 bool AllConsecutive = true; 4102 unsigned EltsPerVector = VecTy->getNumElements() / NumOfParts; 4103 unsigned Idx = -1; 4104 InstructionCost Cost = 0; 4105 4106 // Process extracts in blocks of EltsPerVector to check if the source vector 4107 // operand can be re-used directly. If not, add the cost of creating a shuffle 4108 // to extract the values into a vector register. 4109 for (auto *V : VL) { 4110 ++Idx; 4111 4112 // Reached the start of a new vector registers. 
    if (Idx % EltsPerVector == 0) {
      AllConsecutive = true;
      continue;
    }

    // Check whether the extracts for a single target vector register directly
    // extract the values in order.
    unsigned CurrentIdx = *getExtractIndex(cast<Instruction>(V));
    unsigned PrevIdx = *getExtractIndex(cast<Instruction>(VL[Idx - 1]));
    AllConsecutive &= PrevIdx + 1 == CurrentIdx &&
                      CurrentIdx % EltsPerVector == Idx % EltsPerVector;

    if (AllConsecutive)
      continue;

    // Skip all indices, except for the last index per vector block.
    if ((Idx + 1) % EltsPerVector != 0 && Idx + 1 != VL.size())
      continue;

    // If we have a series of extracts which are not consecutive and hence
    // cannot re-use the source vector register directly, compute the shuffle
    // cost to extract a vector with EltsPerVector elements.
    Cost += TTI.getShuffleCost(
        TargetTransformInfo::SK_PermuteSingleSrc,
        FixedVectorType::get(VecTy->getElementType(), EltsPerVector));
  }
  return Cost;
}

/// Build a shuffle mask for a shuffle graph entry and the lists of main and
/// alternate operation operands.
static void
buildSuffleEntryMask(ArrayRef<Value *> VL, ArrayRef<unsigned> ReorderIndices,
                     ArrayRef<int> ReusesIndices,
                     const function_ref<bool(Instruction *)> IsAltOp,
                     SmallVectorImpl<int> &Mask,
                     SmallVectorImpl<Value *> *OpScalars = nullptr,
                     SmallVectorImpl<Value *> *AltScalars = nullptr) {
  unsigned Sz = VL.size();
  Mask.assign(Sz, UndefMaskElem);
  SmallVector<int> OrderMask;
  if (!ReorderIndices.empty())
    inversePermutation(ReorderIndices, OrderMask);
  for (unsigned I = 0; I < Sz; ++I) {
    unsigned Idx = I;
    if (!ReorderIndices.empty())
      Idx = OrderMask[I];
    auto *OpInst = cast<Instruction>(VL[Idx]);
    if (IsAltOp(OpInst)) {
      Mask[I] = Sz + Idx;
      if (AltScalars)
        AltScalars->push_back(OpInst);
    } else {
      Mask[I] = Idx;
      if (OpScalars)
        OpScalars->push_back(OpInst);
    }
  }
  if (!ReusesIndices.empty()) {
    SmallVector<int> NewMask(ReusesIndices.size(), UndefMaskElem);
    transform(ReusesIndices, NewMask.begin(), [&Mask](int Idx) {
      return Idx != UndefMaskElem ? Mask[Idx] : UndefMaskElem;
    });
    Mask.swap(NewMask);
  }
}

InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
                                      ArrayRef<Value *> VectorizedVals) {
  ArrayRef<Value*> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0]))
    ScalarTy = CI->getOperand(0)->getType();
  else if (auto *IE = dyn_cast<InsertElementInst>(VL[0]))
    ScalarTy = IE->getOperand(1)->getType();
  auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  // If we have computed a smaller type for the expression, update VecTy so
  // that the costs will be accurate.
4196 if (MinBWs.count(VL[0])) 4197 VecTy = FixedVectorType::get( 4198 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size()); 4199 auto *FinalVecTy = VecTy; 4200 4201 unsigned ReuseShuffleNumbers = E->ReuseShuffleIndices.size(); 4202 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 4203 if (NeedToShuffleReuses) 4204 FinalVecTy = 4205 FixedVectorType::get(VecTy->getElementType(), ReuseShuffleNumbers); 4206 // FIXME: it tries to fix a problem with MSVC buildbots. 4207 TargetTransformInfo &TTIRef = *TTI; 4208 auto &&AdjustExtractsCost = [this, &TTIRef, CostKind, VL, VecTy, 4209 VectorizedVals](InstructionCost &Cost, 4210 bool IsGather) { 4211 DenseMap<Value *, int> ExtractVectorsTys; 4212 for (auto *V : VL) { 4213 // If all users of instruction are going to be vectorized and this 4214 // instruction itself is not going to be vectorized, consider this 4215 // instruction as dead and remove its cost from the final cost of the 4216 // vectorized tree. 4217 if (!areAllUsersVectorized(cast<Instruction>(V), VectorizedVals) || 4218 (IsGather && ScalarToTreeEntry.count(V))) 4219 continue; 4220 auto *EE = cast<ExtractElementInst>(V); 4221 unsigned Idx = *getExtractIndex(EE); 4222 if (TTIRef.getNumberOfParts(VecTy) != 4223 TTIRef.getNumberOfParts(EE->getVectorOperandType())) { 4224 auto It = 4225 ExtractVectorsTys.try_emplace(EE->getVectorOperand(), Idx).first; 4226 It->getSecond() = std::min<int>(It->second, Idx); 4227 } 4228 // Take credit for instruction that will become dead. 4229 if (EE->hasOneUse()) { 4230 Instruction *Ext = EE->user_back(); 4231 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 4232 all_of(Ext->users(), 4233 [](User *U) { return isa<GetElementPtrInst>(U); })) { 4234 // Use getExtractWithExtendCost() to calculate the cost of 4235 // extractelement/ext pair. 4236 Cost -= 4237 TTIRef.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(), 4238 EE->getVectorOperandType(), Idx); 4239 // Add back the cost of s|zext which is subtracted separately. 4240 Cost += TTIRef.getCastInstrCost( 4241 Ext->getOpcode(), Ext->getType(), EE->getType(), 4242 TTI::getCastContextHint(Ext), CostKind, Ext); 4243 continue; 4244 } 4245 } 4246 Cost -= TTIRef.getVectorInstrCost(Instruction::ExtractElement, 4247 EE->getVectorOperandType(), Idx); 4248 } 4249 // Add a cost for subvector extracts/inserts if required. 4250 for (const auto &Data : ExtractVectorsTys) { 4251 auto *EEVTy = cast<FixedVectorType>(Data.first->getType()); 4252 unsigned NumElts = VecTy->getNumElements(); 4253 if (TTIRef.getNumberOfParts(EEVTy) > TTIRef.getNumberOfParts(VecTy)) { 4254 unsigned Idx = (Data.second / NumElts) * NumElts; 4255 unsigned EENumElts = EEVTy->getNumElements(); 4256 if (Idx + NumElts <= EENumElts) { 4257 Cost += 4258 TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 4259 EEVTy, None, Idx, VecTy); 4260 } else { 4261 // Need to round up the subvector type vectorization factor to avoid a 4262 // crash in cost model functions. Make SubVT so that Idx + VF of SubVT 4263 // <= EENumElts. 
4264 auto *SubVT = 4265 FixedVectorType::get(VecTy->getElementType(), EENumElts - Idx); 4266 Cost += 4267 TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 4268 EEVTy, None, Idx, SubVT); 4269 } 4270 } else { 4271 Cost += TTIRef.getShuffleCost(TargetTransformInfo::SK_InsertSubvector, 4272 VecTy, None, 0, EEVTy); 4273 } 4274 } 4275 }; 4276 if (E->State == TreeEntry::NeedToGather) { 4277 if (allConstant(VL)) 4278 return 0; 4279 if (isa<InsertElementInst>(VL[0])) 4280 return InstructionCost::getInvalid(); 4281 SmallVector<int> Mask; 4282 SmallVector<const TreeEntry *> Entries; 4283 Optional<TargetTransformInfo::ShuffleKind> Shuffle = 4284 isGatherShuffledEntry(E, Mask, Entries); 4285 if (Shuffle.hasValue()) { 4286 InstructionCost GatherCost = 0; 4287 if (ShuffleVectorInst::isIdentityMask(Mask)) { 4288 // Perfect match in the graph, will reuse the previously vectorized 4289 // node. Cost is 0. 4290 LLVM_DEBUG( 4291 dbgs() 4292 << "SLP: perfect diamond match for gather bundle that starts with " 4293 << *VL.front() << ".\n"); 4294 if (NeedToShuffleReuses) 4295 GatherCost = 4296 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, 4297 FinalVecTy, E->ReuseShuffleIndices); 4298 } else { 4299 LLVM_DEBUG(dbgs() << "SLP: shuffled " << Entries.size() 4300 << " entries for bundle that starts with " 4301 << *VL.front() << ".\n"); 4302 // Detected that instead of gather we can emit a shuffle of single/two 4303 // previously vectorized nodes. Add the cost of the permutation rather 4304 // than gather. 4305 ::addMask(Mask, E->ReuseShuffleIndices); 4306 GatherCost = TTI->getShuffleCost(*Shuffle, FinalVecTy, Mask); 4307 } 4308 return GatherCost; 4309 } 4310 if (isSplat(VL)) { 4311 // Found the broadcasting of the single scalar, calculate the cost as the 4312 // broadcast. 4313 return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy); 4314 } 4315 if (E->getOpcode() == Instruction::ExtractElement && allSameType(VL) && 4316 allSameBlock(VL) && 4317 !isa<ScalableVectorType>( 4318 cast<ExtractElementInst>(E->getMainOp())->getVectorOperandType())) { 4319 // Check that gather of extractelements can be represented as just a 4320 // shuffle of a single/two vectors the scalars are extracted from. 4321 SmallVector<int> Mask; 4322 Optional<TargetTransformInfo::ShuffleKind> ShuffleKind = 4323 isShuffle(VL, Mask); 4324 if (ShuffleKind.hasValue()) { 4325 // Found the bunch of extractelement instructions that must be gathered 4326 // into a vector and can be represented as a permutation elements in a 4327 // single input vector or of 2 input vectors. 4328 InstructionCost Cost = 4329 computeExtractCost(VL, VecTy, *ShuffleKind, Mask, *TTI); 4330 AdjustExtractsCost(Cost, /*IsGather=*/true); 4331 if (NeedToShuffleReuses) 4332 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, 4333 FinalVecTy, E->ReuseShuffleIndices); 4334 return Cost; 4335 } 4336 } 4337 InstructionCost ReuseShuffleCost = 0; 4338 if (NeedToShuffleReuses) 4339 ReuseShuffleCost = TTI->getShuffleCost( 4340 TTI::SK_PermuteSingleSrc, FinalVecTy, E->ReuseShuffleIndices); 4341 // Improve gather cost for gather of loads, if we can group some of the 4342 // loads into vector loads. 
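    // Try progressively smaller power-of-two slice sizes, starting from half
    // of the bundle, and give cost credit for slices that can be emitted as
    // contiguous vector loads or masked gathers instead of being gathered
    // element by element.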
4343 if (VL.size() > 2 && E->getOpcode() == Instruction::Load && 4344 !E->isAltShuffle()) { 4345 BoUpSLP::ValueSet VectorizedLoads; 4346 unsigned StartIdx = 0; 4347 unsigned VF = VL.size() / 2; 4348 unsigned VectorizedCnt = 0; 4349 unsigned ScatterVectorizeCnt = 0; 4350 const unsigned Sz = DL->getTypeSizeInBits(E->getMainOp()->getType()); 4351 for (unsigned MinVF = getMinVF(2 * Sz); VF >= MinVF; VF /= 2) { 4352 for (unsigned Cnt = StartIdx, End = VL.size(); Cnt + VF <= End; 4353 Cnt += VF) { 4354 ArrayRef<Value *> Slice = VL.slice(Cnt, VF); 4355 if (!VectorizedLoads.count(Slice.front()) && 4356 !VectorizedLoads.count(Slice.back()) && allSameBlock(Slice)) { 4357 SmallVector<Value *> PointerOps; 4358 OrdersType CurrentOrder; 4359 LoadsState LS = canVectorizeLoads(Slice, Slice.front(), *TTI, *DL, 4360 *SE, CurrentOrder, PointerOps); 4361 switch (LS) { 4362 case LoadsState::Vectorize: 4363 case LoadsState::ScatterVectorize: 4364 // Mark the vectorized loads so that we don't vectorize them 4365 // again. 4366 if (LS == LoadsState::Vectorize) 4367 ++VectorizedCnt; 4368 else 4369 ++ScatterVectorizeCnt; 4370 VectorizedLoads.insert(Slice.begin(), Slice.end()); 4371 // If we vectorized initial block, no need to try to vectorize it 4372 // again. 4373 if (Cnt == StartIdx) 4374 StartIdx += VF; 4375 break; 4376 case LoadsState::Gather: 4377 break; 4378 } 4379 } 4380 } 4381 // Check if the whole array was vectorized already - exit. 4382 if (StartIdx >= VL.size()) 4383 break; 4384 // Found vectorizable parts - exit. 4385 if (!VectorizedLoads.empty()) 4386 break; 4387 } 4388 if (!VectorizedLoads.empty()) { 4389 InstructionCost GatherCost = 0; 4390 // Get the cost for gathered loads. 4391 for (unsigned I = 0, End = VL.size(); I < End; I += VF) { 4392 if (VectorizedLoads.contains(VL[I])) 4393 continue; 4394 GatherCost += getGatherCost(VL.slice(I, VF)); 4395 } 4396 // The cost for vectorized loads. 4397 InstructionCost ScalarsCost = 0; 4398 for (Value *V : VectorizedLoads) { 4399 auto *LI = cast<LoadInst>(V); 4400 ScalarsCost += TTI->getMemoryOpCost( 4401 Instruction::Load, LI->getType(), LI->getAlign(), 4402 LI->getPointerAddressSpace(), CostKind, LI); 4403 } 4404 auto *LI = cast<LoadInst>(E->getMainOp()); 4405 auto *LoadTy = FixedVectorType::get(LI->getType(), VF); 4406 Align Alignment = LI->getAlign(); 4407 GatherCost += 4408 VectorizedCnt * 4409 TTI->getMemoryOpCost(Instruction::Load, LoadTy, Alignment, 4410 LI->getPointerAddressSpace(), CostKind, LI); 4411 GatherCost += ScatterVectorizeCnt * 4412 TTI->getGatherScatterOpCost( 4413 Instruction::Load, LoadTy, LI->getPointerOperand(), 4414 /*VariableMask=*/false, Alignment, CostKind, LI); 4415 // Add the cost for the subvectors shuffling. 4416 GatherCost += ((VL.size() - VF) / VF) * 4417 TTI->getShuffleCost(TTI::SK_Select, VecTy); 4418 return ReuseShuffleCost + GatherCost - ScalarsCost; 4419 } 4420 } 4421 return ReuseShuffleCost + getGatherCost(VL); 4422 } 4423 InstructionCost CommonCost = 0; 4424 SmallVector<int> Mask; 4425 if (!E->ReorderIndices.empty()) { 4426 SmallVector<int> NewMask; 4427 if (E->getOpcode() == Instruction::Store) { 4428 // For stores the order is actually a mask. 
      NewMask.resize(E->ReorderIndices.size());
      copy(E->ReorderIndices, NewMask.begin());
    } else {
      inversePermutation(E->ReorderIndices, NewMask);
    }
    ::addMask(Mask, NewMask);
  }
  if (NeedToShuffleReuses)
    ::addMask(Mask, E->ReuseShuffleIndices);
  if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask))
    CommonCost =
        TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask);
  assert((E->State == TreeEntry::Vectorize ||
          E->State == TreeEntry::ScatterVectorize) &&
         "Unhandled state");
  assert(E->getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
  Instruction *VL0 = E->getMainOp();
  unsigned ShuffleOrOp =
      E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
  switch (ShuffleOrOp) {
  case Instruction::PHI:
    return 0;

  case Instruction::ExtractValue:
  case Instruction::ExtractElement: {
    // The common cost of removing the ExtractElement/ExtractValue
    // instructions plus the cost of the shuffles, if required to reshuffle
    // the original vector.
    if (NeedToShuffleReuses) {
      unsigned Idx = 0;
      for (unsigned I : E->ReuseShuffleIndices) {
        if (ShuffleOrOp == Instruction::ExtractElement) {
          auto *EE = cast<ExtractElementInst>(VL[I]);
          CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
                                                EE->getVectorOperandType(),
                                                *getExtractIndex(EE));
        } else {
          CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
                                                VecTy, Idx);
          ++Idx;
        }
      }
      Idx = ReuseShuffleNumbers;
      for (Value *V : VL) {
        if (ShuffleOrOp == Instruction::ExtractElement) {
          auto *EE = cast<ExtractElementInst>(V);
          CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
                                                EE->getVectorOperandType(),
                                                *getExtractIndex(EE));
        } else {
          --Idx;
          CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
                                                VecTy, Idx);
        }
      }
    }
    if (ShuffleOrOp == Instruction::ExtractValue) {
      for (unsigned I = 0, E = VL.size(); I < E; ++I) {
        auto *EI = cast<Instruction>(VL[I]);
        // Take credit for instruction that will become dead.
        if (EI->hasOneUse()) {
          Instruction *Ext = EI->user_back();
          if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
              all_of(Ext->users(),
                     [](User *U) { return isa<GetElementPtrInst>(U); })) {
            // Use getExtractWithExtendCost() to calculate the cost of
            // extractelement/ext pair.
            CommonCost -= TTI->getExtractWithExtendCost(
                Ext->getOpcode(), Ext->getType(), VecTy, I);
            // Add back the cost of s|zext which is subtracted separately.
            CommonCost += TTI->getCastInstrCost(
                Ext->getOpcode(), Ext->getType(), EI->getType(),
                TTI::getCastContextHint(Ext), CostKind, Ext);
            continue;
          }
        }
        CommonCost -=
            TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, I);
      }
    } else {
      AdjustExtractsCost(CommonCost, /*IsGather=*/false);
    }
    return CommonCost;
  }
  case Instruction::InsertElement: {
    assert(E->ReuseShuffleIndices.empty() &&
           "Unique insertelements only are expected.");
    auto *SrcVecTy = cast<FixedVectorType>(VL0->getType());

    unsigned const NumElts = SrcVecTy->getNumElements();
    unsigned const NumScalars = VL.size();
    APInt DemandedElts = APInt::getZero(NumElts);
    // TODO: Add support for Instruction::InsertValue.
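    // Build a mask that maps each scalar's destination lane, relative to
    // Offset (the insertion index of the first scalar), back to its position
    // in the bundle; DemandedElts records which lanes of the source vector
    // are overwritten by the inserts.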
4521 SmallVector<int> Mask; 4522 if (!E->ReorderIndices.empty()) { 4523 inversePermutation(E->ReorderIndices, Mask); 4524 Mask.append(NumElts - NumScalars, UndefMaskElem); 4525 } else { 4526 Mask.assign(NumElts, UndefMaskElem); 4527 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0); 4528 } 4529 unsigned Offset = *getInsertIndex(VL0, 0); 4530 bool IsIdentity = true; 4531 SmallVector<int> PrevMask(NumElts, UndefMaskElem); 4532 Mask.swap(PrevMask); 4533 for (unsigned I = 0; I < NumScalars; ++I) { 4534 Optional<int> InsertIdx = getInsertIndex(VL[PrevMask[I]], 0); 4535 if (!InsertIdx || *InsertIdx == UndefMaskElem) 4536 continue; 4537 DemandedElts.setBit(*InsertIdx); 4538 IsIdentity &= *InsertIdx - Offset == I; 4539 Mask[*InsertIdx - Offset] = I; 4540 } 4541 assert(Offset < NumElts && "Failed to find vector index offset"); 4542 4543 InstructionCost Cost = 0; 4544 Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts, 4545 /*Insert*/ true, /*Extract*/ false); 4546 4547 if (IsIdentity && NumElts != NumScalars && Offset % NumScalars != 0) { 4548 // FIXME: Replace with SK_InsertSubvector once it is properly supported. 4549 unsigned Sz = PowerOf2Ceil(Offset + NumScalars); 4550 Cost += TTI->getShuffleCost( 4551 TargetTransformInfo::SK_PermuteSingleSrc, 4552 FixedVectorType::get(SrcVecTy->getElementType(), Sz)); 4553 } else if (!IsIdentity) { 4554 auto *FirstInsert = 4555 cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 4556 return !is_contained(E->Scalars, 4557 cast<Instruction>(V)->getOperand(0)); 4558 })); 4559 if (isa<UndefValue>(FirstInsert->getOperand(0))) { 4560 Cost += TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, SrcVecTy, Mask); 4561 } else { 4562 SmallVector<int> InsertMask(NumElts); 4563 std::iota(InsertMask.begin(), InsertMask.end(), 0); 4564 for (unsigned I = 0; I < NumElts; I++) { 4565 if (Mask[I] != UndefMaskElem) 4566 InsertMask[Offset + I] = NumElts + I; 4567 } 4568 Cost += 4569 TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVecTy, InsertMask); 4570 } 4571 } 4572 4573 return Cost; 4574 } 4575 case Instruction::ZExt: 4576 case Instruction::SExt: 4577 case Instruction::FPToUI: 4578 case Instruction::FPToSI: 4579 case Instruction::FPExt: 4580 case Instruction::PtrToInt: 4581 case Instruction::IntToPtr: 4582 case Instruction::SIToFP: 4583 case Instruction::UIToFP: 4584 case Instruction::Trunc: 4585 case Instruction::FPTrunc: 4586 case Instruction::BitCast: { 4587 Type *SrcTy = VL0->getOperand(0)->getType(); 4588 InstructionCost ScalarEltCost = 4589 TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy, 4590 TTI::getCastContextHint(VL0), CostKind, VL0); 4591 if (NeedToShuffleReuses) { 4592 CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 4593 } 4594 4595 // Calculate the cost of this instruction. 4596 InstructionCost ScalarCost = VL.size() * ScalarEltCost; 4597 4598 auto *SrcVecTy = FixedVectorType::get(SrcTy, VL.size()); 4599 InstructionCost VecCost = 0; 4600 // Check if the values are candidates to demote. 4601 if (!MinBWs.count(VL0) || VecTy != SrcVecTy) { 4602 VecCost = CommonCost + TTI->getCastInstrCost( 4603 E->getOpcode(), VecTy, SrcVecTy, 4604 TTI::getCastContextHint(VL0), CostKind, VL0); 4605 } 4606 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 4607 return VecCost - ScalarCost; 4608 } 4609 case Instruction::FCmp: 4610 case Instruction::ICmp: 4611 case Instruction::Select: { 4612 // Calculate the cost of this instruction. 
4613 InstructionCost ScalarEltCost = 4614 TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, Builder.getInt1Ty(), 4615 CmpInst::BAD_ICMP_PREDICATE, CostKind, VL0); 4616 if (NeedToShuffleReuses) { 4617 CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 4618 } 4619 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size()); 4620 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; 4621 4622 // Check if all entries in VL are either compares or selects with compares 4623 // as condition that have the same predicates. 4624 CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE; 4625 bool First = true; 4626 for (auto *V : VL) { 4627 CmpInst::Predicate CurrentPred; 4628 auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value()); 4629 if ((!match(V, m_Select(MatchCmp, m_Value(), m_Value())) && 4630 !match(V, MatchCmp)) || 4631 (!First && VecPred != CurrentPred)) { 4632 VecPred = CmpInst::BAD_ICMP_PREDICATE; 4633 break; 4634 } 4635 First = false; 4636 VecPred = CurrentPred; 4637 } 4638 4639 InstructionCost VecCost = TTI->getCmpSelInstrCost( 4640 E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0); 4641 // Check if it is possible and profitable to use min/max for selects in 4642 // VL. 4643 // 4644 auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL); 4645 if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) { 4646 IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy, 4647 {VecTy, VecTy}); 4648 InstructionCost IntrinsicCost = 4649 TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 4650 // If the selects are the only uses of the compares, they will be dead 4651 // and we can adjust the cost by removing their cost. 4652 if (IntrinsicAndUse.second) 4653 IntrinsicCost -= 4654 TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy, MaskTy, 4655 CmpInst::BAD_ICMP_PREDICATE, CostKind); 4656 VecCost = std::min(VecCost, IntrinsicCost); 4657 } 4658 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 4659 return CommonCost + VecCost - ScalarCost; 4660 } 4661 case Instruction::FNeg: 4662 case Instruction::Add: 4663 case Instruction::FAdd: 4664 case Instruction::Sub: 4665 case Instruction::FSub: 4666 case Instruction::Mul: 4667 case Instruction::FMul: 4668 case Instruction::UDiv: 4669 case Instruction::SDiv: 4670 case Instruction::FDiv: 4671 case Instruction::URem: 4672 case Instruction::SRem: 4673 case Instruction::FRem: 4674 case Instruction::Shl: 4675 case Instruction::LShr: 4676 case Instruction::AShr: 4677 case Instruction::And: 4678 case Instruction::Or: 4679 case Instruction::Xor: { 4680 // Certain instructions can be cheaper to vectorize if they have a 4681 // constant second vector operand. 4682 TargetTransformInfo::OperandValueKind Op1VK = 4683 TargetTransformInfo::OK_AnyValue; 4684 TargetTransformInfo::OperandValueKind Op2VK = 4685 TargetTransformInfo::OK_UniformConstantValue; 4686 TargetTransformInfo::OperandValueProperties Op1VP = 4687 TargetTransformInfo::OP_None; 4688 TargetTransformInfo::OperandValueProperties Op2VP = 4689 TargetTransformInfo::OP_PowerOf2; 4690 4691 // If all operands are exactly the same ConstantInt then set the 4692 // operand kind to OK_UniformConstantValue. 4693 // If instead not all operands are constants, then set the operand kind 4694 // to OK_AnyValue. If all operands are constants but not the same, 4695 // then set the operand kind to OK_NonUniformConstantValue. 
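// For example (illustrative values): for VL = { a * 8, b * 8, c * 8, d * 8 }
// every second operand is the same power-of-two ConstantInt, so Op2VK stays
// OK_UniformConstantValue and Op2VP stays OP_PowerOf2. For
// { a * 8, b * 12, c * 8, d * 8 } the constants differ and 12 is not a power
// of two, so Op2VK becomes OK_NonUniformConstantValue and Op2VP becomes
// OP_None.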
4696 ConstantInt *CInt0 = nullptr; 4697 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 4698 const Instruction *I = cast<Instruction>(VL[i]); 4699 unsigned OpIdx = isa<BinaryOperator>(I) ? 1 : 0; 4700 ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx)); 4701 if (!CInt) { 4702 Op2VK = TargetTransformInfo::OK_AnyValue; 4703 Op2VP = TargetTransformInfo::OP_None; 4704 break; 4705 } 4706 if (Op2VP == TargetTransformInfo::OP_PowerOf2 && 4707 !CInt->getValue().isPowerOf2()) 4708 Op2VP = TargetTransformInfo::OP_None; 4709 if (i == 0) { 4710 CInt0 = CInt; 4711 continue; 4712 } 4713 if (CInt0 != CInt) 4714 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 4715 } 4716 4717 SmallVector<const Value *, 4> Operands(VL0->operand_values()); 4718 InstructionCost ScalarEltCost = 4719 TTI->getArithmeticInstrCost(E->getOpcode(), ScalarTy, CostKind, Op1VK, 4720 Op2VK, Op1VP, Op2VP, Operands, VL0); 4721 if (NeedToShuffleReuses) { 4722 CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 4723 } 4724 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; 4725 InstructionCost VecCost = 4726 TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind, Op1VK, 4727 Op2VK, Op1VP, Op2VP, Operands, VL0); 4728 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 4729 return CommonCost + VecCost - ScalarCost; 4730 } 4731 case Instruction::GetElementPtr: { 4732 TargetTransformInfo::OperandValueKind Op1VK = 4733 TargetTransformInfo::OK_AnyValue; 4734 TargetTransformInfo::OperandValueKind Op2VK = 4735 TargetTransformInfo::OK_UniformConstantValue; 4736 4737 InstructionCost ScalarEltCost = TTI->getArithmeticInstrCost( 4738 Instruction::Add, ScalarTy, CostKind, Op1VK, Op2VK); 4739 if (NeedToShuffleReuses) { 4740 CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 4741 } 4742 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; 4743 InstructionCost VecCost = TTI->getArithmeticInstrCost( 4744 Instruction::Add, VecTy, CostKind, Op1VK, Op2VK); 4745 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 4746 return CommonCost + VecCost - ScalarCost; 4747 } 4748 case Instruction::Load: { 4749 // Cost of wide load - cost of scalar loads. 4750 Align Alignment = cast<LoadInst>(VL0)->getAlign(); 4751 InstructionCost ScalarEltCost = TTI->getMemoryOpCost( 4752 Instruction::Load, ScalarTy, Alignment, 0, CostKind, VL0); 4753 if (NeedToShuffleReuses) { 4754 CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 4755 } 4756 InstructionCost ScalarLdCost = VecTy->getNumElements() * ScalarEltCost; 4757 InstructionCost VecLdCost; 4758 if (E->State == TreeEntry::Vectorize) { 4759 VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, Alignment, 0, 4760 CostKind, VL0); 4761 } else { 4762 assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState"); 4763 Align CommonAlignment = Alignment; 4764 for (Value *V : VL) 4765 CommonAlignment = 4766 commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign()); 4767 VecLdCost = TTI->getGatherScatterOpCost( 4768 Instruction::Load, VecTy, cast<LoadInst>(VL0)->getPointerOperand(), 4769 /*VariableMask=*/false, CommonAlignment, CostKind, VL0); 4770 } 4771 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecLdCost, ScalarLdCost)); 4772 return CommonCost + VecLdCost - ScalarLdCost; 4773 } 4774 case Instruction::Store: { 4775 // We know that we can merge the stores. Calculate the cost. 4776 bool IsReorder = !E->ReorderIndices.empty(); 4777 auto *SI = 4778 cast<StoreInst>(IsReorder ? 
VL[E->ReorderIndices.front()] : VL0); 4779 Align Alignment = SI->getAlign(); 4780 InstructionCost ScalarEltCost = TTI->getMemoryOpCost( 4781 Instruction::Store, ScalarTy, Alignment, 0, CostKind, VL0); 4782 InstructionCost ScalarStCost = VecTy->getNumElements() * ScalarEltCost; 4783 InstructionCost VecStCost = TTI->getMemoryOpCost( 4784 Instruction::Store, VecTy, Alignment, 0, CostKind, VL0); 4785 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecStCost, ScalarStCost)); 4786 return CommonCost + VecStCost - ScalarStCost; 4787 } 4788 case Instruction::Call: { 4789 CallInst *CI = cast<CallInst>(VL0); 4790 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4791 4792 // Calculate the cost of the scalar and vector calls. 4793 IntrinsicCostAttributes CostAttrs(ID, *CI, 1); 4794 InstructionCost ScalarEltCost = 4795 TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 4796 if (NeedToShuffleReuses) { 4797 CommonCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 4798 } 4799 InstructionCost ScalarCallCost = VecTy->getNumElements() * ScalarEltCost; 4800 4801 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 4802 InstructionCost VecCallCost = 4803 std::min(VecCallCosts.first, VecCallCosts.second); 4804 4805 LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost 4806 << " (" << VecCallCost << "-" << ScalarCallCost << ")" 4807 << " for " << *CI << "\n"); 4808 4809 return CommonCost + VecCallCost - ScalarCallCost; 4810 } 4811 case Instruction::ShuffleVector: { 4812 assert(E->isAltShuffle() && 4813 ((Instruction::isBinaryOp(E->getOpcode()) && 4814 Instruction::isBinaryOp(E->getAltOpcode())) || 4815 (Instruction::isCast(E->getOpcode()) && 4816 Instruction::isCast(E->getAltOpcode()))) && 4817 "Invalid Shuffle Vector Operand"); 4818 InstructionCost ScalarCost = 0; 4819 if (NeedToShuffleReuses) { 4820 for (unsigned Idx : E->ReuseShuffleIndices) { 4821 Instruction *I = cast<Instruction>(VL[Idx]); 4822 CommonCost -= TTI->getInstructionCost(I, CostKind); 4823 } 4824 for (Value *V : VL) { 4825 Instruction *I = cast<Instruction>(V); 4826 CommonCost += TTI->getInstructionCost(I, CostKind); 4827 } 4828 } 4829 for (Value *V : VL) { 4830 Instruction *I = cast<Instruction>(V); 4831 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 4832 ScalarCost += TTI->getInstructionCost(I, CostKind); 4833 } 4834 // VecCost is equal to sum of the cost of creating 2 vectors 4835 // and the cost of creating shuffle. 
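// E.g. (illustrative): for VL = { a + b, c - d, e + f, g - h } the vector cost
// is cost(<4 x add>) + cost(<4 x sub>) plus the cost of one shufflevector that
// selects the even lanes from the add result and the odd lanes from the sub
// result.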
4836 InstructionCost VecCost = 0; 4837 if (Instruction::isBinaryOp(E->getOpcode())) { 4838 VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind); 4839 VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy, 4840 CostKind); 4841 } else { 4842 Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType(); 4843 Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType(); 4844 auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size()); 4845 auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size()); 4846 VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty, 4847 TTI::CastContextHint::None, CostKind); 4848 VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty, 4849 TTI::CastContextHint::None, CostKind); 4850 } 4851 4852 SmallVector<int> Mask; 4853 buildSuffleEntryMask( 4854 E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices, 4855 [E](Instruction *I) { 4856 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 4857 return I->getOpcode() == E->getAltOpcode(); 4858 }, 4859 Mask); 4860 CommonCost = 4861 TTI->getShuffleCost(TargetTransformInfo::SK_Select, FinalVecTy, Mask); 4862 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 4863 return CommonCost + VecCost - ScalarCost; 4864 } 4865 default: 4866 llvm_unreachable("Unknown instruction"); 4867 } 4868 } 4869 4870 bool BoUpSLP::isFullyVectorizableTinyTree() const { 4871 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height " 4872 << VectorizableTree.size() << " is fully vectorizable .\n"); 4873 4874 // We only handle trees of heights 1 and 2. 4875 if (VectorizableTree.size() == 1 && 4876 VectorizableTree[0]->State == TreeEntry::Vectorize) 4877 return true; 4878 4879 if (VectorizableTree.size() != 2) 4880 return false; 4881 4882 // Handle splat and all-constants stores. Also try to vectorize tiny trees 4883 // with the second gather nodes if they have less scalar operands rather than 4884 // the initial tree element (may be profitable to shuffle the second gather) 4885 // or they are extractelements, which form shuffle. 4886 SmallVector<int> Mask; 4887 if (VectorizableTree[0]->State == TreeEntry::Vectorize && 4888 (allConstant(VectorizableTree[1]->Scalars) || 4889 isSplat(VectorizableTree[1]->Scalars) || 4890 (VectorizableTree[1]->State == TreeEntry::NeedToGather && 4891 VectorizableTree[1]->Scalars.size() < 4892 VectorizableTree[0]->Scalars.size()) || 4893 (VectorizableTree[1]->State == TreeEntry::NeedToGather && 4894 VectorizableTree[1]->getOpcode() == Instruction::ExtractElement && 4895 isShuffle(VectorizableTree[1]->Scalars, Mask)))) 4896 return true; 4897 4898 // Gathering cost would be too much for tiny trees. 4899 if (VectorizableTree[0]->State == TreeEntry::NeedToGather || 4900 VectorizableTree[1]->State == TreeEntry::NeedToGather) 4901 return false; 4902 4903 return true; 4904 } 4905 4906 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts, 4907 TargetTransformInfo *TTI, 4908 bool MustMatchOrInst) { 4909 // Look past the root to find a source value. Arbitrarily follow the 4910 // path through operand 0 of any 'or'. Also, peek through optional 4911 // shift-left-by-multiple-of-8-bits. 
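// Illustrative IR for a match (value names are made up):
//   %b1 = load i8, i8* %p1
//   %z1 = zext i8 %b1 to i32
//   %s1 = shl i32 %z1, 8
//   %b0 = load i8, i8* %p0
//   %z0 = zext i8 %b0 to i32
//   %or = or i32 %s1, %z0
// Starting from %or, the loop below follows operand 0 through the 'or' and the
// byte-aligned 'shl' and ends at %z1, a zext of a load, so the sequence is
// considered load-combinable.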
4912 Value *ZextLoad = Root; 4913 const APInt *ShAmtC; 4914 bool FoundOr = false; 4915 while (!isa<ConstantExpr>(ZextLoad) && 4916 (match(ZextLoad, m_Or(m_Value(), m_Value())) || 4917 (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) && 4918 ShAmtC->urem(8) == 0))) { 4919 auto *BinOp = cast<BinaryOperator>(ZextLoad); 4920 ZextLoad = BinOp->getOperand(0); 4921 if (BinOp->getOpcode() == Instruction::Or) 4922 FoundOr = true; 4923 } 4924 // Check if the input is an extended load of the required or/shift expression. 4925 Value *LoadPtr; 4926 if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root || 4927 !match(ZextLoad, m_ZExt(m_Load(m_Value(LoadPtr))))) 4928 return false; 4929 4930 // Require that the total load bit width is a legal integer type. 4931 // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target. 4932 // But <16 x i8> --> i128 is not, so the backend probably can't reduce it. 4933 Type *SrcTy = LoadPtr->getType()->getPointerElementType(); 4934 unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts; 4935 if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth))) 4936 return false; 4937 4938 // Everything matched - assume that we can fold the whole sequence using 4939 // load combining. 4940 LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at " 4941 << *(cast<Instruction>(Root)) << "\n"); 4942 4943 return true; 4944 } 4945 4946 bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const { 4947 if (RdxKind != RecurKind::Or) 4948 return false; 4949 4950 unsigned NumElts = VectorizableTree[0]->Scalars.size(); 4951 Value *FirstReduced = VectorizableTree[0]->Scalars[0]; 4952 return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI, 4953 /* MatchOr */ false); 4954 } 4955 4956 bool BoUpSLP::isLoadCombineCandidate() const { 4957 // Peek through a final sequence of stores and check if all operations are 4958 // likely to be load-combined. 4959 unsigned NumElts = VectorizableTree[0]->Scalars.size(); 4960 for (Value *Scalar : VectorizableTree[0]->Scalars) { 4961 Value *X; 4962 if (!match(Scalar, m_Store(m_Value(X), m_Value())) || 4963 !isLoadCombineCandidateImpl(X, NumElts, TTI, /* MatchOr */ true)) 4964 return false; 4965 } 4966 return true; 4967 } 4968 4969 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() const { 4970 // No need to vectorize inserts of gathered values. 4971 if (VectorizableTree.size() == 2 && 4972 isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) && 4973 VectorizableTree[1]->State == TreeEntry::NeedToGather) 4974 return true; 4975 4976 // We can vectorize the tree if its size is greater than or equal to the 4977 // minimum size specified by the MinTreeSize command line option. 4978 if (VectorizableTree.size() >= MinTreeSize) 4979 return false; 4980 4981 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we 4982 // can vectorize it if we can prove it fully vectorizable. 4983 if (isFullyVectorizableTinyTree()) 4984 return false; 4985 4986 assert(VectorizableTree.empty() 4987 ? ExternalUses.empty() 4988 : true && "We shouldn't have any external users"); 4989 4990 // Otherwise, we can't vectorize the tree. It is both tiny and not fully 4991 // vectorizable. 4992 return true; 4993 } 4994 4995 InstructionCost BoUpSLP::getSpillCost() const { 4996 // Walk from the bottom of the tree to the top, tracking which values are 4997 // live. 
When we see a call instruction that is not part of our tree, 4998 // query TTI to see if there is a cost to keeping values live over it 4999 // (for example, if spills and fills are required). 5000 unsigned BundleWidth = VectorizableTree.front()->Scalars.size(); 5001 InstructionCost Cost = 0; 5002 5003 SmallPtrSet<Instruction*, 4> LiveValues; 5004 Instruction *PrevInst = nullptr; 5005 5006 // The entries in VectorizableTree are not necessarily ordered by their 5007 // position in basic blocks. Collect them and order them by dominance so later 5008 // instructions are guaranteed to be visited first. For instructions in 5009 // different basic blocks, we only scan to the beginning of the block, so 5010 // their order does not matter, as long as all instructions in a basic block 5011 // are grouped together. Using dominance ensures a deterministic order. 5012 SmallVector<Instruction *, 16> OrderedScalars; 5013 for (const auto &TEPtr : VectorizableTree) { 5014 Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]); 5015 if (!Inst) 5016 continue; 5017 OrderedScalars.push_back(Inst); 5018 } 5019 llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) { 5020 auto *NodeA = DT->getNode(A->getParent()); 5021 auto *NodeB = DT->getNode(B->getParent()); 5022 assert(NodeA && "Should only process reachable instructions"); 5023 assert(NodeB && "Should only process reachable instructions"); 5024 assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 5025 "Different nodes should have different DFS numbers"); 5026 if (NodeA != NodeB) 5027 return NodeA->getDFSNumIn() < NodeB->getDFSNumIn(); 5028 return B->comesBefore(A); 5029 }); 5030 5031 for (Instruction *Inst : OrderedScalars) { 5032 if (!PrevInst) { 5033 PrevInst = Inst; 5034 continue; 5035 } 5036 5037 // Update LiveValues. 5038 LiveValues.erase(PrevInst); 5039 for (auto &J : PrevInst->operands()) { 5040 if (isa<Instruction>(&*J) && getTreeEntry(&*J)) 5041 LiveValues.insert(cast<Instruction>(&*J)); 5042 } 5043 5044 LLVM_DEBUG({ 5045 dbgs() << "SLP: #LV: " << LiveValues.size(); 5046 for (auto *X : LiveValues) 5047 dbgs() << " " << X->getName(); 5048 dbgs() << ", Looking at "; 5049 Inst->dump(); 5050 }); 5051 5052 // Now find the sequence of instructions between PrevInst and Inst. 5053 unsigned NumCalls = 0; 5054 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(), 5055 PrevInstIt = 5056 PrevInst->getIterator().getReverse(); 5057 while (InstIt != PrevInstIt) { 5058 if (PrevInstIt == PrevInst->getParent()->rend()) { 5059 PrevInstIt = Inst->getParent()->rbegin(); 5060 continue; 5061 } 5062 5063 // Debug information does not impact spill cost. 
5064 if ((isa<CallInst>(&*PrevInstIt) && 5065 !isa<DbgInfoIntrinsic>(&*PrevInstIt)) && 5066 &*PrevInstIt != PrevInst) 5067 NumCalls++; 5068 5069 ++PrevInstIt; 5070 } 5071 5072 if (NumCalls) { 5073 SmallVector<Type*, 4> V; 5074 for (auto *II : LiveValues) { 5075 auto *ScalarTy = II->getType(); 5076 if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy)) 5077 ScalarTy = VectorTy->getElementType(); 5078 V.push_back(FixedVectorType::get(ScalarTy, BundleWidth)); 5079 } 5080 Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V); 5081 } 5082 5083 PrevInst = Inst; 5084 } 5085 5086 return Cost; 5087 } 5088 5089 InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) { 5090 InstructionCost Cost = 0; 5091 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size " 5092 << VectorizableTree.size() << ".\n"); 5093 5094 unsigned BundleWidth = VectorizableTree[0]->Scalars.size(); 5095 5096 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) { 5097 TreeEntry &TE = *VectorizableTree[I].get(); 5098 5099 InstructionCost C = getEntryCost(&TE, VectorizedVals); 5100 Cost += C; 5101 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 5102 << " for bundle that starts with " << *TE.Scalars[0] 5103 << ".\n" 5104 << "SLP: Current total cost = " << Cost << "\n"); 5105 } 5106 5107 SmallPtrSet<Value *, 16> ExtractCostCalculated; 5108 InstructionCost ExtractCost = 0; 5109 SmallVector<unsigned> VF; 5110 SmallVector<SmallVector<int>> ShuffleMask; 5111 SmallVector<Value *> FirstUsers; 5112 SmallVector<APInt> DemandedElts; 5113 for (ExternalUser &EU : ExternalUses) { 5114 // We only add extract cost once for the same scalar. 5115 if (!ExtractCostCalculated.insert(EU.Scalar).second) 5116 continue; 5117 5118 // Uses by ephemeral values are free (because the ephemeral value will be 5119 // removed prior to code generation, and so the extraction will be 5120 // removed as well). 5121 if (EphValues.count(EU.User)) 5122 continue; 5123 5124 // No extract cost for vector "scalar" 5125 if (isa<FixedVectorType>(EU.Scalar->getType())) 5126 continue; 5127 5128 // Already counted the cost for external uses when tried to adjust the cost 5129 // for extractelements, no need to add it again. 5130 if (isa<ExtractElementInst>(EU.Scalar)) 5131 continue; 5132 5133 // If found user is an insertelement, do not calculate extract cost but try 5134 // to detect it as a final shuffled/identity match. 5135 if (EU.User && isa<InsertElementInst>(EU.User)) { 5136 if (auto *FTy = dyn_cast<FixedVectorType>(EU.User->getType())) { 5137 Optional<int> InsertIdx = getInsertIndex(EU.User, 0); 5138 if (!InsertIdx || *InsertIdx == UndefMaskElem) 5139 continue; 5140 Value *VU = EU.User; 5141 auto *It = find_if(FirstUsers, [VU](Value *V) { 5142 // Checks if 2 insertelements are from the same buildvector. 5143 if (VU->getType() != V->getType()) 5144 return false; 5145 auto *IE1 = cast<InsertElementInst>(VU); 5146 auto *IE2 = cast<InsertElementInst>(V); 5147 // Go though of insertelement instructions trying to find either VU as 5148 // the original vector for IE2 or V as the original vector for IE1. 
5149 do { 5150 if (IE1 == VU || IE2 == V) 5151 return true; 5152 if (IE1) 5153 IE1 = dyn_cast<InsertElementInst>(IE1->getOperand(0)); 5154 if (IE2) 5155 IE2 = dyn_cast<InsertElementInst>(IE2->getOperand(0)); 5156 } while (IE1 || IE2); 5157 return false; 5158 }); 5159 int VecId = -1; 5160 if (It == FirstUsers.end()) { 5161 VF.push_back(FTy->getNumElements()); 5162 ShuffleMask.emplace_back(VF.back(), UndefMaskElem); 5163 FirstUsers.push_back(EU.User); 5164 DemandedElts.push_back(APInt::getZero(VF.back())); 5165 VecId = FirstUsers.size() - 1; 5166 } else { 5167 VecId = std::distance(FirstUsers.begin(), It); 5168 } 5169 int Idx = *InsertIdx; 5170 ShuffleMask[VecId][Idx] = EU.Lane; 5171 DemandedElts[VecId].setBit(Idx); 5172 } 5173 } 5174 5175 // If we plan to rewrite the tree in a smaller type, we will need to sign 5176 // extend the extracted value back to the original type. Here, we account 5177 // for the extract and the added cost of the sign extend if needed. 5178 auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth); 5179 auto *ScalarRoot = VectorizableTree[0]->Scalars[0]; 5180 if (MinBWs.count(ScalarRoot)) { 5181 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 5182 auto Extend = 5183 MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt; 5184 VecTy = FixedVectorType::get(MinTy, BundleWidth); 5185 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), 5186 VecTy, EU.Lane); 5187 } else { 5188 ExtractCost += 5189 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane); 5190 } 5191 } 5192 5193 InstructionCost SpillCost = getSpillCost(); 5194 Cost += SpillCost + ExtractCost; 5195 for (int I = 0, E = FirstUsers.size(); I < E; ++I) { 5196 // For the very first element - simple shuffle of the source vector. 5197 int Limit = ShuffleMask[I].size() * 2; 5198 if (I == 0 && 5199 all_of(ShuffleMask[I], [Limit](int Idx) { return Idx < Limit; }) && 5200 !ShuffleVectorInst::isIdentityMask(ShuffleMask[I])) { 5201 InstructionCost C = TTI->getShuffleCost( 5202 TTI::SK_PermuteSingleSrc, 5203 cast<FixedVectorType>(FirstUsers[I]->getType()), ShuffleMask[I]); 5204 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 5205 << " for final shuffle of insertelement external users " 5206 << *VectorizableTree.front()->Scalars.front() << ".\n" 5207 << "SLP: Current total cost = " << Cost << "\n"); 5208 Cost += C; 5209 continue; 5210 } 5211 // Other elements - permutation of 2 vectors (the initial one and the next 5212 // Ith incoming vector). 5213 unsigned VF = ShuffleMask[I].size(); 5214 for (unsigned Idx = 0; Idx < VF; ++Idx) { 5215 int &Mask = ShuffleMask[I][Idx]; 5216 Mask = Mask == UndefMaskElem ? 
Idx : VF + Mask;
5217 }
5218 InstructionCost C = TTI->getShuffleCost(
5219 TTI::SK_PermuteTwoSrc, cast<FixedVectorType>(FirstUsers[I]->getType()),
5220 ShuffleMask[I]);
5221 LLVM_DEBUG(
5222 dbgs()
5223 << "SLP: Adding cost " << C
5224 << " for final shuffle of vector node and external insertelement users "
5225 << *VectorizableTree.front()->Scalars.front() << ".\n"
5226 << "SLP: Current total cost = " << Cost << "\n");
5227 Cost += C;
5228 InstructionCost InsertCost = TTI->getScalarizationOverhead(
5229 cast<FixedVectorType>(FirstUsers[I]->getType()), DemandedElts[I],
5230 /*Insert*/ true,
5231 /*Extract*/ false);
5232 Cost -= InsertCost;
5233 LLVM_DEBUG(dbgs() << "SLP: subtracting the cost " << InsertCost
5234 << " for insertelements gather.\n"
5235 << "SLP: Current total cost = " << Cost << "\n");
5236 }
5237
5238 #ifndef NDEBUG
5239 SmallString<256> Str;
5240 {
5241 raw_svector_ostream OS(Str);
5242 OS << "SLP: Spill Cost = " << SpillCost << ".\n"
5243 << "SLP: Extract Cost = " << ExtractCost << ".\n"
5244 << "SLP: Total Cost = " << Cost << ".\n";
5245 }
5246 LLVM_DEBUG(dbgs() << Str);
5247 if (ViewSLPTree)
5248 ViewGraph(this, "SLP" + F->getName(), false, Str);
5249 #endif
5250
5251 return Cost;
5252 }
5253
5254 Optional<TargetTransformInfo::ShuffleKind>
5255 BoUpSLP::isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
5256 SmallVectorImpl<const TreeEntry *> &Entries) {
5257 // TODO: currently checking only for Scalars in the tree entry, need to count
5258 // reused elements too for better cost estimation.
5259 Mask.assign(TE->Scalars.size(), UndefMaskElem);
5260 Entries.clear();
5261 // Build a map from values to the tree entries that contain them.
5262 DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>> ValueToTEs;
5263 for (const std::unique_ptr<TreeEntry> &EntryPtr : VectorizableTree) {
5264 if (EntryPtr.get() == TE)
5265 break;
5266 if (EntryPtr->State != TreeEntry::NeedToGather)
5267 continue;
5268 for (Value *V : EntryPtr->Scalars)
5269 ValueToTEs.try_emplace(V).first->getSecond().insert(EntryPtr.get());
5270 }
5271 // Find all tree entries used by the gathered values. If no common entries
5272 // are found, this is not a shuffle.
5273 // Here we build a set of tree nodes for each gathered value and try to find
5274 // the intersection between these sets. If we have at least one common tree
5275 // node for each gathered value, we have just a permutation of a single
5276 // vector. If we end up with 2 different sets, we are in a situation where
5277 // we have a permutation of 2 input vectors.
5278 SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs;
5279 DenseMap<Value *, int> UsedValuesEntry;
5280 for (Value *V : TE->Scalars) {
5281 if (isa<UndefValue>(V))
5282 continue;
5283 // Build a list of tree entries where V is used.
5284 SmallPtrSet<const TreeEntry *, 4> VToTEs;
5285 auto It = ValueToTEs.find(V);
5286 if (It != ValueToTEs.end())
5287 VToTEs = It->second;
5288 if (const TreeEntry *VTE = getTreeEntry(V))
5289 VToTEs.insert(VTE);
5290 if (VToTEs.empty())
5291 return None;
5292 if (UsedTEs.empty()) {
5293 // On the first iteration, just insert the list of nodes into the vector.
5294 UsedTEs.push_back(VToTEs);
5295 } else {
5296 // Need to check if there are any previously used tree nodes which use V.
5297 // If there are no such nodes, consider that we have another input
5298 // vector.
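// E.g. (illustrative): if { a, b } of a gathered { a, b, c, d } come from one
// vectorized tree entry and { c, d } from another, the second distinct set is
// recorded below and the function eventually returns SK_PermuteTwoSrc over the
// two entries; when a single entry covers all the values it returns
// SK_PermuteSingleSrc.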
5299 SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs); 5300 unsigned Idx = 0; 5301 for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) { 5302 // Do we have a non-empty intersection of previously listed tree entries 5303 // and tree entries using current V? 5304 set_intersect(VToTEs, Set); 5305 if (!VToTEs.empty()) { 5306 // Yes, write the new subset and continue analysis for the next 5307 // scalar. 5308 Set.swap(VToTEs); 5309 break; 5310 } 5311 VToTEs = SavedVToTEs; 5312 ++Idx; 5313 } 5314 // No non-empty intersection found - need to add a second set of possible 5315 // source vectors. 5316 if (Idx == UsedTEs.size()) { 5317 // If the number of input vectors is greater than 2 - not a permutation, 5318 // fallback to the regular gather. 5319 if (UsedTEs.size() == 2) 5320 return None; 5321 UsedTEs.push_back(SavedVToTEs); 5322 Idx = UsedTEs.size() - 1; 5323 } 5324 UsedValuesEntry.try_emplace(V, Idx); 5325 } 5326 } 5327 5328 unsigned VF = 0; 5329 if (UsedTEs.size() == 1) { 5330 // Try to find the perfect match in another gather node at first. 5331 auto It = find_if(UsedTEs.front(), [TE](const TreeEntry *EntryPtr) { 5332 return EntryPtr->isSame(TE->Scalars); 5333 }); 5334 if (It != UsedTEs.front().end()) { 5335 Entries.push_back(*It); 5336 std::iota(Mask.begin(), Mask.end(), 0); 5337 return TargetTransformInfo::SK_PermuteSingleSrc; 5338 } 5339 // No perfect match, just shuffle, so choose the first tree node. 5340 Entries.push_back(*UsedTEs.front().begin()); 5341 } else { 5342 // Try to find nodes with the same vector factor. 5343 assert(UsedTEs.size() == 2 && "Expected at max 2 permuted entries."); 5344 // FIXME: Shall be replaced by GetVF function once non-power-2 patch is 5345 // landed. 5346 auto &&GetVF = [](const TreeEntry *TE) { 5347 if (!TE->ReuseShuffleIndices.empty()) 5348 return TE->ReuseShuffleIndices.size(); 5349 return TE->Scalars.size(); 5350 }; 5351 DenseMap<int, const TreeEntry *> VFToTE; 5352 for (const TreeEntry *TE : UsedTEs.front()) 5353 VFToTE.try_emplace(GetVF(TE), TE); 5354 for (const TreeEntry *TE : UsedTEs.back()) { 5355 auto It = VFToTE.find(GetVF(TE)); 5356 if (It != VFToTE.end()) { 5357 VF = It->first; 5358 Entries.push_back(It->second); 5359 Entries.push_back(TE); 5360 break; 5361 } 5362 } 5363 // No 2 source vectors with the same vector factor - give up and do regular 5364 // gather. 5365 if (Entries.empty()) 5366 return None; 5367 } 5368 5369 // Build a shuffle mask for better cost estimation and vector emission. 5370 for (int I = 0, E = TE->Scalars.size(); I < E; ++I) { 5371 Value *V = TE->Scalars[I]; 5372 if (isa<UndefValue>(V)) 5373 continue; 5374 unsigned Idx = UsedValuesEntry.lookup(V); 5375 const TreeEntry *VTE = Entries[Idx]; 5376 int FoundLane = VTE->findLaneForValue(V); 5377 Mask[I] = Idx * VF + FoundLane; 5378 // Extra check required by isSingleSourceMaskImpl function (called by 5379 // ShuffleVectorInst::isSingleSourceMask). 
5380 if (Mask[I] >= 2 * E) 5381 return None; 5382 } 5383 switch (Entries.size()) { 5384 case 1: 5385 return TargetTransformInfo::SK_PermuteSingleSrc; 5386 case 2: 5387 return TargetTransformInfo::SK_PermuteTwoSrc; 5388 default: 5389 break; 5390 } 5391 return None; 5392 } 5393 5394 InstructionCost 5395 BoUpSLP::getGatherCost(FixedVectorType *Ty, 5396 const DenseSet<unsigned> &ShuffledIndices) const { 5397 unsigned NumElts = Ty->getNumElements(); 5398 APInt DemandedElts = APInt::getZero(NumElts); 5399 for (unsigned I = 0; I < NumElts; ++I) 5400 if (!ShuffledIndices.count(I)) 5401 DemandedElts.setBit(I); 5402 InstructionCost Cost = 5403 TTI->getScalarizationOverhead(Ty, DemandedElts, /*Insert*/ true, 5404 /*Extract*/ false); 5405 if (!ShuffledIndices.empty()) 5406 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty); 5407 return Cost; 5408 } 5409 5410 InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const { 5411 // Find the type of the operands in VL. 5412 Type *ScalarTy = VL[0]->getType(); 5413 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 5414 ScalarTy = SI->getValueOperand()->getType(); 5415 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 5416 // Find the cost of inserting/extracting values from the vector. 5417 // Check if the same elements are inserted several times and count them as 5418 // shuffle candidates. 5419 DenseSet<unsigned> ShuffledElements; 5420 DenseSet<Value *> UniqueElements; 5421 // Iterate in reverse order to consider insert elements with the high cost. 5422 for (unsigned I = VL.size(); I > 0; --I) { 5423 unsigned Idx = I - 1; 5424 if (isConstant(VL[Idx])) 5425 continue; 5426 if (!UniqueElements.insert(VL[Idx]).second) 5427 ShuffledElements.insert(Idx); 5428 } 5429 return getGatherCost(VecTy, ShuffledElements); 5430 } 5431 5432 // Perform operand reordering on the instructions in VL and return the reordered 5433 // operands in Left and Right. 5434 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, 5435 SmallVectorImpl<Value *> &Left, 5436 SmallVectorImpl<Value *> &Right, 5437 const DataLayout &DL, 5438 ScalarEvolution &SE, 5439 const BoUpSLP &R) { 5440 if (VL.empty()) 5441 return; 5442 VLOperands Ops(VL, DL, SE, R); 5443 // Reorder the operands in place. 5444 Ops.reorder(); 5445 Left = Ops.getVL(0); 5446 Right = Ops.getVL(1); 5447 } 5448 5449 void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) { 5450 // Get the basic block this bundle is in. All instructions in the bundle 5451 // should be in this block. 5452 auto *Front = E->getMainOp(); 5453 auto *BB = Front->getParent(); 5454 assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool { 5455 auto *I = cast<Instruction>(V); 5456 return !E->isOpcodeOrAlt(I) || I->getParent() == BB; 5457 })); 5458 5459 // The last instruction in the bundle in program order. 5460 Instruction *LastInst = nullptr; 5461 5462 // Find the last instruction. The common case should be that BB has been 5463 // scheduled, and the last instruction is VL.back(). So we start with 5464 // VL.back() and iterate over schedule data until we reach the end of the 5465 // bundle. The end of the bundle is marked by null ScheduleData. 
5466 if (BlocksSchedules.count(BB)) {
5467 auto *Bundle =
5468 BlocksSchedules[BB]->getScheduleData(E->isOneOf(E->Scalars.back()));
5469 if (Bundle && Bundle->isPartOfBundle())
5470 for (; Bundle; Bundle = Bundle->NextInBundle)
5471 if (Bundle->OpValue == Bundle->Inst)
5472 LastInst = Bundle->Inst;
5473 }
5474
5475 // LastInst can still be null at this point if there's either not an entry
5476 // for BB in BlocksSchedules or there's no ScheduleData available for
5477 // VL.back(). This can be the case if buildTree_rec aborts for various
5478 // reasons (e.g., the maximum recursion depth is reached, the maximum region
5479 // size is reached, etc.). ScheduleData is initialized in the scheduling
5480 // "dry-run".
5481 //
5482 // If this happens, we can still find the last instruction by brute force. We
5483 // iterate forwards from Front (inclusive) until we either see all
5484 // instructions in the bundle or reach the end of the block. If Front is the
5485 // last instruction in program order, LastInst will be set to Front, and we
5486 // will visit all the remaining instructions in the block.
5487 //
5488 // One of the reasons we exit early from buildTree_rec is to place an upper
5489 // bound on compile-time. Thus, taking an additional compile-time hit here is
5490 // not ideal. However, this should be exceedingly rare since it requires that
5491 // we both exit early from buildTree_rec and that the bundle be out-of-order
5492 // (causing us to iterate all the way to the end of the block).
5493 if (!LastInst) {
5494 SmallPtrSet<Value *, 16> Bundle(E->Scalars.begin(), E->Scalars.end());
5495 for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
5496 if (Bundle.erase(&I) && E->isOpcodeOrAlt(&I))
5497 LastInst = &I;
5498 if (Bundle.empty())
5499 break;
5500 }
5501 }
5502 assert(LastInst && "Failed to find last instruction in bundle");
5503
5504 // Set the insertion point after the last instruction in the bundle. Set the
5505 // debug location to Front.
5506 Builder.SetInsertPoint(BB, ++LastInst->getIterator());
5507 Builder.SetCurrentDebugLocation(Front->getDebugLoc());
5508 }
5509
5510 Value *BoUpSLP::gather(ArrayRef<Value *> VL) {
5511 // List of instructions/lanes from the current block and/or the blocks which are
5512 // part of the current loop. These instructions will be inserted at the end to
5513 // make it possible to optimize loops and hoist invariant instructions out of
5514 // the loop's body with better chances for success.
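// E.g. (illustrative, subject to the tree/predecessor checks below): when
// gathering { %inv, %x } inside a loop, where %inv is defined outside the loop
// and %x inside it, the insertelement for %inv is emitted first and the one
// for %x is postponed to the end of the sequence, so the loop-invariant prefix
// of the buildvector can later be hoisted out of the loop.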
5515 SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts; 5516 SmallSet<int, 4> PostponedIndices; 5517 Loop *L = LI->getLoopFor(Builder.GetInsertBlock()); 5518 auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) { 5519 SmallPtrSet<BasicBlock *, 4> Visited; 5520 while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second) 5521 InsertBB = InsertBB->getSinglePredecessor(); 5522 return InsertBB && InsertBB == InstBB; 5523 }; 5524 for (int I = 0, E = VL.size(); I < E; ++I) { 5525 if (auto *Inst = dyn_cast<Instruction>(VL[I])) 5526 if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) || 5527 getTreeEntry(Inst) || (L && (L->contains(Inst)))) && 5528 PostponedIndices.insert(I).second) 5529 PostponedInsts.emplace_back(Inst, I); 5530 } 5531 5532 auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) { 5533 Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos)); 5534 auto *InsElt = dyn_cast<InsertElementInst>(Vec); 5535 if (!InsElt) 5536 return Vec; 5537 GatherSeq.insert(InsElt); 5538 CSEBlocks.insert(InsElt->getParent()); 5539 // Add to our 'need-to-extract' list. 5540 if (TreeEntry *Entry = getTreeEntry(V)) { 5541 // Find which lane we need to extract. 5542 unsigned FoundLane = Entry->findLaneForValue(V); 5543 ExternalUses.emplace_back(V, InsElt, FoundLane); 5544 } 5545 return Vec; 5546 }; 5547 Value *Val0 = 5548 isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0]; 5549 FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size()); 5550 Value *Vec = PoisonValue::get(VecTy); 5551 SmallVector<int> NonConsts; 5552 // Insert constant values at first. 5553 for (int I = 0, E = VL.size(); I < E; ++I) { 5554 if (PostponedIndices.contains(I)) 5555 continue; 5556 if (!isConstant(VL[I])) { 5557 NonConsts.push_back(I); 5558 continue; 5559 } 5560 Vec = CreateInsertElement(Vec, VL[I], I); 5561 } 5562 // Insert non-constant values. 5563 for (int I : NonConsts) 5564 Vec = CreateInsertElement(Vec, VL[I], I); 5565 // Append instructions, which are/may be part of the loop, in the end to make 5566 // it possible to hoist non-loop-based instructions. 5567 for (const std::pair<Value *, unsigned> &Pair : PostponedInsts) 5568 Vec = CreateInsertElement(Vec, Pair.first, Pair.second); 5569 5570 return Vec; 5571 } 5572 5573 namespace { 5574 /// Merges shuffle masks and emits final shuffle instruction, if required. 5575 class ShuffleInstructionBuilder { 5576 IRBuilderBase &Builder; 5577 const unsigned VF = 0; 5578 bool IsFinalized = false; 5579 SmallVector<int, 4> Mask; 5580 5581 public: 5582 ShuffleInstructionBuilder(IRBuilderBase &Builder, unsigned VF) 5583 : Builder(Builder), VF(VF) {} 5584 5585 /// Adds a mask, inverting it before applying. 5586 void addInversedMask(ArrayRef<unsigned> SubMask) { 5587 if (SubMask.empty()) 5588 return; 5589 SmallVector<int, 4> NewMask; 5590 inversePermutation(SubMask, NewMask); 5591 addMask(NewMask); 5592 } 5593 5594 /// Functions adds masks, merging them into single one. 
5595 void addMask(ArrayRef<unsigned> SubMask) { 5596 SmallVector<int, 4> NewMask(SubMask.begin(), SubMask.end()); 5597 addMask(NewMask); 5598 } 5599 5600 void addMask(ArrayRef<int> SubMask) { ::addMask(Mask, SubMask); } 5601 5602 Value *finalize(Value *V) { 5603 IsFinalized = true; 5604 unsigned ValueVF = cast<FixedVectorType>(V->getType())->getNumElements(); 5605 if (VF == ValueVF && Mask.empty()) 5606 return V; 5607 SmallVector<int, 4> NormalizedMask(VF, UndefMaskElem); 5608 std::iota(NormalizedMask.begin(), NormalizedMask.end(), 0); 5609 addMask(NormalizedMask); 5610 5611 if (VF == ValueVF && ShuffleVectorInst::isIdentityMask(Mask)) 5612 return V; 5613 return Builder.CreateShuffleVector(V, Mask, "shuffle"); 5614 } 5615 5616 ~ShuffleInstructionBuilder() { 5617 assert((IsFinalized || Mask.empty()) && 5618 "Shuffle construction must be finalized."); 5619 } 5620 }; 5621 } // namespace 5622 5623 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) { 5624 unsigned VF = VL.size(); 5625 InstructionsState S = getSameOpcode(VL); 5626 if (S.getOpcode()) { 5627 if (TreeEntry *E = getTreeEntry(S.OpValue)) 5628 if (E->isSame(VL)) { 5629 Value *V = vectorizeTree(E); 5630 if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) { 5631 if (!E->ReuseShuffleIndices.empty()) { 5632 // Reshuffle to get only unique values. 5633 // If some of the scalars are duplicated in the vectorization tree 5634 // entry, we do not vectorize them but instead generate a mask for 5635 // the reuses. But if there are several users of the same entry, 5636 // they may have different vectorization factors. This is especially 5637 // important for PHI nodes. In this case, we need to adapt the 5638 // resulting instruction for the user vectorization factor and have 5639 // to reshuffle it again to take only unique elements of the vector. 5640 // Without this code the function incorrectly returns reduced vector 5641 // instruction with the same elements, not with the unique ones. 5642 5643 // block: 5644 // %phi = phi <2 x > { .., %entry} {%shuffle, %block} 5645 // %2 = shuffle <2 x > %phi, %poison, <4 x > <0, 0, 1, 1> 5646 // ... (use %2) 5647 // %shuffle = shuffle <2 x> %2, poison, <2 x> {0, 2} 5648 // br %block 5649 SmallVector<int> UniqueIdxs; 5650 SmallSet<int, 4> UsedIdxs; 5651 int Pos = 0; 5652 int Sz = VL.size(); 5653 for (int Idx : E->ReuseShuffleIndices) { 5654 if (Idx != Sz && UsedIdxs.insert(Idx).second) 5655 UniqueIdxs.emplace_back(Pos); 5656 ++Pos; 5657 } 5658 assert(VF >= UsedIdxs.size() && "Expected vectorization factor " 5659 "less than original vector size."); 5660 UniqueIdxs.append(VF - UsedIdxs.size(), UndefMaskElem); 5661 V = Builder.CreateShuffleVector(V, UniqueIdxs, "shrink.shuffle"); 5662 } else { 5663 assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() && 5664 "Expected vectorization factor less " 5665 "than original vector size."); 5666 SmallVector<int> UniformMask(VF, 0); 5667 std::iota(UniformMask.begin(), UniformMask.end(), 0); 5668 V = Builder.CreateShuffleVector(V, UniformMask, "shrink.shuffle"); 5669 } 5670 } 5671 return V; 5672 } 5673 } 5674 5675 // Check that every instruction appears once in this bundle. 
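// E.g. (illustrative): for VL = { %a, %b, %a, undef } the unique values are
// { %a, %b }, VF stays 4, and ReuseShuffleIndicies becomes
// { 0, 1, 0, UndefMaskElem }, so only the unique values are gathered and a
// single shuffle re-expands them to the requested shape.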
5676 SmallVector<int> ReuseShuffleIndicies; 5677 SmallVector<Value *> UniqueValues; 5678 if (VL.size() > 2) { 5679 DenseMap<Value *, unsigned> UniquePositions; 5680 unsigned NumValues = 5681 std::distance(VL.begin(), find_if(reverse(VL), [](Value *V) { 5682 return !isa<UndefValue>(V); 5683 }).base()); 5684 VF = std::max<unsigned>(VF, PowerOf2Ceil(NumValues)); 5685 int UniqueVals = 0; 5686 for (Value *V : VL.drop_back(VL.size() - VF)) { 5687 if (isa<UndefValue>(V)) { 5688 ReuseShuffleIndicies.emplace_back(UndefMaskElem); 5689 continue; 5690 } 5691 if (isConstant(V)) { 5692 ReuseShuffleIndicies.emplace_back(UniqueValues.size()); 5693 UniqueValues.emplace_back(V); 5694 continue; 5695 } 5696 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 5697 ReuseShuffleIndicies.emplace_back(Res.first->second); 5698 if (Res.second) { 5699 UniqueValues.emplace_back(V); 5700 ++UniqueVals; 5701 } 5702 } 5703 if (UniqueVals == 1 && UniqueValues.size() == 1) { 5704 // Emit pure splat vector. 5705 ReuseShuffleIndicies.append(VF - ReuseShuffleIndicies.size(), 5706 UndefMaskElem); 5707 } else if (UniqueValues.size() >= VF - 1 || UniqueValues.size() <= 1) { 5708 ReuseShuffleIndicies.clear(); 5709 UniqueValues.clear(); 5710 UniqueValues.append(VL.begin(), std::next(VL.begin(), NumValues)); 5711 } 5712 UniqueValues.append(VF - UniqueValues.size(), 5713 PoisonValue::get(VL[0]->getType())); 5714 VL = UniqueValues; 5715 } 5716 5717 ShuffleInstructionBuilder ShuffleBuilder(Builder, VF); 5718 Value *Vec = gather(VL); 5719 if (!ReuseShuffleIndicies.empty()) { 5720 ShuffleBuilder.addMask(ReuseShuffleIndicies); 5721 Vec = ShuffleBuilder.finalize(Vec); 5722 if (auto *I = dyn_cast<Instruction>(Vec)) { 5723 GatherSeq.insert(I); 5724 CSEBlocks.insert(I->getParent()); 5725 } 5726 } 5727 return Vec; 5728 } 5729 5730 Value *BoUpSLP::vectorizeTree(TreeEntry *E) { 5731 IRBuilder<>::InsertPointGuard Guard(Builder); 5732 5733 if (E->VectorizedValue) { 5734 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); 5735 return E->VectorizedValue; 5736 } 5737 5738 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 5739 unsigned VF = E->Scalars.size(); 5740 if (NeedToShuffleReuses) 5741 VF = E->ReuseShuffleIndices.size(); 5742 ShuffleInstructionBuilder ShuffleBuilder(Builder, VF); 5743 if (E->State == TreeEntry::NeedToGather) { 5744 setInsertPointAfterBundle(E); 5745 Value *Vec; 5746 SmallVector<int> Mask; 5747 SmallVector<const TreeEntry *> Entries; 5748 Optional<TargetTransformInfo::ShuffleKind> Shuffle = 5749 isGatherShuffledEntry(E, Mask, Entries); 5750 if (Shuffle.hasValue()) { 5751 assert((Entries.size() == 1 || Entries.size() == 2) && 5752 "Expected shuffle of 1 or 2 entries."); 5753 Vec = Builder.CreateShuffleVector(Entries.front()->VectorizedValue, 5754 Entries.back()->VectorizedValue, Mask); 5755 } else { 5756 Vec = gather(E->Scalars); 5757 } 5758 if (NeedToShuffleReuses) { 5759 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 5760 Vec = ShuffleBuilder.finalize(Vec); 5761 if (auto *I = dyn_cast<Instruction>(Vec)) { 5762 GatherSeq.insert(I); 5763 CSEBlocks.insert(I->getParent()); 5764 } 5765 } 5766 E->VectorizedValue = Vec; 5767 return Vec; 5768 } 5769 5770 assert((E->State == TreeEntry::Vectorize || 5771 E->State == TreeEntry::ScatterVectorize) && 5772 "Unhandled state"); 5773 unsigned ShuffleOrOp = 5774 E->isAltShuffle() ? 
(unsigned)Instruction::ShuffleVector : E->getOpcode(); 5775 Instruction *VL0 = E->getMainOp(); 5776 Type *ScalarTy = VL0->getType(); 5777 if (auto *Store = dyn_cast<StoreInst>(VL0)) 5778 ScalarTy = Store->getValueOperand()->getType(); 5779 else if (auto *IE = dyn_cast<InsertElementInst>(VL0)) 5780 ScalarTy = IE->getOperand(1)->getType(); 5781 auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size()); 5782 switch (ShuffleOrOp) { 5783 case Instruction::PHI: { 5784 assert( 5785 (E->ReorderIndices.empty() || E != VectorizableTree.front().get()) && 5786 "PHI reordering is free."); 5787 auto *PH = cast<PHINode>(VL0); 5788 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI()); 5789 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 5790 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 5791 Value *V = NewPhi; 5792 ShuffleBuilder.addInversedMask(E->ReorderIndices); 5793 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 5794 V = ShuffleBuilder.finalize(V); 5795 5796 E->VectorizedValue = V; 5797 5798 // PHINodes may have multiple entries from the same block. We want to 5799 // visit every block once. 5800 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 5801 5802 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 5803 ValueList Operands; 5804 BasicBlock *IBB = PH->getIncomingBlock(i); 5805 5806 if (!VisitedBBs.insert(IBB).second) { 5807 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 5808 continue; 5809 } 5810 5811 Builder.SetInsertPoint(IBB->getTerminator()); 5812 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 5813 Value *Vec = vectorizeTree(E->getOperand(i)); 5814 NewPhi->addIncoming(Vec, IBB); 5815 } 5816 5817 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 5818 "Invalid number of incoming values"); 5819 return V; 5820 } 5821 5822 case Instruction::ExtractElement: { 5823 Value *V = E->getSingleOperand(0); 5824 Builder.SetInsertPoint(VL0); 5825 ShuffleBuilder.addInversedMask(E->ReorderIndices); 5826 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 5827 V = ShuffleBuilder.finalize(V); 5828 E->VectorizedValue = V; 5829 return V; 5830 } 5831 case Instruction::ExtractValue: { 5832 auto *LI = cast<LoadInst>(E->getSingleOperand(0)); 5833 Builder.SetInsertPoint(LI); 5834 auto *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace()); 5835 Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy); 5836 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign()); 5837 Value *NewV = propagateMetadata(V, E->Scalars); 5838 ShuffleBuilder.addInversedMask(E->ReorderIndices); 5839 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 5840 NewV = ShuffleBuilder.finalize(NewV); 5841 E->VectorizedValue = NewV; 5842 return NewV; 5843 } 5844 case Instruction::InsertElement: { 5845 assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique"); 5846 Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back())); 5847 Value *V = vectorizeTree(E->getOperand(1)); 5848 5849 // Create InsertVector shuffle if necessary 5850 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 5851 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0)); 5852 })); 5853 const unsigned NumElts = 5854 cast<FixedVectorType>(FirstInsert->getType())->getNumElements(); 5855 const unsigned NumScalars = E->Scalars.size(); 5856 5857 unsigned Offset = *getInsertIndex(VL0, 0); 5858 assert(Offset < NumElts && "Failed to find vector index offset"); 5859 5860 // Create shuffle to resize vector 5861 SmallVector<int> Mask; 
5862 if (!E->ReorderIndices.empty()) { 5863 inversePermutation(E->ReorderIndices, Mask); 5864 Mask.append(NumElts - NumScalars, UndefMaskElem); 5865 } else { 5866 Mask.assign(NumElts, UndefMaskElem); 5867 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0); 5868 } 5869 // Create InsertVector shuffle if necessary 5870 bool IsIdentity = true; 5871 SmallVector<int> PrevMask(NumElts, UndefMaskElem); 5872 Mask.swap(PrevMask); 5873 for (unsigned I = 0; I < NumScalars; ++I) { 5874 Value *Scalar = E->Scalars[PrevMask[I]]; 5875 Optional<int> InsertIdx = getInsertIndex(Scalar, 0); 5876 if (!InsertIdx || *InsertIdx == UndefMaskElem) 5877 continue; 5878 IsIdentity &= *InsertIdx - Offset == I; 5879 Mask[*InsertIdx - Offset] = I; 5880 } 5881 if (!IsIdentity || NumElts != NumScalars) 5882 V = Builder.CreateShuffleVector(V, Mask); 5883 5884 if ((!IsIdentity || Offset != 0 || 5885 !isa<UndefValue>(FirstInsert->getOperand(0))) && 5886 NumElts != NumScalars) { 5887 SmallVector<int> InsertMask(NumElts); 5888 std::iota(InsertMask.begin(), InsertMask.end(), 0); 5889 for (unsigned I = 0; I < NumElts; I++) { 5890 if (Mask[I] != UndefMaskElem) 5891 InsertMask[Offset + I] = NumElts + I; 5892 } 5893 5894 V = Builder.CreateShuffleVector( 5895 FirstInsert->getOperand(0), V, InsertMask, 5896 cast<Instruction>(E->Scalars.back())->getName()); 5897 } 5898 5899 ++NumVectorInstructions; 5900 E->VectorizedValue = V; 5901 return V; 5902 } 5903 case Instruction::ZExt: 5904 case Instruction::SExt: 5905 case Instruction::FPToUI: 5906 case Instruction::FPToSI: 5907 case Instruction::FPExt: 5908 case Instruction::PtrToInt: 5909 case Instruction::IntToPtr: 5910 case Instruction::SIToFP: 5911 case Instruction::UIToFP: 5912 case Instruction::Trunc: 5913 case Instruction::FPTrunc: 5914 case Instruction::BitCast: { 5915 setInsertPointAfterBundle(E); 5916 5917 Value *InVec = vectorizeTree(E->getOperand(0)); 5918 5919 if (E->VectorizedValue) { 5920 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 5921 return E->VectorizedValue; 5922 } 5923 5924 auto *CI = cast<CastInst>(VL0); 5925 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy); 5926 ShuffleBuilder.addInversedMask(E->ReorderIndices); 5927 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 5928 V = ShuffleBuilder.finalize(V); 5929 5930 E->VectorizedValue = V; 5931 ++NumVectorInstructions; 5932 return V; 5933 } 5934 case Instruction::FCmp: 5935 case Instruction::ICmp: { 5936 setInsertPointAfterBundle(E); 5937 5938 Value *L = vectorizeTree(E->getOperand(0)); 5939 Value *R = vectorizeTree(E->getOperand(1)); 5940 5941 if (E->VectorizedValue) { 5942 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 5943 return E->VectorizedValue; 5944 } 5945 5946 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 5947 Value *V = Builder.CreateCmp(P0, L, R); 5948 propagateIRFlags(V, E->Scalars, VL0); 5949 ShuffleBuilder.addInversedMask(E->ReorderIndices); 5950 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 5951 V = ShuffleBuilder.finalize(V); 5952 5953 E->VectorizedValue = V; 5954 ++NumVectorInstructions; 5955 return V; 5956 } 5957 case Instruction::Select: { 5958 setInsertPointAfterBundle(E); 5959 5960 Value *Cond = vectorizeTree(E->getOperand(0)); 5961 Value *True = vectorizeTree(E->getOperand(1)); 5962 Value *False = vectorizeTree(E->getOperand(2)); 5963 5964 if (E->VectorizedValue) { 5965 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 5966 return E->VectorizedValue; 5967 } 5968 5969 Value *V = Builder.CreateSelect(Cond, 
True, False); 5970 ShuffleBuilder.addInversedMask(E->ReorderIndices); 5971 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 5972 V = ShuffleBuilder.finalize(V); 5973 5974 E->VectorizedValue = V; 5975 ++NumVectorInstructions; 5976 return V; 5977 } 5978 case Instruction::FNeg: { 5979 setInsertPointAfterBundle(E); 5980 5981 Value *Op = vectorizeTree(E->getOperand(0)); 5982 5983 if (E->VectorizedValue) { 5984 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 5985 return E->VectorizedValue; 5986 } 5987 5988 Value *V = Builder.CreateUnOp( 5989 static_cast<Instruction::UnaryOps>(E->getOpcode()), Op); 5990 propagateIRFlags(V, E->Scalars, VL0); 5991 if (auto *I = dyn_cast<Instruction>(V)) 5992 V = propagateMetadata(I, E->Scalars); 5993 5994 ShuffleBuilder.addInversedMask(E->ReorderIndices); 5995 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 5996 V = ShuffleBuilder.finalize(V); 5997 5998 E->VectorizedValue = V; 5999 ++NumVectorInstructions; 6000 6001 return V; 6002 } 6003 case Instruction::Add: 6004 case Instruction::FAdd: 6005 case Instruction::Sub: 6006 case Instruction::FSub: 6007 case Instruction::Mul: 6008 case Instruction::FMul: 6009 case Instruction::UDiv: 6010 case Instruction::SDiv: 6011 case Instruction::FDiv: 6012 case Instruction::URem: 6013 case Instruction::SRem: 6014 case Instruction::FRem: 6015 case Instruction::Shl: 6016 case Instruction::LShr: 6017 case Instruction::AShr: 6018 case Instruction::And: 6019 case Instruction::Or: 6020 case Instruction::Xor: { 6021 setInsertPointAfterBundle(E); 6022 6023 Value *LHS = vectorizeTree(E->getOperand(0)); 6024 Value *RHS = vectorizeTree(E->getOperand(1)); 6025 6026 if (E->VectorizedValue) { 6027 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6028 return E->VectorizedValue; 6029 } 6030 6031 Value *V = Builder.CreateBinOp( 6032 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, 6033 RHS); 6034 propagateIRFlags(V, E->Scalars, VL0); 6035 if (auto *I = dyn_cast<Instruction>(V)) 6036 V = propagateMetadata(I, E->Scalars); 6037 6038 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6039 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6040 V = ShuffleBuilder.finalize(V); 6041 6042 E->VectorizedValue = V; 6043 ++NumVectorInstructions; 6044 6045 return V; 6046 } 6047 case Instruction::Load: { 6048 // Loads are inserted at the head of the tree because we don't want to 6049 // sink them all the way down past store instructions. 6050 setInsertPointAfterBundle(E); 6051 6052 LoadInst *LI = cast<LoadInst>(VL0); 6053 Instruction *NewLI; 6054 unsigned AS = LI->getPointerAddressSpace(); 6055 Value *PO = LI->getPointerOperand(); 6056 if (E->State == TreeEntry::Vectorize) { 6057 6058 Value *VecPtr = Builder.CreateBitCast(PO, VecTy->getPointerTo(AS)); 6059 6060 // The pointer operand uses an in-tree scalar so we add the new BitCast 6061 // to ExternalUses list to make sure that an extract will be generated 6062 // in the future. 6063 if (TreeEntry *Entry = getTreeEntry(PO)) { 6064 // Find which lane we need to extract. 6065 unsigned FoundLane = Entry->findLaneForValue(PO); 6066 ExternalUses.emplace_back(PO, cast<User>(VecPtr), FoundLane); 6067 } 6068 6069 NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign()); 6070 } else { 6071 assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state"); 6072 Value *VecPtr = vectorizeTree(E->getOperand(0)); 6073 // Use the minimum alignment of the gathered loads. 
6074 Align CommonAlignment = LI->getAlign(); 6075 for (Value *V : E->Scalars) 6076 CommonAlignment = 6077 commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign()); 6078 NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment); 6079 } 6080 Value *V = propagateMetadata(NewLI, E->Scalars); 6081 6082 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6083 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6084 V = ShuffleBuilder.finalize(V); 6085 E->VectorizedValue = V; 6086 ++NumVectorInstructions; 6087 return V; 6088 } 6089 case Instruction::Store: { 6090 auto *SI = cast<StoreInst>(VL0); 6091 unsigned AS = SI->getPointerAddressSpace(); 6092 6093 setInsertPointAfterBundle(E); 6094 6095 Value *VecValue = vectorizeTree(E->getOperand(0)); 6096 ShuffleBuilder.addMask(E->ReorderIndices); 6097 VecValue = ShuffleBuilder.finalize(VecValue); 6098 6099 Value *ScalarPtr = SI->getPointerOperand(); 6100 Value *VecPtr = Builder.CreateBitCast( 6101 ScalarPtr, VecValue->getType()->getPointerTo(AS)); 6102 StoreInst *ST = Builder.CreateAlignedStore(VecValue, VecPtr, 6103 SI->getAlign()); 6104 6105 // The pointer operand uses an in-tree scalar, so add the new BitCast to 6106 // ExternalUses to make sure that an extract will be generated in the 6107 // future. 6108 if (TreeEntry *Entry = getTreeEntry(ScalarPtr)) { 6109 // Find which lane we need to extract. 6110 unsigned FoundLane = Entry->findLaneForValue(ScalarPtr); 6111 ExternalUses.push_back( 6112 ExternalUser(ScalarPtr, cast<User>(VecPtr), FoundLane)); 6113 } 6114 6115 Value *V = propagateMetadata(ST, E->Scalars); 6116 6117 E->VectorizedValue = V; 6118 ++NumVectorInstructions; 6119 return V; 6120 } 6121 case Instruction::GetElementPtr: { 6122 setInsertPointAfterBundle(E); 6123 6124 Value *Op0 = vectorizeTree(E->getOperand(0)); 6125 6126 std::vector<Value *> OpVecs; 6127 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e; 6128 ++j) { 6129 ValueList &VL = E->getOperand(j); 6130 // Need to cast all elements to the same type before vectorization to 6131 // avoid crash. 6132 Type *VL0Ty = VL0->getOperand(j)->getType(); 6133 Type *Ty = llvm::all_of( 6134 VL, [VL0Ty](Value *V) { return VL0Ty == V->getType(); }) 6135 ? 
VL0Ty 6136 : DL->getIndexType(cast<GetElementPtrInst>(VL0) 6137 ->getPointerOperandType() 6138 ->getScalarType()); 6139 for (Value *&V : VL) { 6140 auto *CI = cast<ConstantInt>(V); 6141 V = ConstantExpr::getIntegerCast(CI, Ty, 6142 CI->getValue().isSignBitSet()); 6143 } 6144 Value *OpVec = vectorizeTree(VL); 6145 OpVecs.push_back(OpVec); 6146 } 6147 6148 Value *V = Builder.CreateGEP( 6149 cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs); 6150 if (Instruction *I = dyn_cast<Instruction>(V)) 6151 V = propagateMetadata(I, E->Scalars); 6152 6153 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6154 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6155 V = ShuffleBuilder.finalize(V); 6156 6157 E->VectorizedValue = V; 6158 ++NumVectorInstructions; 6159 6160 return V; 6161 } 6162 case Instruction::Call: { 6163 CallInst *CI = cast<CallInst>(VL0); 6164 setInsertPointAfterBundle(E); 6165 6166 Intrinsic::ID IID = Intrinsic::not_intrinsic; 6167 if (Function *FI = CI->getCalledFunction()) 6168 IID = FI->getIntrinsicID(); 6169 6170 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6171 6172 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 6173 bool UseIntrinsic = ID != Intrinsic::not_intrinsic && 6174 VecCallCosts.first <= VecCallCosts.second; 6175 6176 Value *ScalarArg = nullptr; 6177 std::vector<Value *> OpVecs; 6178 SmallVector<Type *, 2> TysForDecl = 6179 {FixedVectorType::get(CI->getType(), E->Scalars.size())}; 6180 for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) { 6181 ValueList OpVL; 6182 // Some intrinsics have scalar arguments. This argument should not be 6183 // vectorized. 6184 if (UseIntrinsic && hasVectorInstrinsicScalarOpd(IID, j)) { 6185 CallInst *CEI = cast<CallInst>(VL0); 6186 ScalarArg = CEI->getArgOperand(j); 6187 OpVecs.push_back(CEI->getArgOperand(j)); 6188 if (hasVectorInstrinsicOverloadedScalarOpd(IID, j)) 6189 TysForDecl.push_back(ScalarArg->getType()); 6190 continue; 6191 } 6192 6193 Value *OpVec = vectorizeTree(E->getOperand(j)); 6194 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); 6195 OpVecs.push_back(OpVec); 6196 } 6197 6198 Function *CF; 6199 if (!UseIntrinsic) { 6200 VFShape Shape = 6201 VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>( 6202 VecTy->getNumElements())), 6203 false /*HasGlobalPred*/); 6204 CF = VFDatabase(*CI).getVectorizedFunction(Shape); 6205 } else { 6206 CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl); 6207 } 6208 6209 SmallVector<OperandBundleDef, 1> OpBundles; 6210 CI->getOperandBundlesAsDefs(OpBundles); 6211 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); 6212 6213 // The scalar argument uses an in-tree scalar so we add the new vectorized 6214 // call to ExternalUses list to make sure that an extract will be 6215 // generated in the future. 6216 if (ScalarArg) { 6217 if (TreeEntry *Entry = getTreeEntry(ScalarArg)) { 6218 // Find which lane we need to extract. 
6219 unsigned FoundLane = Entry->findLaneForValue(ScalarArg); 6220 ExternalUses.push_back( 6221 ExternalUser(ScalarArg, cast<User>(V), FoundLane)); 6222 } 6223 } 6224 6225 propagateIRFlags(V, E->Scalars, VL0); 6226 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6227 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6228 V = ShuffleBuilder.finalize(V); 6229 6230 E->VectorizedValue = V; 6231 ++NumVectorInstructions; 6232 return V; 6233 } 6234 case Instruction::ShuffleVector: { 6235 assert(E->isAltShuffle() && 6236 ((Instruction::isBinaryOp(E->getOpcode()) && 6237 Instruction::isBinaryOp(E->getAltOpcode())) || 6238 (Instruction::isCast(E->getOpcode()) && 6239 Instruction::isCast(E->getAltOpcode()))) && 6240 "Invalid Shuffle Vector Operand"); 6241 6242 Value *LHS = nullptr, *RHS = nullptr; 6243 if (Instruction::isBinaryOp(E->getOpcode())) { 6244 setInsertPointAfterBundle(E); 6245 LHS = vectorizeTree(E->getOperand(0)); 6246 RHS = vectorizeTree(E->getOperand(1)); 6247 } else { 6248 setInsertPointAfterBundle(E); 6249 LHS = vectorizeTree(E->getOperand(0)); 6250 } 6251 6252 if (E->VectorizedValue) { 6253 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6254 return E->VectorizedValue; 6255 } 6256 6257 Value *V0, *V1; 6258 if (Instruction::isBinaryOp(E->getOpcode())) { 6259 V0 = Builder.CreateBinOp( 6260 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS); 6261 V1 = Builder.CreateBinOp( 6262 static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS); 6263 } else { 6264 V0 = Builder.CreateCast( 6265 static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy); 6266 V1 = Builder.CreateCast( 6267 static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy); 6268 } 6269 6270 // Create shuffle to take alternate operations from the vector. 6271 // Also, gather up main and alt scalar ops to propagate IR flags to 6272 // each vector operation. 6273 ValueList OpScalars, AltScalars; 6274 SmallVector<int> Mask; 6275 buildSuffleEntryMask( 6276 E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices, 6277 [E](Instruction *I) { 6278 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 6279 return I->getOpcode() == E->getAltOpcode(); 6280 }, 6281 Mask, &OpScalars, &AltScalars); 6282 6283 propagateIRFlags(V0, OpScalars); 6284 propagateIRFlags(V1, AltScalars); 6285 6286 Value *V = Builder.CreateShuffleVector(V0, V1, Mask); 6287 if (Instruction *I = dyn_cast<Instruction>(V)) 6288 V = propagateMetadata(I, E->Scalars); 6289 V = ShuffleBuilder.finalize(V); 6290 6291 E->VectorizedValue = V; 6292 ++NumVectorInstructions; 6293 6294 return V; 6295 } 6296 default: 6297 llvm_unreachable("unknown inst"); 6298 } 6299 return nullptr; 6300 } 6301 6302 Value *BoUpSLP::vectorizeTree() { 6303 ExtraValueToDebugLocsMap ExternallyUsedValues; 6304 return vectorizeTree(ExternallyUsedValues); 6305 } 6306 6307 Value * 6308 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { 6309 // All blocks must be scheduled before any instructions are inserted. 6310 for (auto &BSIter : BlocksSchedules) { 6311 scheduleBlock(BSIter.second.get()); 6312 } 6313 6314 Builder.SetInsertPoint(&F->getEntryBlock().front()); 6315 auto *VectorRoot = vectorizeTree(VectorizableTree[0].get()); 6316 6317 // If the vectorized tree can be rewritten in a smaller type, we truncate the 6318 // vectorized root. InstCombine will then rewrite the entire expression. We 6319 // sign extend the extracted values below. 
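// For example, if MinBWs records that the expression only needs 8 bits, an
// <N x i32> root is truncated to <N x i8> here, and the extracts emitted
// below are sign- or zero-extended back to i32 based on MinBWs' sign flag.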
6320 auto *ScalarRoot = VectorizableTree[0]->Scalars[0]; 6321 if (MinBWs.count(ScalarRoot)) { 6322 if (auto *I = dyn_cast<Instruction>(VectorRoot)) { 6323 // If current instr is a phi and not the last phi, insert it after the 6324 // last phi node. 6325 if (isa<PHINode>(I)) 6326 Builder.SetInsertPoint(&*I->getParent()->getFirstInsertionPt()); 6327 else 6328 Builder.SetInsertPoint(&*++BasicBlock::iterator(I)); 6329 } 6330 auto BundleWidth = VectorizableTree[0]->Scalars.size(); 6331 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 6332 auto *VecTy = FixedVectorType::get(MinTy, BundleWidth); 6333 auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy); 6334 VectorizableTree[0]->VectorizedValue = Trunc; 6335 } 6336 6337 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() 6338 << " values .\n"); 6339 6340 // Extract all of the elements with the external uses. 6341 for (const auto &ExternalUse : ExternalUses) { 6342 Value *Scalar = ExternalUse.Scalar; 6343 llvm::User *User = ExternalUse.User; 6344 6345 // Skip users that we already RAUW. This happens when one instruction 6346 // has multiple uses of the same value. 6347 if (User && !is_contained(Scalar->users(), User)) 6348 continue; 6349 TreeEntry *E = getTreeEntry(Scalar); 6350 assert(E && "Invalid scalar"); 6351 assert(E->State != TreeEntry::NeedToGather && 6352 "Extracting from a gather list"); 6353 6354 Value *Vec = E->VectorizedValue; 6355 assert(Vec && "Can't find vectorizable value"); 6356 6357 Value *Lane = Builder.getInt32(ExternalUse.Lane); 6358 auto ExtractAndExtendIfNeeded = [&](Value *Vec) { 6359 if (Scalar->getType() != Vec->getType()) { 6360 Value *Ex; 6361 // "Reuse" the existing extract to improve final codegen. 6362 if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) { 6363 Ex = Builder.CreateExtractElement(ES->getOperand(0), 6364 ES->getOperand(1)); 6365 } else { 6366 Ex = Builder.CreateExtractElement(Vec, Lane); 6367 } 6368 // If necessary, sign-extend or zero-extend ScalarRoot 6369 // to the larger type. 6370 if (!MinBWs.count(ScalarRoot)) 6371 return Ex; 6372 if (MinBWs[ScalarRoot].second) 6373 return Builder.CreateSExt(Ex, Scalar->getType()); 6374 return Builder.CreateZExt(Ex, Scalar->getType()); 6375 } 6376 assert(isa<FixedVectorType>(Scalar->getType()) && 6377 isa<InsertElementInst>(Scalar) && 6378 "In-tree scalar of vector type is not insertelement?"); 6379 return Vec; 6380 }; 6381 // If User == nullptr, the Scalar is used as extra arg. Generate 6382 // ExtractElement instruction and update the record for this scalar in 6383 // ExternallyUsedValues. 6384 if (!User) { 6385 assert(ExternallyUsedValues.count(Scalar) && 6386 "Scalar with nullptr as an external user must be registered in " 6387 "ExternallyUsedValues map"); 6388 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 6389 Builder.SetInsertPoint(VecI->getParent(), 6390 std::next(VecI->getIterator())); 6391 } else { 6392 Builder.SetInsertPoint(&F->getEntryBlock().front()); 6393 } 6394 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 6395 CSEBlocks.insert(cast<Instruction>(Scalar)->getParent()); 6396 auto &NewInstLocs = ExternallyUsedValues[NewInst]; 6397 auto It = ExternallyUsedValues.find(Scalar); 6398 assert(It != ExternallyUsedValues.end() && 6399 "Externally used scalar is not found in ExternallyUsedValues"); 6400 NewInstLocs.append(It->second); 6401 ExternallyUsedValues.erase(Scalar); 6402 // Required to update internally referenced instructions. 
6403 Scalar->replaceAllUsesWith(NewInst); 6404 continue; 6405 } 6406 6407 // Generate extracts for out-of-tree users. 6408 // Find the insertion point for the extractelement lane. 6409 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 6410 if (PHINode *PH = dyn_cast<PHINode>(User)) { 6411 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { 6412 if (PH->getIncomingValue(i) == Scalar) { 6413 Instruction *IncomingTerminator = 6414 PH->getIncomingBlock(i)->getTerminator(); 6415 if (isa<CatchSwitchInst>(IncomingTerminator)) { 6416 Builder.SetInsertPoint(VecI->getParent(), 6417 std::next(VecI->getIterator())); 6418 } else { 6419 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); 6420 } 6421 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 6422 CSEBlocks.insert(PH->getIncomingBlock(i)); 6423 PH->setOperand(i, NewInst); 6424 } 6425 } 6426 } else { 6427 Builder.SetInsertPoint(cast<Instruction>(User)); 6428 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 6429 CSEBlocks.insert(cast<Instruction>(User)->getParent()); 6430 User->replaceUsesOfWith(Scalar, NewInst); 6431 } 6432 } else { 6433 Builder.SetInsertPoint(&F->getEntryBlock().front()); 6434 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 6435 CSEBlocks.insert(&F->getEntryBlock()); 6436 User->replaceUsesOfWith(Scalar, NewInst); 6437 } 6438 6439 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 6440 } 6441 6442 // For each vectorized value: 6443 for (auto &TEPtr : VectorizableTree) { 6444 TreeEntry *Entry = TEPtr.get(); 6445 6446 // No need to handle users of gathered values. 6447 if (Entry->State == TreeEntry::NeedToGather) 6448 continue; 6449 6450 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 6451 6452 // For each lane: 6453 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 6454 Value *Scalar = Entry->Scalars[Lane]; 6455 6456 #ifndef NDEBUG 6457 Type *Ty = Scalar->getType(); 6458 if (!Ty->isVoidTy()) { 6459 for (User *U : Scalar->users()) { 6460 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 6461 6462 // It is legal to delete users in the ignorelist. 6463 assert((getTreeEntry(U) || is_contained(UserIgnoreList, U) || 6464 (isa_and_nonnull<Instruction>(U) && 6465 isDeleted(cast<Instruction>(U)))) && 6466 "Deleting out-of-tree value"); 6467 } 6468 } 6469 #endif 6470 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 6471 eraseInstruction(cast<Instruction>(Scalar)); 6472 } 6473 } 6474 6475 Builder.ClearInsertionPoint(); 6476 InstrElementSize.clear(); 6477 6478 return VectorizableTree[0]->VectorizedValue; 6479 } 6480 6481 void BoUpSLP::optimizeGatherSequence() { 6482 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size() 6483 << " gather sequences instructions.\n"); 6484 // LICM InsertElementInst sequences. 6485 for (Instruction *I : GatherSeq) { 6486 if (isDeleted(I)) 6487 continue; 6488 6489 // Check if this block is inside a loop. 6490 Loop *L = LI->getLoopFor(I->getParent()); 6491 if (!L) 6492 continue; 6493 6494 // Check if it has a preheader. 6495 BasicBlock *PreHeader = L->getLoopPreheader(); 6496 if (!PreHeader) 6497 continue; 6498 6499 // If the vector or the element that we insert into it are 6500 // instructions that are defined in this basic block then we can't 6501 // hoist this instruction. 
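// For example, an insertelement whose inserted scalar is computed inside the
// loop body cannot be moved to the preheader and is skipped here.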
6502 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); 6503 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); 6504 if (Op0 && L->contains(Op0)) 6505 continue; 6506 if (Op1 && L->contains(Op1)) 6507 continue; 6508 6509 // We can hoist this instruction. Move it to the pre-header. 6510 I->moveBefore(PreHeader->getTerminator()); 6511 } 6512 6513 // Make a list of all reachable blocks in our CSE queue. 6514 SmallVector<const DomTreeNode *, 8> CSEWorkList; 6515 CSEWorkList.reserve(CSEBlocks.size()); 6516 for (BasicBlock *BB : CSEBlocks) 6517 if (DomTreeNode *N = DT->getNode(BB)) { 6518 assert(DT->isReachableFromEntry(N)); 6519 CSEWorkList.push_back(N); 6520 } 6521 6522 // Sort blocks by domination. This ensures we visit a block after all blocks 6523 // dominating it are visited. 6524 llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) { 6525 assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) && 6526 "Different nodes should have different DFS numbers"); 6527 return A->getDFSNumIn() < B->getDFSNumIn(); 6528 }); 6529 6530 // Perform O(N^2) search over the gather sequences and merge identical 6531 // instructions. TODO: We can further optimize this scan if we split the 6532 // instructions into different buckets based on the insert lane. 6533 SmallVector<Instruction *, 16> Visited; 6534 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 6535 assert(*I && 6536 (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 6537 "Worklist not sorted properly!"); 6538 BasicBlock *BB = (*I)->getBlock(); 6539 // For all instructions in blocks containing gather sequences: 6540 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) { 6541 Instruction *In = &*it++; 6542 if (isDeleted(In)) 6543 continue; 6544 if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In) && 6545 !isa<ShuffleVectorInst>(In)) 6546 continue; 6547 6548 // Check if we can replace this instruction with any of the 6549 // visited instructions. 6550 for (Instruction *v : Visited) { 6551 if (In->isIdenticalTo(v) && 6552 DT->dominates(v->getParent(), In->getParent())) { 6553 In->replaceAllUsesWith(v); 6554 eraseInstruction(In); 6555 In = nullptr; 6556 break; 6557 } 6558 } 6559 if (In) { 6560 assert(!is_contained(Visited, In)); 6561 Visited.push_back(In); 6562 } 6563 } 6564 } 6565 CSEBlocks.clear(); 6566 GatherSeq.clear(); 6567 } 6568 6569 // Groups the instructions to a bundle (which is then a single scheduling entity) 6570 // and schedules instructions until the bundle gets ready. 6571 Optional<BoUpSLP::ScheduleData *> 6572 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 6573 const InstructionsState &S) { 6574 // No need to schedule PHIs, insertelement, extractelement and extractvalue 6575 // instructions. 6576 if (isa<PHINode>(S.OpValue) || isVectorLikeInstWithConstOps(S.OpValue)) 6577 return nullptr; 6578 6579 // Initialize the instruction bundle. 6580 Instruction *OldScheduleEnd = ScheduleEnd; 6581 ScheduleData *PrevInBundle = nullptr; 6582 ScheduleData *Bundle = nullptr; 6583 bool ReSchedule = false; 6584 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n"); 6585 6586 auto &&TryScheduleBundle = [this, OldScheduleEnd, SLP](bool ReSchedule, 6587 ScheduleData *Bundle) { 6588 // The scheduling region got new instructions at the lower end (or it is a 6589 // new region for the first bundle). This makes it necessary to 6590 // recalculate all dependencies. 
6591 // It is seldom that this needs to be done a second time after adding the
6592 // initial bundle to the region.
6593 if (ScheduleEnd != OldScheduleEnd) {
6594 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode())
6595 doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); });
6596 ReSchedule = true;
6597 }
6598 if (ReSchedule) {
6599 resetSchedule();
6600 initialFillReadyList(ReadyInsts);
6601 }
6602 if (Bundle) {
6603 LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle
6604 << " in block " << BB->getName() << "\n");
6605 calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP);
6606 }
6607
6608 // Now try to schedule the new bundle or (if no bundle) just calculate
6609 // dependencies. As soon as the bundle is "ready" it means that there are no
6610 // cyclic dependencies and we can schedule it. Note that it's important that
6611 // we don't "schedule" the bundle yet (see cancelScheduling).
6612 while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) &&
6613 !ReadyInsts.empty()) {
6614 ScheduleData *Picked = ReadyInsts.pop_back_val();
6615 if (Picked->isSchedulingEntity() && Picked->isReady())
6616 schedule(Picked, ReadyInsts);
6617 }
6618 };
6619
6620 // Make sure that the scheduling region contains all
6621 // instructions of the bundle.
6622 for (Value *V : VL) {
6623 if (!extendSchedulingRegion(V, S)) {
6624 // If the scheduling region got new instructions at the lower end (or it
6625 // is a new region for the first bundle), it is necessary to
6626 // recalculate all dependencies.
6627 // Otherwise the compiler may crash trying to incorrectly calculate
6628 // dependencies and emit instructions in the wrong order at the actual
6629 // scheduling.
6630 TryScheduleBundle(/*ReSchedule=*/false, nullptr);
6631 return None;
6632 }
6633 }
6634
6635 for (Value *V : VL) {
6636 ScheduleData *BundleMember = getScheduleData(V);
6637 assert(BundleMember &&
6638 "no ScheduleData for bundle member (maybe not in same basic block)");
6639 if (BundleMember->IsScheduled) {
6640 // A bundle member was scheduled as a single instruction before and now
6641 // needs to be scheduled as part of the bundle. We just get rid of the
6642 // existing schedule.
6643 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
6644 << " was already scheduled\n");
6645 ReSchedule = true;
6646 }
6647 assert(BundleMember->isSchedulingEntity() &&
6648 "bundle member already part of other bundle");
6649 if (PrevInBundle) {
6650 PrevInBundle->NextInBundle = BundleMember;
6651 } else {
6652 Bundle = BundleMember;
6653 }
6654 BundleMember->UnscheduledDepsInBundle = 0;
6655 Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;
6656
6657 // Group the instructions to a bundle.
6658 BundleMember->FirstInBundle = Bundle; 6659 PrevInBundle = BundleMember; 6660 } 6661 assert(Bundle && "Failed to find schedule bundle"); 6662 TryScheduleBundle(ReSchedule, Bundle); 6663 if (!Bundle->isReady()) { 6664 cancelScheduling(VL, S.OpValue); 6665 return None; 6666 } 6667 return Bundle; 6668 } 6669 6670 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL, 6671 Value *OpValue) { 6672 if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue)) 6673 return; 6674 6675 ScheduleData *Bundle = getScheduleData(OpValue); 6676 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); 6677 assert(!Bundle->IsScheduled && 6678 "Can't cancel bundle which is already scheduled"); 6679 assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() && 6680 "tried to unbundle something which is not a bundle"); 6681 6682 // Un-bundle: make single instructions out of the bundle. 6683 ScheduleData *BundleMember = Bundle; 6684 while (BundleMember) { 6685 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); 6686 BundleMember->FirstInBundle = BundleMember; 6687 ScheduleData *Next = BundleMember->NextInBundle; 6688 BundleMember->NextInBundle = nullptr; 6689 BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps; 6690 if (BundleMember->UnscheduledDepsInBundle == 0) { 6691 ReadyInsts.insert(BundleMember); 6692 } 6693 BundleMember = Next; 6694 } 6695 } 6696 6697 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() { 6698 // Allocate a new ScheduleData for the instruction. 6699 if (ChunkPos >= ChunkSize) { 6700 ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize)); 6701 ChunkPos = 0; 6702 } 6703 return &(ScheduleDataChunks.back()[ChunkPos++]); 6704 } 6705 6706 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, 6707 const InstructionsState &S) { 6708 if (getScheduleData(V, isOneOf(S, V))) 6709 return true; 6710 Instruction *I = dyn_cast<Instruction>(V); 6711 assert(I && "bundle member must be an instruction"); 6712 assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) && 6713 "phi nodes/insertelements/extractelements/extractvalues don't need to " 6714 "be scheduled"); 6715 auto &&CheckSheduleForI = [this, &S](Instruction *I) -> bool { 6716 ScheduleData *ISD = getScheduleData(I); 6717 if (!ISD) 6718 return false; 6719 assert(isInSchedulingRegion(ISD) && 6720 "ScheduleData not in scheduling region"); 6721 ScheduleData *SD = allocateScheduleDataChunks(); 6722 SD->Inst = I; 6723 SD->init(SchedulingRegionID, S.OpValue); 6724 ExtraScheduleDataMap[I][S.OpValue] = SD; 6725 return true; 6726 }; 6727 if (CheckSheduleForI(I)) 6728 return true; 6729 if (!ScheduleStart) { 6730 // It's the first instruction in the new region. 6731 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 6732 ScheduleStart = I; 6733 ScheduleEnd = I->getNextNode(); 6734 if (isOneOf(S, I) != I) 6735 CheckSheduleForI(I); 6736 assert(ScheduleEnd && "tried to vectorize a terminator?"); 6737 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 6738 return true; 6739 } 6740 // Search up and down at the same time, because we don't know if the new 6741 // instruction is above or below the existing scheduling region. 
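// Walking up and down in lock-step keeps the cost proportional to the
// distance between I and the existing region, and ScheduleRegionSizeLimit
// caps the number of steps taken.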
6742 BasicBlock::reverse_iterator UpIter = 6743 ++ScheduleStart->getIterator().getReverse(); 6744 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 6745 BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); 6746 BasicBlock::iterator LowerEnd = BB->end(); 6747 while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I && 6748 &*DownIter != I) { 6749 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 6750 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 6751 return false; 6752 } 6753 6754 ++UpIter; 6755 ++DownIter; 6756 } 6757 if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) { 6758 assert(I->getParent() == ScheduleStart->getParent() && 6759 "Instruction is in wrong basic block."); 6760 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 6761 ScheduleStart = I; 6762 if (isOneOf(S, I) != I) 6763 CheckSheduleForI(I); 6764 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I 6765 << "\n"); 6766 return true; 6767 } 6768 assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) && 6769 "Expected to reach top of the basic block or instruction down the " 6770 "lower end."); 6771 assert(I->getParent() == ScheduleEnd->getParent() && 6772 "Instruction is in wrong basic block."); 6773 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 6774 nullptr); 6775 ScheduleEnd = I->getNextNode(); 6776 if (isOneOf(S, I) != I) 6777 CheckSheduleForI(I); 6778 assert(ScheduleEnd && "tried to vectorize a terminator?"); 6779 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); 6780 return true; 6781 } 6782 6783 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 6784 Instruction *ToI, 6785 ScheduleData *PrevLoadStore, 6786 ScheduleData *NextLoadStore) { 6787 ScheduleData *CurrentLoadStore = PrevLoadStore; 6788 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 6789 ScheduleData *SD = ScheduleDataMap[I]; 6790 if (!SD) { 6791 SD = allocateScheduleDataChunks(); 6792 ScheduleDataMap[I] = SD; 6793 SD->Inst = I; 6794 } 6795 assert(!isInSchedulingRegion(SD) && 6796 "new ScheduleData already in scheduling region"); 6797 SD->init(SchedulingRegionID, I); 6798 6799 if (I->mayReadOrWriteMemory() && 6800 (!isa<IntrinsicInst>(I) || 6801 (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect && 6802 cast<IntrinsicInst>(I)->getIntrinsicID() != 6803 Intrinsic::pseudoprobe))) { 6804 // Update the linked list of memory accessing instructions. 
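// This linked list of memory-accessing instructions lets
// calculateDependencies walk only loads, stores and other memory operations
// when adding memory dependencies, instead of scanning every instruction in
// the region.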
6805 if (CurrentLoadStore) { 6806 CurrentLoadStore->NextLoadStore = SD; 6807 } else { 6808 FirstLoadStoreInRegion = SD; 6809 } 6810 CurrentLoadStore = SD; 6811 } 6812 } 6813 if (NextLoadStore) { 6814 if (CurrentLoadStore) 6815 CurrentLoadStore->NextLoadStore = NextLoadStore; 6816 } else { 6817 LastLoadStoreInRegion = CurrentLoadStore; 6818 } 6819 } 6820 6821 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, 6822 bool InsertInReadyList, 6823 BoUpSLP *SLP) { 6824 assert(SD->isSchedulingEntity()); 6825 6826 SmallVector<ScheduleData *, 10> WorkList; 6827 WorkList.push_back(SD); 6828 6829 while (!WorkList.empty()) { 6830 ScheduleData *SD = WorkList.pop_back_val(); 6831 6832 ScheduleData *BundleMember = SD; 6833 while (BundleMember) { 6834 assert(isInSchedulingRegion(BundleMember)); 6835 if (!BundleMember->hasValidDependencies()) { 6836 6837 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember 6838 << "\n"); 6839 BundleMember->Dependencies = 0; 6840 BundleMember->resetUnscheduledDeps(); 6841 6842 // Handle def-use chain dependencies. 6843 if (BundleMember->OpValue != BundleMember->Inst) { 6844 ScheduleData *UseSD = getScheduleData(BundleMember->Inst); 6845 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 6846 BundleMember->Dependencies++; 6847 ScheduleData *DestBundle = UseSD->FirstInBundle; 6848 if (!DestBundle->IsScheduled) 6849 BundleMember->incrementUnscheduledDeps(1); 6850 if (!DestBundle->hasValidDependencies()) 6851 WorkList.push_back(DestBundle); 6852 } 6853 } else { 6854 for (User *U : BundleMember->Inst->users()) { 6855 if (isa<Instruction>(U)) { 6856 ScheduleData *UseSD = getScheduleData(U); 6857 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 6858 BundleMember->Dependencies++; 6859 ScheduleData *DestBundle = UseSD->FirstInBundle; 6860 if (!DestBundle->IsScheduled) 6861 BundleMember->incrementUnscheduledDeps(1); 6862 if (!DestBundle->hasValidDependencies()) 6863 WorkList.push_back(DestBundle); 6864 } 6865 } else { 6866 // I'm not sure if this can ever happen. But we need to be safe. 6867 // This lets the instruction/bundle never be scheduled and 6868 // eventually disable vectorization. 6869 BundleMember->Dependencies++; 6870 BundleMember->incrementUnscheduledDeps(1); 6871 } 6872 } 6873 } 6874 6875 // Handle the memory dependencies. 6876 ScheduleData *DepDest = BundleMember->NextLoadStore; 6877 if (DepDest) { 6878 Instruction *SrcInst = BundleMember->Inst; 6879 MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA); 6880 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory(); 6881 unsigned numAliased = 0; 6882 unsigned DistToSrc = 1; 6883 6884 while (DepDest) { 6885 assert(isInSchedulingRegion(DepDest)); 6886 6887 // We have two limits to reduce the complexity: 6888 // 1) AliasedCheckLimit: It's a small limit to reduce calls to 6889 // SLP->isAliased (which is the expensive part in this loop). 6890 // 2) MaxMemDepDistance: It's for very large blocks and it aborts 6891 // the whole loop (even if the loop is fast, it's quadratic). 6892 // It's important for the loop break condition (see below) to 6893 // check this limit even between two read-only instructions. 6894 if (DistToSrc >= MaxMemDepDistance || 6895 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) && 6896 (numAliased >= AliasedCheckLimit || 6897 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) { 6898 6899 // We increment the counter only if the locations are aliased 6900 // (instead of counting all alias checks). 
This gives a better 6901 // balance between reduced runtime and accurate dependencies. 6902 numAliased++; 6903 6904 DepDest->MemoryDependencies.push_back(BundleMember); 6905 BundleMember->Dependencies++; 6906 ScheduleData *DestBundle = DepDest->FirstInBundle; 6907 if (!DestBundle->IsScheduled) { 6908 BundleMember->incrementUnscheduledDeps(1); 6909 } 6910 if (!DestBundle->hasValidDependencies()) { 6911 WorkList.push_back(DestBundle); 6912 } 6913 } 6914 DepDest = DepDest->NextLoadStore; 6915 6916 // Example, explaining the loop break condition: Let's assume our 6917 // starting instruction is i0 and MaxMemDepDistance = 3. 6918 // 6919 // +--------v--v--v 6920 // i0,i1,i2,i3,i4,i5,i6,i7,i8 6921 // +--------^--^--^ 6922 // 6923 // MaxMemDepDistance let us stop alias-checking at i3 and we add 6924 // dependencies from i0 to i3,i4,.. (even if they are not aliased). 6925 // Previously we already added dependencies from i3 to i6,i7,i8 6926 // (because of MaxMemDepDistance). As we added a dependency from 6927 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 6928 // and we can abort this loop at i6. 6929 if (DistToSrc >= 2 * MaxMemDepDistance) 6930 break; 6931 DistToSrc++; 6932 } 6933 } 6934 } 6935 BundleMember = BundleMember->NextInBundle; 6936 } 6937 if (InsertInReadyList && SD->isReady()) { 6938 ReadyInsts.push_back(SD); 6939 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst 6940 << "\n"); 6941 } 6942 } 6943 } 6944 6945 void BoUpSLP::BlockScheduling::resetSchedule() { 6946 assert(ScheduleStart && 6947 "tried to reset schedule on block which has not been scheduled"); 6948 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 6949 doForAllOpcodes(I, [&](ScheduleData *SD) { 6950 assert(isInSchedulingRegion(SD) && 6951 "ScheduleData not in scheduling region"); 6952 SD->IsScheduled = false; 6953 SD->resetUnscheduledDeps(); 6954 }); 6955 } 6956 ReadyInsts.clear(); 6957 } 6958 6959 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 6960 if (!BS->ScheduleStart) 6961 return; 6962 6963 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 6964 6965 BS->resetSchedule(); 6966 6967 // For the real scheduling we use a more sophisticated ready-list: it is 6968 // sorted by the original instruction location. This lets the final schedule 6969 // be as close as possible to the original instruction order. 6970 struct ScheduleDataCompare { 6971 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const { 6972 return SD2->SchedulingPriority < SD1->SchedulingPriority; 6973 } 6974 }; 6975 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 6976 6977 // Ensure that all dependency data is updated and fill the ready-list with 6978 // initial instructions. 6979 int Idx = 0; 6980 int NumToSchedule = 0; 6981 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 6982 I = I->getNextNode()) { 6983 BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) { 6984 assert((isVectorLikeInstWithConstOps(SD->Inst) || 6985 SD->isPartOfBundle() == (getTreeEntry(SD->Inst) != nullptr)) && 6986 "scheduler and vectorizer bundle mismatch"); 6987 SD->FirstInBundle->SchedulingPriority = Idx++; 6988 if (SD->isSchedulingEntity()) { 6989 BS->calculateDependencies(SD, false, this); 6990 NumToSchedule++; 6991 } 6992 }); 6993 } 6994 BS->initialFillReadyList(ReadyInsts); 6995 6996 Instruction *LastScheduledInst = BS->ScheduleEnd; 6997 6998 // Do the "real" scheduling. 
6999 while (!ReadyInsts.empty()) { 7000 ScheduleData *picked = *ReadyInsts.begin(); 7001 ReadyInsts.erase(ReadyInsts.begin()); 7002 7003 // Move the scheduled instruction(s) to their dedicated places, if not 7004 // there yet. 7005 ScheduleData *BundleMember = picked; 7006 while (BundleMember) { 7007 Instruction *pickedInst = BundleMember->Inst; 7008 if (pickedInst->getNextNode() != LastScheduledInst) { 7009 BS->BB->getInstList().remove(pickedInst); 7010 BS->BB->getInstList().insert(LastScheduledInst->getIterator(), 7011 pickedInst); 7012 } 7013 LastScheduledInst = pickedInst; 7014 BundleMember = BundleMember->NextInBundle; 7015 } 7016 7017 BS->schedule(picked, ReadyInsts); 7018 NumToSchedule--; 7019 } 7020 assert(NumToSchedule == 0 && "could not schedule all instructions"); 7021 7022 // Avoid duplicate scheduling of the block. 7023 BS->ScheduleStart = nullptr; 7024 } 7025 7026 unsigned BoUpSLP::getVectorElementSize(Value *V) { 7027 // If V is a store, just return the width of the stored value (or value 7028 // truncated just before storing) without traversing the expression tree. 7029 // This is the common case. 7030 if (auto *Store = dyn_cast<StoreInst>(V)) { 7031 if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand())) 7032 return DL->getTypeSizeInBits(Trunc->getSrcTy()); 7033 return DL->getTypeSizeInBits(Store->getValueOperand()->getType()); 7034 } 7035 7036 if (auto *IEI = dyn_cast<InsertElementInst>(V)) 7037 return getVectorElementSize(IEI->getOperand(1)); 7038 7039 auto E = InstrElementSize.find(V); 7040 if (E != InstrElementSize.end()) 7041 return E->second; 7042 7043 // If V is not a store, we can traverse the expression tree to find loads 7044 // that feed it. The type of the loaded value may indicate a more suitable 7045 // width than V's type. We want to base the vector element size on the width 7046 // of memory operations where possible. 7047 SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist; 7048 SmallPtrSet<Instruction *, 16> Visited; 7049 if (auto *I = dyn_cast<Instruction>(V)) { 7050 Worklist.emplace_back(I, I->getParent()); 7051 Visited.insert(I); 7052 } 7053 7054 // Traverse the expression tree in bottom-up order looking for loads. If we 7055 // encounter an instruction we don't yet handle, we give up. 7056 auto Width = 0u; 7057 while (!Worklist.empty()) { 7058 Instruction *I; 7059 BasicBlock *Parent; 7060 std::tie(I, Parent) = Worklist.pop_back_val(); 7061 7062 // We should only be looking at scalar instructions here. If the current 7063 // instruction has a vector type, skip. 7064 auto *Ty = I->getType(); 7065 if (isa<VectorType>(Ty)) 7066 continue; 7067 7068 // If the current instruction is a load, update MaxWidth to reflect the 7069 // width of the loaded value. 7070 if (isa<LoadInst>(I) || isa<ExtractElementInst>(I) || 7071 isa<ExtractValueInst>(I)) 7072 Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty)); 7073 7074 // Otherwise, we need to visit the operands of the instruction. We only 7075 // handle the interesting cases from buildTree here. If an operand is an 7076 // instruction we haven't yet visited and from the same basic block as the 7077 // user or the use is a PHI node, we add it to the worklist. 
7078 else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 7079 isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I) || 7080 isa<UnaryOperator>(I)) { 7081 for (Use &U : I->operands()) 7082 if (auto *J = dyn_cast<Instruction>(U.get())) 7083 if (Visited.insert(J).second && 7084 (isa<PHINode>(I) || J->getParent() == Parent)) 7085 Worklist.emplace_back(J, J->getParent()); 7086 } else { 7087 break; 7088 } 7089 } 7090 7091 // If we didn't encounter a memory access in the expression tree, or if we 7092 // gave up for some reason, just return the width of V. Otherwise, return the 7093 // maximum width we found. 7094 if (!Width) { 7095 if (auto *CI = dyn_cast<CmpInst>(V)) 7096 V = CI->getOperand(0); 7097 Width = DL->getTypeSizeInBits(V->getType()); 7098 } 7099 7100 for (Instruction *I : Visited) 7101 InstrElementSize[I] = Width; 7102 7103 return Width; 7104 } 7105 7106 // Determine if a value V in a vectorizable expression Expr can be demoted to a 7107 // smaller type with a truncation. We collect the values that will be demoted 7108 // in ToDemote and additional roots that require investigating in Roots. 7109 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr, 7110 SmallVectorImpl<Value *> &ToDemote, 7111 SmallVectorImpl<Value *> &Roots) { 7112 // We can always demote constants. 7113 if (isa<Constant>(V)) { 7114 ToDemote.push_back(V); 7115 return true; 7116 } 7117 7118 // If the value is not an instruction in the expression with only one use, it 7119 // cannot be demoted. 7120 auto *I = dyn_cast<Instruction>(V); 7121 if (!I || !I->hasOneUse() || !Expr.count(I)) 7122 return false; 7123 7124 switch (I->getOpcode()) { 7125 7126 // We can always demote truncations and extensions. Since truncations can 7127 // seed additional demotion, we save the truncated value. 7128 case Instruction::Trunc: 7129 Roots.push_back(I->getOperand(0)); 7130 break; 7131 case Instruction::ZExt: 7132 case Instruction::SExt: 7133 if (isa<ExtractElementInst>(I->getOperand(0)) || 7134 isa<InsertElementInst>(I->getOperand(0))) 7135 return false; 7136 break; 7137 7138 // We can demote certain binary operations if we can demote both of their 7139 // operands. 7140 case Instruction::Add: 7141 case Instruction::Sub: 7142 case Instruction::Mul: 7143 case Instruction::And: 7144 case Instruction::Or: 7145 case Instruction::Xor: 7146 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) || 7147 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots)) 7148 return false; 7149 break; 7150 7151 // We can demote selects if we can demote their true and false values. 7152 case Instruction::Select: { 7153 SelectInst *SI = cast<SelectInst>(I); 7154 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) || 7155 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots)) 7156 return false; 7157 break; 7158 } 7159 7160 // We can demote phis if we can demote all their incoming operands. Note that 7161 // we don't need to worry about cycles since we ensure single use above. 7162 case Instruction::PHI: { 7163 PHINode *PN = cast<PHINode>(I); 7164 for (Value *IncValue : PN->incoming_values()) 7165 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots)) 7166 return false; 7167 break; 7168 } 7169 7170 // Otherwise, conservatively give up. 7171 default: 7172 return false; 7173 } 7174 7175 // Record the value that we can demote. 
7176 ToDemote.push_back(V); 7177 return true; 7178 } 7179 7180 void BoUpSLP::computeMinimumValueSizes() { 7181 // If there are no external uses, the expression tree must be rooted by a 7182 // store. We can't demote in-memory values, so there is nothing to do here. 7183 if (ExternalUses.empty()) 7184 return; 7185 7186 // We only attempt to truncate integer expressions. 7187 auto &TreeRoot = VectorizableTree[0]->Scalars; 7188 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 7189 if (!TreeRootIT) 7190 return; 7191 7192 // If the expression is not rooted by a store, these roots should have 7193 // external uses. We will rely on InstCombine to rewrite the expression in 7194 // the narrower type. However, InstCombine only rewrites single-use values. 7195 // This means that if a tree entry other than a root is used externally, it 7196 // must have multiple uses and InstCombine will not rewrite it. The code 7197 // below ensures that only the roots are used externally. 7198 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end()); 7199 for (auto &EU : ExternalUses) 7200 if (!Expr.erase(EU.Scalar)) 7201 return; 7202 if (!Expr.empty()) 7203 return; 7204 7205 // Collect the scalar values of the vectorizable expression. We will use this 7206 // context to determine which values can be demoted. If we see a truncation, 7207 // we mark it as seeding another demotion. 7208 for (auto &EntryPtr : VectorizableTree) 7209 Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end()); 7210 7211 // Ensure the roots of the vectorizable tree don't form a cycle. They must 7212 // have a single external user that is not in the vectorizable tree. 7213 for (auto *Root : TreeRoot) 7214 if (!Root->hasOneUse() || Expr.count(*Root->user_begin())) 7215 return; 7216 7217 // Conservatively determine if we can actually truncate the roots of the 7218 // expression. Collect the values that can be demoted in ToDemote and 7219 // additional roots that require investigating in Roots. 7220 SmallVector<Value *, 32> ToDemote; 7221 SmallVector<Value *, 4> Roots; 7222 for (auto *Root : TreeRoot) 7223 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots)) 7224 return; 7225 7226 // The maximum bit width required to represent all the values that can be 7227 // demoted without loss of precision. It would be safe to truncate the roots 7228 // of the expression to this width. 7229 auto MaxBitWidth = 8u; 7230 7231 // We first check if all the bits of the roots are demanded. If they're not, 7232 // we can truncate the roots to this narrower type. 7233 for (auto *Root : TreeRoot) { 7234 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 7235 MaxBitWidth = std::max<unsigned>( 7236 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth); 7237 } 7238 7239 // True if the roots can be zero-extended back to their original type, rather 7240 // than sign-extended. We know that if the leading bits are not demanded, we 7241 // can safely zero-extend. So we initialize IsKnownPositive to True. 7242 bool IsKnownPositive = true; 7243 7244 // If all the bits of the roots are demanded, we can try a little harder to 7245 // compute a narrower type. This can happen, for example, if the roots are 7246 // getelementptr indices. InstCombine promotes these indices to the pointer 7247 // width. Thus, all their bits are technically demanded even though the 7248 // address computation might be vectorized in a smaller type. 7249 // 7250 // We start by looking at each entry that can be demoted. 
We compute the
7251 // maximum bit width required to store the scalar by using ValueTracking to
7252 // compute the number of high-order bits we can truncate.
7253 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
7254 llvm::all_of(TreeRoot, [](Value *R) {
7255 assert(R->hasOneUse() && "Root should have only one use!");
7256 return isa<GetElementPtrInst>(R->user_back());
7257 })) {
7258 MaxBitWidth = 8u;
7259
7260 // Determine if the sign bit of all the roots is known to be zero. If not,
7261 // IsKnownPositive is set to False.
7262 IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
7263 KnownBits Known = computeKnownBits(R, *DL);
7264 return Known.isNonNegative();
7265 });
7266
7267 // Determine the maximum number of bits required to store the scalar
7268 // values.
7269 for (auto *Scalar : ToDemote) {
7270 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
7271 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
7272 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
7273 }
7274
7275 // If we can't prove that the sign bit is zero, we must add one to the
7276 // maximum bit width to account for the unknown sign bit. This preserves
7277 // the existing sign bit so we can safely sign-extend the root back to the
7278 // original type. Otherwise, if we know the sign bit is zero, we will
7279 // zero-extend the root instead.
7280 //
7281 // FIXME: This is somewhat suboptimal, as there will be cases where adding
7282 // one to the maximum bit width will yield a larger-than-necessary
7283 // type. In general, we need to add an extra bit only if we can't
7284 // prove that the upper bit of the original type is equal to the
7285 // upper bit of the proposed smaller type. If these two bits are the
7286 // same (either zero or one) we know that sign-extending from the
7287 // smaller type will result in the same value. Here, since we can't
7288 // yet prove this, we are just making the proposed smaller type
7289 // larger to ensure correctness.
7290 if (!IsKnownPositive)
7291 ++MaxBitWidth;
7292 }
7293
7294 // Round MaxBitWidth up to the next power-of-two.
7295 if (!isPowerOf2_64(MaxBitWidth))
7296 MaxBitWidth = NextPowerOf2(MaxBitWidth);
7297
7298 // If the maximum bit width we compute is less than the width of the roots'
7299 // type, we can proceed with the narrowing. Otherwise, do nothing.
7300 if (MaxBitWidth >= TreeRootIT->getBitWidth())
7301 return;
7302
7303 // If we can truncate the root, we must collect additional values that might
7304 // be demoted as a result. That is, those seeded by truncations we will
7305 // modify.
7306 while (!Roots.empty())
7307 collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
7308
7309 // Finally, map the values we can demote to the maximum bit width we computed.
7310 for (auto *Scalar : ToDemote)
7311 MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
7312 }
7313
7314 namespace {
7315
7316 /// The SLPVectorizer Pass.
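/// This is the legacy pass manager wrapper; the new pass manager entry point
/// is SLPVectorizerPass::run(), defined below.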
7317 struct SLPVectorizer : public FunctionPass { 7318 SLPVectorizerPass Impl; 7319 7320 /// Pass identification, replacement for typeid 7321 static char ID; 7322 7323 explicit SLPVectorizer() : FunctionPass(ID) { 7324 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry()); 7325 } 7326 7327 bool doInitialization(Module &M) override { 7328 return false; 7329 } 7330 7331 bool runOnFunction(Function &F) override { 7332 if (skipFunction(F)) 7333 return false; 7334 7335 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 7336 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 7337 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 7338 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 7339 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 7340 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 7341 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 7342 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 7343 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 7344 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 7345 7346 return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 7347 } 7348 7349 void getAnalysisUsage(AnalysisUsage &AU) const override { 7350 FunctionPass::getAnalysisUsage(AU); 7351 AU.addRequired<AssumptionCacheTracker>(); 7352 AU.addRequired<ScalarEvolutionWrapperPass>(); 7353 AU.addRequired<AAResultsWrapperPass>(); 7354 AU.addRequired<TargetTransformInfoWrapperPass>(); 7355 AU.addRequired<LoopInfoWrapperPass>(); 7356 AU.addRequired<DominatorTreeWrapperPass>(); 7357 AU.addRequired<DemandedBitsWrapperPass>(); 7358 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 7359 AU.addRequired<InjectTLIMappingsLegacy>(); 7360 AU.addPreserved<LoopInfoWrapperPass>(); 7361 AU.addPreserved<DominatorTreeWrapperPass>(); 7362 AU.addPreserved<AAResultsWrapperPass>(); 7363 AU.addPreserved<GlobalsAAWrapperPass>(); 7364 AU.setPreservesCFG(); 7365 } 7366 }; 7367 7368 } // end anonymous namespace 7369 7370 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) { 7371 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); 7372 auto *TTI = &AM.getResult<TargetIRAnalysis>(F); 7373 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); 7374 auto *AA = &AM.getResult<AAManager>(F); 7375 auto *LI = &AM.getResult<LoopAnalysis>(F); 7376 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 7377 auto *AC = &AM.getResult<AssumptionAnalysis>(F); 7378 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F); 7379 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 7380 7381 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 7382 if (!Changed) 7383 return PreservedAnalyses::all(); 7384 7385 PreservedAnalyses PA; 7386 PA.preserveSet<CFGAnalyses>(); 7387 return PA; 7388 } 7389 7390 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, 7391 TargetTransformInfo *TTI_, 7392 TargetLibraryInfo *TLI_, AAResults *AA_, 7393 LoopInfo *LI_, DominatorTree *DT_, 7394 AssumptionCache *AC_, DemandedBits *DB_, 7395 OptimizationRemarkEmitter *ORE_) { 7396 if (!RunSLPVectorization) 7397 return false; 7398 SE = SE_; 7399 TTI = TTI_; 7400 TLI = TLI_; 7401 AA = AA_; 7402 LI = LI_; 7403 DT = DT_; 7404 AC = AC_; 7405 DB = DB_; 7406 DL = &F.getParent()->getDataLayout(); 7407 7408 Stores.clear(); 7409 GEPs.clear(); 7410 bool Changed = false; 7411 7412 // If the target claims to have no vector registers don't 
attempt 7413 // vectorization. 7414 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) 7415 return false; 7416 7417 // Don't vectorize when the attribute NoImplicitFloat is used. 7418 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 7419 return false; 7420 7421 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 7422 7423 // Use the bottom up slp vectorizer to construct chains that start with 7424 // store instructions. 7425 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_); 7426 7427 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 7428 // delete instructions. 7429 7430 // Update DFS numbers now so that we can use them for ordering. 7431 DT->updateDFSNumbers(); 7432 7433 // Scan the blocks in the function in post order. 7434 for (auto BB : post_order(&F.getEntryBlock())) { 7435 collectSeedInstructions(BB); 7436 7437 // Vectorize trees that end at stores. 7438 if (!Stores.empty()) { 7439 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() 7440 << " underlying objects.\n"); 7441 Changed |= vectorizeStoreChains(R); 7442 } 7443 7444 // Vectorize trees that end at reductions. 7445 Changed |= vectorizeChainsInBlock(BB, R); 7446 7447 // Vectorize the index computations of getelementptr instructions. This 7448 // is primarily intended to catch gather-like idioms ending at 7449 // non-consecutive loads. 7450 if (!GEPs.empty()) { 7451 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() 7452 << " underlying objects.\n"); 7453 Changed |= vectorizeGEPIndices(BB, R); 7454 } 7455 } 7456 7457 if (Changed) { 7458 R.optimizeGatherSequence(); 7459 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 7460 } 7461 return Changed; 7462 } 7463 7464 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, 7465 unsigned Idx) { 7466 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size() 7467 << "\n"); 7468 const unsigned Sz = R.getVectorElementSize(Chain[0]); 7469 const unsigned MinVF = R.getMinVecRegSize() / Sz; 7470 unsigned VF = Chain.size(); 7471 7472 if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF) 7473 return false; 7474 7475 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx 7476 << "\n"); 7477 7478 R.buildTree(Chain); 7479 if (R.isTreeTinyAndNotFullyVectorizable()) 7480 return false; 7481 if (R.isLoadCombineCandidate()) 7482 return false; 7483 R.reorderTopToBottom(); 7484 R.reorderBottomToTop(); 7485 R.buildExternalUses(); 7486 7487 R.computeMinimumValueSizes(); 7488 7489 InstructionCost Cost = R.getTreeCost(); 7490 7491 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF =" << VF << "\n"); 7492 if (Cost < -SLPCostThreshold) { 7493 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n"); 7494 7495 using namespace ore; 7496 7497 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized", 7498 cast<StoreInst>(Chain[0])) 7499 << "Stores SLP vectorized with cost " << NV("Cost", Cost) 7500 << " and with tree size " 7501 << NV("TreeSize", R.getTreeSize())); 7502 7503 R.vectorizeTree(); 7504 return true; 7505 } 7506 7507 return false; 7508 } 7509 7510 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores, 7511 BoUpSLP &R) { 7512 // We may run into multiple chains that merge into a single chain. We mark the 7513 // stores that we vectorized so that we don't visit the same store twice. 
7514 BoUpSLP::ValueSet VectorizedStores; 7515 bool Changed = false; 7516 7517 int E = Stores.size(); 7518 SmallBitVector Tails(E, false); 7519 int MaxIter = MaxStoreLookup.getValue(); 7520 SmallVector<std::pair<int, int>, 16> ConsecutiveChain( 7521 E, std::make_pair(E, INT_MAX)); 7522 SmallVector<SmallBitVector, 4> CheckedPairs(E, SmallBitVector(E, false)); 7523 int IterCnt; 7524 auto &&FindConsecutiveAccess = [this, &Stores, &Tails, &IterCnt, MaxIter, 7525 &CheckedPairs, 7526 &ConsecutiveChain](int K, int Idx) { 7527 if (IterCnt >= MaxIter) 7528 return true; 7529 if (CheckedPairs[Idx].test(K)) 7530 return ConsecutiveChain[K].second == 1 && 7531 ConsecutiveChain[K].first == Idx; 7532 ++IterCnt; 7533 CheckedPairs[Idx].set(K); 7534 CheckedPairs[K].set(Idx); 7535 Optional<int> Diff = getPointersDiff( 7536 Stores[K]->getValueOperand()->getType(), Stores[K]->getPointerOperand(), 7537 Stores[Idx]->getValueOperand()->getType(), 7538 Stores[Idx]->getPointerOperand(), *DL, *SE, /*StrictCheck=*/true); 7539 if (!Diff || *Diff == 0) 7540 return false; 7541 int Val = *Diff; 7542 if (Val < 0) { 7543 if (ConsecutiveChain[Idx].second > -Val) { 7544 Tails.set(K); 7545 ConsecutiveChain[Idx] = std::make_pair(K, -Val); 7546 } 7547 return false; 7548 } 7549 if (ConsecutiveChain[K].second <= Val) 7550 return false; 7551 7552 Tails.set(Idx); 7553 ConsecutiveChain[K] = std::make_pair(Idx, Val); 7554 return Val == 1; 7555 }; 7556 // Do a quadratic search on all of the given stores in reverse order and find 7557 // all of the pairs of stores that follow each other. 7558 for (int Idx = E - 1; Idx >= 0; --Idx) { 7559 // If a store has multiple consecutive store candidates, search according 7560 // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ... 7561 // This is because usually pairing with immediate succeeding or preceding 7562 // candidate create the best chance to find slp vectorization opportunity. 7563 const int MaxLookDepth = std::max(E - Idx, Idx + 1); 7564 IterCnt = 0; 7565 for (int Offset = 1, F = MaxLookDepth; Offset < F; ++Offset) 7566 if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) || 7567 (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx))) 7568 break; 7569 } 7570 7571 // Tracks if we tried to vectorize stores starting from the given tail 7572 // already. 7573 SmallBitVector TriedTails(E, false); 7574 // For stores that start but don't end a link in the chain: 7575 for (int Cnt = E; Cnt > 0; --Cnt) { 7576 int I = Cnt - 1; 7577 if (ConsecutiveChain[I].first == E || Tails.test(I)) 7578 continue; 7579 // We found a store instr that starts a chain. Now follow the chain and try 7580 // to vectorize it. 7581 BoUpSLP::ValueList Operands; 7582 // Collect the chain into a list. 7583 while (I != E && !VectorizedStores.count(Stores[I])) { 7584 Operands.push_back(Stores[I]); 7585 Tails.set(I); 7586 if (ConsecutiveChain[I].second != 1) { 7587 // Mark the new end in the chain and go back, if required. It might be 7588 // required if the original stores come in reversed order, for example. 7589 if (ConsecutiveChain[I].first != E && 7590 Tails.test(ConsecutiveChain[I].first) && !TriedTails.test(I) && 7591 !VectorizedStores.count(Stores[ConsecutiveChain[I].first])) { 7592 TriedTails.set(I); 7593 Tails.reset(ConsecutiveChain[I].first); 7594 if (Cnt < ConsecutiveChain[I].first + 2) 7595 Cnt = ConsecutiveChain[I].first + 2; 7596 } 7597 break; 7598 } 7599 // Move to the next value in the chain. 
7600 I = ConsecutiveChain[I].first; 7601 } 7602 assert(!Operands.empty() && "Expected non-empty list of stores."); 7603 7604 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 7605 unsigned EltSize = R.getVectorElementSize(Operands[0]); 7606 unsigned MaxElts = llvm::PowerOf2Floor(MaxVecRegSize / EltSize); 7607 7608 unsigned MinVF = R.getMinVF(EltSize); 7609 unsigned MaxVF = std::min(R.getMaximumVF(EltSize, Instruction::Store), 7610 MaxElts); 7611 7612 // FIXME: Is division-by-2 the correct step? Should we assert that the 7613 // register size is a power-of-2? 7614 unsigned StartIdx = 0; 7615 for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) { 7616 for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) { 7617 ArrayRef<Value *> Slice = makeArrayRef(Operands).slice(Cnt, Size); 7618 if (!VectorizedStores.count(Slice.front()) && 7619 !VectorizedStores.count(Slice.back()) && 7620 vectorizeStoreChain(Slice, R, Cnt)) { 7621 // Mark the vectorized stores so that we don't vectorize them again. 7622 VectorizedStores.insert(Slice.begin(), Slice.end()); 7623 Changed = true; 7624 // If we vectorized initial block, no need to try to vectorize it 7625 // again. 7626 if (Cnt == StartIdx) 7627 StartIdx += Size; 7628 Cnt += Size; 7629 continue; 7630 } 7631 ++Cnt; 7632 } 7633 // Check if the whole array was vectorized already - exit. 7634 if (StartIdx >= Operands.size()) 7635 break; 7636 } 7637 } 7638 7639 return Changed; 7640 } 7641 7642 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) { 7643 // Initialize the collections. We will make a single pass over the block. 7644 Stores.clear(); 7645 GEPs.clear(); 7646 7647 // Visit the store and getelementptr instructions in BB and organize them in 7648 // Stores and GEPs according to the underlying objects of their pointer 7649 // operands. 7650 for (Instruction &I : *BB) { 7651 // Ignore store instructions that are volatile or have a pointer operand 7652 // that doesn't point to a scalar type. 7653 if (auto *SI = dyn_cast<StoreInst>(&I)) { 7654 if (!SI->isSimple()) 7655 continue; 7656 if (!isValidElementType(SI->getValueOperand()->getType())) 7657 continue; 7658 Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI); 7659 } 7660 7661 // Ignore getelementptr instructions that have more than one index, a 7662 // constant index, or a pointer operand that doesn't point to a scalar 7663 // type. 7664 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 7665 auto Idx = GEP->idx_begin()->get(); 7666 if (GEP->getNumIndices() > 1 || isa<Constant>(Idx)) 7667 continue; 7668 if (!isValidElementType(Idx->getType())) 7669 continue; 7670 if (GEP->getType()->isVectorTy()) 7671 continue; 7672 GEPs[GEP->getPointerOperand()].push_back(GEP); 7673 } 7674 } 7675 } 7676 7677 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) { 7678 if (!A || !B) 7679 return false; 7680 Value *VL[] = {A, B}; 7681 return tryToVectorizeList(VL, R); 7682 } 7683 7684 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 7685 bool LimitForRegisterSize) { 7686 if (VL.size() < 2) 7687 return false; 7688 7689 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " 7690 << VL.size() << ".\n"); 7691 7692 // Check that all of the parts are instructions of the same type, 7693 // we permit an alternate opcode via InstructionsState. 
7694 InstructionsState S = getSameOpcode(VL); 7695 if (!S.getOpcode()) 7696 return false; 7697 7698 Instruction *I0 = cast<Instruction>(S.OpValue); 7699 // Make sure invalid types (including vector type) are rejected before 7700 // determining vectorization factor for scalar instructions. 7701 for (Value *V : VL) { 7702 Type *Ty = V->getType(); 7703 if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) { 7704 // NOTE: the following will give user internal llvm type name, which may 7705 // not be useful. 7706 R.getORE()->emit([&]() { 7707 std::string type_str; 7708 llvm::raw_string_ostream rso(type_str); 7709 Ty->print(rso); 7710 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0) 7711 << "Cannot SLP vectorize list: type " 7712 << rso.str() + " is unsupported by vectorizer"; 7713 }); 7714 return false; 7715 } 7716 } 7717 7718 unsigned Sz = R.getVectorElementSize(I0); 7719 unsigned MinVF = R.getMinVF(Sz); 7720 unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF); 7721 MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF); 7722 if (MaxVF < 2) { 7723 R.getORE()->emit([&]() { 7724 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0) 7725 << "Cannot SLP vectorize list: vectorization factor " 7726 << "less than 2 is not supported"; 7727 }); 7728 return false; 7729 } 7730 7731 bool Changed = false; 7732 bool CandidateFound = false; 7733 InstructionCost MinCost = SLPCostThreshold.getValue(); 7734 Type *ScalarTy = VL[0]->getType(); 7735 if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 7736 ScalarTy = IE->getOperand(1)->getType(); 7737 7738 unsigned NextInst = 0, MaxInst = VL.size(); 7739 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) { 7740 // No actual vectorization should happen, if number of parts is the same as 7741 // provided vectorization factor (i.e. the scalar type is used for vector 7742 // code during codegen). 7743 auto *VecTy = FixedVectorType::get(ScalarTy, VF); 7744 if (TTI->getNumberOfParts(VecTy) == VF) 7745 continue; 7746 for (unsigned I = NextInst; I < MaxInst; ++I) { 7747 unsigned OpsWidth = 0; 7748 7749 if (I + VF > MaxInst) 7750 OpsWidth = MaxInst - I; 7751 else 7752 OpsWidth = VF; 7753 7754 if (!isPowerOf2_32(OpsWidth)) 7755 continue; 7756 7757 if ((LimitForRegisterSize && OpsWidth < MaxVF) || 7758 (VF > MinVF && OpsWidth <= VF / 2) || (VF == MinVF && OpsWidth < 2)) 7759 break; 7760 7761 ArrayRef<Value *> Ops = VL.slice(I, OpsWidth); 7762 // Check that a previous iteration of this loop did not delete the Value. 7763 if (llvm::any_of(Ops, [&R](Value *V) { 7764 auto *I = dyn_cast<Instruction>(V); 7765 return I && R.isDeleted(I); 7766 })) 7767 continue; 7768 7769 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations " 7770 << "\n"); 7771 7772 R.buildTree(Ops); 7773 if (R.isTreeTinyAndNotFullyVectorizable()) 7774 continue; 7775 R.reorderTopToBottom(); 7776 R.reorderBottomToTop(); 7777 R.buildExternalUses(); 7778 7779 R.computeMinimumValueSizes(); 7780 InstructionCost Cost = R.getTreeCost(); 7781 CandidateFound = true; 7782 MinCost = std::min(MinCost, Cost); 7783 7784 if (Cost < -SLPCostThreshold) { 7785 LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); 7786 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList", 7787 cast<Instruction>(Ops[0])) 7788 << "SLP vectorized with cost " << ore::NV("Cost", Cost) 7789 << " and with tree size " 7790 << ore::NV("TreeSize", R.getTreeSize())); 7791 7792 R.vectorizeTree(); 7793 // Move to the next bundle. 
7794 I += VF - 1; 7795 NextInst = I + 1; 7796 Changed = true; 7797 } 7798 } 7799 } 7800 7801 if (!Changed && CandidateFound) { 7802 R.getORE()->emit([&]() { 7803 return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0) 7804 << "List vectorization was possible but not beneficial with cost " 7805 << ore::NV("Cost", MinCost) << " >= " 7806 << ore::NV("Treshold", -SLPCostThreshold); 7807 }); 7808 } else if (!Changed) { 7809 R.getORE()->emit([&]() { 7810 return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0) 7811 << "Cannot SLP vectorize list: vectorization was impossible" 7812 << " with available vectorization factors"; 7813 }); 7814 } 7815 return Changed; 7816 } 7817 7818 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) { 7819 if (!I) 7820 return false; 7821 7822 if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I)) 7823 return false; 7824 7825 Value *P = I->getParent(); 7826 7827 // Vectorize in current basic block only. 7828 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); 7829 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); 7830 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P) 7831 return false; 7832 7833 // Try to vectorize V. 7834 if (tryToVectorizePair(Op0, Op1, R)) 7835 return true; 7836 7837 auto *A = dyn_cast<BinaryOperator>(Op0); 7838 auto *B = dyn_cast<BinaryOperator>(Op1); 7839 // Try to skip B. 7840 if (B && B->hasOneUse()) { 7841 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); 7842 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); 7843 if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R)) 7844 return true; 7845 if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R)) 7846 return true; 7847 } 7848 7849 // Try to skip A. 7850 if (A && A->hasOneUse()) { 7851 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 7852 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 7853 if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R)) 7854 return true; 7855 if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R)) 7856 return true; 7857 } 7858 return false; 7859 } 7860 7861 namespace { 7862 7863 /// Model horizontal reductions. 7864 /// 7865 /// A horizontal reduction is a tree of reduction instructions that has values 7866 /// that can be put into a vector as its leaves. For example: 7867 /// 7868 /// mul mul mul mul 7869 /// \ / \ / 7870 /// + + 7871 /// \ / 7872 /// + 7873 /// This tree has "mul" as its leaf values and "+" as its reduction 7874 /// instructions. A reduction can feed into a store or a binary operation 7875 /// feeding a phi. 7876 /// ... 7877 /// \ / 7878 /// + 7879 /// | 7880 /// phi += 7881 /// 7882 /// Or: 7883 /// ... 7884 /// \ / 7885 /// + 7886 /// | 7887 /// *p = 7888 /// 7889 class HorizontalReduction { 7890 using ReductionOpsType = SmallVector<Value *, 16>; 7891 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>; 7892 ReductionOpsListType ReductionOps; 7893 SmallVector<Value *, 32> ReducedVals; 7894 // Use map vector to make stable output. 7895 MapVector<Instruction *, Value *> ExtraArgs; 7896 WeakTrackingVH ReductionRoot; 7897 /// The type of reduction operation. 
7898 RecurKind RdxKind; 7899 7900 const unsigned INVALID_OPERAND_INDEX = std::numeric_limits<unsigned>::max(); 7901 7902 static bool isCmpSelMinMax(Instruction *I) { 7903 return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) && 7904 RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I)); 7905 } 7906 7907 // And/or are potentially poison-safe logical patterns like: 7908 // select x, y, false 7909 // select x, true, y 7910 static bool isBoolLogicOp(Instruction *I) { 7911 return match(I, m_LogicalAnd(m_Value(), m_Value())) || 7912 match(I, m_LogicalOr(m_Value(), m_Value())); 7913 } 7914 7915 /// Checks if instruction is associative and can be vectorized. 7916 static bool isVectorizable(RecurKind Kind, Instruction *I) { 7917 if (Kind == RecurKind::None) 7918 return false; 7919 7920 // Integer ops that map to select instructions or intrinsics are fine. 7921 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) || 7922 isBoolLogicOp(I)) 7923 return true; 7924 7925 if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) { 7926 // FP min/max are associative except for NaN and -0.0. We do not 7927 // have to rule out -0.0 here because the intrinsic semantics do not 7928 // specify a fixed result for it. 7929 return I->getFastMathFlags().noNaNs(); 7930 } 7931 7932 return I->isAssociative(); 7933 } 7934 7935 static Value *getRdxOperand(Instruction *I, unsigned Index) { 7936 // Poison-safe 'or' takes the form: select X, true, Y 7937 // To make that work with the normal operand processing, we skip the 7938 // true value operand. 7939 // TODO: Change the code and data structures to handle this without a hack. 7940 if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1) 7941 return I->getOperand(2); 7942 return I->getOperand(Index); 7943 } 7944 7945 /// Checks if the ParentStackElem.first should be marked as a reduction 7946 /// operation with an extra argument or as extra argument itself. 7947 void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem, 7948 Value *ExtraArg) { 7949 if (ExtraArgs.count(ParentStackElem.first)) { 7950 ExtraArgs[ParentStackElem.first] = nullptr; 7951 // We ran into something like: 7952 // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg. 7953 // The whole ParentStackElem.first should be considered as an extra value 7954 // in this case. 7955 // Do not perform analysis of remaining operands of ParentStackElem.first 7956 // instruction, this whole instruction is an extra argument. 7957 ParentStackElem.second = INVALID_OPERAND_INDEX; 7958 } else { 7959 // We ran into something like: 7960 // ParentStackElem.first += ... + ExtraArg + ... 7961 ExtraArgs[ParentStackElem.first] = ExtraArg; 7962 } 7963 } 7964 7965 /// Creates reduction operation with the current opcode. 
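/// If \p UseSelect is true, the integer min/max kinds are emitted as an
/// explicit compare + select pair instead of a min/max intrinsic.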
7966 static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS, 7967 Value *RHS, const Twine &Name, bool UseSelect) { 7968 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind); 7969 switch (Kind) { 7970 case RecurKind::Add: 7971 case RecurKind::Mul: 7972 case RecurKind::Or: 7973 case RecurKind::And: 7974 case RecurKind::Xor: 7975 case RecurKind::FAdd: 7976 case RecurKind::FMul: 7977 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 7978 Name); 7979 case RecurKind::FMax: 7980 return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS); 7981 case RecurKind::FMin: 7982 return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS); 7983 case RecurKind::SMax: 7984 if (UseSelect) { 7985 Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name); 7986 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 7987 } 7988 return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS); 7989 case RecurKind::SMin: 7990 if (UseSelect) { 7991 Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name); 7992 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 7993 } 7994 return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS); 7995 case RecurKind::UMax: 7996 if (UseSelect) { 7997 Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name); 7998 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 7999 } 8000 return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS); 8001 case RecurKind::UMin: 8002 if (UseSelect) { 8003 Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name); 8004 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 8005 } 8006 return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS); 8007 default: 8008 llvm_unreachable("Unknown reduction operation."); 8009 } 8010 } 8011 8012 /// Creates reduction operation with the current opcode with the IR flags 8013 /// from \p ReductionOps. 8014 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 8015 Value *RHS, const Twine &Name, 8016 const ReductionOpsListType &ReductionOps) { 8017 bool UseSelect = ReductionOps.size() == 2; 8018 assert((!UseSelect || isa<SelectInst>(ReductionOps[1][0])) && 8019 "Expected cmp + select pairs for reduction"); 8020 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect); 8021 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 8022 if (auto *Sel = dyn_cast<SelectInst>(Op)) { 8023 propagateIRFlags(Sel->getCondition(), ReductionOps[0]); 8024 propagateIRFlags(Op, ReductionOps[1]); 8025 return Op; 8026 } 8027 } 8028 propagateIRFlags(Op, ReductionOps[0]); 8029 return Op; 8030 } 8031 8032 /// Creates reduction operation with the current opcode with the IR flags 8033 /// from \p I. 
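/// If \p I is a select and the reduction is an integer min/max, the IR flags
/// of \p I's compare condition are also propagated to the newly created
/// compare.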
8034 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 8035 Value *RHS, const Twine &Name, Instruction *I) { 8036 auto *SelI = dyn_cast<SelectInst>(I); 8037 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, SelI != nullptr); 8038 if (SelI && RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 8039 if (auto *Sel = dyn_cast<SelectInst>(Op)) 8040 propagateIRFlags(Sel->getCondition(), SelI->getCondition()); 8041 } 8042 propagateIRFlags(Op, I); 8043 return Op; 8044 } 8045 8046 static RecurKind getRdxKind(Instruction *I) { 8047 assert(I && "Expected instruction for reduction matching"); 8048 TargetTransformInfo::ReductionFlags RdxFlags; 8049 if (match(I, m_Add(m_Value(), m_Value()))) 8050 return RecurKind::Add; 8051 if (match(I, m_Mul(m_Value(), m_Value()))) 8052 return RecurKind::Mul; 8053 if (match(I, m_And(m_Value(), m_Value())) || 8054 match(I, m_LogicalAnd(m_Value(), m_Value()))) 8055 return RecurKind::And; 8056 if (match(I, m_Or(m_Value(), m_Value())) || 8057 match(I, m_LogicalOr(m_Value(), m_Value()))) 8058 return RecurKind::Or; 8059 if (match(I, m_Xor(m_Value(), m_Value()))) 8060 return RecurKind::Xor; 8061 if (match(I, m_FAdd(m_Value(), m_Value()))) 8062 return RecurKind::FAdd; 8063 if (match(I, m_FMul(m_Value(), m_Value()))) 8064 return RecurKind::FMul; 8065 8066 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value()))) 8067 return RecurKind::FMax; 8068 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value()))) 8069 return RecurKind::FMin; 8070 8071 // This matches either cmp+select or intrinsics. SLP is expected to handle 8072 // either form. 8073 // TODO: If we are canonicalizing to intrinsics, we can remove several 8074 // special-case paths that deal with selects. 8075 if (match(I, m_SMax(m_Value(), m_Value()))) 8076 return RecurKind::SMax; 8077 if (match(I, m_SMin(m_Value(), m_Value()))) 8078 return RecurKind::SMin; 8079 if (match(I, m_UMax(m_Value(), m_Value()))) 8080 return RecurKind::UMax; 8081 if (match(I, m_UMin(m_Value(), m_Value()))) 8082 return RecurKind::UMin; 8083 8084 if (auto *Select = dyn_cast<SelectInst>(I)) { 8085 // Try harder: look for min/max pattern based on instructions producing 8086 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2). 8087 // During the intermediate stages of SLP, it's very common to have 8088 // pattern like this (since optimizeGatherSequence is run only once 8089 // at the end): 8090 // %1 = extractelement <2 x i32> %a, i32 0 8091 // %2 = extractelement <2 x i32> %a, i32 1 8092 // %cond = icmp sgt i32 %1, %2 8093 // %3 = extractelement <2 x i32> %a, i32 0 8094 // %4 = extractelement <2 x i32> %a, i32 1 8095 // %select = select i1 %cond, i32 %3, i32 %4 8096 CmpInst::Predicate Pred; 8097 Instruction *L1; 8098 Instruction *L2; 8099 8100 Value *LHS = Select->getTrueValue(); 8101 Value *RHS = Select->getFalseValue(); 8102 Value *Cond = Select->getCondition(); 8103 8104 // TODO: Support inverse predicates. 
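// The compare may use extractelements that are distinct from, but identical
// to, the ones feeding the select, so operands are matched with
// isIdenticalTo() rather than by pointer equality.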
8105 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) { 8106 if (!isa<ExtractElementInst>(RHS) || 8107 !L2->isIdenticalTo(cast<Instruction>(RHS))) 8108 return RecurKind::None; 8109 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) { 8110 if (!isa<ExtractElementInst>(LHS) || 8111 !L1->isIdenticalTo(cast<Instruction>(LHS))) 8112 return RecurKind::None; 8113 } else { 8114 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS)) 8115 return RecurKind::None; 8116 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) || 8117 !L1->isIdenticalTo(cast<Instruction>(LHS)) || 8118 !L2->isIdenticalTo(cast<Instruction>(RHS))) 8119 return RecurKind::None; 8120 } 8121 8122 TargetTransformInfo::ReductionFlags RdxFlags; 8123 switch (Pred) { 8124 default: 8125 return RecurKind::None; 8126 case CmpInst::ICMP_SGT: 8127 case CmpInst::ICMP_SGE: 8128 return RecurKind::SMax; 8129 case CmpInst::ICMP_SLT: 8130 case CmpInst::ICMP_SLE: 8131 return RecurKind::SMin; 8132 case CmpInst::ICMP_UGT: 8133 case CmpInst::ICMP_UGE: 8134 return RecurKind::UMax; 8135 case CmpInst::ICMP_ULT: 8136 case CmpInst::ICMP_ULE: 8137 return RecurKind::UMin; 8138 } 8139 } 8140 return RecurKind::None; 8141 } 8142 8143 /// Get the index of the first operand. 8144 static unsigned getFirstOperandIndex(Instruction *I) { 8145 return isCmpSelMinMax(I) ? 1 : 0; 8146 } 8147 8148 /// Total number of operands in the reduction operation. 8149 static unsigned getNumberOfOperands(Instruction *I) { 8150 return isCmpSelMinMax(I) ? 3 : 2; 8151 } 8152 8153 /// Checks if the instruction is in basic block \p BB. 8154 /// For a cmp+sel min/max reduction check that both ops are in \p BB. 8155 static bool hasSameParent(Instruction *I, BasicBlock *BB) { 8156 if (isCmpSelMinMax(I)) { 8157 auto *Sel = cast<SelectInst>(I); 8158 auto *Cmp = cast<Instruction>(Sel->getCondition()); 8159 return Sel->getParent() == BB && Cmp->getParent() == BB; 8160 } 8161 return I->getParent() == BB; 8162 } 8163 8164 /// Expected number of uses for reduction operations/reduced values. 8165 static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) { 8166 if (IsCmpSelMinMax) { 8167 // SelectInst must be used twice while the condition op must have single 8168 // use only. 8169 if (auto *Sel = dyn_cast<SelectInst>(I)) 8170 return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse(); 8171 return I->hasNUses(2); 8172 } 8173 8174 // Arithmetic reduction operation must be used once only. 8175 return I->hasOneUse(); 8176 } 8177 8178 /// Initializes the list of reduction operations. 8179 void initReductionOps(Instruction *I) { 8180 if (isCmpSelMinMax(I)) 8181 ReductionOps.assign(2, ReductionOpsType()); 8182 else 8183 ReductionOps.assign(1, ReductionOpsType()); 8184 } 8185 8186 /// Add all reduction operations for the reduction instruction \p I. 
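/// For a cmp + select min/max, the compare condition is added to the first
/// list and the select itself to the second one.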
8187 void addReductionOps(Instruction *I) { 8188 if (isCmpSelMinMax(I)) { 8189 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition()); 8190 ReductionOps[1].emplace_back(I); 8191 } else { 8192 ReductionOps[0].emplace_back(I); 8193 } 8194 } 8195 8196 static Value *getLHS(RecurKind Kind, Instruction *I) { 8197 if (Kind == RecurKind::None) 8198 return nullptr; 8199 return I->getOperand(getFirstOperandIndex(I)); 8200 } 8201 static Value *getRHS(RecurKind Kind, Instruction *I) { 8202 if (Kind == RecurKind::None) 8203 return nullptr; 8204 return I->getOperand(getFirstOperandIndex(I) + 1); 8205 } 8206 8207 public: 8208 HorizontalReduction() = default; 8209 8210 /// Try to find a reduction tree. 8211 bool matchAssociativeReduction(PHINode *Phi, Instruction *Inst) { 8212 assert((!Phi || is_contained(Phi->operands(), Inst)) && 8213 "Phi needs to use the binary operator"); 8214 assert((isa<BinaryOperator>(Inst) || isa<SelectInst>(Inst) || 8215 isa<IntrinsicInst>(Inst)) && 8216 "Expected binop, select, or intrinsic for reduction matching"); 8217 RdxKind = getRdxKind(Inst); 8218 8219 // We could have a initial reductions that is not an add. 8220 // r *= v1 + v2 + v3 + v4 8221 // In such a case start looking for a tree rooted in the first '+'. 8222 if (Phi) { 8223 if (getLHS(RdxKind, Inst) == Phi) { 8224 Phi = nullptr; 8225 Inst = dyn_cast<Instruction>(getRHS(RdxKind, Inst)); 8226 if (!Inst) 8227 return false; 8228 RdxKind = getRdxKind(Inst); 8229 } else if (getRHS(RdxKind, Inst) == Phi) { 8230 Phi = nullptr; 8231 Inst = dyn_cast<Instruction>(getLHS(RdxKind, Inst)); 8232 if (!Inst) 8233 return false; 8234 RdxKind = getRdxKind(Inst); 8235 } 8236 } 8237 8238 if (!isVectorizable(RdxKind, Inst)) 8239 return false; 8240 8241 // Analyze "regular" integer/FP types for reductions - no target-specific 8242 // types or pointers. 8243 Type *Ty = Inst->getType(); 8244 if (!isValidElementType(Ty) || Ty->isPointerTy()) 8245 return false; 8246 8247 // Though the ultimate reduction may have multiple uses, its condition must 8248 // have only single use. 8249 if (auto *Sel = dyn_cast<SelectInst>(Inst)) 8250 if (!Sel->getCondition()->hasOneUse()) 8251 return false; 8252 8253 ReductionRoot = Inst; 8254 8255 // The opcode for leaf values that we perform a reduction on. 8256 // For example: load(x) + load(y) + load(z) + fptoui(w) 8257 // The leaf opcode for 'w' does not match, so we don't include it as a 8258 // potential candidate for the reduction. 8259 unsigned LeafOpcode = 0; 8260 8261 // Post-order traverse the reduction tree starting at Inst. We only handle 8262 // true trees containing binary operators or selects. 8263 SmallVector<std::pair<Instruction *, unsigned>, 32> Stack; 8264 Stack.push_back(std::make_pair(Inst, getFirstOperandIndex(Inst))); 8265 initReductionOps(Inst); 8266 while (!Stack.empty()) { 8267 Instruction *TreeN = Stack.back().first; 8268 unsigned EdgeToVisit = Stack.back().second++; 8269 const RecurKind TreeRdxKind = getRdxKind(TreeN); 8270 bool IsReducedValue = TreeRdxKind != RdxKind; 8271 8272 // Postorder visit. 8273 if (IsReducedValue || EdgeToVisit >= getNumberOfOperands(TreeN)) { 8274 if (IsReducedValue) 8275 ReducedVals.push_back(TreeN); 8276 else { 8277 auto ExtraArgsIter = ExtraArgs.find(TreeN); 8278 if (ExtraArgsIter != ExtraArgs.end() && !ExtraArgsIter->second) { 8279 // Check if TreeN is an extra argument of its parent operation. 8280 if (Stack.size() <= 1) { 8281 // TreeN can't be an extra argument as it is a root reduction 8282 // operation. 
8283 return false; 8284 } 8285 // Yes, TreeN is an extra argument, do not add it to a list of 8286 // reduction operations. 8287 // Stack[Stack.size() - 2] always points to the parent operation. 8288 markExtraArg(Stack[Stack.size() - 2], TreeN); 8289 ExtraArgs.erase(TreeN); 8290 } else 8291 addReductionOps(TreeN); 8292 } 8293 // Retract. 8294 Stack.pop_back(); 8295 continue; 8296 } 8297 8298 // Visit operands. 8299 Value *EdgeVal = getRdxOperand(TreeN, EdgeToVisit); 8300 auto *EdgeInst = dyn_cast<Instruction>(EdgeVal); 8301 if (!EdgeInst) { 8302 // Edge value is not a reduction instruction or a leaf instruction. 8303 // (It may be a constant, function argument, or something else.) 8304 markExtraArg(Stack.back(), EdgeVal); 8305 continue; 8306 } 8307 RecurKind EdgeRdxKind = getRdxKind(EdgeInst); 8308 // Continue analysis if the next operand is a reduction operation or 8309 // (possibly) a leaf value. If the leaf value opcode is not set, 8310 // the first met operation != reduction operation is considered as the 8311 // leaf opcode. 8312 // Only handle trees in the current basic block. 8313 // Each tree node needs to have minimal number of users except for the 8314 // ultimate reduction. 8315 const bool IsRdxInst = EdgeRdxKind == RdxKind; 8316 if (EdgeInst != Phi && EdgeInst != Inst && 8317 hasSameParent(EdgeInst, Inst->getParent()) && 8318 hasRequiredNumberOfUses(isCmpSelMinMax(Inst), EdgeInst) && 8319 (!LeafOpcode || LeafOpcode == EdgeInst->getOpcode() || IsRdxInst)) { 8320 if (IsRdxInst) { 8321 // We need to be able to reassociate the reduction operations. 8322 if (!isVectorizable(EdgeRdxKind, EdgeInst)) { 8323 // I is an extra argument for TreeN (its parent operation). 8324 markExtraArg(Stack.back(), EdgeInst); 8325 continue; 8326 } 8327 } else if (!LeafOpcode) { 8328 LeafOpcode = EdgeInst->getOpcode(); 8329 } 8330 Stack.push_back( 8331 std::make_pair(EdgeInst, getFirstOperandIndex(EdgeInst))); 8332 continue; 8333 } 8334 // I is an extra argument for TreeN (its parent operation). 8335 markExtraArg(Stack.back(), EdgeInst); 8336 } 8337 return true; 8338 } 8339 8340 /// Attempt to vectorize the tree found by matchAssociativeReduction. 8341 bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) { 8342 // If there are a sufficient number of reduction values, reduce 8343 // to a nearby power-of-2. We can safely generate oversized 8344 // vectors and rely on the backend to split them to legal sizes. 8345 unsigned NumReducedVals = ReducedVals.size(); 8346 if (NumReducedVals < 4) 8347 return false; 8348 8349 // Intersect the fast-math-flags from all reduction operations. 8350 FastMathFlags RdxFMF; 8351 RdxFMF.set(); 8352 for (ReductionOpsType &RdxOp : ReductionOps) { 8353 for (Value *RdxVal : RdxOp) { 8354 if (auto *FPMO = dyn_cast<FPMathOperator>(RdxVal)) 8355 RdxFMF &= FPMO->getFastMathFlags(); 8356 } 8357 } 8358 8359 IRBuilder<> Builder(cast<Instruction>(ReductionRoot)); 8360 Builder.setFastMathFlags(RdxFMF); 8361 8362 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues; 8363 // The same extra argument may be used several times, so log each attempt 8364 // to use it. 8365 for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) { 8366 assert(Pair.first && "DebugLoc must be set."); 8367 ExternallyUsedValues[Pair.second].push_back(Pair.first); 8368 } 8369 8370 // The compare instruction of a min/max is the insertion point for new 8371 // instructions and may be replaced with a new compare instruction. 
8372 auto getCmpForMinMaxReduction = [](Instruction *RdxRootInst) { 8373 assert(isa<SelectInst>(RdxRootInst) && 8374 "Expected min/max reduction to have select root instruction"); 8375 Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition(); 8376 assert(isa<Instruction>(ScalarCond) && 8377 "Expected min/max reduction to have compare condition"); 8378 return cast<Instruction>(ScalarCond); 8379 }; 8380 8381 // The reduction root is used as the insertion point for new instructions, 8382 // so set it as externally used to prevent it from being deleted. 8383 ExternallyUsedValues[ReductionRoot]; 8384 SmallVector<Value *, 16> IgnoreList; 8385 for (ReductionOpsType &RdxOp : ReductionOps) 8386 IgnoreList.append(RdxOp.begin(), RdxOp.end()); 8387 8388 unsigned ReduxWidth = PowerOf2Floor(NumReducedVals); 8389 if (NumReducedVals > ReduxWidth) { 8390 // In the loop below, we are building a tree based on a window of 8391 // 'ReduxWidth' values. 8392 // If the operands of those values have common traits (compare predicate, 8393 // constant operand, etc), then we want to group those together to 8394 // minimize the cost of the reduction. 8395 8396 // TODO: This should be extended to count common operands for 8397 // compares and binops. 8398 8399 // Step 1: Count the number of times each compare predicate occurs. 8400 SmallDenseMap<unsigned, unsigned> PredCountMap; 8401 for (Value *RdxVal : ReducedVals) { 8402 CmpInst::Predicate Pred; 8403 if (match(RdxVal, m_Cmp(Pred, m_Value(), m_Value()))) 8404 ++PredCountMap[Pred]; 8405 } 8406 // Step 2: Sort the values so the most common predicates come first. 8407 stable_sort(ReducedVals, [&PredCountMap](Value *A, Value *B) { 8408 CmpInst::Predicate PredA, PredB; 8409 if (match(A, m_Cmp(PredA, m_Value(), m_Value())) && 8410 match(B, m_Cmp(PredB, m_Value(), m_Value()))) { 8411 return PredCountMap[PredA] > PredCountMap[PredB]; 8412 } 8413 return false; 8414 }); 8415 } 8416 8417 Value *VectorizedTree = nullptr; 8418 unsigned i = 0; 8419 while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) { 8420 ArrayRef<Value *> VL(&ReducedVals[i], ReduxWidth); 8421 V.buildTree(VL, IgnoreList); 8422 if (V.isTreeTinyAndNotFullyVectorizable()) 8423 break; 8424 if (V.isLoadCombineReductionCandidate(RdxKind)) 8425 break; 8426 V.reorderTopToBottom(); 8427 V.reorderBottomToTop(); 8428 V.buildExternalUses(ExternallyUsedValues); 8429 8430 // For a poison-safe boolean logic reduction, do not replace select 8431 // instructions with logic ops. All reduced values will be frozen (see 8432 // below) to prevent leaking poison. 8433 if (isa<SelectInst>(ReductionRoot) && 8434 isBoolLogicOp(cast<Instruction>(ReductionRoot)) && 8435 NumReducedVals != ReduxWidth) 8436 break; 8437 8438 V.computeMinimumValueSizes(); 8439 8440 // Estimate cost. 
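// The candidate cost is the SLP tree cost of the reduced values plus the
// cost of the horizontal reduction itself, both measured relative to the
// scalar code they replace.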
8441 InstructionCost TreeCost =
8442 V.getTreeCost(makeArrayRef(&ReducedVals[i], ReduxWidth));
8443 InstructionCost ReductionCost =
8444 getReductionCost(TTI, ReducedVals[i], ReduxWidth, RdxFMF);
8445 InstructionCost Cost = TreeCost + ReductionCost;
8446 if (!Cost.isValid()) {
8447 LLVM_DEBUG(dbgs() << "Encountered invalid baseline cost.\n");
8448 return false;
8449 }
8450 if (Cost >= -SLPCostThreshold) {
8451 V.getORE()->emit([&]() {
8452 return OptimizationRemarkMissed(SV_NAME, "HorSLPNotBeneficial",
8453 cast<Instruction>(VL[0]))
8454 << "Vectorizing horizontal reduction is possible "
8455 << "but not beneficial with cost " << ore::NV("Cost", Cost)
8456 << " and threshold "
8457 << ore::NV("Threshold", -SLPCostThreshold);
8458 });
8459 break;
8460 }
8461
8462 LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
8463 << Cost << ". (HorRdx)\n");
8464 V.getORE()->emit([&]() {
8465 return OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction",
8466 cast<Instruction>(VL[0]))
8467 << "Vectorized horizontal reduction with cost "
8468 << ore::NV("Cost", Cost) << " and with tree size "
8469 << ore::NV("TreeSize", V.getTreeSize());
8470 });
8471
8472 // Vectorize a tree.
8473 DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
8474 Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);
8475
8476 // Emit a reduction. If the root is a select (min/max idiom), the insert
8477 // point is the compare condition of that select.
8478 Instruction *RdxRootInst = cast<Instruction>(ReductionRoot);
8479 if (isCmpSelMinMax(RdxRootInst))
8480 Builder.SetInsertPoint(getCmpForMinMaxReduction(RdxRootInst));
8481 else
8482 Builder.SetInsertPoint(RdxRootInst);
8483
8484 // To prevent poison from leaking across what used to be sequential, safe,
8485 // scalar boolean logic operations, the reduction operand must be frozen.
8486 if (isa<SelectInst>(RdxRootInst) && isBoolLogicOp(RdxRootInst))
8487 VectorizedRoot = Builder.CreateFreeze(VectorizedRoot);
8488
8489 Value *ReducedSubTree =
8490 emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);
8491
8492 if (!VectorizedTree) {
8493 // Initialize the final value in the reduction.
8494 VectorizedTree = ReducedSubTree;
8495 } else {
8496 // Update the final value in the reduction.
8497 Builder.SetCurrentDebugLocation(Loc);
8498 VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
8499 ReducedSubTree, "op.rdx", ReductionOps);
8500 }
8501 i += ReduxWidth;
8502 ReduxWidth = PowerOf2Floor(NumReducedVals - i);
8503 }
8504
8505 if (VectorizedTree) {
8506 // Finish the reduction.
8507 for (; i < NumReducedVals; ++i) {
8508 auto *I = cast<Instruction>(ReducedVals[i]);
8509 Builder.SetCurrentDebugLocation(I->getDebugLoc());
8510 VectorizedTree =
8511 createOp(Builder, RdxKind, VectorizedTree, I, "", ReductionOps);
8512 }
8513 for (auto &Pair : ExternallyUsedValues) {
8514 // Add each externally used value to the final reduction.
8515 for (auto *I : Pair.second) {
8516 Builder.SetCurrentDebugLocation(I->getDebugLoc());
8517 VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
8518 Pair.first, "op.extra", I);
8519 }
8520 }
8521
8522 ReductionRoot->replaceAllUsesWith(VectorizedTree);
8523
8524 // Mark all scalar reduction ops for deletion; they are replaced by the
8525 // vector reductions.
8526 V.eraseInstructions(IgnoreList);
8527 }
8528 return VectorizedTree != nullptr;
8529 }
8530
8531 unsigned numReductionValues() const { return ReducedVals.size(); }
8532
8533 private:
8534 /// Calculate the cost of a reduction.
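/// Returns the cost of the vectorized reduction minus the cost of the
/// (ReduxWidth - 1) scalar reduction operations it replaces, so a negative
/// result means the vector form is cheaper.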
8535 InstructionCost getReductionCost(TargetTransformInfo *TTI, 8536 Value *FirstReducedVal, unsigned ReduxWidth, 8537 FastMathFlags FMF) { 8538 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 8539 Type *ScalarTy = FirstReducedVal->getType(); 8540 FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth); 8541 InstructionCost VectorCost, ScalarCost; 8542 switch (RdxKind) { 8543 case RecurKind::Add: 8544 case RecurKind::Mul: 8545 case RecurKind::Or: 8546 case RecurKind::And: 8547 case RecurKind::Xor: 8548 case RecurKind::FAdd: 8549 case RecurKind::FMul: { 8550 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind); 8551 VectorCost = 8552 TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF, CostKind); 8553 ScalarCost = TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy, CostKind); 8554 break; 8555 } 8556 case RecurKind::FMax: 8557 case RecurKind::FMin: { 8558 auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy)); 8559 VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy, 8560 /*unsigned=*/false, CostKind); 8561 ScalarCost = 8562 TTI->getCmpSelInstrCost(Instruction::FCmp, ScalarTy) + 8563 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 8564 CmpInst::makeCmpResultType(ScalarTy)); 8565 break; 8566 } 8567 case RecurKind::SMax: 8568 case RecurKind::SMin: 8569 case RecurKind::UMax: 8570 case RecurKind::UMin: { 8571 auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy)); 8572 bool IsUnsigned = 8573 RdxKind == RecurKind::UMax || RdxKind == RecurKind::UMin; 8574 VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy, IsUnsigned, 8575 CostKind); 8576 ScalarCost = 8577 TTI->getCmpSelInstrCost(Instruction::ICmp, ScalarTy) + 8578 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 8579 CmpInst::makeCmpResultType(ScalarTy)); 8580 break; 8581 } 8582 default: 8583 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 8584 } 8585 8586 // Scalar cost is repeated for N-1 elements. 8587 ScalarCost *= (ReduxWidth - 1); 8588 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost 8589 << " for reduction that starts with " << *FirstReducedVal 8590 << " (It is a splitting reduction)\n"); 8591 return VectorCost - ScalarCost; 8592 } 8593 8594 /// Emit a horizontal reduction of the vectorized value. 
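/// Delegates to createSimpleTargetReduction() so the target can choose its
/// preferred lowering for the final reduction of \p VectorizedValue.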
8595 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 8596 unsigned ReduxWidth, const TargetTransformInfo *TTI) { 8597 assert(VectorizedValue && "Need to have a vectorized tree node"); 8598 assert(isPowerOf2_32(ReduxWidth) && 8599 "We only handle power-of-two reductions for now"); 8600 8601 return createSimpleTargetReduction(Builder, TTI, VectorizedValue, RdxKind, 8602 ReductionOps.back()); 8603 } 8604 }; 8605 8606 } // end anonymous namespace 8607 8608 static Optional<unsigned> getAggregateSize(Instruction *InsertInst) { 8609 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) 8610 return cast<FixedVectorType>(IE->getType())->getNumElements(); 8611 8612 unsigned AggregateSize = 1; 8613 auto *IV = cast<InsertValueInst>(InsertInst); 8614 Type *CurrentType = IV->getType(); 8615 do { 8616 if (auto *ST = dyn_cast<StructType>(CurrentType)) { 8617 for (auto *Elt : ST->elements()) 8618 if (Elt != ST->getElementType(0)) // check homogeneity 8619 return None; 8620 AggregateSize *= ST->getNumElements(); 8621 CurrentType = ST->getElementType(0); 8622 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) { 8623 AggregateSize *= AT->getNumElements(); 8624 CurrentType = AT->getElementType(); 8625 } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) { 8626 AggregateSize *= VT->getNumElements(); 8627 return AggregateSize; 8628 } else if (CurrentType->isSingleValueType()) { 8629 return AggregateSize; 8630 } else { 8631 return None; 8632 } 8633 } while (true); 8634 } 8635 8636 static bool findBuildAggregate_rec(Instruction *LastInsertInst, 8637 TargetTransformInfo *TTI, 8638 SmallVectorImpl<Value *> &BuildVectorOpds, 8639 SmallVectorImpl<Value *> &InsertElts, 8640 unsigned OperandOffset) { 8641 do { 8642 Value *InsertedOperand = LastInsertInst->getOperand(1); 8643 Optional<int> OperandIndex = getInsertIndex(LastInsertInst, OperandOffset); 8644 if (!OperandIndex) 8645 return false; 8646 if (isa<InsertElementInst>(InsertedOperand) || 8647 isa<InsertValueInst>(InsertedOperand)) { 8648 if (!findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI, 8649 BuildVectorOpds, InsertElts, *OperandIndex)) 8650 return false; 8651 } else { 8652 BuildVectorOpds[*OperandIndex] = InsertedOperand; 8653 InsertElts[*OperandIndex] = LastInsertInst; 8654 } 8655 LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0)); 8656 } while (LastInsertInst != nullptr && 8657 (isa<InsertValueInst>(LastInsertInst) || 8658 isa<InsertElementInst>(LastInsertInst)) && 8659 LastInsertInst->hasOneUse()); 8660 return true; 8661 } 8662 8663 /// Recognize construction of vectors like 8664 /// %ra = insertelement <4 x float> poison, float %s0, i32 0 8665 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 8666 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 8667 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 8668 /// starting from the last insertelement or insertvalue instruction. 8669 /// 8670 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>}, 8671 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on. 8672 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples. 8673 /// 8674 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type. 8675 /// 8676 /// \return true if it matches. 
8677 static bool findBuildAggregate(Instruction *LastInsertInst, 8678 TargetTransformInfo *TTI, 8679 SmallVectorImpl<Value *> &BuildVectorOpds, 8680 SmallVectorImpl<Value *> &InsertElts) { 8681 8682 assert((isa<InsertElementInst>(LastInsertInst) || 8683 isa<InsertValueInst>(LastInsertInst)) && 8684 "Expected insertelement or insertvalue instruction!"); 8685 8686 assert((BuildVectorOpds.empty() && InsertElts.empty()) && 8687 "Expected empty result vectors!"); 8688 8689 Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst); 8690 if (!AggregateSize) 8691 return false; 8692 BuildVectorOpds.resize(*AggregateSize); 8693 InsertElts.resize(*AggregateSize); 8694 8695 if (findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 8696 0)) { 8697 llvm::erase_value(BuildVectorOpds, nullptr); 8698 llvm::erase_value(InsertElts, nullptr); 8699 if (BuildVectorOpds.size() >= 2) 8700 return true; 8701 } 8702 8703 return false; 8704 } 8705 8706 /// Try and get a reduction value from a phi node. 8707 /// 8708 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 8709 /// if they come from either \p ParentBB or a containing loop latch. 8710 /// 8711 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 8712 /// if not possible. 8713 static Value *getReductionValue(const DominatorTree *DT, PHINode *P, 8714 BasicBlock *ParentBB, LoopInfo *LI) { 8715 // There are situations where the reduction value is not dominated by the 8716 // reduction phi. Vectorizing such cases has been reported to cause 8717 // miscompiles. See PR25787. 8718 auto DominatedReduxValue = [&](Value *R) { 8719 return isa<Instruction>(R) && 8720 DT->dominates(P->getParent(), cast<Instruction>(R)->getParent()); 8721 }; 8722 8723 Value *Rdx = nullptr; 8724 8725 // Return the incoming value if it comes from the same BB as the phi node. 8726 if (P->getIncomingBlock(0) == ParentBB) { 8727 Rdx = P->getIncomingValue(0); 8728 } else if (P->getIncomingBlock(1) == ParentBB) { 8729 Rdx = P->getIncomingValue(1); 8730 } 8731 8732 if (Rdx && DominatedReduxValue(Rdx)) 8733 return Rdx; 8734 8735 // Otherwise, check whether we have a loop latch to look at. 8736 Loop *BBL = LI->getLoopFor(ParentBB); 8737 if (!BBL) 8738 return nullptr; 8739 BasicBlock *BBLatch = BBL->getLoopLatch(); 8740 if (!BBLatch) 8741 return nullptr; 8742 8743 // There is a loop latch, return the incoming value if it comes from 8744 // that. This reduction pattern occasionally turns up. 
8745 if (P->getIncomingBlock(0) == BBLatch) {
8746 Rdx = P->getIncomingValue(0);
8747 } else if (P->getIncomingBlock(1) == BBLatch) {
8748 Rdx = P->getIncomingValue(1);
8749 }
8750
8751 if (Rdx && DominatedReduxValue(Rdx))
8752 return Rdx;
8753
8754 return nullptr;
8755 }
8756
8757 static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) {
8758 if (match(I, m_BinOp(m_Value(V0), m_Value(V1))))
8759 return true;
8760 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1))))
8761 return true;
8762 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1))))
8763 return true;
8764 if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1))))
8765 return true;
8766 if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1))))
8767 return true;
8768 if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1))))
8769 return true;
8770 if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1))))
8771 return true;
8772 return false;
8773 }
8774
8775 /// Attempt to reduce a horizontal reduction.
8776 /// If it is legal to match a horizontal reduction feeding the phi node \a P
8777 /// with reduction operators \a Root (or one of its operands) in a basic block
8778 /// \a BB, then check if it can be done. If a horizontal reduction is not found
8779 /// and the root instruction is a binary operation, vectorization of its
8780 /// operands is attempted.
8781 /// \returns true if a horizontal reduction was matched and reduced or operands
8782 /// of one of the binary instructions were vectorized.
8783 /// \returns false if a horizontal reduction was not matched (or not possible)
8784 /// or no vectorization of any binary operation feeding the \a Root instruction
8785 /// was performed.
8786 static bool tryToVectorizeHorReductionOrInstOperands(
8787 PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
8788 TargetTransformInfo *TTI,
8789 const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
8790 if (!ShouldVectorizeHor)
8791 return false;
8792
8793 if (!Root)
8794 return false;
8795
8796 if (Root->getParent() != BB || isa<PHINode>(Root))
8797 return false;
8798 // Start the analysis from the Root instruction. If a horizontal reduction is
8799 // found, try to vectorize it. If it is not a horizontal reduction, or
8800 // vectorization is not possible or not effective, and the currently analyzed
8801 // instruction is a binary operation, try to vectorize the operands, using
8802 // pre-order DFS traversal order. If the operands were not vectorized, repeat
8803 // the same procedure considering each operand as a possible root of the
8804 // horizontal reduction.
8805 // Interrupt the process if the Root instruction itself was vectorized or all
8806 // sub-trees no deeper than RecursionMaxDepth have been analyzed/vectorized.
8807 // Skip the analysis of CmpInsts. The compiler implements a post-analysis of
8808 // CmpInsts so we can skip extra attempts in
8809 // tryToVectorizeHorReductionOrInstOperands and save compile time.
8810 SmallVector<std::pair<Instruction *, unsigned>, 8> Stack(1, {Root, 0});
8811 SmallPtrSet<Value *, 8> VisitedInstrs;
8812 bool Res = false;
8813 while (!Stack.empty()) {
8814 Instruction *Inst;
8815 unsigned Level;
8816 std::tie(Inst, Level) = Stack.pop_back_val();
8817 // Do not try to analyze an instruction that has already been vectorized.
8818 // This may happen when we vectorize instruction operands on a previous
8819 // iteration while the stack was populated before that happened.
8820 if (R.isDeleted(Inst)) 8821 continue; 8822 Value *B0, *B1; 8823 bool IsBinop = matchRdxBop(Inst, B0, B1); 8824 bool IsSelect = match(Inst, m_Select(m_Value(), m_Value(), m_Value())); 8825 if (IsBinop || IsSelect) { 8826 HorizontalReduction HorRdx; 8827 if (HorRdx.matchAssociativeReduction(P, Inst)) { 8828 if (HorRdx.tryToReduce(R, TTI)) { 8829 Res = true; 8830 // Set P to nullptr to avoid re-analysis of phi node in 8831 // matchAssociativeReduction function unless this is the root node. 8832 P = nullptr; 8833 continue; 8834 } 8835 } 8836 if (P && IsBinop) { 8837 Inst = dyn_cast<Instruction>(B0); 8838 if (Inst == P) 8839 Inst = dyn_cast<Instruction>(B1); 8840 if (!Inst) { 8841 // Set P to nullptr to avoid re-analysis of phi node in 8842 // matchAssociativeReduction function unless this is the root node. 8843 P = nullptr; 8844 continue; 8845 } 8846 } 8847 } 8848 // Set P to nullptr to avoid re-analysis of phi node in 8849 // matchAssociativeReduction function unless this is the root node. 8850 P = nullptr; 8851 // Do not try to vectorize CmpInst operands, this is done separately. 8852 if (!isa<CmpInst>(Inst) && Vectorize(Inst, R)) { 8853 Res = true; 8854 continue; 8855 } 8856 8857 // Try to vectorize operands. 8858 // Continue analysis for the instruction from the same basic block only to 8859 // save compile time. 8860 if (++Level < RecursionMaxDepth) 8861 for (auto *Op : Inst->operand_values()) 8862 if (VisitedInstrs.insert(Op).second) 8863 if (auto *I = dyn_cast<Instruction>(Op)) 8864 // Do not try to vectorize CmpInst operands, this is done 8865 // separately. 8866 if (!isa<PHINode>(I) && !isa<CmpInst>(I) && !R.isDeleted(I) && 8867 I->getParent() == BB) 8868 Stack.emplace_back(I, Level); 8869 } 8870 return Res; 8871 } 8872 8873 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V, 8874 BasicBlock *BB, BoUpSLP &R, 8875 TargetTransformInfo *TTI) { 8876 auto *I = dyn_cast_or_null<Instruction>(V); 8877 if (!I) 8878 return false; 8879 8880 if (!isa<BinaryOperator>(I)) 8881 P = nullptr; 8882 // Try to match and vectorize a horizontal reduction. 8883 auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool { 8884 return tryToVectorize(I, R); 8885 }; 8886 return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI, 8887 ExtraVectorization); 8888 } 8889 8890 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI, 8891 BasicBlock *BB, BoUpSLP &R) { 8892 const DataLayout &DL = BB->getModule()->getDataLayout(); 8893 if (!R.canMapToVector(IVI->getType(), DL)) 8894 return false; 8895 8896 SmallVector<Value *, 16> BuildVectorOpds; 8897 SmallVector<Value *, 16> BuildVectorInsts; 8898 if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts)) 8899 return false; 8900 8901 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n"); 8902 // Aggregate value is unlikely to be processed in vector register, we need to 8903 // extract scalars into scalar registers, so NeedExtraction is set true. 
8904 return tryToVectorizeList(BuildVectorOpds, R);
8905 }
8906
8907 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
8908 BasicBlock *BB, BoUpSLP &R) {
8909 SmallVector<Value *, 16> BuildVectorInsts;
8910 SmallVector<Value *, 16> BuildVectorOpds;
8911 SmallVector<int> Mask;
8912 if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) ||
8913 (llvm::all_of(BuildVectorOpds,
8914 [](Value *V) { return isa<ExtractElementInst>(V); }) &&
8915 isShuffle(BuildVectorOpds, Mask)))
8916 return false;
8917
8918 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
8919 return tryToVectorizeList(BuildVectorInsts, R);
8920 }
8921
8922 bool SLPVectorizerPass::vectorizeSimpleInstructions(
8923 SmallVectorImpl<Instruction *> &Instructions, BasicBlock *BB, BoUpSLP &R,
8924 bool AtTerminator) {
8925 bool OpsChanged = false;
8926 SmallVector<Instruction *, 4> PostponedCmps;
8927 for (auto *I : reverse(Instructions)) {
8928 if (R.isDeleted(I))
8929 continue;
8930 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I))
8931 OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
8932 else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I))
8933 OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
8934 else if (isa<CmpInst>(I))
8935 PostponedCmps.push_back(I);
8936 }
8937 if (AtTerminator) {
8938 // Try to find reductions first.
8939 for (Instruction *I : PostponedCmps) {
8940 if (R.isDeleted(I))
8941 continue;
8942 for (Value *Op : I->operands())
8943 OpsChanged |= vectorizeRootInstruction(nullptr, Op, BB, R, TTI);
8944 }
8945 // Try to vectorize operands as vector bundles.
8946 for (Instruction *I : PostponedCmps) {
8947 if (R.isDeleted(I))
8948 continue;
8949 OpsChanged |= tryToVectorize(I, R);
8950 }
8951 Instructions.clear();
8952 } else {
8953 // Insert in reverse order since the PostponedCmps vector was filled in
8954 // reverse order.
8955 Instructions.assign(PostponedCmps.rbegin(), PostponedCmps.rend());
8956 }
8957 return OpsChanged;
8958 }
8959
8960 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
8961 bool Changed = false;
8962 SmallVector<Value *, 4> Incoming;
8963 SmallPtrSet<Value *, 16> VisitedInstrs;
8964 // Maps phi nodes to the non-phi nodes found in the use tree for each phi
8965 // node. This makes it easier to identify the chains that can be vectorized
8966 // most profitably.
8967 DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes;
8968
8969 bool HaveVectorizedPhiNodes = true;
8970 while (HaveVectorizedPhiNodes) {
8971 HaveVectorizedPhiNodes = false;
8972
8973 // Collect the incoming values from the PHIs.
8974 Incoming.clear();
8975 for (Instruction &I : *BB) {
8976 PHINode *P = dyn_cast<PHINode>(&I);
8977 if (!P)
8978 break;
8979
8980 // No need to analyze deleted, vectorized, and non-vectorizable
8981 // instructions.
8982 if (!VisitedInstrs.count(P) && !R.isDeleted(P) &&
8983 isValidElementType(P->getType()))
8984 Incoming.push_back(P);
8985 }
8986
8987 // Find the corresponding non-phi nodes for better matching when trying to
8988 // build the tree.
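// For each candidate phi, look through nested incoming phis and flatten the
// non-phi incoming values into PHIToOpcodes; these operand lists drive the
// sorting and compatibility checks below.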
8989 for (Value *V : Incoming) { 8990 SmallVectorImpl<Value *> &Opcodes = 8991 PHIToOpcodes.try_emplace(V).first->getSecond(); 8992 if (!Opcodes.empty()) 8993 continue; 8994 SmallVector<Value *, 4> Nodes(1, V); 8995 SmallPtrSet<Value *, 4> Visited; 8996 while (!Nodes.empty()) { 8997 auto *PHI = cast<PHINode>(Nodes.pop_back_val()); 8998 if (!Visited.insert(PHI).second) 8999 continue; 9000 for (Value *V : PHI->incoming_values()) { 9001 if (auto *PHI1 = dyn_cast<PHINode>((V))) { 9002 Nodes.push_back(PHI1); 9003 continue; 9004 } 9005 Opcodes.emplace_back(V); 9006 } 9007 } 9008 } 9009 9010 // Sort by type, parent, operands. 9011 stable_sort(Incoming, [this, &PHIToOpcodes](Value *V1, Value *V2) { 9012 assert(isValidElementType(V1->getType()) && 9013 isValidElementType(V2->getType()) && 9014 "Expected vectorizable types only."); 9015 // It is fine to compare type IDs here, since we expect only vectorizable 9016 // types, like ints, floats and pointers, we don't care about other type. 9017 if (V1->getType()->getTypeID() < V2->getType()->getTypeID()) 9018 return true; 9019 if (V1->getType()->getTypeID() > V2->getType()->getTypeID()) 9020 return false; 9021 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; 9022 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; 9023 if (Opcodes1.size() < Opcodes2.size()) 9024 return true; 9025 if (Opcodes1.size() > Opcodes2.size()) 9026 return false; 9027 for (int I = 0, E = Opcodes1.size(); I < E; ++I) { 9028 // Undefs are compatible with any other value. 9029 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) 9030 continue; 9031 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I])) 9032 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) { 9033 DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent()); 9034 DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent()); 9035 if (!NodeI1) 9036 return NodeI2 != nullptr; 9037 if (!NodeI2) 9038 return false; 9039 assert((NodeI1 == NodeI2) == 9040 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) && 9041 "Different nodes should have different DFS numbers"); 9042 if (NodeI1 != NodeI2) 9043 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn(); 9044 InstructionsState S = getSameOpcode({I1, I2}); 9045 if (S.getOpcode()) 9046 continue; 9047 return I1->getOpcode() < I2->getOpcode(); 9048 } 9049 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) 9050 continue; 9051 if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID()) 9052 return true; 9053 if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID()) 9054 return false; 9055 } 9056 return false; 9057 }); 9058 9059 auto &&AreCompatiblePHIs = [&PHIToOpcodes](Value *V1, Value *V2) { 9060 if (V1 == V2) 9061 return true; 9062 if (V1->getType() != V2->getType()) 9063 return false; 9064 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; 9065 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; 9066 if (Opcodes1.size() != Opcodes2.size()) 9067 return false; 9068 for (int I = 0, E = Opcodes1.size(); I < E; ++I) { 9069 // Undefs are compatible with any other value. 
9070 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I]))
9071 continue;
9072 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
9073 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
9074 if (I1->getParent() != I2->getParent())
9075 return false;
9076 InstructionsState S = getSameOpcode({I1, I2});
9077 if (S.getOpcode())
9078 continue;
9079 return false;
9080 }
9081 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I]))
9082 continue;
9083 if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID())
9084 return false;
9085 }
9086 return true;
9087 };
9088
9089 // Try to vectorize elements based on their type.
9090 SmallVector<Value *, 4> Candidates;
9091 for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
9092 E = Incoming.end();
9093 IncIt != E;) {
9094
9095 // Look for the next elements with the same type, parent and operand
9096 // kinds.
9097 SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
9098 while (SameTypeIt != E && AreCompatiblePHIs(*SameTypeIt, *IncIt)) {
9099 VisitedInstrs.insert(*SameTypeIt);
9100 ++SameTypeIt;
9101 }
9102
9103 // Try to vectorize them.
9104 unsigned NumElts = (SameTypeIt - IncIt);
9105 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs ("
9106 << NumElts << ")\n");
9107 // The order in which the phi nodes appear in the program does not matter,
9108 // so tryToVectorizeList may reorder them if that is beneficial. This
9109 // is only attempted when there are at least two elements to work with,
9110 // since a single phi cannot form a vector bundle on its own.
9111 // The vectorization is a three-stage attempt:
9112 // 1. Try to vectorize PHIs with the same/alternate opcodes at the size
9113 // of the maximal register first.
9114 // 2. Try to vectorize the remaining PHIs with the same type, if possible.
9115 // This may give better vectorization results than trying to vectorize
9116 // only PHIs with the same/alternate opcodes.
9117 // 3. Make a final attempt to vectorize all PHIs with the same/alternate
9118 // ops only; this may result in some extra final vectorization.
9119 if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R,
9120 /*LimitForRegisterSize=*/true)) {
9121 // Success; start over because instructions might have been changed.
9122 HaveVectorizedPhiNodes = true;
9123 Changed = true;
9124 } else if (NumElts * R.getVectorElementSize(*IncIt) <
9125 R.getMaxVecRegSize() &&
9126 (Candidates.empty() ||
9127 Candidates.front()->getType() == (*IncIt)->getType())) {
9128 Candidates.append(IncIt, std::next(IncIt, NumElts));
9129 }
9130 // Final attempt to vectorize phis with the same types.
9131 if (Candidates.size() > 1 &&
9132 (SameTypeIt == E ||
9133 (*SameTypeIt)->getType() != (*IncIt)->getType())) {
9134 if (tryToVectorizeList(Candidates, R)) {
9135 // Success; start over because instructions might have been changed.
9136 HaveVectorizedPhiNodes = true;
9137 Changed = true;
9138 } else {
9139 // Try to vectorize using small vectors.
          for (SmallVector<Value *, 4>::iterator It = Candidates.begin(),
                                                 End = Candidates.end();
               It != End;) {
            SmallVector<Value *, 4>::iterator SameTypeIt = It;
            while (SameTypeIt != End && AreCompatiblePHIs(*SameTypeIt, *It))
              ++SameTypeIt;
            unsigned NumElts = (SameTypeIt - It);
            if (NumElts > 1 &&
                tryToVectorizeList(makeArrayRef(It, NumElts), R)) {
              HaveVectorizedPhiNodes = true;
              Changed = true;
            }
            It = SameTypeIt;
          }
        }
        Candidates.clear();
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  SmallVector<Instruction *, 8> PostProcessInstructions;
  SmallDenseSet<Instruction *, 4> KeyNodes;
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // Skip instructions with a scalable type: the number of elements is
    // unknown at compile time for scalable types.
    if (isa<ScalableVectorType>(it->getType()))
      continue;

    // Skip instructions marked for deletion.
    if (R.isDeleted(&*it))
      continue;
    // We may go through BB multiple times, so skip the instructions we have
    // already checked.
    if (!VisitedInstrs.insert(&*it).second) {
      if (it->use_empty() && KeyNodes.contains(&*it) &&
          vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
                                      it->isTerminator())) {
        // We would like to start over since some instructions are deleted
        // and the iterator may be invalidated.
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() == 2) {
        // Try to match and vectorize a horizontal reduction.
        if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
                                     TTI)) {
          Changed = true;
          it = BB->begin();
          e = BB->end();
          continue;
        }
      }
      // Try to vectorize the incoming values of the PHI, to catch reductions
      // that feed into PHIs.
      for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) {
        // Skip if the incoming block is the current BB for now. Also, bypass
        // unreachable IR for efficiency and to avoid crashing.
        // TODO: Collect the skipped incoming values and try to vectorize them
        // after processing BB.
        if (BB == P->getIncomingBlock(I) ||
            !DT->isReachableFromEntry(P->getIncomingBlock(I)))
          continue;

        Changed |= vectorizeRootInstruction(nullptr, P->getIncomingValue(I),
                                            P->getIncomingBlock(I), R, TTI);
      }
      continue;
    }

    // Ran into an instruction without users, such as a terminator, a store, or
    // a function call with an ignored return value. Ignore unused instructions
    // (based on the instruction type, except for CallInst and InvokeInst).
    if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
                            isa<InvokeInst>(it))) {
      KeyNodes.insert(&*it);
      bool OpsChanged = false;
      if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
        for (auto *V : it->operand_values()) {
          // Try to match and vectorize a horizontal reduction.
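          // (For example, a chain of adds that reduces a set of vectorizable
          // values into the single scalar operand V.)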
          OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
        }
      }
      // Start vectorization of the post-process list of instructions from the
      // top-tree instructions, to try to vectorize as many instructions as
      // possible.
      OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
                                                it->isTerminator());
      if (OpsChanged) {
        // We would like to start over since some instructions are deleted
        // and the iterator may be invalidated.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
    }

    if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
        isa<InsertValueInst>(it))
      PostProcessInstructions.push_back(&*it);
  }

  return Changed;
}

bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
  auto Changed = false;
  for (auto &Entry : GEPs) {
    // If the getelementptr list has fewer than two elements, there's nothing
    // to do.
    if (Entry.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
                      << Entry.second.size() << ".\n");

    // Process the GEP list in chunks suitable for the target's supported
    // vector size. If a vector register can't hold 1 element, we are done. We
    // are trying to vectorize the index computations, so the maximum number of
    // elements is based on the size of the index expression, rather than the
    // size of the GEP itself (the target's pointer size).
    unsigned MaxVecRegSize = R.getMaxVecRegSize();
    unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin());
    if (MaxVecRegSize < EltSize)
      continue;

    unsigned MaxElts = MaxVecRegSize / EltSize;
    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
      auto Len = std::min<unsigned>(BE - BI, MaxElts);
      ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);

      // Initialize a set of candidate getelementptrs. Note that we use a
      // SetVector here to preserve program order. If the index computations
      // are vectorizable and begin with loads, we want to minimize the chance
      // of having to reorder them later.
      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

      // Some of the candidates may have already been vectorized after we
      // initially collected them. If so, they are marked as deleted, so remove
      // them from the set of candidates.
      Candidates.remove_if(
          [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); });

      // Remove from the set of candidates all pairs of getelementptrs with
      // constant differences. Such getelementptrs are likely not good
      // candidates for vectorization in a bottom-up phase since one can be
      // computed from the other. We also ensure all candidate getelementptr
      // indices are unique.
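      // For example, for addresses like p[i] and p[i + 4] the two indices
      // differ by a constant, so one address can simply be computed from the
      // other and the pair is dropped.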
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1;
           ++I) {
        auto *GEPI = GEPList[I];
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = GEPList[J];
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPI);
            Candidates.remove(GEPJ);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPJ);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;

      // Add the single, non-constant index of each candidate to the bundle. We
      // ensured the indices met these constraints when we originally collected
      // the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try to vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      //   ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Sort by type, base pointers and value operands. Value operands must be
  // compatible (have the same opcode, same parent); otherwise it is
  // definitely not profitable to try to vectorize them.
  auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) {
    if (V->getPointerOperandType()->getTypeID() <
        V2->getPointerOperandType()->getTypeID())
      return true;
    if (V->getPointerOperandType()->getTypeID() >
        V2->getPointerOperandType()->getTypeID())
      return false;
    // UndefValues are compatible with all other values.
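    // (In this comparator, "compatible" means the two stores compare as equal,
    // so we return false rather than establishing an order.)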
    if (isa<UndefValue>(V->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return false;
    if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        DomTreeNodeBase<llvm::BasicBlock> *NodeI1 =
            DT->getNode(I1->getParent());
        DomTreeNodeBase<llvm::BasicBlock> *NodeI2 =
            DT->getNode(I2->getParent());
        assert(NodeI1 && "Should only process reachable instructions");
        assert(NodeI2 && "Should only process reachable instructions");
        assert((NodeI1 == NodeI2) ==
                   (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
               "Different nodes should have different DFS numbers");
        if (NodeI1 != NodeI2)
          return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
        InstructionsState S = getSameOpcode({I1, I2});
        if (S.getOpcode())
          return false;
        return I1->getOpcode() < I2->getOpcode();
      }
    if (isa<Constant>(V->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return false;
    return V->getValueOperand()->getValueID() <
           V2->getValueOperand()->getValueID();
  };

  auto &&AreCompatibleStores = [](StoreInst *V1, StoreInst *V2) {
    if (V1 == V2)
      return true;
    if (V1->getPointerOperandType() != V2->getPointerOperandType())
      return false;
    // Undefs are compatible with any other value.
    if (isa<UndefValue>(V1->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return true;
    if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        if (I1->getParent() != I2->getParent())
          return false;
        InstructionsState S = getSameOpcode({I1, I2});
        return S.getOpcode() > 0;
      }
    if (isa<Constant>(V1->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return true;
    return V1->getValueOperand()->getValueID() ==
           V2->getValueOperand()->getValueID();
  };

  // Attempt to sort and vectorize each of the store-groups.
  for (auto &Pair : Stores) {
    if (Pair.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                      << Pair.second.size() << ".\n");

    stable_sort(Pair.second, StoreSorter);

    // Try to vectorize elements based on their compatibility.
    for (ArrayRef<StoreInst *>::iterator IncIt = Pair.second.begin(),
                                         E = Pair.second.end();
         IncIt != E;) {

      // Look for the next elements with the same type.
      ArrayRef<StoreInst *>::iterator SameTypeIt = IncIt;
      Type *EltTy = (*IncIt)->getPointerOperand()->getType();

      while (SameTypeIt != E && AreCompatibleStores(*SameTypeIt, *IncIt))
        ++SameTypeIt;

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at stores ("
                        << NumElts << ")\n");
      if (NumElts > 1 && !EltTy->getPointerElementType()->isVectorTy() &&
          vectorizeStores(makeArrayRef(IncIt, NumElts), R)) {
        // Success, start over because instructions might have been changed.
        Changed = true;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }
  return Changed;
}

char SLPVectorizer::ID = 0;

static const char lv_name[] = "SLP Vectorizer";

INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); }
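// A minimal usage sketch (illustrative only, assuming a client that builds its
// own legacy pass pipeline; in-tree users get the pass scheduled by the
// standard optimization pipelines instead):
//
//   legacy::PassManager PM;
//   PM.add(createSLPVectorizerPass());
//   PM.run(M);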