//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h" 88 #include "llvm/Transforms/Utils/LoopUtils.h" 89 #include "llvm/Transforms/Vectorize.h" 90 #include <algorithm> 91 #include <cassert> 92 #include <cstdint> 93 #include <iterator> 94 #include <memory> 95 #include <set> 96 #include <string> 97 #include <tuple> 98 #include <utility> 99 #include <vector> 100 101 using namespace llvm; 102 using namespace llvm::PatternMatch; 103 using namespace slpvectorizer; 104 105 #define SV_NAME "slp-vectorizer" 106 #define DEBUG_TYPE "SLP" 107 108 STATISTIC(NumVectorInstructions, "Number of vector instructions generated"); 109 110 cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden, 111 cl::desc("Run the SLP vectorization passes")); 112 113 static cl::opt<int> 114 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden, 115 cl::desc("Only vectorize if you gain more than this " 116 "number ")); 117 118 static cl::opt<bool> 119 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden, 120 cl::desc("Attempt to vectorize horizontal reductions")); 121 122 static cl::opt<bool> ShouldStartVectorizeHorAtStore( 123 "slp-vectorize-hor-store", cl::init(false), cl::Hidden, 124 cl::desc( 125 "Attempt to vectorize horizontal reductions feeding into a store")); 126 127 static cl::opt<int> 128 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden, 129 cl::desc("Attempt to vectorize for this register size in bits")); 130 131 static cl::opt<unsigned> 132 MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden, 133 cl::desc("Maximum SLP vectorization factor (0=unlimited)")); 134 135 static cl::opt<int> 136 MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden, 137 cl::desc("Maximum depth of the lookup for consecutive stores.")); 138 139 /// Limits the size of scheduling regions in a block. 140 /// It avoid long compile times for _very_ large blocks where vector 141 /// instructions are spread over a wide range. 142 /// This limit is way higher than needed by real-world functions. 143 static cl::opt<int> 144 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden, 145 cl::desc("Limit the size of the SLP scheduling region per block")); 146 147 static cl::opt<int> MinVectorRegSizeOption( 148 "slp-min-reg-size", cl::init(128), cl::Hidden, 149 cl::desc("Attempt to vectorize for this register size in bits")); 150 151 static cl::opt<unsigned> RecursionMaxDepth( 152 "slp-recursion-max-depth", cl::init(12), cl::Hidden, 153 cl::desc("Limit the recursion depth when building a vectorizable tree")); 154 155 static cl::opt<unsigned> MinTreeSize( 156 "slp-min-tree-size", cl::init(3), cl::Hidden, 157 cl::desc("Only vectorize small trees if they are fully vectorizable")); 158 159 // The maximum depth that the look-ahead score heuristic will explore. 160 // The higher this value, the higher the compilation time overhead. 161 static cl::opt<int> LookAheadMaxDepth( 162 "slp-max-look-ahead-depth", cl::init(2), cl::Hidden, 163 cl::desc("The maximum look-ahead depth for operand reordering scores")); 164 165 // The Look-ahead heuristic goes through the users of the bundle to calculate 166 // the users cost in getExternalUsesCost(). To avoid compilation time increase 167 // we limit the number of users visited to this value. 168 static cl::opt<unsigned> LookAheadUsersBudget( 169 "slp-look-ahead-users-budget", cl::init(2), cl::Hidden, 170 cl::desc("The maximum number of users to visit while visiting the " 171 "predecessors. 

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  BasicBlock *BB = I0->getParent();
  for (int I = 1, E = VL.size(); I < E; I++) {
    auto *II = dyn_cast<Instruction>(VL[I]);
    if (!II)
      return false;

    if (BB != II->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants (but not
/// globals/constant expressions).
static bool allConstant(ArrayRef<Value *> VL) {
  // Constant expressions and globals can't be vectorized like normal
  // integer/FP constants.
  for (Value *i : VL)
    if (!isa<Constant>(i) || isa<ConstantExpr>(i) || isa<GlobalValue>(i))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns True if \p I is commutative; handles CmpInst and BinaryOperator.
static bool isCommutative(Instruction *I) {
  if (auto *Cmp = dyn_cast<CmpInst>(I))
    return Cmp->isCommutative();
  if (auto *BO = dyn_cast<BinaryOperator>(I))
    return BO->isCommutative();
  // TODO: This should check for generic Instruction::isCommutative(), but we
  // need to confirm that the caller code correctly handles Intrinsics (which,
  // for example, do not have exactly 2 operands).
  return false;
}
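
// Illustrative examples for isCommutative(): it returns true for 'add' and
// for equality compares such as 'icmp eq', but false for 'sub', whose
// operands cannot be swapped without changing the result.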

/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> poison, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> poison, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul
/// Mask will return the Shuffle Mask equivalent to the extracted elements.
/// TODO: Can we split off and reuse the shuffle mask detection from
/// TargetTransformInfo::getInstructionThroughput?
static Optional<TargetTransformInfo::ShuffleKind>
isShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
  auto *EI0 = cast<ExtractElementInst>(VL[0]);
  unsigned Size =
      cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, Select, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    auto *EI = cast<ExtractElementInst>(VL[I]);
    auto *Vec = EI->getVectorOperand();
    // All vector operands must have the same number of vector elements.
    if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
      return None;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size)) {
      Mask.push_back(UndefMaskElem);
      continue;
    }
    unsigned IntIdx = Idx->getValue().getZExtValue();
    Mask.push_back(IntIdx);
    // We can extractelement from undef or poison vector.
    if (isa<UndefValue>(Vec))
      continue;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
    if (!Vec1 || Vec1 == Vec)
      Vec1 = Vec;
    else if (!Vec2 || Vec2 == Vec)
      Vec2 = Vec;
    else
      return None;
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector, otherwise
  // we have permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
  bool isAltShuffle() const { return getOpcode() != getAltOpcode(); }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace

/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}

/// \returns true if \p Opcode is allowed as part of the main/alternate
/// instruction for SLP vectorization.
///
/// An example of an unsupported opcode is SDIV, which can potentially cause UB
/// if the "shuffled out" lane would result in division by zero.
static bool isValidForAlternation(unsigned Opcode) {
  if (Instruction::isIntDivRem(Opcode))
    return false;

  return true;
}

/// \returns analysis of the Instructions in \p VL described in
/// InstructionsState: the Opcode with which we suppose the whole list
/// could be vectorized, even if its structure is diverse.
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex = 0) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
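  // Illustrative: for VL = {add, sub, add, sub} the loop below yields
  // MainOp = add and AltOp = sub, i.e. an alternating sequence that can be
  // vectorized as one vector add and one vector sub blended by a shuffle.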
  // TODO - generalize to support all operators (types, calls etc.).
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
    if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
      if (InstOpcode == Opcode || InstOpcode == AltOpcode)
        continue;
      if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
          isValidForAlternation(Opcode)) {
        AltOpcode = InstOpcode;
        AltIndex = Cnt;
        continue;
      }
    } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
      Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
      Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        if (InstOpcode == Opcode || InstOpcode == AltOpcode)
          continue;
        if (Opcode == AltOpcode) {
          assert(isValidForAlternation(Opcode) &&
                 isValidForAlternation(InstOpcode) &&
                 "Cast isn't safe for alternation, logic needs to be updated!");
          AltOpcode = InstOpcode;
          AltIndex = Cnt;
          continue;
        }
      }
    } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
      continue;
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }

  return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
                           cast<Instruction>(VL[AltIndex]));
}

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static Optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return None;
    return CI->getZExtValue();
  }
  ExtractValueInst *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return None;
  return *EI->idx_begin();
}

/// \returns True if an in-tree use also needs an extract. This refers to a
/// possible scalar operand in a vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
      if (hasVectorInstrinsicScalarOpd(ID, i))
        return (CI->getArgOperand(i) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}
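
// Illustrative: a vectorized store `store i32 %v, i32* %p` still consumes its
// pointer operand %p as a scalar; if %p is itself a scalar bundled into the
// tree, InTreeUserNeedToExtract() reports that an extract must be generated.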

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AAResults *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

namespace llvm {

static void inversePermutation(ArrayRef<unsigned> Indices,
                               SmallVectorImpl<int> &Mask) {
  Mask.clear();
  const unsigned E = Indices.size();
  Mask.resize(E, E + 1);
  for (unsigned I = 0; I < E; ++I)
    Mask[Indices[I]] = I;
}

/// \returns the inserting index of an InsertElement or InsertValue
/// instruction, using Offset as the base offset for the index.
static Optional<int> getInsertIndex(Value *InsertInst, unsigned Offset) {
  int Index = Offset;
  if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
    if (auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2))) {
      auto *VT = cast<FixedVectorType>(IE->getType());
      if (CI->getValue().uge(VT->getNumElements()))
        return UndefMaskElem;
      Index *= VT->getNumElements();
      Index += CI->getZExtValue();
      return Index;
    }
    if (isa<UndefValue>(IE->getOperand(2)))
      return UndefMaskElem;
    return None;
  }

  auto *IV = cast<InsertValueInst>(InsertInst);
  Type *CurrentType = IV->getType();
  for (unsigned I : IV->indices()) {
    if (auto *ST = dyn_cast<StructType>(CurrentType)) {
      Index *= ST->getNumElements();
      CurrentType = ST->getElementType(I);
    } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
      Index *= AT->getNumElements();
      CurrentType = AT->getElementType();
    } else {
      return None;
    }
    Index += I;
  }
  return Index;
}

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  using OrdersType = SmallVector<unsigned, 4>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC),
        DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
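    // Illustrative: with MaxVecRegSize = 128 and 32-bit elements, the
    // vectorization factor later derived from these limits is 128 / 32 = 4
    // lanes per vector.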
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize =
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize();

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractvalue instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  InstructionCost getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  InstructionCost getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst, taking
  /// into account (and updating it, if required) the list of externally used
  /// values stored in \p ExternallyUsedValues.
  void buildTree(ArrayRef<Value *> Roots,
                 ExtraValueToDebugLocsMap &ExternallyUsedValues,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumOpsWantToKeepOrder.clear();
    NumOpsWantToKeepOriginalOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
    InstrElementSize.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// \returns The best order of instructions for vectorization.
  Optional<ArrayRef<unsigned>> bestOrder() const {
    assert(llvm::all_of(
               NumOpsWantToKeepOrder,
               [this](const decltype(NumOpsWantToKeepOrder)::value_type &D) {
                 return D.getFirst().size() ==
                        VectorizableTree[0]->Scalars.size();
               }) &&
           "All orders must have the same size as number of instructions in "
           "tree node.");
    auto I = std::max_element(
        NumOpsWantToKeepOrder.begin(), NumOpsWantToKeepOrder.end(),
        [](const decltype(NumOpsWantToKeepOrder)::value_type &D1,
           const decltype(NumOpsWantToKeepOrder)::value_type &D2) {
          return D1.second < D2.second;
        });
    if (I == NumOpsWantToKeepOrder.end() ||
        I->getSecond() <= NumOpsWantToKeepOriginalOrder)
      return None;

    return makeArrayRef(I->getFirst());
  }
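
  // Illustrative: if three tree nodes voted for the order <1, 0, 3, 2> while
  // only one preferred the original order, bestOrder() returns <1, 0, 3, 2>;
  // when no order outvotes the original one, it returns None.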

  /// Builds the correct order for root instructions.
  /// If some leaves have the same instructions to be vectorized, we may
  /// incorrectly evaluate the best order for the root node (it is built for the
  /// vector of instructions without repeated instructions and, thus, has fewer
  /// elements than the root node). This function builds the correct order for
  /// the root node.
  /// For example, if the root node is \<a+b, a+c, a+d, f+e\>, then the leaves
  /// are \<a, a, a, f\> and \<b, c, d, e\>. When we try to vectorize the first
  /// leaf, it will be shrunk to \<a, f\>. If instructions in this leaf should
  /// be reordered, the best order will be \<1, 0\>. We need to extend this
  /// order for the root node. For the root node this order should look like
  /// \<3, 0, 1, 2\>. This function extends the order for the reused
  /// instructions.
  void findRootOrder(OrdersType &Order) {
    // If the leaf has the same number of instructions to vectorize as the root
    // - the order must be set already.
    unsigned RootSize = VectorizableTree[0]->Scalars.size();
    if (Order.size() == RootSize)
      return;
    SmallVector<unsigned, 4> RealOrder(Order.size());
    std::swap(Order, RealOrder);
    SmallVector<int, 4> Mask;
    inversePermutation(RealOrder, Mask);
    Order.assign(Mask.begin(), Mask.end());
    // The leaf has fewer instructions - we need to find the true order of
    // the root.
    // Scan the nodes starting from the leaf back to the root.
    const TreeEntry *PNode = VectorizableTree.back().get();
    SmallVector<const TreeEntry *, 4> Nodes(1, PNode);
    SmallPtrSet<const TreeEntry *, 4> Visited;
    while (!Nodes.empty() && Order.size() != RootSize) {
      const TreeEntry *PNode = Nodes.pop_back_val();
      if (!Visited.insert(PNode).second)
        continue;
      const TreeEntry &Node = *PNode;
      for (const EdgeInfo &EI : Node.UserTreeIndices)
        if (EI.UserTE)
          Nodes.push_back(EI.UserTE);
      if (Node.ReuseShuffleIndices.empty())
        continue;
      // Build the order for the parent node.
      OrdersType NewOrder(Node.ReuseShuffleIndices.size(), RootSize);
      SmallVector<unsigned, 4> OrderCounter(Order.size(), 0);
      // The algorithm of the order extension is:
      // 1. Calculate the number of the same instructions for the order.
      // 2. Calculate the index of the new order: total number of instructions
      //    with order less than the order of the current instruction + reuse
      //    number of the current instruction.
      // 3. The new order is just the index of the instruction in the original
      //    vector of the instructions.
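      // Illustrative trace for the doc example above (leaf <a, a, a, f>
      // reused as <a, f>, Order = <1, 0>, ReuseShuffleIndices = <0, 0, 0, 1>):
      //   OrderCounter becomes <1, 3>: one reused lane maps to order 0 (f),
      //   three map to order 1 (a).
      //   Lanes I = 0..2 (all 'a') get NewIdx 1, 2, 3; lane I = 3 ('f') gets
      //   NewIdx 0, so NewOrder = <3, 0, 1, 2>, matching the example.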
      for (unsigned I : Node.ReuseShuffleIndices)
        ++OrderCounter[Order[I]];
      SmallVector<unsigned, 4> CurrentCounter(Order.size(), 0);
      for (unsigned I = 0, E = Node.ReuseShuffleIndices.size(); I < E; ++I) {
        unsigned ReusedIdx = Node.ReuseShuffleIndices[I];
        unsigned OrderIdx = Order[ReusedIdx];
        unsigned NewIdx = 0;
        for (unsigned J = 0; J < OrderIdx; ++J)
          NewIdx += OrderCounter[J];
        NewIdx += CurrentCounter[OrderIdx];
        ++CurrentCounter[OrderIdx];
        assert(NewOrder[NewIdx] == RootSize &&
               "The order index should not be written already.");
        NewOrder[NewIdx] = I;
      }
      std::swap(Order, NewOrder);
    }
    assert(Order.size() == RootSize &&
           "Root node is expected or the size of the order must be the same as "
           "the number of elements in the root node.");
    assert(llvm::all_of(Order,
                        [RootSize](unsigned Val) { return Val != RootSize; }) &&
           "All indices must be initialized");
  }

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns the maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns the minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
    unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
      MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
    return MaxVF ? MaxVF : UINT_MAX;
  }

  /// Check if a homogeneous aggregate is isomorphic to some VectorType.
  /// Accepts homogeneous multidimensional aggregates of scalars/vectors like
  /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
  /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable() const;

  /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  /// may not be necessary.
  bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;
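
  // Illustrative IR for the pattern these checks protect (an i16 assembled
  // from two adjacent i8 loads, which the backend can turn into one wide
  // load):
  //   %z0 = zext i8 %b0 to i16   ; %b0 = load i8
  //   %z1 = zext i8 %b1 to i16   ; %b1 = load i8 (adjacent address)
  //   %s1 = shl i16 %z1, 8
  //   %or = or i16 %z0, %s1
  // Partially vectorizing such a chain would hide the wide-load opportunity.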

  /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  /// may not be necessary.
  bool isLoadCombineCandidate() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }

  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
  };

  /// A helper data structure to hold the operands of a vector of instructions.
  /// This supports a fixed vector length for all operand vectors.
  class VLOperands {
    /// For each operand we need (i) the value, and (ii) the opcode that it
    /// would be attached to if the expression was in a left-linearized form.
    /// This is required to avoid illegal operand reordering.
    /// For example:
    /// \verbatim
    ///                         0 Op1
    ///                         |/
    /// Op1 Op2   Linearized    + Op2
    ///   \ /     ---------->   |/
    ///    -                    -
    ///
    /// Op1 - Op2            (0 + Op1) - Op2
    /// \endverbatim
    ///
    /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
    ///
    /// Another way to think of this is to track all the operations across the
    /// path from the operand all the way to the root of the tree and to
    /// calculate the operation that corresponds to this path. For example, the
    /// path from Op2 to the root crosses the RHS of the '-', therefore the
    /// corresponding operation is a '-' (which matches the one in the
    /// linearized tree, as shown above).
    ///
    /// For lack of a better term, we refer to this operation as Accumulated
    /// Path Operation (APO).
    struct OperandData {
      OperandData() = default;
      OperandData(Value *V, bool APO, bool IsUsed)
          : V(V), APO(APO), IsUsed(IsUsed) {}
      /// The operand value.
      Value *V = nullptr;
      /// TreeEntries only allow a single opcode, or an alternate sequence of
      /// them (e.g, +, -). Therefore, we can safely use a boolean value for the
      /// APO. It is set to 'true' if 'V' is attached to an inverse operation
      /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise
      /// (e.g., Add/Mul).
      bool APO = false;
      /// Helper data for the reordering function.
      bool IsUsed = false;
    };
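
    // Illustrative APO values: for VL[Lane] = (a - b), operand 'a' (the LHS)
    // gets APO = false, while operand 'b' (the RHS) gets APO = true, because
    // in the left-linearized form 'b' is attached to the inverse '-'.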

    /// During operand reordering, we are trying to select the operand at the
    /// lane that matches best with the operand at the neighboring lane. Our
    /// selection is based on the type of value we are looking for. For example,
    /// if the neighboring lane has a load, we need to look for a load that is
    /// accessing a consecutive address. These strategies are summarized in the
    /// 'ReorderingMode' enumerator.
    enum class ReorderingMode {
      Load,     ///< Matching loads to consecutive memory addresses
      Opcode,   ///< Matching instructions based on opcode (same or alternate)
      Constant, ///< Matching constants
      Splat,    ///< Matching the same instruction multiple times (broadcast)
      Failed,   ///< We failed to create a vectorizable group
    };

    using OperandDataVec = SmallVector<OperandData, 2>;

    /// A vector of operand vectors.
    SmallVector<OperandDataVec, 4> OpsVec;

    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;

    /// \returns the operand data at \p OpIdx and \p Lane.
    OperandData &getData(unsigned OpIdx, unsigned Lane) {
      return OpsVec[OpIdx][Lane];
    }

    /// \returns the operand data at \p OpIdx and \p Lane. Const version.
    const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
      return OpsVec[OpIdx][Lane];
    }

    /// Clears the used flag for all entries.
    void clearUsed() {
      for (unsigned OpIdx = 0, NumOperands = getNumOperands();
           OpIdx != NumOperands; ++OpIdx)
        for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
             ++Lane)
          OpsVec[OpIdx][Lane].IsUsed = false;
    }

    /// Swap the operand at \p OpIdx1 with the one at \p OpIdx2.
    void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
      std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
    }

    // The hard-coded scores listed here are not very important. When computing
    // the scores of matching one sub-tree with another, we are basically
    // counting the number of values that are matching. So even if all scores
    // are set to 1, we would still get a decent matching result.
    // However, sometimes we have to break ties. For example we may have to
    // choose between matching loads vs matching opcodes. This is what these
    // scores are helping us with: they provide the order of preference.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 3;
    /// ExtractElementInst from same vector and consecutive indexes.
    static const int ScoreConsecutiveExtracts = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g, add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// User external to the vectorized code.
    static const int ExternalUseCost = 1;
    /// The user is internal but in a different lane.
    static const int UserInDiffLaneCost = ExternalUseCost;
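
    // Illustrative tie-break: if lane 0 holds load(A[0]) and the candidates
    // for lane 1 are load(A[1]) (ScoreConsecutiveLoads = 3) and an
    // opcode-only match (ScoreSameOpcode = 2), the consecutive load wins.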

    /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
    static int getShallowScore(Value *V1, Value *V2, const DataLayout &DL,
                               ScalarEvolution &SE) {
      auto *LI1 = dyn_cast<LoadInst>(V1);
      auto *LI2 = dyn_cast<LoadInst>(V2);
      if (LI1 && LI2) {
        if (LI1->getParent() != LI2->getParent())
          return VLOperands::ScoreFail;

        Optional<int> Dist =
            getPointersDiff(LI1->getPointerOperand(), LI2->getPointerOperand(),
                            DL, SE, /*StrictCheck=*/true);
        return (Dist && *Dist == 1) ? VLOperands::ScoreConsecutiveLoads
                                    : VLOperands::ScoreFail;
      }

      auto *C1 = dyn_cast<Constant>(V1);
      auto *C2 = dyn_cast<Constant>(V2);
      if (C1 && C2)
        return VLOperands::ScoreConstants;

      // Extracts from consecutive indexes of the same vector score better, as
      // the extracts could be optimized away.
      Value *EV;
      ConstantInt *Ex1Idx, *Ex2Idx;
      if (match(V1, m_ExtractElt(m_Value(EV), m_ConstantInt(Ex1Idx))) &&
          match(V2, m_ExtractElt(m_Deferred(EV), m_ConstantInt(Ex2Idx))) &&
          Ex1Idx->getZExtValue() + 1 == Ex2Idx->getZExtValue())
        return VLOperands::ScoreConsecutiveExtracts;

      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (I1 && I2) {
        if (I1 == I2)
          return VLOperands::ScoreSplat;
        InstructionsState S = getSameOpcode({I1, I2});
        // Note: Only consider instructions with <= 2 operands to avoid
        // complexity explosion.
        if (S.getOpcode() && S.MainOp->getNumOperands() <= 2)
          return S.isAltShuffle() ? VLOperands::ScoreAltOpcodes
                                  : VLOperands::ScoreSameOpcode;
      }

      if (isa<UndefValue>(V2))
        return VLOperands::ScoreUndef;

      return VLOperands::ScoreFail;
    }

    /// Holds the values and their lanes that are taking part in the look-ahead
    /// score calculation. This is used in the external uses cost calculation.
    SmallDenseMap<Value *, int> InLookAheadValues;

    /// \returns the additional cost due to uses of \p LHS and \p RHS that are
    /// either external to the vectorized code, or require shuffling.
    int getExternalUsesCost(const std::pair<Value *, int> &LHS,
                            const std::pair<Value *, int> &RHS) {
      int Cost = 0;
      std::array<std::pair<Value *, int>, 2> Values = {{LHS, RHS}};
      for (int Idx = 0, IdxE = Values.size(); Idx != IdxE; ++Idx) {
        Value *V = Values[Idx].first;
        if (isa<Constant>(V)) {
          // Since this is a function pass, it doesn't make semantic sense to
          // walk the users of a subclass of Constant. The users could be in
          // another function, or even another module that happens to be in
          // the same LLVMContext.
          continue;
        }

        // Calculate the absolute lane, using the minimum relative lane of LHS
        // and RHS as base and Idx as the offset.
        int Ln = std::min(LHS.second, RHS.second) + Idx;
        assert(Ln >= 0 && "Bad lane calculation");
        unsigned UsersBudget = LookAheadUsersBudget;
        for (User *U : V->users()) {
          if (const TreeEntry *UserTE = R.getTreeEntry(U)) {
            // The user is in the VectorizableTree. Check if we need to insert.
            auto It = llvm::find(UserTE->Scalars, U);
            assert(It != UserTE->Scalars.end() && "U is in UserTE");
            int UserLn = std::distance(UserTE->Scalars.begin(), It);
            assert(UserLn >= 0 && "Bad lane");
            if (UserLn != Ln)
              Cost += UserInDiffLaneCost;
          } else {
            // Check if the user is in the look-ahead code.
            auto It2 = InLookAheadValues.find(U);
            if (It2 != InLookAheadValues.end()) {
              // The user is in the look-ahead code. Check the lane.
              if (It2->second != Ln)
                Cost += UserInDiffLaneCost;
            } else {
              // The user is neither in SLP tree nor in the look-ahead code.
              Cost += ExternalUseCost;
            }
          }
          // Limit the number of visited uses to cap compilation time.
          if (--UsersBudget == 0)
            break;
        }
      }
      return Cost;
    }

    /// Go through the operands of \p LHS and \p RHS recursively until \p
    /// MaxLevel, and return the cumulative score. For example:
    /// \verbatim
    ///  A[0]  B[0]  A[1]  B[1]  C[0] D[0]  B[1] A[1]
    ///     \ /         \ /         \ /        \ /
    ///      +           +           +          +
    ///     G1          G2          G3         G4
    /// \endverbatim
    /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
    /// each level recursively, accumulating the score. It starts from matching
    /// the additions at level 0, then moves on to the loads (level 1). The
    /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
    /// {B[0],B[1]} match with VLOperands::ScoreConsecutiveLoads, while
    /// {A[0],C[0]} has a score of VLOperands::ScoreFail.
    /// Please note that the order of the operands does not matter, as we
    /// evaluate the score of all profitable combinations of operands. In
    /// other words the score of G1 and G4 is the same as G1 and G2. This
    /// heuristic is based on ideas described in:
    ///   Look-ahead SLP: Auto-vectorization in the presence of commutative
    ///   operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
    ///   Luís F. W. Góes
    int getScoreAtLevelRec(const std::pair<Value *, int> &LHS,
                           const std::pair<Value *, int> &RHS, int CurrLevel,
                           int MaxLevel) {

      Value *V1 = LHS.first;
      Value *V2 = RHS.first;
      // Get the shallow score of V1 and V2.
      int ShallowScoreAtThisLevel =
          std::max((int)ScoreFail, getShallowScore(V1, V2, DL, SE) -
                                       getExternalUsesCost(LHS, RHS));
      int Lane1 = LHS.second;
      int Lane2 = RHS.second;

      // If reached MaxLevel,
      //  or if V1 and V2 are not instructions,
      //  or if they are SPLAT,
      //  or if they are not consecutive, early return the current cost.
      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
          ShallowScoreAtThisLevel == VLOperands::ScoreFail ||
          (isa<LoadInst>(I1) && isa<LoadInst>(I2) && ShallowScoreAtThisLevel))
        return ShallowScoreAtThisLevel;
      assert(I1 && I2 && "Should have early exited.");

      // Keep track of in-tree values for determining the external-use cost.
      InLookAheadValues[V1] = Lane1;
      InLookAheadValues[V2] = Lane2;

      // Contains the I2 operand indexes that got matched with I1 operands.
      SmallSet<unsigned, 4> Op2Used;

      // Recursion towards the operands of I1 and I2. We are trying all possible
      // operand pairs, and keeping track of the best score.
      for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
           OpIdx1 != NumOperands1; ++OpIdx1) {
        // Try to pair op1I with the best operand of I2.
        int MaxTmpScore = 0;
        unsigned MaxOpIdx2 = 0;
        bool FoundBest = false;
        // If I2 is commutative try all combinations.
        unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
        unsigned ToIdx = isCommutative(I2)
                             ? I2->getNumOperands()
                             : std::min(I2->getNumOperands(), OpIdx1 + 1);
        assert(FromIdx <= ToIdx && "Bad index");
        for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
          // Skip operands already paired with OpIdx1.
          if (Op2Used.count(OpIdx2))
            continue;
          // Recursively calculate the cost at each level.
          int TmpScore = getScoreAtLevelRec({I1->getOperand(OpIdx1), Lane1},
                                            {I2->getOperand(OpIdx2), Lane2},
                                            CurrLevel + 1, MaxLevel);
          // Look for the best score.
          if (TmpScore > VLOperands::ScoreFail && TmpScore > MaxTmpScore) {
            MaxTmpScore = TmpScore;
            MaxOpIdx2 = OpIdx2;
            FoundBest = true;
          }
        }
        if (FoundBest) {
          // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
          Op2Used.insert(MaxOpIdx2);
          ShallowScoreAtThisLevel += MaxTmpScore;
        }
      }
      return ShallowScoreAtThisLevel;
    }

    /// \returns the look-ahead score, which tells us how much the sub-trees
    /// rooted at \p LHS and \p RHS match; the more they match the higher the
    /// score. This helps break ties in an informed way when we cannot decide on
    /// the order of the operands by just considering the immediate
    /// predecessors.
    int getLookAheadScore(const std::pair<Value *, int> &LHS,
                          const std::pair<Value *, int> &RHS) {
      InLookAheadValues.clear();
      return getScoreAtLevelRec(LHS, RHS, 1, LookAheadMaxDepth);
    }

    // Search all operands in Ops[*][Lane] for the one that matches best
    // Ops[OpIdx][LastLane] and return its operand index.
    // If no good match can be found, return None.
    Optional<unsigned>
    getBestOperand(unsigned OpIdx, int Lane, int LastLane,
                   ArrayRef<ReorderingMode> ReorderingModes) {
      unsigned NumOperands = getNumOperands();

      // The operand of the previous lane at OpIdx.
      Value *OpLastLane = getData(OpIdx, LastLane).V;

      // Our strategy mode for OpIdx.
      ReorderingMode RMode = ReorderingModes[OpIdx];

      // The linearized opcode of the operand at OpIdx, Lane.
      bool OpIdxAPO = getData(OpIdx, Lane).APO;

      // The best operand index and its score.
      // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
      // are using the score to differentiate between the two.
      struct BestOpData {
        Optional<unsigned> Idx = None;
        unsigned Score = 0;
      } BestOp;

      // Iterate through all unused operands and look for the best.
      for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
        // Get the operand at Idx and Lane.
        OperandData &OpData = getData(Idx, Lane);
        Value *Op = OpData.V;
        bool OpAPO = OpData.APO;

        // Skip already selected operands.
        if (OpData.IsUsed)
          continue;

        // Skip if we are trying to move the operand to a position with a
        // different opcode in the linearized tree form. This would break the
        // semantics.
        if (OpAPO != OpIdxAPO)
          continue;

        // Look for an operand that matches the current mode.
        switch (RMode) {
        case ReorderingMode::Load:
        case ReorderingMode::Constant:
        case ReorderingMode::Opcode: {
          bool LeftToRight = Lane > LastLane;
          Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
          Value *OpRight = (LeftToRight) ? Op : OpLastLane;
          unsigned Score =
              getLookAheadScore({OpLeft, LastLane}, {OpRight, Lane});
          if (Score > BestOp.Score) {
            BestOp.Idx = Idx;
            BestOp.Score = Score;
          }
          break;
        }
        case ReorderingMode::Splat:
          if (Op == OpLastLane)
            BestOp.Idx = Idx;
          break;
        case ReorderingMode::Failed:
          return None;
        }
      }

      if (BestOp.Idx) {
        getData(BestOp.Idx.getValue(), Lane).IsUsed = true;
        return BestOp.Idx;
      }
      // If we could not find a good match return None.
      return None;
    }

    /// Helper for reorderOperandVecs. \returns the lane that we should start
    /// reordering from. This is the one which has the least number of operands
    /// that can freely move about.
    unsigned getBestLaneToStartReordering() const {
      unsigned BestLane = 0;
      unsigned Min = UINT_MAX;
      for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
           ++Lane) {
        unsigned NumFreeOps = getMaxNumOperandsThatCanBeReordered(Lane);
        if (NumFreeOps < Min) {
          Min = NumFreeOps;
          BestLane = Lane;
        }
      }
      return BestLane;
    }

    /// \returns the maximum number of operands that are allowed to be reordered
    /// for \p Lane. This is used as a heuristic for selecting the first lane to
    /// start operand reordering.
    unsigned getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
      unsigned CntTrue = 0;
      unsigned NumOperands = getNumOperands();
      // Operands with the same APO can be reordered. We therefore need to count
      // how many of them we have for each APO, like this: Cnt[APO] = x.
      // Since we only have two APOs, namely true and false, we can avoid using
      // a map. Instead we can simply count the number of operands that
      // correspond to one of them (in this case the 'true' APO), and calculate
      // the other by subtracting it from the total number of operands.
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx)
        if (getData(OpIdx, Lane).APO)
          ++CntTrue;
      unsigned CntFalse = NumOperands - CntTrue;
      return std::max(CntTrue, CntFalse);
    }

    /// Go through the instructions in VL and append their operands.
    void appendOperandsOfVL(ArrayRef<Value *> VL) {
      assert(!VL.empty() && "Bad VL");
      assert((empty() || VL.size() == getNumLanes()) &&
             "Expected same number of lanes");
      assert(isa<Instruction>(VL[0]) && "Expected instruction");
      unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
      OpsVec.resize(NumOperands);
      unsigned NumLanes = VL.size();
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
        OpsVec[OpIdx].resize(NumLanes);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
          // Our tree has just 3 nodes: the root and two operands.
          // It is therefore trivial to get the APO. We only need to check the
          // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
          // RHS operand. The LHS operand of both add and sub is never attached
          // to an inverse operation in the linearized form, therefore its APO
          // is false. The RHS is true only if VL[Lane] is an inverse operation.

          // Since operand reordering is performed on groups of commutative
          // operations or alternating sequences (e.g., +, -), we can safely
          // tell the inverse operations by checking commutativity.
          bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
          bool APO = (OpIdx == 0) ? false : IsInverseOperation;
          OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
                                 APO, false};
        }
      }
    }

    /// \returns the number of operands.
    unsigned getNumOperands() const { return OpsVec.size(); }

    /// \returns the number of lanes.
    unsigned getNumLanes() const { return OpsVec[0].size(); }

    /// \returns the operand value at \p OpIdx and \p Lane.
    Value *getValue(unsigned OpIdx, unsigned Lane) const {
      return getData(OpIdx, Lane).V;
    }

    /// \returns true if the data structure is empty.
    bool empty() const { return OpsVec.empty(); }

    /// Clears the data.
    void clear() { OpsVec.clear(); }

    /// \returns true if there are enough operands identical to \p Op to fill
    /// the whole vector.
    /// Note: This modifies the 'IsUsed' flag, so a clearUsed() must follow.
    bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
      bool OpAPO = getData(OpIdx, Lane).APO;
      for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
        if (Ln == Lane)
          continue;
        // This is set to true if we found a candidate for broadcast at Lane.
        bool FoundCandidate = false;
        for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
          OperandData &Data = getData(OpI, Ln);
          if (Data.APO != OpAPO || Data.IsUsed)
            continue;
          if (Data.V == Op) {
            FoundCandidate = true;
            Data.IsUsed = true;
            break;
          }
        }
        if (!FoundCandidate)
          return false;
      }
      return true;
    }

  public:
    /// Initialize with all the operands of the instruction vector \p RootVL.
    VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
               ScalarEvolution &SE, const BoUpSLP &R)
        : DL(DL), SE(SE), R(R) {
      // Append all the operands of RootVL.
      appendOperandsOfVL(RootVL);
    }

    /// \returns a value vector with the operands across all lanes for the
    /// operand at \p OpIdx.
    ValueList getVL(unsigned OpIdx) const {
      ValueList OpVL(OpsVec[OpIdx].size());
      assert(OpsVec[OpIdx].size() == getNumLanes() &&
             "Expected same num of lanes across all operands");
      for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
        OpVL[Lane] = OpsVec[OpIdx][Lane].V;
      return OpVL;
    }

    // Performs operand reordering for 2 or more operands.
    // The original operands are in OrigOps[OpIdx][Lane].
    // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'.
    void reorder() {
      unsigned NumOperands = getNumOperands();
      unsigned NumLanes = getNumLanes();
      // Each operand has its own mode. We are using this mode to help us select
      // the instructions for each lane, so that they match best with the ones
      // we have selected so far.
      SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);

      // This is a greedy single-pass algorithm. We are going over each lane
      // once and deciding on the best order right away with no back-tracking.
      // However, in order to increase its effectiveness, we start with the lane
      // that has operands that can move the least. For example, given the
      // following lanes:
      //  Lane 0 : A[0] = B[0] + C[0]   // Visited 3rd
      //  Lane 1 : A[1] = C[1] - B[1]   // Visited 1st
      //  Lane 2 : A[2] = B[2] + C[2]   // Visited 2nd
      //  Lane 3 : A[3] = C[3] - B[3]   // Visited 4th
      // we will start at Lane 1, since the operands of the subtraction cannot
      // be reordered. Then we will visit the rest of the lanes in a circular
      // fashion. That is, Lane 2, then Lane 0, and finally Lane 3.

      // Find the first lane that we will start our search from.
      unsigned FirstLane = getBestLaneToStartReordering();

      // Initialize the modes.
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
        Value *OpLane0 = getValue(OpIdx, FirstLane);
        // Keep track if we have instructions with all the same opcode on one
        // side.
        if (isa<LoadInst>(OpLane0))
          ReorderingModes[OpIdx] = ReorderingMode::Load;
        else if (isa<Instruction>(OpLane0)) {
          // Check if OpLane0 should be broadcast.
          if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
            ReorderingModes[OpIdx] = ReorderingMode::Splat;
          else
            ReorderingModes[OpIdx] = ReorderingMode::Opcode;
        } else if (isa<Constant>(OpLane0))
          ReorderingModes[OpIdx] = ReorderingMode::Constant;
        else if (isa<Argument>(OpLane0))
          // Our best hope is a Splat. It may save some cost in some cases.
          ReorderingModes[OpIdx] = ReorderingMode::Splat;
        else
          // NOTE: This should be unreachable.
          ReorderingModes[OpIdx] = ReorderingMode::Failed;
      }

      // If the initial strategy fails for any of the operand indexes, then we
      // perform reordering again in a second pass. This helps avoid assigning
      // high priority to the failed strategy, and should improve reordering for
      // the non-failed operand indexes.
      for (int Pass = 0; Pass != 2; ++Pass) {
        // Skip the second pass if the first pass did not fail.
        bool StrategyFailed = false;
        // Mark all operand data as free to use.
        clearUsed();
        // We keep the original operand order for the FirstLane, so reorder the
        // rest of the lanes. We are visiting the nodes in a circular fashion,
        // using FirstLane as the center point and increasing the radius
        // distance.
        for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
          // Visit the lane on the right and then the lane on the left.
          for (int Direction : {+1, -1}) {
            int Lane = FirstLane + Direction * Distance;
            if (Lane < 0 || Lane >= (int)NumLanes)
              continue;
            int LastLane = Lane - Direction;
            assert(LastLane >= 0 && LastLane < (int)NumLanes &&
                   "Out of bounds");
            // Look for a good match for each operand.
            for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
              // Search for the operand that matches SortedOps[OpIdx][Lane-1].
              Optional<unsigned> BestIdx =
                  getBestOperand(OpIdx, Lane, LastLane, ReorderingModes);
              // By not selecting a value, we allow the operands that follow to
              // select a better matching value. We will get a non-null value in
              // the next run of getBestOperand().
              if (BestIdx) {
                // Swap the current operand with the one returned by
                // getBestOperand().
                swap(OpIdx, BestIdx.getValue(), Lane);
              } else {
                // We failed to find a best operand, set mode to 'Failed'.
                ReorderingModes[OpIdx] = ReorderingMode::Failed;
                // Enable the second pass.
1476 StrategyFailed = true;
1477 }
1478 }
1479 }
1480 }
1481 // Skip second pass if the strategy did not fail.
1482 if (!StrategyFailed)
1483 break;
1484 }
1485 }
1486
1487 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1488 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
1489 switch (RMode) {
1490 case ReorderingMode::Load:
1491 return "Load";
1492 case ReorderingMode::Opcode:
1493 return "Opcode";
1494 case ReorderingMode::Constant:
1495 return "Constant";
1496 case ReorderingMode::Splat:
1497 return "Splat";
1498 case ReorderingMode::Failed:
1499 return "Failed";
1500 }
1501 llvm_unreachable("Unimplemented Reordering Type");
1502 }
1503
1504 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
1505 raw_ostream &OS) {
1506 return OS << getModeStr(RMode);
1507 }
1508
1509 /// Debug print.
1510 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
1511 printMode(RMode, dbgs());
1512 }
1513
1514 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
1515 return printMode(RMode, OS);
1516 }
1517
1518 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
1519 const unsigned Indent = 2;
1520 unsigned Cnt = 0;
1521 for (const OperandDataVec &OpDataVec : OpsVec) {
1522 OS << "Operand " << Cnt++ << "\n";
1523 for (const OperandData &OpData : OpDataVec) {
1524 OS.indent(Indent) << "{";
1525 if (Value *V = OpData.V)
1526 OS << *V;
1527 else
1528 OS << "null";
1529 OS << ", APO:" << OpData.APO << "}\n";
1530 }
1531 OS << "\n";
1532 }
1533 return OS;
1534 }
1535
1536 /// Debug print.
1537 LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
1538 #endif
1539 };
1540
1541 /// Checks if the instruction is marked for deletion.
1542 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); }
1543
1544 /// Marks the operands of the given values for later deletion by replacing them with Undefs.
1545 void eraseInstructions(ArrayRef<Value *> AV);
1546
1547 ~BoUpSLP();
1548
1549 private:
1550 /// Checks if all users of \p I are part of the vectorization tree.
1551 bool areAllUsersVectorized(Instruction *I) const;
1552
1553 /// \returns the cost of the vectorizable entry.
1554 InstructionCost getEntryCost(const TreeEntry *E);
1555
1556 /// This is the recursive part of buildTree.
1557 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
1558 const EdgeInfo &EI);
1559
1560 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
1561 /// be vectorized to use the original vector (or aggregate "bitcast" to a
1562 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
1563 /// returns false, setting \p CurrentOrder to either an empty vector or a
1564 /// non-identity permutation that allows reusing extract instructions.
1565 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
1566 SmallVectorImpl<unsigned> &CurrentOrder) const;
1567
1568 /// Vectorize a single entry in the tree.
1569 Value *vectorizeTree(TreeEntry *E);
1570
1571 /// Vectorize a single entry in the tree, starting in \p VL.
1572 Value *vectorizeTree(ArrayRef<Value *> VL);
1573
1574 /// \returns the scalarization cost for this type. Scalarization in this
1575 /// context means the creation of vectors from a group of scalars.
1576 InstructionCost
1577 getGatherCost(FixedVectorType *Ty,
1578 const DenseSet<unsigned> &ShuffledIndices) const;
1579
1580 /// Checks if the gathered \p VL can be represented as shuffle(s) of previous
1581 /// tree entries.
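/// For example (illustrative), a gather of {c, d, e, f} could be expressed
/// as a single shuffle of two previously vectorized entries {a, b, c, d}
/// and {e, f, g, h}.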
1582 /// \returns ShuffleKind, if gathered values can be represented as shuffles of
1583 /// previous tree entries. \p Mask is filled with the shuffle mask.
1584 Optional<TargetTransformInfo::ShuffleKind>
1585 isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask,
1586 SmallVectorImpl<const TreeEntry *> &Entries);
1587
1588 /// \returns the scalarization cost for this list of values. Assuming that
1589 /// this subtree gets vectorized, we may need to extract the values from the
1590 /// roots. This method calculates the cost of extracting the values.
1591 InstructionCost getGatherCost(ArrayRef<Value *> VL) const;
1592
1593 /// Set the Builder insert point to one after the last instruction in
1594 /// the bundle.
1595 void setInsertPointAfterBundle(const TreeEntry *E);
1596
1597 /// \returns a vector from a collection of scalars in \p VL.
1598 Value *gather(ArrayRef<Value *> VL);
1599
1600 /// \returns whether the VectorizableTree is fully vectorizable and will
1601 /// be beneficial even if the tree height is tiny.
1602 bool isFullyVectorizableTinyTree() const;
1603
1604 /// Reorder commutative or alt operands to improve the probability of
1605 /// generating vectorized code.
1606 static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
1607 SmallVectorImpl<Value *> &Left,
1608 SmallVectorImpl<Value *> &Right,
1609 const DataLayout &DL,
1610 ScalarEvolution &SE,
1611 const BoUpSLP &R);
1612 struct TreeEntry {
1613 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
1614 TreeEntry(VecTreeTy &Container) : Container(Container) {}
1615
1616 /// \returns true if the scalars in VL are equal to this entry.
1617 bool isSame(ArrayRef<Value *> VL) const {
1618 if (VL.size() == Scalars.size())
1619 return std::equal(VL.begin(), VL.end(), Scalars.begin());
1620 return VL.size() == ReuseShuffleIndices.size() &&
1621 std::equal(
1622 VL.begin(), VL.end(), ReuseShuffleIndices.begin(),
1623 [this](Value *V, int Idx) { return V == Scalars[Idx]; });
1624 }
1625
1626 /// A vector of scalars.
1627 ValueList Scalars;
1628
1629 /// The Scalars are vectorized into this value. It is initialized to nullptr.
1630 Value *VectorizedValue = nullptr;
1631
1632 /// Do we need to gather this sequence or vectorize it
1633 /// (either with vector instruction or with scatter/gather
1634 /// intrinsics for store/load)?
1635 enum EntryState { Vectorize, ScatterVectorize, NeedToGather };
1636 EntryState State;
1637
1638 /// Does this sequence require some shuffling?
1639 SmallVector<int, 4> ReuseShuffleIndices;
1640
1641 /// Does this entry require reordering?
1642 SmallVector<unsigned, 4> ReorderIndices;
1643
1644 /// Points back to the VectorizableTree.
1645 ///
1646 /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has
1647 /// to be a pointer and needs to be able to initialize the child iterator.
1648 /// Thus we need a reference back to the container to translate the indices
1649 /// to entries.
1650 VecTreeTy &Container;
1651
1652 /// The TreeEntry index containing the user of this entry. We can actually
1653 /// have multiple users so the data structure is not truly a tree.
1654 SmallVector<EdgeInfo, 1> UserTreeIndices;
1655
1656 /// The index of this TreeEntry in VectorizableTree.
1657 int Idx = -1;
1658
1659 private:
1660 /// The operands of each instruction in each lane: Operands[op_index][lane].
1661 /// Note: This helps avoid the replication of the code that performs the
1662 /// reordering of operands during buildTree_rec() and vectorizeTree().
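/// For illustration, for a hypothetical two-lane bundle {a0 + b0, a1 + b1},
/// this would hold Operands[0] = {a0, a1} and Operands[1] = {b0, b1}.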
1663 SmallVector<ValueList, 2> Operands;
1664
1665 /// The main/alternate instruction.
1666 Instruction *MainOp = nullptr;
1667 Instruction *AltOp = nullptr;
1668
1669 public:
1670 /// Set this bundle's \p OpIdx'th operand to \p OpVL.
1671 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) {
1672 if (Operands.size() < OpIdx + 1)
1673 Operands.resize(OpIdx + 1);
1674 assert(Operands[OpIdx].empty() && "Already resized?");
1675 Operands[OpIdx].resize(Scalars.size());
1676 for (unsigned Lane = 0, E = Scalars.size(); Lane != E; ++Lane)
1677 Operands[OpIdx][Lane] = OpVL[Lane];
1678 }
1679
1680 /// Set the operands of this bundle in their original order.
1681 void setOperandsInOrder() {
1682 assert(Operands.empty() && "Already initialized?");
1683 auto *I0 = cast<Instruction>(Scalars[0]);
1684 Operands.resize(I0->getNumOperands());
1685 unsigned NumLanes = Scalars.size();
1686 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands();
1687 OpIdx != NumOperands; ++OpIdx) {
1688 Operands[OpIdx].resize(NumLanes);
1689 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1690 auto *I = cast<Instruction>(Scalars[Lane]);
1691 assert(I->getNumOperands() == NumOperands &&
1692 "Expected same number of operands");
1693 Operands[OpIdx][Lane] = I->getOperand(OpIdx);
1694 }
1695 }
1696 }
1697
1698 /// \returns the \p OpIdx operand of this TreeEntry.
1699 ValueList &getOperand(unsigned OpIdx) {
1700 assert(OpIdx < Operands.size() && "Out of bounds");
1701 return Operands[OpIdx];
1702 }
1703
1704 /// \returns the number of operands.
1705 unsigned getNumOperands() const { return Operands.size(); }
1706
1707 /// \returns the single \p OpIdx operand.
1708 Value *getSingleOperand(unsigned OpIdx) const {
1709 assert(OpIdx < Operands.size() && "Out of bounds");
1710 assert(!Operands[OpIdx].empty() && "No operand available");
1711 return Operands[OpIdx][0];
1712 }
1713
1714 /// Some of the instructions in the list have alternate opcodes.
1715 bool isAltShuffle() const {
1716 return getOpcode() != getAltOpcode();
1717 }
1718
1719 bool isOpcodeOrAlt(Instruction *I) const {
1720 unsigned CheckedOpcode = I->getOpcode();
1721 return (getOpcode() == CheckedOpcode ||
1722 getAltOpcode() == CheckedOpcode);
1723 }
1724
1725 /// Chooses the correct key for scheduling data. If \p Op has the same (or
1726 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is
1727 /// \p OpValue.
1728 Value *isOneOf(Value *Op) const {
1729 auto *I = dyn_cast<Instruction>(Op);
1730 if (I && isOpcodeOrAlt(I))
1731 return Op;
1732 return MainOp;
1733 }
1734
1735 void setOperations(const InstructionsState &S) {
1736 MainOp = S.MainOp;
1737 AltOp = S.AltOp;
1738 }
1739
1740 Instruction *getMainOp() const {
1741 return MainOp;
1742 }
1743
1744 Instruction *getAltOp() const {
1745 return AltOp;
1746 }
1747
1748 /// The main/alternate opcodes for the list of instructions.
1749 unsigned getOpcode() const {
1750 return MainOp ? MainOp->getOpcode() : 0;
1751 }
1752
1753 unsigned getAltOpcode() const {
1754 return AltOp ? AltOp->getOpcode() : 0;
1755 }
1756
1757 /// Update the operations state of this entry if a reorder occurred.
1758 bool updateStateIfReorder() {
1759 if (ReorderIndices.empty())
1760 return false;
1761 InstructionsState S = getSameOpcode(Scalars, ReorderIndices.front());
1762 setOperations(S);
1763 return true;
1764 }
1765
1766 #ifndef NDEBUG
1767 /// Debug printer.
1768 LLVM_DUMP_METHOD void dump() const { 1769 dbgs() << Idx << ".\n"; 1770 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) { 1771 dbgs() << "Operand " << OpI << ":\n"; 1772 for (const Value *V : Operands[OpI]) 1773 dbgs().indent(2) << *V << "\n"; 1774 } 1775 dbgs() << "Scalars: \n"; 1776 for (Value *V : Scalars) 1777 dbgs().indent(2) << *V << "\n"; 1778 dbgs() << "State: "; 1779 switch (State) { 1780 case Vectorize: 1781 dbgs() << "Vectorize\n"; 1782 break; 1783 case ScatterVectorize: 1784 dbgs() << "ScatterVectorize\n"; 1785 break; 1786 case NeedToGather: 1787 dbgs() << "NeedToGather\n"; 1788 break; 1789 } 1790 dbgs() << "MainOp: "; 1791 if (MainOp) 1792 dbgs() << *MainOp << "\n"; 1793 else 1794 dbgs() << "NULL\n"; 1795 dbgs() << "AltOp: "; 1796 if (AltOp) 1797 dbgs() << *AltOp << "\n"; 1798 else 1799 dbgs() << "NULL\n"; 1800 dbgs() << "VectorizedValue: "; 1801 if (VectorizedValue) 1802 dbgs() << *VectorizedValue << "\n"; 1803 else 1804 dbgs() << "NULL\n"; 1805 dbgs() << "ReuseShuffleIndices: "; 1806 if (ReuseShuffleIndices.empty()) 1807 dbgs() << "Empty"; 1808 else 1809 for (unsigned ReuseIdx : ReuseShuffleIndices) 1810 dbgs() << ReuseIdx << ", "; 1811 dbgs() << "\n"; 1812 dbgs() << "ReorderIndices: "; 1813 for (unsigned ReorderIdx : ReorderIndices) 1814 dbgs() << ReorderIdx << ", "; 1815 dbgs() << "\n"; 1816 dbgs() << "UserTreeIndices: "; 1817 for (const auto &EInfo : UserTreeIndices) 1818 dbgs() << EInfo << ", "; 1819 dbgs() << "\n"; 1820 } 1821 #endif 1822 }; 1823 1824 #ifndef NDEBUG 1825 void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost, 1826 InstructionCost VecCost, 1827 InstructionCost ScalarCost) const { 1828 dbgs() << "SLP: Calculated costs for Tree:\n"; E->dump(); 1829 dbgs() << "SLP: Costs:\n"; 1830 dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n"; 1831 dbgs() << "SLP: VectorCost = " << VecCost << "\n"; 1832 dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n"; 1833 dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = " << 1834 ReuseShuffleCost + VecCost - ScalarCost << "\n"; 1835 } 1836 #endif 1837 1838 /// Create a new VectorizableTree entry. 1839 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle, 1840 const InstructionsState &S, 1841 const EdgeInfo &UserTreeIdx, 1842 ArrayRef<unsigned> ReuseShuffleIndices = None, 1843 ArrayRef<unsigned> ReorderIndices = None) { 1844 TreeEntry::EntryState EntryState = 1845 Bundle ? 
TreeEntry::Vectorize : TreeEntry::NeedToGather; 1846 return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx, 1847 ReuseShuffleIndices, ReorderIndices); 1848 } 1849 1850 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, 1851 TreeEntry::EntryState EntryState, 1852 Optional<ScheduleData *> Bundle, 1853 const InstructionsState &S, 1854 const EdgeInfo &UserTreeIdx, 1855 ArrayRef<unsigned> ReuseShuffleIndices = None, 1856 ArrayRef<unsigned> ReorderIndices = None) { 1857 assert(((!Bundle && EntryState == TreeEntry::NeedToGather) || 1858 (Bundle && EntryState != TreeEntry::NeedToGather)) && 1859 "Need to vectorize gather entry?"); 1860 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree)); 1861 TreeEntry *Last = VectorizableTree.back().get(); 1862 Last->Idx = VectorizableTree.size() - 1; 1863 Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end()); 1864 Last->State = EntryState; 1865 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(), 1866 ReuseShuffleIndices.end()); 1867 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end()); 1868 Last->setOperations(S); 1869 if (Last->State != TreeEntry::NeedToGather) { 1870 for (Value *V : VL) { 1871 assert(!getTreeEntry(V) && "Scalar already in tree!"); 1872 ScalarToTreeEntry[V] = Last; 1873 } 1874 // Update the scheduler bundle to point to this TreeEntry. 1875 unsigned Lane = 0; 1876 for (ScheduleData *BundleMember = Bundle.getValue(); BundleMember; 1877 BundleMember = BundleMember->NextInBundle) { 1878 BundleMember->TE = Last; 1879 BundleMember->Lane = Lane; 1880 ++Lane; 1881 } 1882 assert((!Bundle.getValue() || Lane == VL.size()) && 1883 "Bundle and VL out of sync"); 1884 } else { 1885 MustGather.insert(VL.begin(), VL.end()); 1886 } 1887 1888 if (UserTreeIdx.UserTE) 1889 Last->UserTreeIndices.push_back(UserTreeIdx); 1890 1891 return Last; 1892 } 1893 1894 /// -- Vectorization State -- 1895 /// Holds all of the tree entries. 1896 TreeEntry::VecTreeTy VectorizableTree; 1897 1898 #ifndef NDEBUG 1899 /// Debug printer. 1900 LLVM_DUMP_METHOD void dumpVectorizableTree() const { 1901 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) { 1902 VectorizableTree[Id]->dump(); 1903 dbgs() << "\n"; 1904 } 1905 } 1906 #endif 1907 1908 TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); } 1909 1910 const TreeEntry *getTreeEntry(Value *V) const { 1911 return ScalarToTreeEntry.lookup(V); 1912 } 1913 1914 /// Maps a specific scalar to its tree entry. 1915 SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry; 1916 1917 /// Maps a value to the proposed vectorizable size. 1918 SmallDenseMap<Value *, unsigned> InstrElementSize; 1919 1920 /// A list of scalars that we found that we need to keep as scalars. 1921 ValueSet MustGather; 1922 1923 /// This POD struct describes one external user in the vectorized tree. 1924 struct ExternalUser { 1925 ExternalUser(Value *S, llvm::User *U, int L) 1926 : Scalar(S), User(U), Lane(L) {} 1927 1928 // Which scalar in our function. 1929 Value *Scalar; 1930 1931 // Which user that uses the scalar. 1932 llvm::User *User; 1933 1934 // Which lane does the scalar belong to. 1935 int Lane; 1936 }; 1937 using UserList = SmallVector<ExternalUser, 16>; 1938 1939 /// Checks if two instructions may access the same memory. 1940 /// 1941 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it 1942 /// is invariant in the calling loop. 
1943 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
1944 Instruction *Inst2) {
1945 // First check if the result is already in the cache.
1946 AliasCacheKey key = std::make_pair(Inst1, Inst2);
1947 Optional<bool> &result = AliasCache[key];
1948 if (result.hasValue()) {
1949 return result.getValue();
1950 }
1951 MemoryLocation Loc2 = getLocation(Inst2, AA);
1952 bool aliased = true;
1953 if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
1954 // Do the alias check.
1955 aliased = !AA->isNoAlias(Loc1, Loc2);
1956 }
1957 // Store the result in the cache.
1958 result = aliased;
1959 return aliased;
1960 }
1961
1962 using AliasCacheKey = std::pair<Instruction *, Instruction *>;
1963
1964 /// Cache for alias results.
1965 /// TODO: consider moving this to the AliasAnalysis itself.
1966 DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
1967
1968 /// Removes an instruction from its block and eventually deletes it.
1969 /// It's like Instruction::eraseFromParent() except that the actual deletion
1970 /// is delayed until BoUpSLP is destructed.
1971 /// This is required to ensure that there are no incorrect collisions in the
1972 /// AliasCache, which can happen if a new instruction is allocated at the
1973 /// same address as a previously deleted instruction.
1974 void eraseInstruction(Instruction *I, bool ReplaceOpsWithUndef = false) {
1975 auto It = DeletedInstructions.try_emplace(I, ReplaceOpsWithUndef).first;
1976 It->getSecond() = It->getSecond() && ReplaceOpsWithUndef;
1977 }
1978
1979 /// Temporary store for deleted instructions. Instructions will be deleted
1980 /// eventually when the BoUpSLP is destructed.
1981 DenseMap<Instruction *, bool> DeletedInstructions;
1982
1983 /// A list of values that need to be extracted out of the tree.
1984 /// This list holds pairs of (Internal Scalar : External User). External User
1985 /// can be nullptr, which means that this Internal Scalar will be used later,
1986 /// after vectorization.
1987 UserList ExternalUses;
1988
1989 /// Values used only by @llvm.assume calls.
1990 SmallPtrSet<const Value *, 32> EphValues;
1991
1992 /// Holds all of the instructions that we gathered.
1993 SetVector<Instruction *> GatherSeq;
1994
1995 /// A list of blocks that we are going to CSE.
1996 SetVector<BasicBlock *> CSEBlocks;
1997
1998 /// Contains all scheduling-relevant data for an instruction.
1999 /// A ScheduleData either represents a single instruction or a member of an
2000 /// instruction bundle (= a group of instructions which is combined into a
2001 /// vector instruction).
2002 struct ScheduleData {
2003 // The initial value for the dependency counters. It means that the
2004 // dependencies are not calculated yet.
2005 enum { InvalidDeps = -1 };
2006
2007 ScheduleData() = default;
2008
2009 void init(int BlockSchedulingRegionID, Value *OpVal) {
2010 FirstInBundle = this;
2011 NextInBundle = nullptr;
2012 NextLoadStore = nullptr;
2013 IsScheduled = false;
2014 SchedulingRegionID = BlockSchedulingRegionID;
2015 UnscheduledDepsInBundle = UnscheduledDeps;
2016 clearDependencies();
2017 OpValue = OpVal;
2018 TE = nullptr;
2019 Lane = -1;
2020 }
2021
2022 /// Returns true if the dependency information has been calculated.
2023 bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
2024
2025 /// Returns true for single instructions and for bundle representatives
2026 /// (= the head of a bundle).
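/// For example, in a hypothetical bundle [A; B; C], only A (the
/// FirstInBundle head) is a scheduling entity; B and C are reached via the
/// NextInBundle chain.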
2027 bool isSchedulingEntity() const { return FirstInBundle == this; }
2028
2029 /// Returns true if it represents an instruction bundle and not only a
2030 /// single instruction.
2031 bool isPartOfBundle() const {
2032 return NextInBundle != nullptr || FirstInBundle != this;
2033 }
2034
2035 /// Returns true if it is ready for scheduling, i.e. it has no more
2036 /// unscheduled dependent instructions/bundles.
2037 bool isReady() const {
2038 assert(isSchedulingEntity() &&
2039 "can't consider non-scheduling entity for ready list");
2040 return UnscheduledDepsInBundle == 0 && !IsScheduled;
2041 }
2042
2043 /// Modifies the number of unscheduled dependencies, also updating it for
2044 /// the whole bundle.
2045 int incrementUnscheduledDeps(int Incr) {
2046 UnscheduledDeps += Incr;
2047 return FirstInBundle->UnscheduledDepsInBundle += Incr;
2048 }
2049
2050 /// Sets the number of unscheduled dependencies to the number of
2051 /// dependencies.
2052 void resetUnscheduledDeps() {
2053 incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
2054 }
2055
2056 /// Clears all dependency information.
2057 void clearDependencies() {
2058 Dependencies = InvalidDeps;
2059 resetUnscheduledDeps();
2060 MemoryDependencies.clear();
2061 }
2062
2063 void dump(raw_ostream &os) const {
2064 if (!isSchedulingEntity()) {
2065 os << "/ " << *Inst;
2066 } else if (NextInBundle) {
2067 os << '[' << *Inst;
2068 ScheduleData *SD = NextInBundle;
2069 while (SD) {
2070 os << ';' << *SD->Inst;
2071 SD = SD->NextInBundle;
2072 }
2073 os << ']';
2074 } else {
2075 os << *Inst;
2076 }
2077 }
2078
2079 Instruction *Inst = nullptr;
2080
2081 /// Points to the head in an instruction bundle (and always to this for
2082 /// single instructions).
2083 ScheduleData *FirstInBundle = nullptr;
2084
2085 /// Singly linked list of all instructions in a bundle. Null if it is a
2086 /// single instruction.
2087 ScheduleData *NextInBundle = nullptr;
2088
2089 /// Singly linked list of all memory instructions (e.g. load, store, call)
2090 /// in the block - until the end of the scheduling region.
2091 ScheduleData *NextLoadStore = nullptr;
2092
2093 /// The dependent memory instructions.
2094 /// This list is derived on demand in calculateDependencies().
2095 SmallVector<ScheduleData *, 4> MemoryDependencies;
2096
2097 /// This ScheduleData is in the current scheduling region if this matches
2098 /// the current SchedulingRegionID of BlockScheduling.
2099 int SchedulingRegionID = 0;
2100
2101 /// Used for getting a "good" final ordering of instructions.
2102 int SchedulingPriority = 0;
2103
2104 /// The number of dependencies. This is the number of users of the
2105 /// instruction plus the number of dependent memory instructions (if any).
2106 /// This value is calculated on demand.
2107 /// If InvalidDeps, the number of dependencies is not calculated yet.
2108 int Dependencies = InvalidDeps;
2109
2110 /// The number of dependencies minus the number of dependencies of scheduled
2111 /// instructions. As soon as this is zero, the instruction/bundle gets ready
2112 /// for scheduling.
2113 /// Note that this is negative as long as Dependencies is not calculated.
2114 int UnscheduledDeps = InvalidDeps;
2115
2116 /// The sum of UnscheduledDeps in a bundle. Equal to UnscheduledDeps for
2117 /// single instructions.
2118 int UnscheduledDepsInBundle = InvalidDeps;
2119
2120 /// True if this instruction is scheduled (or considered as scheduled in the
2121 /// dry-run).
2122 bool IsScheduled = false;
2123
2124 /// The value (typically the instruction itself) used as the key for this
/// schedule data.
2125 Value *OpValue = nullptr;
2126
2127 /// The TreeEntry that this instruction corresponds to.
2128 TreeEntry *TE = nullptr;
2129
2130 /// The lane of this node in the TreeEntry.
2131 int Lane = -1;
2132 };
2133
2134 #ifndef NDEBUG
2135 friend inline raw_ostream &operator<<(raw_ostream &os,
2136 const BoUpSLP::ScheduleData &SD) {
2137 SD.dump(os);
2138 return os;
2139 }
2140 #endif
2141
2142 friend struct GraphTraits<BoUpSLP *>;
2143 friend struct DOTGraphTraits<BoUpSLP *>;
2144
2145 /// Contains all scheduling data for a basic block.
2146 struct BlockScheduling {
2147 BlockScheduling(BasicBlock *BB)
2148 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}
2149
2150 void clear() {
2151 ReadyInsts.clear();
2152 ScheduleStart = nullptr;
2153 ScheduleEnd = nullptr;
2154 FirstLoadStoreInRegion = nullptr;
2155 LastLoadStoreInRegion = nullptr;
2156
2157 // Reduce the maximum schedule region size by the size of the
2158 // previous scheduling run.
2159 ScheduleRegionSizeLimit -= ScheduleRegionSize;
2160 if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
2161 ScheduleRegionSizeLimit = MinScheduleRegionSize;
2162 ScheduleRegionSize = 0;
2163
2164 // Make a new scheduling region, i.e. all existing ScheduleData is not
2165 // in the new region yet.
2166 ++SchedulingRegionID;
2167 }
2168
2169 ScheduleData *getScheduleData(Value *V) {
2170 ScheduleData *SD = ScheduleDataMap[V];
2171 if (SD && SD->SchedulingRegionID == SchedulingRegionID)
2172 return SD;
2173 return nullptr;
2174 }
2175
2176 ScheduleData *getScheduleData(Value *V, Value *Key) {
2177 if (V == Key)
2178 return getScheduleData(V);
2179 auto I = ExtraScheduleDataMap.find(V);
2180 if (I != ExtraScheduleDataMap.end()) {
2181 ScheduleData *SD = I->second[Key];
2182 if (SD && SD->SchedulingRegionID == SchedulingRegionID)
2183 return SD;
2184 }
2185 return nullptr;
2186 }
2187
2188 bool isInSchedulingRegion(ScheduleData *SD) const {
2189 return SD->SchedulingRegionID == SchedulingRegionID;
2190 }
2191
2192 /// Marks an instruction as scheduled and puts all dependent ready
2193 /// instructions into the ready-list.
2194 template <typename ReadyListType>
2195 void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
2196 SD->IsScheduled = true;
2197 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");
2198
2199 ScheduleData *BundleMember = SD;
2200 while (BundleMember) {
2201 if (BundleMember->Inst != BundleMember->OpValue) {
2202 BundleMember = BundleMember->NextInBundle;
2203 continue;
2204 }
2205 // Handle the def-use chain dependencies.
2206
2207 // Decrement the unscheduled counter and insert to ready list if ready.
2208 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) {
2209 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
2210 if (OpDef && OpDef->hasValidDependencies() &&
2211 OpDef->incrementUnscheduledDeps(-1) == 0) {
2212 // There are no more unscheduled dependencies after
2213 // decrementing, so we can put the dependent instruction
2214 // into the ready list.
2215 ScheduleData *DepBundle = OpDef->FirstInBundle;
2216 assert(!DepBundle->IsScheduled &&
2217 "already scheduled bundle gets ready");
2218 ReadyList.insert(DepBundle);
2219 LLVM_DEBUG(dbgs()
2220 << "SLP: gets ready (def): " << *DepBundle << "\n");
2221 }
2222 });
2223 };
2224
2225 // If BundleMember is a vector bundle, its operands may have been
2226 // reordered during buildTree(). We therefore need to get its operands
2227 // through the TreeEntry.
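// For example (hypothetical), if lane 1 of the bundle {a0 + b0, b1 + a1}
// was commuted during operand reordering, TE->getOperand(0) holds
// {a0, a1} rather than the textual first operands {a0, b1}, and those are
// the dependencies we must decrement.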
2228 if (TreeEntry *TE = BundleMember->TE) {
2229 int Lane = BundleMember->Lane;
2230 assert(Lane >= 0 && "Lane not set");
2231
2232 // Since the vectorization tree is built recursively, this assertion
2233 // ensures that the tree entry has all operands set before reaching
2234 // this code. A couple of exceptions known at the moment are extracts,
2235 // whose second (immediate) operand is not added. Since
2236 // immediates do not affect scheduler behavior, this is considered
2237 // okay.
2238 auto *In = TE->getMainOp();
2239 assert(In &&
2240 (isa<ExtractValueInst>(In) || isa<ExtractElementInst>(In) ||
2241 isa<InsertElementInst>(In) ||
2242 In->getNumOperands() == TE->getNumOperands()) &&
2243 "Missed TreeEntry operands?");
2244 (void)In; // fake use to avoid build failure when assertions are disabled
2245
2246 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands();
2247 OpIdx != NumOperands; ++OpIdx)
2248 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane]))
2249 DecrUnsched(I);
2250 } else {
2251 // If BundleMember is a stand-alone instruction, no operand reordering
2252 // has taken place, so we directly access its operands.
2253 for (Use &U : BundleMember->Inst->operands())
2254 if (auto *I = dyn_cast<Instruction>(U.get()))
2255 DecrUnsched(I);
2256 }
2257 // Handle the memory dependencies.
2258 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
2259 if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
2260 // There are no more unscheduled dependencies after decrementing,
2261 // so we can put the dependent instruction into the ready list.
2262 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
2263 assert(!DepBundle->IsScheduled &&
2264 "already scheduled bundle gets ready");
2265 ReadyList.insert(DepBundle);
2266 LLVM_DEBUG(dbgs()
2267 << "SLP: gets ready (mem): " << *DepBundle << "\n");
2268 }
2269 }
2270 BundleMember = BundleMember->NextInBundle;
2271 }
2272 }
2273
2274 void doForAllOpcodes(Value *V,
2275 function_ref<void(ScheduleData *SD)> Action) {
2276 if (ScheduleData *SD = getScheduleData(V))
2277 Action(SD);
2278 auto I = ExtraScheduleDataMap.find(V);
2279 if (I != ExtraScheduleDataMap.end())
2280 for (auto &P : I->second)
2281 if (P.second->SchedulingRegionID == SchedulingRegionID)
2282 Action(P.second);
2283 }
2284
2285 /// Put all instructions into the ReadyList which are ready for scheduling.
2286 template <typename ReadyListType>
2287 void initialFillReadyList(ReadyListType &ReadyList) {
2288 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
2289 doForAllOpcodes(I, [&](ScheduleData *SD) {
2290 if (SD->isSchedulingEntity() && SD->isReady()) {
2291 ReadyList.insert(SD);
2292 LLVM_DEBUG(dbgs()
2293 << "SLP: initially in ready list: " << *I << "\n");
2294 }
2295 });
2296 }
2297 }
2298
2299 /// Checks if a bundle of instructions can be scheduled, i.e. has no
2300 /// cyclic dependencies. This is only a dry run; no instructions are
2301 /// actually moved at this stage.
2302 /// \returns the scheduling bundle. The returned Optional value is non-None
2303 /// if \p VL is allowed to be scheduled.
2304 Optional<ScheduleData *>
2305 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
2306 const InstructionsState &S);
2307
2308 /// Un-bundles a group of instructions.
2309 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);
2310
2311 /// Allocates a schedule data chunk.
2312 ScheduleData *allocateScheduleDataChunks();
2313
2314 /// Extends the scheduling region so that V is inside the region.
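/// For example (hypothetical), if the region currently spans [I4, I9) and
/// \p V is I2, ScheduleStart is moved up to I2 and ScheduleData is
/// initialized for the newly covered instructions.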
2315 /// \returns true if the region size is within the limit.
2316 bool extendSchedulingRegion(Value *V, const InstructionsState &S);
2317
2318 /// Initialize the ScheduleData structures for new instructions in the
2319 /// scheduling region.
2320 void initScheduleData(Instruction *FromI, Instruction *ToI,
2321 ScheduleData *PrevLoadStore,
2322 ScheduleData *NextLoadStore);
2323
2324 /// Updates the dependency information of a bundle and of all instructions/
2325 /// bundles which depend on the original bundle.
2326 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
2327 BoUpSLP *SLP);
2328
2329 /// Sets all instructions in the scheduling region to un-scheduled.
2330 void resetSchedule();
2331
2332 BasicBlock *BB;
2333
2334 /// Simple memory allocation for ScheduleData.
2335 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
2336
2337 /// The size of a ScheduleData array in ScheduleDataChunks.
2338 int ChunkSize;
2339
2340 /// The allocator position in the current chunk, which is the last entry
2341 /// of ScheduleDataChunks.
2342 int ChunkPos;
2343
2344 /// Attaches ScheduleData to Instruction.
2345 /// Note that the mapping survives during all vectorization iterations, i.e.
2346 /// ScheduleData structures are recycled.
2347 DenseMap<Value *, ScheduleData *> ScheduleDataMap;
2348
2349 /// Attaches ScheduleData to Instruction with the leading key.
2350 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>>
2351 ExtraScheduleDataMap;
2352
2353 struct ReadyList : SmallVector<ScheduleData *, 8> {
2354 void insert(ScheduleData *SD) { push_back(SD); }
2355 };
2356
2357 /// The ready-list for scheduling (only used for the dry-run).
2358 ReadyList ReadyInsts;
2359
2360 /// The first instruction of the scheduling region.
2361 Instruction *ScheduleStart = nullptr;
2362
2363 /// The first instruction _after_ the scheduling region.
2364 Instruction *ScheduleEnd = nullptr;
2365
2366 /// The first memory accessing instruction in the scheduling region
2367 /// (can be null).
2368 ScheduleData *FirstLoadStoreInRegion = nullptr;
2369
2370 /// The last memory accessing instruction in the scheduling region
2371 /// (can be null).
2372 ScheduleData *LastLoadStoreInRegion = nullptr;
2373
2374 /// The current size of the scheduling region.
2375 int ScheduleRegionSize = 0;
2376
2377 /// The maximum size allowed for the scheduling region.
2378 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget;
2379
2380 /// The ID of the scheduling region. For a new vectorization iteration this
2381 /// is incremented, which "removes" all ScheduleData from the region.
2382 // Make sure that the initial SchedulingRegionID is greater than the
2383 // initial SchedulingRegionID in ScheduleData (which is 0).
2384 int SchedulingRegionID = 1;
2385 };
2386
2387 /// Attaches the BlockScheduling structures to basic blocks.
2388 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
2389
2390 /// Performs the "real" scheduling. Done before vectorization is actually
2391 /// performed in a basic block.
2392 void scheduleBlock(BlockScheduling *BS);
2393
2394 /// List of users to ignore during scheduling and that don't need extracting.
2395 ArrayRef<Value *> UserIgnoreList;
2396
2397 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of
2398 /// sorted SmallVectors of unsigned.
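/// The empty and tombstone keys below are one-element orders holding ~1U
/// and ~2U; real orders only contain small lane indices, so these sentinels
/// cannot collide with actual keys.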
2399 struct OrdersTypeDenseMapInfo { 2400 static OrdersType getEmptyKey() { 2401 OrdersType V; 2402 V.push_back(~1U); 2403 return V; 2404 } 2405 2406 static OrdersType getTombstoneKey() { 2407 OrdersType V; 2408 V.push_back(~2U); 2409 return V; 2410 } 2411 2412 static unsigned getHashValue(const OrdersType &V) { 2413 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); 2414 } 2415 2416 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) { 2417 return LHS == RHS; 2418 } 2419 }; 2420 2421 /// Contains orders of operations along with the number of bundles that have 2422 /// operations in this order. It stores only those orders that require 2423 /// reordering, if reordering is not required it is counted using \a 2424 /// NumOpsWantToKeepOriginalOrder. 2425 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo> NumOpsWantToKeepOrder; 2426 /// Number of bundles that do not require reordering. 2427 unsigned NumOpsWantToKeepOriginalOrder = 0; 2428 2429 // Analysis and block reference. 2430 Function *F; 2431 ScalarEvolution *SE; 2432 TargetTransformInfo *TTI; 2433 TargetLibraryInfo *TLI; 2434 AAResults *AA; 2435 LoopInfo *LI; 2436 DominatorTree *DT; 2437 AssumptionCache *AC; 2438 DemandedBits *DB; 2439 const DataLayout *DL; 2440 OptimizationRemarkEmitter *ORE; 2441 2442 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. 2443 unsigned MinVecRegSize; // Set by cl::opt (default: 128). 2444 2445 /// Instruction builder to construct the vectorized tree. 2446 IRBuilder<> Builder; 2447 2448 /// A map of scalar integer values to the smallest bit width with which they 2449 /// can legally be represented. The values map to (width, signed) pairs, 2450 /// where "width" indicates the minimum bit width and "signed" is True if the 2451 /// value must be signed-extended, rather than zero-extended, back to its 2452 /// original width. 2453 MapVector<Value *, std::pair<uint64_t, bool>> MinBWs; 2454 }; 2455 2456 } // end namespace slpvectorizer 2457 2458 template <> struct GraphTraits<BoUpSLP *> { 2459 using TreeEntry = BoUpSLP::TreeEntry; 2460 2461 /// NodeRef has to be a pointer per the GraphWriter. 2462 using NodeRef = TreeEntry *; 2463 2464 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy; 2465 2466 /// Add the VectorizableTree to the index iterator to be able to return 2467 /// TreeEntry pointers. 2468 struct ChildIteratorType 2469 : public iterator_adaptor_base< 2470 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> { 2471 ContainerTy &VectorizableTree; 2472 2473 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W, 2474 ContainerTy &VT) 2475 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {} 2476 2477 NodeRef operator*() { return I->UserTE; } 2478 }; 2479 2480 static NodeRef getEntryNode(BoUpSLP &R) { 2481 return R.VectorizableTree[0].get(); 2482 } 2483 2484 static ChildIteratorType child_begin(NodeRef N) { 2485 return {N->UserTreeIndices.begin(), N->Container}; 2486 } 2487 2488 static ChildIteratorType child_end(NodeRef N) { 2489 return {N->UserTreeIndices.end(), N->Container}; 2490 } 2491 2492 /// For the node iterator we just need to turn the TreeEntry iterator into a 2493 /// TreeEntry* iterator so that it dereferences to NodeRef. 
2494 class nodes_iterator {
2495 using ItTy = ContainerTy::iterator;
2496 ItTy It;
2497
2498 public:
2499 nodes_iterator(const ItTy &It2) : It(It2) {}
2500 NodeRef operator*() { return It->get(); }
2501 nodes_iterator operator++() {
2502 ++It;
2503 return *this;
2504 }
2505 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; }
2506 };
2507
2508 static nodes_iterator nodes_begin(BoUpSLP *R) {
2509 return nodes_iterator(R->VectorizableTree.begin());
2510 }
2511
2512 static nodes_iterator nodes_end(BoUpSLP *R) {
2513 return nodes_iterator(R->VectorizableTree.end());
2514 }
2515
2516 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
2517 };
2518
2519 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
2520 using TreeEntry = BoUpSLP::TreeEntry;
2521
2522 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
2523
2524 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
2525 std::string Str;
2526 raw_string_ostream OS(Str);
2527 if (isSplat(Entry->Scalars)) {
2528 OS << "<splat> " << *Entry->Scalars[0];
2529 return Str;
2530 }
2531 for (auto *V : Entry->Scalars) {
2532 OS << *V;
2533 if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) {
2534 return EU.Scalar == V;
2535 }))
2536 OS << " <extract>";
2537 OS << "\n";
2538 }
2539 return Str;
2540 }
2541
2542 static std::string getNodeAttributes(const TreeEntry *Entry,
2543 const BoUpSLP *) {
2544 if (Entry->State == TreeEntry::NeedToGather)
2545 return "color=red";
2546 return "";
2547 }
2548 };
2549
2550 } // end namespace llvm
2551
2552 BoUpSLP::~BoUpSLP() {
2553 for (const auto &Pair : DeletedInstructions) {
2554 // Replace operands of ignored instructions with Undefs if they were
2555 // marked for deletion.
2556 if (Pair.getSecond()) {
2557 Value *Undef = UndefValue::get(Pair.getFirst()->getType());
2558 Pair.getFirst()->replaceAllUsesWith(Undef);
2559 }
2560 Pair.getFirst()->dropAllReferences();
2561 }
2562 for (const auto &Pair : DeletedInstructions) {
2563 assert(Pair.getFirst()->use_empty() &&
2564 "trying to erase instruction with users.");
2565 Pair.getFirst()->eraseFromParent();
2566 }
2567 #ifdef EXPENSIVE_CHECKS
2568 // If we could guarantee that this call is not extremely slow, we could
2569 // remove the ifdef limitation (see PR47712).
2570 assert(!verifyFunction(*F, &dbgs())); 2571 #endif 2572 } 2573 2574 void BoUpSLP::eraseInstructions(ArrayRef<Value *> AV) { 2575 for (auto *V : AV) { 2576 if (auto *I = dyn_cast<Instruction>(V)) 2577 eraseInstruction(I, /*ReplaceOpsWithUndef=*/true); 2578 }; 2579 } 2580 2581 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 2582 ArrayRef<Value *> UserIgnoreLst) { 2583 ExtraValueToDebugLocsMap ExternallyUsedValues; 2584 buildTree(Roots, ExternallyUsedValues, UserIgnoreLst); 2585 } 2586 2587 static int findLaneForValue(ArrayRef<Value *> Scalars, 2588 ArrayRef<int> ReuseShuffleIndices, Value *V) { 2589 unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V)); 2590 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 2591 if (!ReuseShuffleIndices.empty()) { 2592 FoundLane = std::distance(ReuseShuffleIndices.begin(), 2593 find(ReuseShuffleIndices, FoundLane)); 2594 } 2595 return FoundLane; 2596 } 2597 2598 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 2599 ExtraValueToDebugLocsMap &ExternallyUsedValues, 2600 ArrayRef<Value *> UserIgnoreLst) { 2601 deleteTree(); 2602 UserIgnoreList = UserIgnoreLst; 2603 if (!allSameType(Roots)) 2604 return; 2605 buildTree_rec(Roots, 0, EdgeInfo()); 2606 2607 // Collect the values that we need to extract from the tree. 2608 for (auto &TEPtr : VectorizableTree) { 2609 TreeEntry *Entry = TEPtr.get(); 2610 2611 // No need to handle users of gathered values. 2612 if (Entry->State == TreeEntry::NeedToGather) 2613 continue; 2614 2615 // For each lane: 2616 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 2617 Value *Scalar = Entry->Scalars[Lane]; 2618 int FoundLane = 2619 findLaneForValue(Entry->Scalars, Entry->ReuseShuffleIndices, Scalar); 2620 2621 // Check if the scalar is externally used as an extra arg. 2622 auto ExtI = ExternallyUsedValues.find(Scalar); 2623 if (ExtI != ExternallyUsedValues.end()) { 2624 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " 2625 << Lane << " from " << *Scalar << ".\n"); 2626 ExternalUses.emplace_back(Scalar, nullptr, FoundLane); 2627 } 2628 for (User *U : Scalar->users()) { 2629 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); 2630 2631 Instruction *UserInst = dyn_cast<Instruction>(U); 2632 if (!UserInst) 2633 continue; 2634 2635 // Skip in-tree scalars that become vectors 2636 if (TreeEntry *UseEntry = getTreeEntry(U)) { 2637 Value *UseScalar = UseEntry->Scalars[0]; 2638 // Some in-tree scalars will remain as scalar in vectorized 2639 // instructions. If that is the case, the one in Lane 0 will 2640 // be used. 2641 if (UseScalar != U || 2642 UseEntry->State == TreeEntry::ScatterVectorize || 2643 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) { 2644 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U 2645 << ".\n"); 2646 assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state"); 2647 continue; 2648 } 2649 } 2650 2651 // Ignore users in the user ignore list. 
2652 if (is_contained(UserIgnoreList, UserInst)) 2653 continue; 2654 2655 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " 2656 << Lane << " from " << *Scalar << ".\n"); 2657 ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane)); 2658 } 2659 } 2660 } 2661 } 2662 2663 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, 2664 const EdgeInfo &UserTreeIdx) { 2665 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!"); 2666 2667 InstructionsState S = getSameOpcode(VL); 2668 if (Depth == RecursionMaxDepth) { 2669 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); 2670 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2671 return; 2672 } 2673 2674 // Don't handle vectors. 2675 if (S.OpValue->getType()->isVectorTy() && 2676 !isa<InsertElementInst>(S.OpValue)) { 2677 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); 2678 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2679 return; 2680 } 2681 2682 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 2683 if (SI->getValueOperand()->getType()->isVectorTy()) { 2684 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); 2685 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2686 return; 2687 } 2688 2689 // If all of the operands are identical or constant we have a simple solution. 2690 if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode()) { 2691 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n"); 2692 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2693 return; 2694 } 2695 2696 // We now know that this is a vector of instructions of the same type from 2697 // the same block. 2698 2699 // Don't vectorize ephemeral values. 2700 for (Value *V : VL) { 2701 if (EphValues.count(V)) { 2702 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 2703 << ") is ephemeral.\n"); 2704 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2705 return; 2706 } 2707 } 2708 2709 // Check if this is a duplicate of another entry. 2710 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 2711 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); 2712 if (!E->isSame(VL)) { 2713 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); 2714 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2715 return; 2716 } 2717 // Record the reuse of the tree node. FIXME, currently this is only used to 2718 // properly draw the graph rather than for the actual vectorization. 2719 E->UserTreeIndices.push_back(UserTreeIdx); 2720 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue 2721 << ".\n"); 2722 return; 2723 } 2724 2725 // Check that none of the instructions in the bundle are already in the tree. 2726 for (Value *V : VL) { 2727 auto *I = dyn_cast<Instruction>(V); 2728 if (!I) 2729 continue; 2730 if (getTreeEntry(I)) { 2731 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 2732 << ") is already in tree.\n"); 2733 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2734 return; 2735 } 2736 } 2737 2738 // If any of the scalars is marked as a value that needs to stay scalar, then 2739 // we need to gather the scalars. 2740 // The reduction nodes (stored in UserIgnoreList) also should stay scalar. 
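// For example (hypothetical), the intermediate add instructions of a
// horizontal reduction are placed in UserIgnoreList by the reduction
// vectorizer; a bundle that contains one of them is gathered here instead.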
2741 for (Value *V : VL) { 2742 if (MustGather.count(V) || is_contained(UserIgnoreList, V)) { 2743 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); 2744 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2745 return; 2746 } 2747 } 2748 2749 // Check that all of the users of the scalars that we want to vectorize are 2750 // schedulable. 2751 auto *VL0 = cast<Instruction>(S.OpValue); 2752 BasicBlock *BB = VL0->getParent(); 2753 2754 if (!DT->isReachableFromEntry(BB)) { 2755 // Don't go into unreachable blocks. They may contain instructions with 2756 // dependency cycles which confuse the final scheduling. 2757 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); 2758 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2759 return; 2760 } 2761 2762 // Check that every instruction appears once in this bundle. 2763 SmallVector<unsigned, 4> ReuseShuffleIndicies; 2764 SmallVector<Value *, 4> UniqueValues; 2765 DenseMap<Value *, unsigned> UniquePositions; 2766 for (Value *V : VL) { 2767 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 2768 ReuseShuffleIndicies.emplace_back(Res.first->second); 2769 if (Res.second) 2770 UniqueValues.emplace_back(V); 2771 } 2772 size_t NumUniqueScalarValues = UniqueValues.size(); 2773 if (NumUniqueScalarValues == VL.size()) { 2774 ReuseShuffleIndicies.clear(); 2775 } else { 2776 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); 2777 if (NumUniqueScalarValues <= 1 || 2778 !llvm::isPowerOf2_32(NumUniqueScalarValues)) { 2779 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 2780 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 2781 return; 2782 } 2783 VL = UniqueValues; 2784 } 2785 2786 auto &BSRef = BlocksSchedules[BB]; 2787 if (!BSRef) 2788 BSRef = std::make_unique<BlockScheduling>(BB); 2789 2790 BlockScheduling &BS = *BSRef.get(); 2791 2792 Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S); 2793 if (!Bundle) { 2794 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); 2795 assert((!BS.getScheduleData(VL0) || 2796 !BS.getScheduleData(VL0)->isPartOfBundle()) && 2797 "tryScheduleBundle should cancelScheduling on failure"); 2798 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 2799 ReuseShuffleIndicies); 2800 return; 2801 } 2802 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 2803 2804 unsigned ShuffleOrOp = S.isAltShuffle() ? 2805 (unsigned) Instruction::ShuffleVector : S.getOpcode(); 2806 switch (ShuffleOrOp) { 2807 case Instruction::PHI: { 2808 auto *PH = cast<PHINode>(VL0); 2809 2810 // Check for terminator values (e.g. invoke). 2811 for (Value *V : VL) 2812 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { 2813 Instruction *Term = dyn_cast<Instruction>( 2814 cast<PHINode>(V)->getIncomingValueForBlock( 2815 PH->getIncomingBlock(I))); 2816 if (Term && Term->isTerminator()) { 2817 LLVM_DEBUG(dbgs() 2818 << "SLP: Need to swizzle PHINodes (terminator use).\n"); 2819 BS.cancelScheduling(VL, VL0); 2820 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 2821 ReuseShuffleIndicies); 2822 return; 2823 } 2824 } 2825 2826 TreeEntry *TE = 2827 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies); 2828 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 2829 2830 // Keeps the reordered operands to avoid code duplication. 
2831 SmallVector<ValueList, 2> OperandsVec; 2832 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { 2833 ValueList Operands; 2834 // Prepare the operand vector. 2835 for (Value *V : VL) 2836 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock( 2837 PH->getIncomingBlock(I))); 2838 TE->setOperand(I, Operands); 2839 OperandsVec.push_back(Operands); 2840 } 2841 for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx) 2842 buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx}); 2843 return; 2844 } 2845 case Instruction::ExtractValue: 2846 case Instruction::ExtractElement: { 2847 OrdersType CurrentOrder; 2848 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder); 2849 if (Reuse) { 2850 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n"); 2851 ++NumOpsWantToKeepOriginalOrder; 2852 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 2853 ReuseShuffleIndicies); 2854 // This is a special case, as it does not gather, but at the same time 2855 // we are not extending buildTree_rec() towards the operands. 2856 ValueList Op0; 2857 Op0.assign(VL.size(), VL0->getOperand(0)); 2858 VectorizableTree.back()->setOperand(0, Op0); 2859 return; 2860 } 2861 if (!CurrentOrder.empty()) { 2862 LLVM_DEBUG({ 2863 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence " 2864 "with order"; 2865 for (unsigned Idx : CurrentOrder) 2866 dbgs() << " " << Idx; 2867 dbgs() << "\n"; 2868 }); 2869 // Insert new order with initial value 0, if it does not exist, 2870 // otherwise return the iterator to the existing one. 2871 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 2872 ReuseShuffleIndicies, CurrentOrder); 2873 findRootOrder(CurrentOrder); 2874 ++NumOpsWantToKeepOrder[CurrentOrder]; 2875 // This is a special case, as it does not gather, but at the same time 2876 // we are not extending buildTree_rec() towards the operands. 2877 ValueList Op0; 2878 Op0.assign(VL.size(), VL0->getOperand(0)); 2879 VectorizableTree.back()->setOperand(0, Op0); 2880 return; 2881 } 2882 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n"); 2883 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 2884 ReuseShuffleIndicies); 2885 BS.cancelScheduling(VL, VL0); 2886 return; 2887 } 2888 case Instruction::InsertElement: { 2889 assert(ReuseShuffleIndicies.empty() && "All inserts should be unique"); 2890 2891 // Check that we have a buildvector and not a shuffle of 2 or more 2892 // different vectors. 2893 ValueSet SourceVectors; 2894 for (Value *V : VL) 2895 SourceVectors.insert(cast<Instruction>(V)->getOperand(0)); 2896 2897 if (count_if(VL, [&SourceVectors](Value *V) { 2898 return !SourceVectors.contains(V); 2899 }) >= 2) { 2900 // Found 2nd source vector - cancel. 
2901 LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with "
2902 "different source vectors.\n");
2903 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2904 ReuseShuffleIndicies);
2905 BS.cancelScheduling(VL, VL0);
2906 return;
2907 }
2908
2909 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx);
2910 LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n");
2911
2912 constexpr int NumOps = 2;
2913 ValueList VectorOperands[NumOps];
2914 for (int I = 0; I < NumOps; ++I) {
2915 for (Value *V : VL)
2916 VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I));
2917
2918 TE->setOperand(I, VectorOperands[I]);
2919 }
2920 buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, 0});
2921 return;
2922 }
2923 case Instruction::Load: {
2924 // Check that a vectorized load would load the same memory as a scalar
2925 // load. For example, we don't want to vectorize loads that are smaller
2926 // than 8 bits. Even though we have a packed struct {<i2, i2, i2, i2>}, LLVM
2927 // treats loading/storing it as an i8 struct. If we vectorize loads/stores
2928 // from such a struct, we read/write packed bits disagreeing with the
2929 // unvectorized version.
2930 Type *ScalarTy = VL0->getType();
2931
2932 if (DL->getTypeSizeInBits(ScalarTy) !=
2933 DL->getTypeAllocSizeInBits(ScalarTy)) {
2934 BS.cancelScheduling(VL, VL0);
2935 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2936 ReuseShuffleIndicies);
2937 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
2938 return;
2939 }
2940
2941 // Make sure all loads in the bundle are simple - we can't vectorize
2942 // atomic or volatile loads.
2943 SmallVector<Value *, 4> PointerOps(VL.size());
2944 auto POIter = PointerOps.begin();
2945 for (Value *V : VL) {
2946 auto *L = cast<LoadInst>(V);
2947 if (!L->isSimple()) {
2948 BS.cancelScheduling(VL, VL0);
2949 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
2950 ReuseShuffleIndicies);
2951 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
2952 return;
2953 }
2954 *POIter = L->getPointerOperand();
2955 ++POIter;
2956 }
2957
2958 OrdersType CurrentOrder;
2959 // Check the order of pointer operands.
2960 if (llvm::sortPtrAccesses(PointerOps, *DL, *SE, CurrentOrder)) {
2961 Value *Ptr0;
2962 Value *PtrN;
2963 if (CurrentOrder.empty()) {
2964 Ptr0 = PointerOps.front();
2965 PtrN = PointerOps.back();
2966 } else {
2967 Ptr0 = PointerOps[CurrentOrder.front()];
2968 PtrN = PointerOps[CurrentOrder.back()];
2969 }
2970 Optional<int> Diff = getPointersDiff(Ptr0, PtrN, *DL, *SE);
2971 // Check that the sorted loads are consecutive.
2972 if (Diff && static_cast<unsigned>(*Diff) == VL.size() - 1) {
2973 if (CurrentOrder.empty()) {
2974 // Original loads are consecutive and do not require reordering.
2975 ++NumOpsWantToKeepOriginalOrder;
2976 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
2977 UserTreeIdx, ReuseShuffleIndicies);
2978 TE->setOperandsInOrder();
2979 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
2980 } else {
2981 // Need to reorder.
2982 TreeEntry *TE =
2983 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
2984 ReuseShuffleIndicies, CurrentOrder);
2985 TE->setOperandsInOrder();
2986 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
2987 findRootOrder(CurrentOrder);
2988 ++NumOpsWantToKeepOrder[CurrentOrder];
2989 }
2990 return;
2991 }
2992 // Vectorizing non-consecutive loads with `llvm.masked.gather`.
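// For illustration, four non-adjacent i32 loads could become a single
// gather like this (hypothetical IR):
//   %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(
//            <4 x i32*> %ptrs, i32 4,
//            <4 x i1> <i1 true, i1 true, i1 true, i1 true>,
//            <4 x i32> undef)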
2993 TreeEntry *TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S, 2994 UserTreeIdx, ReuseShuffleIndicies); 2995 TE->setOperandsInOrder(); 2996 buildTree_rec(PointerOps, Depth + 1, {TE, 0}); 2997 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); 2998 return; 2999 } 3000 3001 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); 3002 BS.cancelScheduling(VL, VL0); 3003 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3004 ReuseShuffleIndicies); 3005 return; 3006 } 3007 case Instruction::ZExt: 3008 case Instruction::SExt: 3009 case Instruction::FPToUI: 3010 case Instruction::FPToSI: 3011 case Instruction::FPExt: 3012 case Instruction::PtrToInt: 3013 case Instruction::IntToPtr: 3014 case Instruction::SIToFP: 3015 case Instruction::UIToFP: 3016 case Instruction::Trunc: 3017 case Instruction::FPTrunc: 3018 case Instruction::BitCast: { 3019 Type *SrcTy = VL0->getOperand(0)->getType(); 3020 for (Value *V : VL) { 3021 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType(); 3022 if (Ty != SrcTy || !isValidElementType(Ty)) { 3023 BS.cancelScheduling(VL, VL0); 3024 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3025 ReuseShuffleIndicies); 3026 LLVM_DEBUG(dbgs() 3027 << "SLP: Gathering casts with different src types.\n"); 3028 return; 3029 } 3030 } 3031 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3032 ReuseShuffleIndicies); 3033 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); 3034 3035 TE->setOperandsInOrder(); 3036 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 3037 ValueList Operands; 3038 // Prepare the operand vector. 3039 for (Value *V : VL) 3040 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 3041 3042 buildTree_rec(Operands, Depth + 1, {TE, i}); 3043 } 3044 return; 3045 } 3046 case Instruction::ICmp: 3047 case Instruction::FCmp: { 3048 // Check that all of the compares have the same predicate. 3049 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 3050 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0); 3051 Type *ComparedTy = VL0->getOperand(0)->getType(); 3052 for (Value *V : VL) { 3053 CmpInst *Cmp = cast<CmpInst>(V); 3054 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) || 3055 Cmp->getOperand(0)->getType() != ComparedTy) { 3056 BS.cancelScheduling(VL, VL0); 3057 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3058 ReuseShuffleIndicies); 3059 LLVM_DEBUG(dbgs() 3060 << "SLP: Gathering cmp with different predicate.\n"); 3061 return; 3062 } 3063 } 3064 3065 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3066 ReuseShuffleIndicies); 3067 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 3068 3069 ValueList Left, Right; 3070 if (cast<CmpInst>(VL0)->isCommutative()) { 3071 // Commutative predicate - collect + sort operands of the instructions 3072 // so that each side is more likely to have the same opcode. 3073 assert(P0 == SwapP0 && "Commutative Predicate mismatch"); 3074 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 3075 } else { 3076 // Collect operands - commute if it uses the swapped predicate. 
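// For example (hypothetical), with P0 == slt, a lane computing
// 'icmp sgt %b, %a' yields the same result as 'icmp slt %a, %b', so
// swapping its operands lets every lane share the slt predicate.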
3077 for (Value *V : VL) { 3078 auto *Cmp = cast<CmpInst>(V); 3079 Value *LHS = Cmp->getOperand(0); 3080 Value *RHS = Cmp->getOperand(1); 3081 if (Cmp->getPredicate() != P0) 3082 std::swap(LHS, RHS); 3083 Left.push_back(LHS); 3084 Right.push_back(RHS); 3085 } 3086 } 3087 TE->setOperand(0, Left); 3088 TE->setOperand(1, Right); 3089 buildTree_rec(Left, Depth + 1, {TE, 0}); 3090 buildTree_rec(Right, Depth + 1, {TE, 1}); 3091 return; 3092 } 3093 case Instruction::Select: 3094 case Instruction::FNeg: 3095 case Instruction::Add: 3096 case Instruction::FAdd: 3097 case Instruction::Sub: 3098 case Instruction::FSub: 3099 case Instruction::Mul: 3100 case Instruction::FMul: 3101 case Instruction::UDiv: 3102 case Instruction::SDiv: 3103 case Instruction::FDiv: 3104 case Instruction::URem: 3105 case Instruction::SRem: 3106 case Instruction::FRem: 3107 case Instruction::Shl: 3108 case Instruction::LShr: 3109 case Instruction::AShr: 3110 case Instruction::And: 3111 case Instruction::Or: 3112 case Instruction::Xor: { 3113 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3114 ReuseShuffleIndicies); 3115 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n"); 3116 3117 // Sort operands of the instructions so that each side is more likely to 3118 // have the same opcode. 3119 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 3120 ValueList Left, Right; 3121 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 3122 TE->setOperand(0, Left); 3123 TE->setOperand(1, Right); 3124 buildTree_rec(Left, Depth + 1, {TE, 0}); 3125 buildTree_rec(Right, Depth + 1, {TE, 1}); 3126 return; 3127 } 3128 3129 TE->setOperandsInOrder(); 3130 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 3131 ValueList Operands; 3132 // Prepare the operand vector. 3133 for (Value *V : VL) 3134 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 3135 3136 buildTree_rec(Operands, Depth + 1, {TE, i}); 3137 } 3138 return; 3139 } 3140 case Instruction::GetElementPtr: { 3141 // We don't combine GEPs with complicated (nested) indexing. 3142 for (Value *V : VL) { 3143 if (cast<Instruction>(V)->getNumOperands() != 2) { 3144 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 3145 BS.cancelScheduling(VL, VL0); 3146 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3147 ReuseShuffleIndicies); 3148 return; 3149 } 3150 } 3151 3152 // We can't combine several GEPs into one vector if they operate on 3153 // different types. 3154 Type *Ty0 = VL0->getOperand(0)->getType(); 3155 for (Value *V : VL) { 3156 Type *CurTy = cast<Instruction>(V)->getOperand(0)->getType(); 3157 if (Ty0 != CurTy) { 3158 LLVM_DEBUG(dbgs() 3159 << "SLP: not-vectorizable GEP (different types).\n"); 3160 BS.cancelScheduling(VL, VL0); 3161 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3162 ReuseShuffleIndicies); 3163 return; 3164 } 3165 } 3166 3167 // We don't combine GEPs with non-constant indexes. 
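// For example (illustrative):
//   %g0 = getelementptr inbounds i32, i32* %base, i64 1   ; constant index
//   %g1 = getelementptr inbounds i32, i32* %base, i64 %i  ; variable index
// A bundle containing %g1 is gathered instead of being vectorized.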
3168 Type *Ty1 = VL0->getOperand(1)->getType();
3169 for (Value *V : VL) {
3170 auto Op = cast<Instruction>(V)->getOperand(1);
3171 if (!isa<ConstantInt>(Op) ||
3172 (Op->getType() != Ty1 &&
3173 Op->getType()->getScalarSizeInBits() >
3174 DL->getIndexSizeInBits(
3175 V->getType()->getPointerAddressSpace()))) {
3176 LLVM_DEBUG(dbgs()
3177 << "SLP: not-vectorizable GEP (non-constant indexes).\n");
3178 BS.cancelScheduling(VL, VL0);
3179 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3180 ReuseShuffleIndicies);
3181 return;
3182 }
3183 }
3184
3185 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3186 ReuseShuffleIndicies);
3187 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
3188 TE->setOperandsInOrder();
3189 for (unsigned i = 0, e = 2; i < e; ++i) {
3190 ValueList Operands;
3191 // Prepare the operand vector.
3192 for (Value *V : VL)
3193 Operands.push_back(cast<Instruction>(V)->getOperand(i));
3194
3195 buildTree_rec(Operands, Depth + 1, {TE, i});
3196 }
3197 return;
3198 }
3199 case Instruction::Store: {
3200 // Check if the stores are consecutive or if we need to swizzle them.
3201 llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType();
3202 // Avoid types that are padded when being allocated as scalars, while
3203 // being packed together in a vector (such as i1).
3204 if (DL->getTypeSizeInBits(ScalarTy) !=
3205 DL->getTypeAllocSizeInBits(ScalarTy)) {
3206 BS.cancelScheduling(VL, VL0);
3207 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3208 ReuseShuffleIndicies);
3209 LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n");
3210 return;
3211 }
3212 // Make sure all stores in the bundle are simple - we can't vectorize
3213 // atomic or volatile stores.
3214 SmallVector<Value *, 4> PointerOps(VL.size());
3215 ValueList Operands(VL.size());
3216 auto POIter = PointerOps.begin();
3217 auto OIter = Operands.begin();
3218 for (Value *V : VL) {
3219 auto *SI = cast<StoreInst>(V);
3220 if (!SI->isSimple()) {
3221 BS.cancelScheduling(VL, VL0);
3222 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3223 ReuseShuffleIndicies);
3224 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n");
3225 return;
3226 }
3227 *POIter = SI->getPointerOperand();
3228 *OIter = SI->getValueOperand();
3229 ++POIter;
3230 ++OIter;
3231 }
3232
3233 OrdersType CurrentOrder;
3234 // Check the order of pointer operands.
3235 if (llvm::sortPtrAccesses(PointerOps, *DL, *SE, CurrentOrder)) {
3236 Value *Ptr0;
3237 Value *PtrN;
3238 if (CurrentOrder.empty()) {
3239 Ptr0 = PointerOps.front();
3240 PtrN = PointerOps.back();
3241 } else {
3242 Ptr0 = PointerOps[CurrentOrder.front()];
3243 PtrN = PointerOps[CurrentOrder.back()];
3244 }
3245 Optional<int> Dist = getPointersDiff(Ptr0, PtrN, *DL, *SE);
3246 // Check that the sorted pointer operands are consecutive.
3247 if (Dist && static_cast<unsigned>(*Dist) == VL.size() - 1) {
3248 if (CurrentOrder.empty()) {
3249 // Original stores are consecutive and do not require reordering.
3250 ++NumOpsWantToKeepOriginalOrder;
3251 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S,
3252 UserTreeIdx, ReuseShuffleIndicies);
3253 TE->setOperandsInOrder();
3254 buildTree_rec(Operands, Depth + 1, {TE, 0});
3255 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
3256 } else {
3257 TreeEntry *TE =
3258 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3259 ReuseShuffleIndicies, CurrentOrder);
3260 TE->setOperandsInOrder();
3261 buildTree_rec(Operands, Depth + 1, {TE, 0});
3262 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n");
3263 findRootOrder(CurrentOrder);
3264 ++NumOpsWantToKeepOrder[CurrentOrder];
3265 }
3266 return;
3267 }
3268 }
3269
3270 BS.cancelScheduling(VL, VL0);
3271 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3272 ReuseShuffleIndicies);
3273 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
3274 return;
3275 }
3276 case Instruction::Call: {
3277 // Check if the calls are all to the same vectorizable intrinsic or
3278 // library function.
3279 CallInst *CI = cast<CallInst>(VL0);
3280 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3281
3282 VFShape Shape = VFShape::get(
3283 *CI, ElementCount::getFixed(static_cast<unsigned int>(VL.size())),
3284 false /*HasGlobalPred*/);
3285 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3286
3287 if (!VecFunc && !isTriviallyVectorizable(ID)) {
3288 BS.cancelScheduling(VL, VL0);
3289 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3290 ReuseShuffleIndicies);
3291 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
3292 return;
3293 }
3294 Function *F = CI->getCalledFunction();
3295 unsigned NumArgs = CI->getNumArgOperands();
3296 SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr);
3297 for (unsigned j = 0; j != NumArgs; ++j)
3298 if (hasVectorInstrinsicScalarOpd(ID, j))
3299 ScalarArgs[j] = CI->getArgOperand(j);
3300 for (Value *V : VL) {
3301 CallInst *CI2 = dyn_cast<CallInst>(V);
3302 if (!CI2 || CI2->getCalledFunction() != F ||
3303 getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
3304 (VecFunc &&
3305 VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) ||
3306 !CI->hasIdenticalOperandBundleSchema(*CI2)) {
3307 BS.cancelScheduling(VL, VL0);
3308 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3309 ReuseShuffleIndicies);
3310 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V
3311 << "\n");
3312 return;
3313 }
3314 // Some intrinsics have scalar arguments, and these must be the same
3315 // across all the calls for the bundle to be vectorized.
3316 for (unsigned j = 0; j != NumArgs; ++j) {
3317 if (hasVectorInstrinsicScalarOpd(ID, j)) {
3318 Value *A1J = CI2->getArgOperand(j);
3319 if (ScalarArgs[j] != A1J) {
3320 BS.cancelScheduling(VL, VL0);
3321 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3322 ReuseShuffleIndicies);
3323 LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
3324 << " argument " << ScalarArgs[j] << "!=" << A1J
3325 << "\n");
3326 return;
3327 }
3328 }
3329 }
3330 // Verify that the bundle operands are identical between the two calls.
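// For example (illustrative; @f and the bundle payloads are hypothetical),
// two calls with differing operand bundle values cannot form one bundle:
//   call void @f() [ "deopt"(i32 1) ]
//   call void @f() [ "deopt"(i32 2) ]
// even though their callee and argument lists match.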
3331 if (CI->hasOperandBundles() &&
3332 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
3333 CI->op_begin() + CI->getBundleOperandsEndIndex(),
3334 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
3335 BS.cancelScheduling(VL, VL0);
3336 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3337 ReuseShuffleIndicies);
3338 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
3339 << *CI << "!=" << *V << '\n');
3340 return;
3341 }
3342 }
3343
3344 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3345 ReuseShuffleIndicies);
3346 TE->setOperandsInOrder();
3347 for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
3348 ValueList Operands;
3349 // Prepare the operand vector.
3350 for (Value *V : VL) {
3351 auto *CI2 = cast<CallInst>(V);
3352 Operands.push_back(CI2->getArgOperand(i));
3353 }
3354 buildTree_rec(Operands, Depth + 1, {TE, i});
3355 }
3356 return;
3357 }
3358 case Instruction::ShuffleVector: {
3359 // If this is not an alternate sequence of opcodes like add-sub,
3360 // then do not vectorize this instruction.
3361 if (!S.isAltShuffle()) {
3362 BS.cancelScheduling(VL, VL0);
3363 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3364 ReuseShuffleIndicies);
3365 LLVM_DEBUG(dbgs() << "SLP: ShuffleVectors are not vectorized.\n");
3366 return;
3367 }
3368 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
3369 ReuseShuffleIndicies);
3370 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
3371
3372 // Reorder operands if reordering would enable vectorization.
3373 if (isa<BinaryOperator>(VL0)) {
3374 ValueList Left, Right;
3375 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this);
3376 TE->setOperand(0, Left);
3377 TE->setOperand(1, Right);
3378 buildTree_rec(Left, Depth + 1, {TE, 0});
3379 buildTree_rec(Right, Depth + 1, {TE, 1});
3380 return;
3381 }
3382
3383 TE->setOperandsInOrder();
3384 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
3385 ValueList Operands;
3386 // Prepare the operand vector.
3387 for (Value *V : VL)
3388 Operands.push_back(cast<Instruction>(V)->getOperand(i));
3389
3390 buildTree_rec(Operands, Depth + 1, {TE, i});
3391 }
3392 return;
3393 }
3394 default:
3395 BS.cancelScheduling(VL, VL0);
3396 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx,
3397 ReuseShuffleIndicies);
3398 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
3399 return;
3400 }
3401 }
3402
3403 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
3404 unsigned N = 1;
3405 Type *EltTy = T;
3406
3407 while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) ||
3408 isa<VectorType>(EltTy)) {
3409 if (auto *ST = dyn_cast<StructType>(EltTy)) {
3410 // Check that the struct is homogeneous.
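// For example, { i32, i32, i32, i32 } can be mapped to <4 x i32>, while a
// mixed struct such as { i32, float } cannot, because its element types
// differ (an illustrative pair of types, not taken from a test case).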
3411 for (const auto *Ty : ST->elements()) 3412 if (Ty != *ST->element_begin()) 3413 return 0; 3414 N *= ST->getNumElements(); 3415 EltTy = *ST->element_begin(); 3416 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) { 3417 N *= AT->getNumElements(); 3418 EltTy = AT->getElementType(); 3419 } else { 3420 auto *VT = cast<FixedVectorType>(EltTy); 3421 N *= VT->getNumElements(); 3422 EltTy = VT->getElementType(); 3423 } 3424 } 3425 3426 if (!isValidElementType(EltTy)) 3427 return 0; 3428 uint64_t VTSize = DL.getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N)); 3429 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T)) 3430 return 0; 3431 return N; 3432 } 3433 3434 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 3435 SmallVectorImpl<unsigned> &CurrentOrder) const { 3436 Instruction *E0 = cast<Instruction>(OpValue); 3437 assert(E0->getOpcode() == Instruction::ExtractElement || 3438 E0->getOpcode() == Instruction::ExtractValue); 3439 assert(E0->getOpcode() == getSameOpcode(VL).getOpcode() && "Invalid opcode"); 3440 // Check if all of the extracts come from the same vector and from the 3441 // correct offset. 3442 Value *Vec = E0->getOperand(0); 3443 3444 CurrentOrder.clear(); 3445 3446 // We have to extract from a vector/aggregate with the same number of elements. 3447 unsigned NElts; 3448 if (E0->getOpcode() == Instruction::ExtractValue) { 3449 const DataLayout &DL = E0->getModule()->getDataLayout(); 3450 NElts = canMapToVector(Vec->getType(), DL); 3451 if (!NElts) 3452 return false; 3453 // Check if load can be rewritten as load of vector. 3454 LoadInst *LI = dyn_cast<LoadInst>(Vec); 3455 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 3456 return false; 3457 } else { 3458 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); 3459 } 3460 3461 if (NElts != VL.size()) 3462 return false; 3463 3464 // Check that all of the indices extract from the correct offset. 3465 bool ShouldKeepOrder = true; 3466 unsigned E = VL.size(); 3467 // Assign to all items the initial value E + 1 so we can check if the extract 3468 // instruction index was used already. 3469 // Also, later we can check that all the indices are used and we have a 3470 // consecutive access in the extract instructions, by checking that no 3471 // element of CurrentOrder still has value E + 1. 3472 CurrentOrder.assign(E, E + 1); 3473 unsigned I = 0; 3474 for (; I < E; ++I) { 3475 auto *Inst = cast<Instruction>(VL[I]); 3476 if (Inst->getOperand(0) != Vec) 3477 break; 3478 Optional<unsigned> Idx = getExtractIndex(Inst); 3479 if (!Idx) 3480 break; 3481 const unsigned ExtIdx = *Idx; 3482 if (ExtIdx != I) { 3483 if (ExtIdx >= E || CurrentOrder[ExtIdx] != E + 1) 3484 break; 3485 ShouldKeepOrder = false; 3486 CurrentOrder[ExtIdx] = I; 3487 } else { 3488 if (CurrentOrder[I] != E + 1) 3489 break; 3490 CurrentOrder[I] = I; 3491 } 3492 } 3493 if (I < E) { 3494 CurrentOrder.clear(); 3495 return false; 3496 } 3497 3498 return ShouldKeepOrder; 3499 } 3500 3501 bool BoUpSLP::areAllUsersVectorized(Instruction *I) const { 3502 return I->hasOneUse() || llvm::all_of(I->users(), [this](User *U) { 3503 return ScalarToTreeEntry.count(U) > 0; 3504 }); 3505 } 3506 3507 static std::pair<InstructionCost, InstructionCost> 3508 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy, 3509 TargetTransformInfo *TTI, TargetLibraryInfo *TLI) { 3510 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3511 3512 // Calculate the cost of the scalar and vector calls. 
3513 SmallVector<Type *, 4> VecTys;
3514 for (Use &Arg : CI->args())
3515 VecTys.push_back(
3516 FixedVectorType::get(Arg->getType(), VecTy->getNumElements()));
3517 FastMathFlags FMF;
3518 if (auto *FPCI = dyn_cast<FPMathOperator>(CI))
3519 FMF = FPCI->getFastMathFlags();
3520 SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end());
3521 IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF,
3522 dyn_cast<IntrinsicInst>(CI));
3523 auto IntrinsicCost =
3524 TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput);
3525
3526 auto Shape = VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
3527 VecTy->getNumElements())),
3528 false /*HasGlobalPred*/);
3529 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3530 auto LibCost = IntrinsicCost;
3531 if (!CI->isNoBuiltin() && VecFunc) {
3532 // Calculate the cost of the vector library call.
3533 // If the corresponding vector call is cheaper, return its cost.
3534 LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys,
3535 TTI::TCK_RecipThroughput);
3536 }
3537 return {IntrinsicCost, LibCost};
3538 }
3539
3540 /// Compute the cost of creating a vector of type \p VecTy containing the
3541 /// extracted values from \p VL.
3542 static InstructionCost
3543 computeExtractCost(ArrayRef<Value *> VL, FixedVectorType *VecTy,
3544 TargetTransformInfo::ShuffleKind ShuffleKind,
3545 ArrayRef<int> Mask, TargetTransformInfo &TTI) {
3546 unsigned NumOfParts = TTI.getNumberOfParts(VecTy);
3547
3548 if (ShuffleKind != TargetTransformInfo::SK_PermuteSingleSrc || !NumOfParts ||
3549 VecTy->getNumElements() < NumOfParts)
3550 return TTI.getShuffleCost(ShuffleKind, VecTy, Mask);
3551
3552 bool AllConsecutive = true;
3553 unsigned EltsPerVector = VecTy->getNumElements() / NumOfParts;
3554 unsigned Idx = -1;
3555 InstructionCost Cost = 0;
3556
3557 // Process extracts in blocks of EltsPerVector to check if the source vector
3558 // operand can be re-used directly. If not, add the cost of creating a shuffle
3559 // to extract the values into a vector register.
3560 for (auto *V : VL) {
3561 ++Idx;
3562
3563 // Reached the start of a new vector register.
3564 if (Idx % EltsPerVector == 0) {
3565 AllConsecutive = true;
3566 continue;
3567 }
3568
3569 // Check whether all extracts for a vector register on the target
3570 // directly extract values in order.
3571 unsigned CurrentIdx = *getExtractIndex(cast<Instruction>(V));
3572 unsigned PrevIdx = *getExtractIndex(cast<Instruction>(VL[Idx - 1]));
3573 AllConsecutive &= PrevIdx + 1 == CurrentIdx &&
3574 CurrentIdx % EltsPerVector == Idx % EltsPerVector;
3575
3576 if (AllConsecutive)
3577 continue;
3578
3579 // Skip all indices, except for the last index per vector block.
3580 if ((Idx + 1) % EltsPerVector != 0 && Idx + 1 != VL.size())
3581 continue;
3582
3583 // If we have a series of extracts which are not consecutive and hence
3584 // cannot re-use the source vector register directly, compute the shuffle
3585 // cost to extract a vector with EltsPerVector elements.
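// For example (an illustrative sketch): with EltsPerVector == 4, a block
// extracting source indices 4,5,6,7 re-uses the second source register
// as-is, while a block extracting indices 0,2,4,6 is not consecutive and
// is charged one SK_PermuteSingleSrc shuffle below.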
3586 Cost += TTI.getShuffleCost( 3587 TargetTransformInfo::SK_PermuteSingleSrc, 3588 FixedVectorType::get(VecTy->getElementType(), EltsPerVector)); 3589 } 3590 return Cost; 3591 } 3592 3593 InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E) { 3594 ArrayRef<Value*> VL = E->Scalars; 3595 3596 Type *ScalarTy = VL[0]->getType(); 3597 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 3598 ScalarTy = SI->getValueOperand()->getType(); 3599 else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0])) 3600 ScalarTy = CI->getOperand(0)->getType(); 3601 else if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 3602 ScalarTy = IE->getOperand(1)->getType(); 3603 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 3604 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 3605 3606 // If we have computed a smaller type for the expression, update VecTy so 3607 // that the costs will be accurate. 3608 if (MinBWs.count(VL[0])) 3609 VecTy = FixedVectorType::get( 3610 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size()); 3611 3612 unsigned ReuseShuffleNumbers = E->ReuseShuffleIndices.size(); 3613 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 3614 InstructionCost ReuseShuffleCost = 0; 3615 if (NeedToShuffleReuses) { 3616 ReuseShuffleCost = 3617 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy, 3618 E->ReuseShuffleIndices); 3619 } 3620 // FIXME: it tries to fix a problem with MSVC buildbots. 3621 TargetTransformInfo &TTIRef = *TTI; 3622 auto &&AdjustExtractsCost = [this, &TTIRef, CostKind, VL, 3623 VecTy](InstructionCost &Cost, bool IsGather) { 3624 DenseMap<Value *, int> ExtractVectorsTys; 3625 for (auto *V : VL) { 3626 // If all users of instruction are going to be vectorized and this 3627 // instruction itself is not going to be vectorized, consider this 3628 // instruction as dead and remove its cost from the final cost of the 3629 // vectorized tree. 3630 if (IsGather && (!areAllUsersVectorized(cast<Instruction>(V)) || 3631 ScalarToTreeEntry.count(V))) 3632 continue; 3633 auto *EE = cast<ExtractElementInst>(V); 3634 unsigned Idx = *getExtractIndex(EE); 3635 if (TTIRef.getNumberOfParts(VecTy) != 3636 TTIRef.getNumberOfParts(EE->getVectorOperandType())) { 3637 auto It = 3638 ExtractVectorsTys.try_emplace(EE->getVectorOperand(), Idx).first; 3639 It->getSecond() = std::min<int>(It->second, Idx); 3640 } 3641 // Take credit for instruction that will become dead. 3642 if (EE->hasOneUse()) { 3643 Instruction *Ext = EE->user_back(); 3644 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 3645 all_of(Ext->users(), 3646 [](User *U) { return isa<GetElementPtrInst>(U); })) { 3647 // Use getExtractWithExtendCost() to calculate the cost of 3648 // extractelement/ext pair. 3649 Cost -= 3650 TTIRef.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(), 3651 EE->getVectorOperandType(), Idx); 3652 // Add back the cost of s|zext which is subtracted separately. 3653 Cost += TTIRef.getCastInstrCost( 3654 Ext->getOpcode(), Ext->getType(), EE->getType(), 3655 TTI::getCastContextHint(Ext), CostKind, Ext); 3656 continue; 3657 } 3658 } 3659 Cost -= TTIRef.getVectorInstrCost(Instruction::ExtractElement, 3660 EE->getVectorOperandType(), Idx); 3661 } 3662 // Add a cost for subvector extracts/inserts if required. 
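// For example (an illustrative sketch): if the extracts read a wider
// source such as <8 x i32> while the result type is <4 x i32>, an
// SK_ExtractSubvector of a 4-element subvector is costed; in the opposite
// direction, a single SK_InsertSubvector into the wider type is costed.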
3663 for (const auto &Data : ExtractVectorsTys) {
3664 auto *EEVTy = cast<FixedVectorType>(Data.first->getType());
3665 unsigned NumElts = VecTy->getNumElements();
3666 if (TTIRef.getNumberOfParts(EEVTy) > TTIRef.getNumberOfParts(VecTy)) {
3667 unsigned Idx = (Data.second / NumElts) * NumElts;
3668 unsigned EENumElts = EEVTy->getNumElements();
3669 if (Idx + NumElts <= EENumElts) {
3670 Cost +=
3671 TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
3672 EEVTy, None, Idx, VecTy);
3673 } else {
3674 // Need to round up the subvector type vectorization factor to avoid a
3675 // crash in cost model functions. Make SubVT so that Idx + VF of SubVT
3676 // <= EENumElts.
3677 auto *SubVT =
3678 FixedVectorType::get(VecTy->getElementType(), EENumElts - Idx);
3679 Cost +=
3680 TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector,
3681 EEVTy, None, Idx, SubVT);
3682 }
3683 } else {
3684 Cost += TTIRef.getShuffleCost(TargetTransformInfo::SK_InsertSubvector,
3685 VecTy, None, 0, EEVTy);
3686 }
3687 }
3688 };
3689 if (E->State == TreeEntry::NeedToGather) {
3690 if (allConstant(VL))
3691 return 0;
3692 if (isSplat(VL)) {
3693 return ReuseShuffleCost +
3694 TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, None,
3695 0);
3696 }
3697 if (isa<InsertElementInst>(VL[0]))
3698 return InstructionCost::getInvalid();
3699 if (E->getOpcode() == Instruction::ExtractElement &&
3700 allSameType(VL) && allSameBlock(VL)) {
3701 SmallVector<int> Mask;
3702 Optional<TargetTransformInfo::ShuffleKind> ShuffleKind =
3703 isShuffle(VL, Mask);
3704 if (ShuffleKind.hasValue()) {
3705 InstructionCost Cost =
3706 computeExtractCost(VL, VecTy, *ShuffleKind, Mask, *TTI);
3707 AdjustExtractsCost(Cost, /*IsGather=*/true);
3708 return ReuseShuffleCost + Cost;
3709 }
3710 }
3711 InstructionCost GatherCost = 0;
3712 SmallVector<int> Mask;
3713 SmallVector<const TreeEntry *> Entries;
3714 Optional<TargetTransformInfo::ShuffleKind> Shuffle =
3715 isGatherShuffledEntry(E, Mask, Entries);
3716 if (Shuffle.hasValue()) {
3717 if (ShuffleVectorInst::isIdentityMask(Mask)) {
3718 LLVM_DEBUG(
3719 dbgs()
3720 << "SLP: perfect diamond match for gather bundle that starts with "
3721 << *VL.front() << ".\n");
3722 } else {
3723 LLVM_DEBUG(dbgs() << "SLP: shuffled " << Entries.size()
3724 << " entries for bundle that starts with "
3725 << *VL.front() << ".\n");
3726 GatherCost = TTI->getShuffleCost(*Shuffle, VecTy, Mask);
3727 }
3728 } else {
3729 GatherCost = getGatherCost(VL);
3730 }
3731 return ReuseShuffleCost + GatherCost;
3732 }
3733 assert((E->State == TreeEntry::Vectorize ||
3734 E->State == TreeEntry::ScatterVectorize) &&
3735 "Unhandled state");
3736 assert(E->getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
3737 Instruction *VL0 = E->getMainOp();
3738 unsigned ShuffleOrOp =
3739 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
3740 switch (ShuffleOrOp) {
3741 case Instruction::PHI:
3742 return 0;
3743
3744 case Instruction::ExtractValue:
3745 case Instruction::ExtractElement: {
3746 // The common cost of removing ExtractElement/ExtractValue instructions
3747 // plus the cost of shuffles, if required to reshuffle the original vector.
3748 InstructionCost CommonCost = 0; 3749 if (NeedToShuffleReuses) { 3750 unsigned Idx = 0; 3751 for (unsigned I : E->ReuseShuffleIndices) { 3752 if (ShuffleOrOp == Instruction::ExtractElement) { 3753 auto *EE = cast<ExtractElementInst>(VL[I]); 3754 ReuseShuffleCost -= TTI->getVectorInstrCost( 3755 Instruction::ExtractElement, EE->getVectorOperandType(), 3756 *getExtractIndex(EE)); 3757 } else { 3758 ReuseShuffleCost -= TTI->getVectorInstrCost( 3759 Instruction::ExtractElement, VecTy, Idx); 3760 ++Idx; 3761 } 3762 } 3763 Idx = ReuseShuffleNumbers; 3764 for (Value *V : VL) { 3765 if (ShuffleOrOp == Instruction::ExtractElement) { 3766 auto *EE = cast<ExtractElementInst>(V); 3767 ReuseShuffleCost += TTI->getVectorInstrCost( 3768 Instruction::ExtractElement, EE->getVectorOperandType(), 3769 *getExtractIndex(EE)); 3770 } else { 3771 --Idx; 3772 ReuseShuffleCost += TTI->getVectorInstrCost( 3773 Instruction::ExtractElement, VecTy, Idx); 3774 } 3775 } 3776 CommonCost = ReuseShuffleCost; 3777 } else if (!E->ReorderIndices.empty()) { 3778 SmallVector<int> NewMask; 3779 inversePermutation(E->ReorderIndices, NewMask); 3780 CommonCost = TTI->getShuffleCost( 3781 TargetTransformInfo::SK_PermuteSingleSrc, VecTy, NewMask); 3782 } 3783 if (ShuffleOrOp == Instruction::ExtractValue) { 3784 for (unsigned I = 0, E = VL.size(); I < E; ++I) { 3785 auto *EI = cast<Instruction>(VL[I]); 3786 // Take credit for instruction that will become dead. 3787 if (EI->hasOneUse()) { 3788 Instruction *Ext = EI->user_back(); 3789 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 3790 all_of(Ext->users(), 3791 [](User *U) { return isa<GetElementPtrInst>(U); })) { 3792 // Use getExtractWithExtendCost() to calculate the cost of 3793 // extractelement/ext pair. 3794 CommonCost -= TTI->getExtractWithExtendCost( 3795 Ext->getOpcode(), Ext->getType(), VecTy, I); 3796 // Add back the cost of s|zext which is subtracted separately. 3797 CommonCost += TTI->getCastInstrCost( 3798 Ext->getOpcode(), Ext->getType(), EI->getType(), 3799 TTI::getCastContextHint(Ext), CostKind, Ext); 3800 continue; 3801 } 3802 } 3803 CommonCost -= 3804 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, I); 3805 } 3806 } else { 3807 AdjustExtractsCost(CommonCost, /*IsGather=*/false); 3808 } 3809 return CommonCost; 3810 } 3811 case Instruction::InsertElement: { 3812 auto *SrcVecTy = cast<FixedVectorType>(VL0->getType()); 3813 3814 unsigned const NumElts = SrcVecTy->getNumElements(); 3815 unsigned const NumScalars = VL.size(); 3816 APInt DemandedElts = APInt::getNullValue(NumElts); 3817 // TODO: Add support for Instruction::InsertValue. 
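// For example (an illustrative sketch): inserts into consecutive lanes
// I..I+3 of an <8 x float> form an identity subsequence; if the offset is
// not a multiple of the number of scalars, a single SK_InsertSubvector is
// costed below, and a non-identity order falls back to SK_PermuteSingleSrc.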
3818 unsigned Offset = UINT_MAX; 3819 bool IsIdentity = true; 3820 SmallVector<int> ShuffleMask(NumElts, UndefMaskElem); 3821 for (unsigned I = 0; I < NumScalars; ++I) { 3822 Optional<int> InsertIdx = getInsertIndex(VL[I], 0); 3823 if (!InsertIdx || *InsertIdx == UndefMaskElem) 3824 continue; 3825 unsigned Idx = *InsertIdx; 3826 DemandedElts.setBit(Idx); 3827 if (Idx < Offset) { 3828 Offset = Idx; 3829 IsIdentity &= I == 0; 3830 } else { 3831 assert(Idx >= Offset && "Failed to find vector index offset"); 3832 IsIdentity &= Idx - Offset == I; 3833 } 3834 ShuffleMask[Idx] = I; 3835 } 3836 assert(Offset < NumElts && "Failed to find vector index offset"); 3837 3838 InstructionCost Cost = 0; 3839 Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts, 3840 /*Insert*/ true, /*Extract*/ false); 3841 3842 if (IsIdentity && NumElts != NumScalars && Offset % NumScalars != 0) 3843 Cost += TTI->getShuffleCost( 3844 TargetTransformInfo::SK_InsertSubvector, SrcVecTy, /*Mask*/ None, 3845 Offset, 3846 FixedVectorType::get(SrcVecTy->getElementType(), NumScalars)); 3847 else if (!IsIdentity) 3848 Cost += TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, SrcVecTy, 3849 ShuffleMask); 3850 3851 return Cost; 3852 } 3853 case Instruction::ZExt: 3854 case Instruction::SExt: 3855 case Instruction::FPToUI: 3856 case Instruction::FPToSI: 3857 case Instruction::FPExt: 3858 case Instruction::PtrToInt: 3859 case Instruction::IntToPtr: 3860 case Instruction::SIToFP: 3861 case Instruction::UIToFP: 3862 case Instruction::Trunc: 3863 case Instruction::FPTrunc: 3864 case Instruction::BitCast: { 3865 Type *SrcTy = VL0->getOperand(0)->getType(); 3866 InstructionCost ScalarEltCost = 3867 TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy, 3868 TTI::getCastContextHint(VL0), CostKind, VL0); 3869 if (NeedToShuffleReuses) { 3870 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 3871 } 3872 3873 // Calculate the cost of this instruction. 3874 InstructionCost ScalarCost = VL.size() * ScalarEltCost; 3875 3876 auto *SrcVecTy = FixedVectorType::get(SrcTy, VL.size()); 3877 InstructionCost VecCost = 0; 3878 // Check if the values are candidates to demote. 3879 if (!MinBWs.count(VL0) || VecTy != SrcVecTy) { 3880 VecCost = 3881 ReuseShuffleCost + 3882 TTI->getCastInstrCost(E->getOpcode(), VecTy, SrcVecTy, 3883 TTI::getCastContextHint(VL0), CostKind, VL0); 3884 } 3885 LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecCost, ScalarCost)); 3886 return VecCost - ScalarCost; 3887 } 3888 case Instruction::FCmp: 3889 case Instruction::ICmp: 3890 case Instruction::Select: { 3891 // Calculate the cost of this instruction. 3892 InstructionCost ScalarEltCost = 3893 TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, Builder.getInt1Ty(), 3894 CmpInst::BAD_ICMP_PREDICATE, CostKind, VL0); 3895 if (NeedToShuffleReuses) { 3896 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 3897 } 3898 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size()); 3899 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; 3900 3901 // Check if all entries in VL are either compares or selects with compares 3902 // as condition that have the same predicates. 
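// For example (illustrative, with hypothetical values), a bundle where
// every lane looks like:
//   %c = icmp slt i32 %a, %b
//   %s = select i1 %c, i32 %a, i32 %b
// shares the 'slt' predicate and may later be costed as a min/max
// intrinsic by the canConvertToMinOrMaxIntrinsic check below.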
3903 CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE; 3904 bool First = true; 3905 for (auto *V : VL) { 3906 CmpInst::Predicate CurrentPred; 3907 auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value()); 3908 if ((!match(V, m_Select(MatchCmp, m_Value(), m_Value())) && 3909 !match(V, MatchCmp)) || 3910 (!First && VecPred != CurrentPred)) { 3911 VecPred = CmpInst::BAD_ICMP_PREDICATE; 3912 break; 3913 } 3914 First = false; 3915 VecPred = CurrentPred; 3916 } 3917 3918 InstructionCost VecCost = TTI->getCmpSelInstrCost( 3919 E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0); 3920 // Check if it is possible and profitable to use min/max for selects in 3921 // VL. 3922 // 3923 auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL); 3924 if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) { 3925 IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy, 3926 {VecTy, VecTy}); 3927 InstructionCost IntrinsicCost = 3928 TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 3929 // If the selects are the only uses of the compares, they will be dead 3930 // and we can adjust the cost by removing their cost. 3931 if (IntrinsicAndUse.second) 3932 IntrinsicCost -= 3933 TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy, MaskTy, 3934 CmpInst::BAD_ICMP_PREDICATE, CostKind); 3935 VecCost = std::min(VecCost, IntrinsicCost); 3936 } 3937 LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecCost, ScalarCost)); 3938 return ReuseShuffleCost + VecCost - ScalarCost; 3939 } 3940 case Instruction::FNeg: 3941 case Instruction::Add: 3942 case Instruction::FAdd: 3943 case Instruction::Sub: 3944 case Instruction::FSub: 3945 case Instruction::Mul: 3946 case Instruction::FMul: 3947 case Instruction::UDiv: 3948 case Instruction::SDiv: 3949 case Instruction::FDiv: 3950 case Instruction::URem: 3951 case Instruction::SRem: 3952 case Instruction::FRem: 3953 case Instruction::Shl: 3954 case Instruction::LShr: 3955 case Instruction::AShr: 3956 case Instruction::And: 3957 case Instruction::Or: 3958 case Instruction::Xor: { 3959 // Certain instructions can be cheaper to vectorize if they have a 3960 // constant second vector operand. 3961 TargetTransformInfo::OperandValueKind Op1VK = 3962 TargetTransformInfo::OK_AnyValue; 3963 TargetTransformInfo::OperandValueKind Op2VK = 3964 TargetTransformInfo::OK_UniformConstantValue; 3965 TargetTransformInfo::OperandValueProperties Op1VP = 3966 TargetTransformInfo::OP_None; 3967 TargetTransformInfo::OperandValueProperties Op2VP = 3968 TargetTransformInfo::OP_PowerOf2; 3969 3970 // If all operands are exactly the same ConstantInt then set the 3971 // operand kind to OK_UniformConstantValue. 3972 // If instead not all operands are constants, then set the operand kind 3973 // to OK_AnyValue. If all operands are constants but not the same, 3974 // then set the operand kind to OK_NonUniformConstantValue. 3975 ConstantInt *CInt0 = nullptr; 3976 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 3977 const Instruction *I = cast<Instruction>(VL[i]); 3978 unsigned OpIdx = isa<BinaryOperator>(I) ? 
1 : 0; 3979 ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx)); 3980 if (!CInt) { 3981 Op2VK = TargetTransformInfo::OK_AnyValue; 3982 Op2VP = TargetTransformInfo::OP_None; 3983 break; 3984 } 3985 if (Op2VP == TargetTransformInfo::OP_PowerOf2 && 3986 !CInt->getValue().isPowerOf2()) 3987 Op2VP = TargetTransformInfo::OP_None; 3988 if (i == 0) { 3989 CInt0 = CInt; 3990 continue; 3991 } 3992 if (CInt0 != CInt) 3993 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 3994 } 3995 3996 SmallVector<const Value *, 4> Operands(VL0->operand_values()); 3997 InstructionCost ScalarEltCost = 3998 TTI->getArithmeticInstrCost(E->getOpcode(), ScalarTy, CostKind, Op1VK, 3999 Op2VK, Op1VP, Op2VP, Operands, VL0); 4000 if (NeedToShuffleReuses) { 4001 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 4002 } 4003 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; 4004 InstructionCost VecCost = 4005 TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind, Op1VK, 4006 Op2VK, Op1VP, Op2VP, Operands, VL0); 4007 LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecCost, ScalarCost)); 4008 return ReuseShuffleCost + VecCost - ScalarCost; 4009 } 4010 case Instruction::GetElementPtr: { 4011 TargetTransformInfo::OperandValueKind Op1VK = 4012 TargetTransformInfo::OK_AnyValue; 4013 TargetTransformInfo::OperandValueKind Op2VK = 4014 TargetTransformInfo::OK_UniformConstantValue; 4015 4016 InstructionCost ScalarEltCost = TTI->getArithmeticInstrCost( 4017 Instruction::Add, ScalarTy, CostKind, Op1VK, Op2VK); 4018 if (NeedToShuffleReuses) { 4019 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 4020 } 4021 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; 4022 InstructionCost VecCost = TTI->getArithmeticInstrCost( 4023 Instruction::Add, VecTy, CostKind, Op1VK, Op2VK); 4024 LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecCost, ScalarCost)); 4025 return ReuseShuffleCost + VecCost - ScalarCost; 4026 } 4027 case Instruction::Load: { 4028 // Cost of wide load - cost of scalar loads. 4029 Align alignment = cast<LoadInst>(VL0)->getAlign(); 4030 InstructionCost ScalarEltCost = TTI->getMemoryOpCost( 4031 Instruction::Load, ScalarTy, alignment, 0, CostKind, VL0); 4032 if (NeedToShuffleReuses) { 4033 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost; 4034 } 4035 InstructionCost ScalarLdCost = VecTy->getNumElements() * ScalarEltCost; 4036 InstructionCost VecLdCost; 4037 if (E->State == TreeEntry::Vectorize) { 4038 VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, alignment, 0, 4039 CostKind, VL0); 4040 } else { 4041 assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState"); 4042 VecLdCost = TTI->getGatherScatterOpCost( 4043 Instruction::Load, VecTy, cast<LoadInst>(VL0)->getPointerOperand(), 4044 /*VariableMask=*/false, alignment, CostKind, VL0); 4045 } 4046 if (!NeedToShuffleReuses && !E->ReorderIndices.empty()) { 4047 SmallVector<int> NewMask; 4048 inversePermutation(E->ReorderIndices, NewMask); 4049 VecLdCost += TTI->getShuffleCost( 4050 TargetTransformInfo::SK_PermuteSingleSrc, VecTy, NewMask); 4051 } 4052 LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecLdCost, ScalarLdCost)); 4053 return ReuseShuffleCost + VecLdCost - ScalarLdCost; 4054 } 4055 case Instruction::Store: { 4056 // We know that we can merge the stores. Calculate the cost. 4057 bool IsReorder = !E->ReorderIndices.empty(); 4058 auto *SI = 4059 cast<StoreInst>(IsReorder ? 
VL[E->ReorderIndices.front()] : VL0);
4060 Align Alignment = SI->getAlign();
4061 InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
4062 Instruction::Store, ScalarTy, Alignment, 0, CostKind, VL0);
4063 InstructionCost ScalarStCost = VecTy->getNumElements() * ScalarEltCost;
4064 InstructionCost VecStCost = TTI->getMemoryOpCost(
4065 Instruction::Store, VecTy, Alignment, 0, CostKind, VL0);
4066 if (IsReorder) {
4067 SmallVector<int> NewMask;
4068 inversePermutation(E->ReorderIndices, NewMask);
4069 VecStCost += TTI->getShuffleCost(
4070 TargetTransformInfo::SK_PermuteSingleSrc, VecTy, NewMask);
4071 }
4072 LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecStCost, ScalarStCost));
4073 return VecStCost - ScalarStCost;
4074 }
4075 case Instruction::Call: {
4076 CallInst *CI = cast<CallInst>(VL0);
4077 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4078
4079 // Calculate the cost of the scalar and vector calls.
4080 IntrinsicCostAttributes CostAttrs(ID, *CI, 1);
4081 InstructionCost ScalarEltCost =
4082 TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
4083 if (NeedToShuffleReuses) {
4084 ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
4085 }
4086 InstructionCost ScalarCallCost = VecTy->getNumElements() * ScalarEltCost;
4087
4088 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
4089 InstructionCost VecCallCost =
4090 std::min(VecCallCosts.first, VecCallCosts.second);
4091
4092 LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
4093 << " (" << VecCallCost << "-" << ScalarCallCost << ")"
4094 << " for " << *CI << "\n");
4095
4096 return ReuseShuffleCost + VecCallCost - ScalarCallCost;
4097 }
4098 case Instruction::ShuffleVector: {
4099 assert(E->isAltShuffle() &&
4100 ((Instruction::isBinaryOp(E->getOpcode()) &&
4101 Instruction::isBinaryOp(E->getAltOpcode())) ||
4102 (Instruction::isCast(E->getOpcode()) &&
4103 Instruction::isCast(E->getAltOpcode()))) &&
4104 "Invalid Shuffle Vector Operand");
4105 InstructionCost ScalarCost = 0;
4106 if (NeedToShuffleReuses) {
4107 for (unsigned Idx : E->ReuseShuffleIndices) {
4108 Instruction *I = cast<Instruction>(VL[Idx]);
4109 ReuseShuffleCost -= TTI->getInstructionCost(I, CostKind);
4110 }
4111 for (Value *V : VL) {
4112 Instruction *I = cast<Instruction>(V);
4113 ReuseShuffleCost += TTI->getInstructionCost(I, CostKind);
4114 }
4115 }
4116 for (Value *V : VL) {
4117 Instruction *I = cast<Instruction>(V);
4118 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
4119 ScalarCost += TTI->getInstructionCost(I, CostKind);
4120 }
4121 // VecCost is equal to the sum of the cost of creating two vectors
4122 // and the cost of creating the shuffle.
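// For example (an illustrative sketch): an add/sub alternating bundle of
// width 4 is costed as one vector add, one vector sub, and one SK_Select
// blend whose mask, built below, would be <0, 5, 2, 7> when the odd lanes
// use the alternate opcode.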
4123 InstructionCost VecCost = 0;
4124 if (Instruction::isBinaryOp(E->getOpcode())) {
4125 VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind);
4126 VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy,
4127 CostKind);
4128 } else {
4129 Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType();
4130 Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType();
4131 auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size());
4132 auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size());
4133 VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty,
4134 TTI::CastContextHint::None, CostKind);
4135 VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty,
4136 TTI::CastContextHint::None, CostKind);
4137 }
4138
4139 SmallVector<int> Mask(E->Scalars.size());
4140 for (unsigned I = 0, End = E->Scalars.size(); I < End; ++I) {
4141 auto *OpInst = cast<Instruction>(E->Scalars[I]);
4142 assert(E->isOpcodeOrAlt(OpInst) && "Unexpected main/alternate opcode");
4143 Mask[I] = I + (OpInst->getOpcode() == E->getAltOpcode() ? End : 0);
4144 }
4145 VecCost +=
4146 TTI->getShuffleCost(TargetTransformInfo::SK_Select, VecTy, Mask, 0);
4147 LLVM_DEBUG(dumpTreeCosts(E, ReuseShuffleCost, VecCost, ScalarCost));
4148 return ReuseShuffleCost + VecCost - ScalarCost;
4149 }
4150 default:
4151 llvm_unreachable("Unknown instruction");
4152 }
4153 }
4154
4155 bool BoUpSLP::isFullyVectorizableTinyTree() const {
4156 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
4157 << VectorizableTree.size() << " is fully vectorizable.\n");
4158
4159 // We only handle trees of heights 1 and 2.
4160 if (VectorizableTree.size() == 1 &&
4161 VectorizableTree[0]->State == TreeEntry::Vectorize)
4162 return true;
4163
4164 if (VectorizableTree.size() != 2)
4165 return false;
4166
4167 // Handle splat and all-constant stores. Also try to vectorize tiny trees
4168 // with a second gather node if it has fewer scalar operands than the
4169 // initial tree element (it may be profitable to shuffle the second gather)
4170 // or if its scalars are extractelements that form a shuffle.
4171 SmallVector<int> Mask;
4172 if (VectorizableTree[0]->State == TreeEntry::Vectorize &&
4173 (allConstant(VectorizableTree[1]->Scalars) ||
4174 isSplat(VectorizableTree[1]->Scalars) ||
4175 (VectorizableTree[1]->State == TreeEntry::NeedToGather &&
4176 VectorizableTree[1]->Scalars.size() <
4177 VectorizableTree[0]->Scalars.size()) ||
4178 (VectorizableTree[1]->State == TreeEntry::NeedToGather &&
4179 VectorizableTree[1]->getOpcode() == Instruction::ExtractElement &&
4180 isShuffle(VectorizableTree[1]->Scalars, Mask))))
4181 return true;
4182
4183 // Gathering cost would be too much for tiny trees.
4184 if (VectorizableTree[0]->State == TreeEntry::NeedToGather ||
4185 VectorizableTree[1]->State == TreeEntry::NeedToGather)
4186 return false;
4187
4188 return true;
4189 }
4190
4191 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts,
4192 TargetTransformInfo *TTI,
4193 bool MustMatchOrInst) {
4194 // Look past the root to find a source value. Arbitrarily follow the
4195 // path through operand 0 of any 'or'. Also, peek through optional
4196 // shift-left-by-multiple-of-8-bits.
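// For example (an illustrative sketch with hypothetical values):
//   %b0 = load i8, i8* %p0
//   %b1 = load i8, i8* %p1
//   %z0 = zext i8 %b0 to i32
//   %z1 = zext i8 %b1 to i32
//   %s1 = shl i32 %z1, 8
//   %r  = or i32 %s1, %z0
// Starting at %r, the loop below walks operand 0 through the 'or' and the
// 'shl' (whose shift amount, 8, is a multiple of 8) and stops at %z1,
// which matches the zext(load) pattern checked afterwards.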
4197 Value *ZextLoad = Root; 4198 const APInt *ShAmtC; 4199 bool FoundOr = false; 4200 while (!isa<ConstantExpr>(ZextLoad) && 4201 (match(ZextLoad, m_Or(m_Value(), m_Value())) || 4202 (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) && 4203 ShAmtC->urem(8) == 0))) { 4204 auto *BinOp = cast<BinaryOperator>(ZextLoad); 4205 ZextLoad = BinOp->getOperand(0); 4206 if (BinOp->getOpcode() == Instruction::Or) 4207 FoundOr = true; 4208 } 4209 // Check if the input is an extended load of the required or/shift expression. 4210 Value *LoadPtr; 4211 if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root || 4212 !match(ZextLoad, m_ZExt(m_Load(m_Value(LoadPtr))))) 4213 return false; 4214 4215 // Require that the total load bit width is a legal integer type. 4216 // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target. 4217 // But <16 x i8> --> i128 is not, so the backend probably can't reduce it. 4218 Type *SrcTy = LoadPtr->getType()->getPointerElementType(); 4219 unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts; 4220 if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth))) 4221 return false; 4222 4223 // Everything matched - assume that we can fold the whole sequence using 4224 // load combining. 4225 LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at " 4226 << *(cast<Instruction>(Root)) << "\n"); 4227 4228 return true; 4229 } 4230 4231 bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const { 4232 if (RdxKind != RecurKind::Or) 4233 return false; 4234 4235 unsigned NumElts = VectorizableTree[0]->Scalars.size(); 4236 Value *FirstReduced = VectorizableTree[0]->Scalars[0]; 4237 return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI, 4238 /* MatchOr */ false); 4239 } 4240 4241 bool BoUpSLP::isLoadCombineCandidate() const { 4242 // Peek through a final sequence of stores and check if all operations are 4243 // likely to be load-combined. 4244 unsigned NumElts = VectorizableTree[0]->Scalars.size(); 4245 for (Value *Scalar : VectorizableTree[0]->Scalars) { 4246 Value *X; 4247 if (!match(Scalar, m_Store(m_Value(X), m_Value())) || 4248 !isLoadCombineCandidateImpl(X, NumElts, TTI, /* MatchOr */ true)) 4249 return false; 4250 } 4251 return true; 4252 } 4253 4254 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() const { 4255 // No need to vectorize inserts of gathered values. 4256 if (VectorizableTree.size() == 2 && 4257 isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) && 4258 VectorizableTree[1]->State == TreeEntry::NeedToGather) 4259 return true; 4260 4261 // We can vectorize the tree if its size is greater than or equal to the 4262 // minimum size specified by the MinTreeSize command line option. 4263 if (VectorizableTree.size() >= MinTreeSize) 4264 return false; 4265 4266 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we 4267 // can vectorize it if we can prove it fully vectorizable. 4268 if (isFullyVectorizableTinyTree()) 4269 return false; 4270 4271 assert(VectorizableTree.empty() 4272 ? ExternalUses.empty() 4273 : true && "We shouldn't have any external users"); 4274 4275 // Otherwise, we can't vectorize the tree. It is both tiny and not fully 4276 // vectorizable. 4277 return true; 4278 } 4279 4280 InstructionCost BoUpSLP::getSpillCost() const { 4281 // Walk from the bottom of the tree to the top, tracking which values are 4282 // live. 
When we see a call instruction that is not part of our tree, 4283 // query TTI to see if there is a cost to keeping values live over it 4284 // (for example, if spills and fills are required). 4285 unsigned BundleWidth = VectorizableTree.front()->Scalars.size(); 4286 InstructionCost Cost = 0; 4287 4288 SmallPtrSet<Instruction*, 4> LiveValues; 4289 Instruction *PrevInst = nullptr; 4290 4291 // The entries in VectorizableTree are not necessarily ordered by their 4292 // position in basic blocks. Collect them and order them by dominance so later 4293 // instructions are guaranteed to be visited first. For instructions in 4294 // different basic blocks, we only scan to the beginning of the block, so 4295 // their order does not matter, as long as all instructions in a basic block 4296 // are grouped together. Using dominance ensures a deterministic order. 4297 SmallVector<Instruction *, 16> OrderedScalars; 4298 for (const auto &TEPtr : VectorizableTree) { 4299 Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]); 4300 if (!Inst) 4301 continue; 4302 OrderedScalars.push_back(Inst); 4303 } 4304 llvm::stable_sort(OrderedScalars, [this](Instruction *A, Instruction *B) { 4305 return DT->dominates(B, A); 4306 }); 4307 4308 for (Instruction *Inst : OrderedScalars) { 4309 if (!PrevInst) { 4310 PrevInst = Inst; 4311 continue; 4312 } 4313 4314 // Update LiveValues. 4315 LiveValues.erase(PrevInst); 4316 for (auto &J : PrevInst->operands()) { 4317 if (isa<Instruction>(&*J) && getTreeEntry(&*J)) 4318 LiveValues.insert(cast<Instruction>(&*J)); 4319 } 4320 4321 LLVM_DEBUG({ 4322 dbgs() << "SLP: #LV: " << LiveValues.size(); 4323 for (auto *X : LiveValues) 4324 dbgs() << " " << X->getName(); 4325 dbgs() << ", Looking at "; 4326 Inst->dump(); 4327 }); 4328 4329 // Now find the sequence of instructions between PrevInst and Inst. 4330 unsigned NumCalls = 0; 4331 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(), 4332 PrevInstIt = 4333 PrevInst->getIterator().getReverse(); 4334 while (InstIt != PrevInstIt) { 4335 if (PrevInstIt == PrevInst->getParent()->rend()) { 4336 PrevInstIt = Inst->getParent()->rbegin(); 4337 continue; 4338 } 4339 4340 // Debug information does not impact spill cost. 
4341 if ((isa<CallInst>(&*PrevInstIt) && 4342 !isa<DbgInfoIntrinsic>(&*PrevInstIt)) && 4343 &*PrevInstIt != PrevInst) 4344 NumCalls++; 4345 4346 ++PrevInstIt; 4347 } 4348 4349 if (NumCalls) { 4350 SmallVector<Type*, 4> V; 4351 for (auto *II : LiveValues) { 4352 auto *ScalarTy = II->getType(); 4353 if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy)) 4354 ScalarTy = VectorTy->getElementType(); 4355 V.push_back(FixedVectorType::get(ScalarTy, BundleWidth)); 4356 } 4357 Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V); 4358 } 4359 4360 PrevInst = Inst; 4361 } 4362 4363 return Cost; 4364 } 4365 4366 InstructionCost BoUpSLP::getTreeCost() { 4367 InstructionCost Cost = 0; 4368 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size " 4369 << VectorizableTree.size() << ".\n"); 4370 4371 unsigned BundleWidth = VectorizableTree[0]->Scalars.size(); 4372 4373 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) { 4374 TreeEntry &TE = *VectorizableTree[I].get(); 4375 4376 InstructionCost C = getEntryCost(&TE); 4377 Cost += C; 4378 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 4379 << " for bundle that starts with " << *TE.Scalars[0] 4380 << ".\n" 4381 << "SLP: Current total cost = " << Cost << "\n"); 4382 } 4383 4384 SmallPtrSet<Value *, 16> ExtractCostCalculated; 4385 InstructionCost ExtractCost = 0; 4386 SmallBitVector IsIdentity; 4387 SmallVector<unsigned> VF; 4388 SmallVector<SmallVector<int>> ShuffleMask; 4389 SmallVector<Value *> FirstUsers; 4390 SmallVector<APInt> DemandedElts; 4391 for (ExternalUser &EU : ExternalUses) { 4392 // We only add extract cost once for the same scalar. 4393 if (!ExtractCostCalculated.insert(EU.Scalar).second) 4394 continue; 4395 4396 // Uses by ephemeral values are free (because the ephemeral value will be 4397 // removed prior to code generation, and so the extraction will be 4398 // removed as well). 4399 if (EphValues.count(EU.User)) 4400 continue; 4401 4402 // No extract cost for vector "scalar" 4403 if (isa<FixedVectorType>(EU.Scalar->getType())) 4404 continue; 4405 4406 // If found user is an insertelement, do not calculate extract cost but try 4407 // to detect it as a final shuffled/identity match. 4408 if (EU.User && isa<InsertElementInst>(EU.User)) { 4409 if (auto *FTy = dyn_cast<FixedVectorType>(EU.User->getType())) { 4410 Optional<int> InsertIdx = getInsertIndex(EU.User, 0); 4411 if (!InsertIdx || *InsertIdx == UndefMaskElem) 4412 continue; 4413 Value *VU = EU.User; 4414 auto *It = find_if(FirstUsers, [VU](Value *V) { 4415 // Checks if 2 insertelements are from the same buildvector. 
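// For example (illustrative), both of these insertelement users belong to
// the same buildvector chain, discovered by walking up operand 0:
//   %v0 = insertelement <4 x float> poison, float %x, i32 0
//   %v1 = insertelement <4 x float> %v0, float %y, i32 1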
4416 if (VU->getType() != V->getType()) 4417 return false; 4418 auto *IE1 = cast<InsertElementInst>(VU); 4419 auto *IE2 = cast<InsertElementInst>(V); 4420 do { 4421 if (IE1 == VU || IE2 == V) 4422 return true; 4423 if (IE1) 4424 IE1 = dyn_cast<InsertElementInst>(IE1->getOperand(0)); 4425 if (IE2) 4426 IE2 = dyn_cast<InsertElementInst>(IE2->getOperand(0)); 4427 } while (IE1 || IE2); 4428 return false; 4429 }); 4430 int VecId = -1; 4431 if (It == FirstUsers.end()) { 4432 VF.push_back(FTy->getNumElements()); 4433 ShuffleMask.emplace_back(VF.back(), UndefMaskElem); 4434 FirstUsers.push_back(EU.User); 4435 DemandedElts.push_back(APInt::getNullValue(VF.back())); 4436 IsIdentity.push_back(true); 4437 VecId = FirstUsers.size() - 1; 4438 } else { 4439 VecId = std::distance(FirstUsers.begin(), It); 4440 } 4441 int Idx = *InsertIdx; 4442 ShuffleMask[VecId][Idx] = EU.Lane; 4443 IsIdentity.set(IsIdentity.test(VecId) & 4444 (EU.Lane == Idx || EU.Lane == UndefMaskElem)); 4445 DemandedElts[VecId].setBit(Idx); 4446 } 4447 } 4448 4449 // If we plan to rewrite the tree in a smaller type, we will need to sign 4450 // extend the extracted value back to the original type. Here, we account 4451 // for the extract and the added cost of the sign extend if needed. 4452 auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth); 4453 auto *ScalarRoot = VectorizableTree[0]->Scalars[0]; 4454 if (MinBWs.count(ScalarRoot)) { 4455 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 4456 auto Extend = 4457 MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt; 4458 VecTy = FixedVectorType::get(MinTy, BundleWidth); 4459 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), 4460 VecTy, EU.Lane); 4461 } else { 4462 ExtractCost += 4463 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane); 4464 } 4465 } 4466 4467 InstructionCost SpillCost = getSpillCost(); 4468 Cost += SpillCost + ExtractCost; 4469 for (int I = 0, E = FirstUsers.size(); I < E; ++I) { 4470 if (!IsIdentity.test(I)) { 4471 InstructionCost C = TTI->getShuffleCost( 4472 TTI::SK_PermuteSingleSrc, 4473 cast<FixedVectorType>(FirstUsers[I]->getType()), ShuffleMask[I]); 4474 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 4475 << " for final shuffle of insertelement external users " 4476 << *VectorizableTree.front()->Scalars.front() << ".\n" 4477 << "SLP: Current total cost = " << Cost << "\n"); 4478 Cost += C; 4479 } 4480 unsigned VF = ShuffleMask[I].size(); 4481 for (int &Mask : ShuffleMask[I]) 4482 Mask = (Mask == UndefMaskElem ? 
0 : VF) + Mask; 4483 InstructionCost C = TTI->getShuffleCost( 4484 TTI::SK_PermuteTwoSrc, cast<FixedVectorType>(FirstUsers[I]->getType()), 4485 ShuffleMask[I]); 4486 LLVM_DEBUG( 4487 dbgs() 4488 << "SLP: Adding cost " << C 4489 << " for final shuffle of vector node and external insertelement users " 4490 << *VectorizableTree.front()->Scalars.front() << ".\n" 4491 << "SLP: Current total cost = " << Cost << "\n"); 4492 Cost += C; 4493 InstructionCost InsertCost = TTI->getScalarizationOverhead( 4494 cast<FixedVectorType>(FirstUsers[I]->getType()), DemandedElts[I], 4495 /*Insert*/ true, 4496 /*Extract*/ false); 4497 Cost -= InsertCost; 4498 LLVM_DEBUG(dbgs() << "SLP: subtracting the cost " << InsertCost 4499 << " for insertelements gather.\n" 4500 << "SLP: Current total cost = " << Cost << "\n"); 4501 } 4502 4503 #ifndef NDEBUG 4504 SmallString<256> Str; 4505 { 4506 raw_svector_ostream OS(Str); 4507 OS << "SLP: Spill Cost = " << SpillCost << ".\n" 4508 << "SLP: Extract Cost = " << ExtractCost << ".\n" 4509 << "SLP: Total Cost = " << Cost << ".\n"; 4510 } 4511 LLVM_DEBUG(dbgs() << Str); 4512 if (ViewSLPTree) 4513 ViewGraph(this, "SLP" + F->getName(), false, Str); 4514 #endif 4515 4516 return Cost; 4517 } 4518 4519 Optional<TargetTransformInfo::ShuffleKind> 4520 BoUpSLP::isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask, 4521 SmallVectorImpl<const TreeEntry *> &Entries) { 4522 Mask.assign(TE->Scalars.size(), UndefMaskElem); 4523 Entries.clear(); 4524 DenseMap<Value *, const TreeEntry *> UsedValuesEntry; 4525 unsigned VF = 0; 4526 // FIXME: Shall be replaced by GetVF function once non-power-2 patch is 4527 // landed. 4528 auto &&GetVF = [](const TreeEntry *TE) { 4529 if (!TE->ReuseShuffleIndices.empty()) 4530 return TE->ReuseShuffleIndices.size(); 4531 return TE->Scalars.size(); 4532 }; 4533 for (int I = 0, E = TE->Scalars.size(); I < E; ++I) { 4534 Value *V = TE->Scalars[I]; 4535 if (isa<UndefValue>(V)) 4536 continue; 4537 const TreeEntry *VTE = UsedValuesEntry.lookup(V); 4538 if (!VTE) { 4539 if (Entries.size() == 2) 4540 return None; 4541 VTE = getTreeEntry(V); 4542 if (!VTE || find_if( 4543 VectorizableTree, 4544 [VTE, TE](const std::unique_ptr<TreeEntry> &EntryPtr) { 4545 return EntryPtr.get() == VTE || EntryPtr.get() == TE; 4546 })->get() == TE) { 4547 // Check if it is used in one of the gathered entries. 4548 const auto *It = 4549 find_if(VectorizableTree, 4550 [V, TE](const std::unique_ptr<TreeEntry> &EntryPtr) { 4551 return EntryPtr.get() == TE || 4552 (EntryPtr->State == TreeEntry::NeedToGather && 4553 is_contained(EntryPtr->Scalars, V)); 4554 }); 4555 // The vector factor of shuffled entries must be the same. 4556 if (It->get() == TE) 4557 return None; 4558 VTE = It->get(); 4559 } 4560 Entries.push_back(VTE); 4561 if (Entries.size() == 1) { 4562 VF = GetVF(VTE); 4563 } else if (VF != GetVF(VTE)) { 4564 assert(Entries.size() == 2 && "Expected shuffle of 1 or 2 entries."); 4565 assert(VF > 0 && "Expected non-zero vector factor."); 4566 return None; 4567 } 4568 for (Value *SV : VTE->Scalars) 4569 UsedValuesEntry.try_emplace(SV, VTE); 4570 } 4571 int FoundLane = findLaneForValue(VTE->Scalars, VTE->ReuseShuffleIndices, V); 4572 Mask[I] = (Entries.front() == VTE ? 0 : VF) + FoundLane; 4573 // Extra check required by isSingleSourceMaskImpl function (called by 4574 // ShuffleVectorInst::isSingleSourceMask). 
    if (Mask[I] >= 2 * E)
      return None;
  }
  switch (Entries.size()) {
  case 1:
    return TargetTransformInfo::SK_PermuteSingleSrc;
  case 2:
    return TargetTransformInfo::SK_PermuteTwoSrc;
  default:
    break;
  }
  return None;
}

InstructionCost
BoUpSLP::getGatherCost(FixedVectorType *Ty,
                       const DenseSet<unsigned> &ShuffledIndices) const {
  unsigned NumElts = Ty->getNumElements();
  APInt DemandedElts = APInt::getNullValue(NumElts);
  for (unsigned I = 0; I < NumElts; ++I)
    if (!ShuffledIndices.count(I))
      DemandedElts.setBit(I);
  InstructionCost Cost =
      TTI->getScalarizationOverhead(Ty, DemandedElts, /*Insert*/ true,
                                    /*Extract*/ false);
  if (!ShuffledIndices.empty())
    Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty);
  return Cost;
}

InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  // Check if the same elements are inserted several times and count them as
  // shuffle candidates.
  DenseSet<unsigned> ShuffledElements;
  DenseSet<Value *> UniqueElements;
  // Iterate in reverse order so that the insertelement with the highest cost
  // (the highest lane) is the one that is kept.
  for (unsigned I = VL.size(); I > 0; --I) {
    unsigned Idx = I - 1;
    if (!UniqueElements.insert(VL[Idx]).second)
      ShuffledElements.insert(Idx);
  }
  return getGatherCost(VecTy, ShuffledElements);
}

// Perform operand reordering on the instructions in VL and return the
// reordered operands in Left and Right.
void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                             SmallVectorImpl<Value *> &Left,
                                             SmallVectorImpl<Value *> &Right,
                                             const DataLayout &DL,
                                             ScalarEvolution &SE,
                                             const BoUpSLP &R) {
  if (VL.empty())
    return;
  VLOperands Ops(VL, DL, SE, R);
  // Reorder the operands in place.
  Ops.reorder();
  Left = Ops.getVL(0);
  Right = Ops.getVL(1);
}

void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) {
  // Get the basic block this bundle is in. All instructions in the bundle
  // should be in this block.
  auto *Front = E->getMainOp();
  auto *BB = Front->getParent();
  assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool {
    auto *I = cast<Instruction>(V);
    return !E->isOpcodeOrAlt(I) || I->getParent() == BB;
  }));

  // The last instruction in the bundle in program order.
  Instruction *LastInst = nullptr;

  // Find the last instruction. The common case should be that BB has been
  // scheduled, and the last instruction is VL.back(). So we start with
  // VL.back() and iterate over schedule data until we reach the end of the
  // bundle. The end of the bundle is marked by null ScheduleData.
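  // (Members whose OpValue differs from Inst are auxiliary schedule-data
  // entries for alternate opcodes; the walk below skips them when looking for
  // the real last instruction of the bundle.)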
  if (BlocksSchedules.count(BB)) {
    auto *Bundle =
        BlocksSchedules[BB]->getScheduleData(E->isOneOf(E->Scalars.back()));
    if (Bundle && Bundle->isPartOfBundle())
      for (; Bundle; Bundle = Bundle->NextInBundle)
        if (Bundle->OpValue == Bundle->Inst)
          LastInst = Bundle->Inst;
  }

  // LastInst can still be null at this point if there's either not an entry
  // for BB in BlocksSchedules or there's no ScheduleData available for
  // VL.back(). This can be the case if buildTree_rec aborts for various
  // reasons (e.g., the maximum recursion depth is reached, the maximum region
  // size is reached, etc.). ScheduleData is initialized in the scheduling
  // "dry-run".
  //
  // If this happens, we can still find the last instruction by brute force. We
  // iterate forwards from Front (inclusive) until we either see all
  // instructions in the bundle or reach the end of the block. If Front is the
  // last instruction in program order, LastInst will be set to Front, and we
  // will visit all the remaining instructions in the block.
  //
  // One of the reasons we exit early from buildTree_rec is to place an upper
  // bound on compile-time. Thus, taking an additional compile-time hit here is
  // not ideal. However, this should be exceedingly rare since it requires that
  // we both exit early from buildTree_rec and that the bundle be out-of-order
  // (causing us to iterate all the way to the end of the block).
  if (!LastInst) {
    SmallPtrSet<Value *, 16> Bundle(E->Scalars.begin(), E->Scalars.end());
    for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
      if (Bundle.erase(&I) && E->isOpcodeOrAlt(&I))
        LastInst = &I;
      if (Bundle.empty())
        break;
    }
  }
  assert(LastInst && "Failed to find last instruction in bundle");

  // Set the insertion point after the last instruction in the bundle. Set the
  // debug location to Front.
  Builder.SetInsertPoint(BB, ++LastInst->getIterator());
  Builder.SetCurrentDebugLocation(Front->getDebugLoc());
}

Value *BoUpSLP::gather(ArrayRef<Value *> VL) {
  Value *Val0 =
      isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0];
  FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size());
  Value *Vec = PoisonValue::get(VecTy);
  unsigned InsIndex = 0;
  for (Value *Val : VL) {
    Vec = Builder.CreateInsertElement(Vec, Val, Builder.getInt32(InsIndex++));
    auto *InsElt = dyn_cast<InsertElementInst>(Vec);
    if (!InsElt)
      continue;
    GatherSeq.insert(InsElt);
    CSEBlocks.insert(InsElt->getParent());
    // Add to our 'need-to-extract' list.
    if (TreeEntry *Entry = getTreeEntry(Val)) {
      // Find which lane we need to extract.
      int FoundLane =
          findLaneForValue(Entry->Scalars, Entry->ReuseShuffleIndices, Val);
      ExternalUses.push_back(ExternalUser(Val, InsElt, FoundLane));
    }
  }

  return Vec;
}

Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  InstructionsState S = getSameOpcode(VL);
  if (S.getOpcode()) {
    if (TreeEntry *E = getTreeEntry(S.OpValue)) {
      if (E->isSame(VL)) {
        Value *V = vectorizeTree(E);
        if (VL.size() == E->Scalars.size() && !E->ReuseShuffleIndices.empty()) {
          // Reshuffle to get only unique values.
          // If some of the scalars are duplicated in the vectorization tree
          // entry, we do not vectorize them but instead generate a mask for
          // the reuses. But if there are several users of the same entry,
          // they may have different vectorization factors. This is especially
          // important for PHI nodes. In this case, we need to adapt the
          // resulting instruction for the user vectorization factor and have
          // to reshuffle it again to take only the unique elements of the
          // vector. Without this code the function would incorrectly return a
          // reduced vector instruction with the same elements, not with the
          // unique ones.
          // block:
          // %phi = phi <2 x > { .., %entry} {%shuffle, %block}
          // %2 = shuffle <2 x > %phi, %poison, <4 x > <0, 0, 1, 1>
          // ... (use %2)
          // %shuffle = shuffle <2 x> %2, poison, <2 x> {0, 2}
          // br %block
          SmallVector<int, 4> UniqueIdxs;
          SmallSet<int, 4> UsedIdxs;
          int Pos = 0;
          for (int Idx : E->ReuseShuffleIndices) {
            if (UsedIdxs.insert(Idx).second)
              UniqueIdxs.emplace_back(Pos);
            ++Pos;
          }
          V = Builder.CreateShuffleVector(V, UniqueIdxs, "shrink.shuffle");
        }
        return V;
      }
    }
  }

  // Check that every instruction appears once in this bundle.
  SmallVector<int, 4> ReuseShuffleIndicies;
  SmallVector<Value *, 4> UniqueValues;
  if (VL.size() > 2) {
    DenseMap<Value *, unsigned> UniquePositions;
    for (Value *V : VL) {
      auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
      ReuseShuffleIndicies.emplace_back(Res.first->second);
      if (Res.second || isa<Constant>(V))
        UniqueValues.emplace_back(V);
    }
    // Do not shuffle a single element or if the number of unique values is
    // not a power of 2.
    if (UniqueValues.size() == VL.size() || UniqueValues.size() <= 1 ||
        !llvm::isPowerOf2_32(UniqueValues.size()))
      ReuseShuffleIndicies.clear();
    else
      VL = UniqueValues;
  }

  Value *Vec = gather(VL);
  if (!ReuseShuffleIndicies.empty()) {
    Vec = Builder.CreateShuffleVector(Vec, ReuseShuffleIndicies, "shuffle");
    if (auto *I = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(I);
      CSEBlocks.insert(I->getParent());
    }
  }
  return Vec;
}

namespace {
/// Merges shuffle masks and emits final shuffle instruction, if required.
class ShuffleInstructionBuilder {
  IRBuilderBase &Builder;
  bool IsFinalized = false;
  SmallVector<int, 4> Mask;

public:
  ShuffleInstructionBuilder(IRBuilderBase &Builder) : Builder(Builder) {}

  /// Adds a mask, inverting it before applying.
  void addInversedMask(ArrayRef<unsigned> SubMask) {
    if (SubMask.empty())
      return;
    SmallVector<int, 4> NewMask;
    inversePermutation(SubMask, NewMask);
    addMask(NewMask);
  }

  /// Adds multiple masks, merging them into a single one.
  void addMask(ArrayRef<unsigned> SubMask) {
    SmallVector<int, 4> NewMask(SubMask.begin(), SubMask.end());
    addMask(NewMask);
  }

  void addMask(ArrayRef<int> SubMask) {
    if (SubMask.empty())
      return;
    if (Mask.empty()) {
      Mask.append(SubMask.begin(), SubMask.end());
      return;
    }
    SmallVector<int, 4> NewMask(SubMask.size(), SubMask.size());
    int TermValue = std::min(Mask.size(), SubMask.size());
    for (int I = 0, E = SubMask.size(); I < E; ++I) {
      if (SubMask[I] >= TermValue || Mask[SubMask[I]] >= TermValue) {
        NewMask[I] = E;
        continue;
      }
      NewMask[I] = Mask[SubMask[I]];
    }
    Mask.swap(NewMask);
  }

  Value *finalize(Value *V) {
    IsFinalized = true;
    if (Mask.empty())
      return V;
    return Builder.CreateShuffleVector(V, Mask, "shuffle");
  }

  ~ShuffleInstructionBuilder() {
    assert((IsFinalized || Mask.empty()) &&
           "Shuffle construction must be finalized.");
  }
};
} // namespace

Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  ShuffleInstructionBuilder ShuffleBuilder(Builder);
  bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
  if (E->State == TreeEntry::NeedToGather) {
    setInsertPointAfterBundle(E);
    Value *Vec;
    SmallVector<int> Mask;
    SmallVector<const TreeEntry *> Entries;
    Optional<TargetTransformInfo::ShuffleKind> Shuffle =
        isGatherShuffledEntry(E, Mask, Entries);
    if (Shuffle.hasValue()) {
      assert((Entries.size() == 1 || Entries.size() == 2) &&
             "Expected shuffle of 1 or 2 entries.");
      Vec = Builder.CreateShuffleVector(Entries.front()->VectorizedValue,
                                        Entries.back()->VectorizedValue, Mask);
    } else {
      Vec = gather(E->Scalars);
    }
    if (NeedToShuffleReuses) {
      ShuffleBuilder.addMask(E->ReuseShuffleIndices);
      Vec = ShuffleBuilder.finalize(Vec);
      if (auto *I = dyn_cast<Instruction>(Vec)) {
        GatherSeq.insert(I);
        CSEBlocks.insert(I->getParent());
      }
    }
    E->VectorizedValue = Vec;
    return Vec;
  }

  assert((E->State == TreeEntry::Vectorize ||
          E->State == TreeEntry::ScatterVectorize) &&
         "Unhandled state");
  unsigned ShuffleOrOp =
      E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
  Instruction *VL0 = E->getMainOp();
  Type *ScalarTy = VL0->getType();
  if (auto *Store = dyn_cast<StoreInst>(VL0))
    ScalarTy = Store->getValueOperand()->getType();
  else if (auto *IE = dyn_cast<InsertElementInst>(VL0))
    ScalarTy = IE->getOperand(1)->getType();
  auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size());
  switch (ShuffleOrOp) {
  case Instruction::PHI: {
    auto *PH = cast<PHINode>(VL0);
    Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
    Builder.SetCurrentDebugLocation(PH->getDebugLoc());
    PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
    Value *V = NewPhi;
    if (NeedToShuffleReuses)
      V = Builder.CreateShuffleVector(V, E->ReuseShuffleIndices, "shuffle");

    E->VectorizedValue = V;

    // PHINodes may have multiple entries from the same block. We want to
    // visit every block once.
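    // E.g., a switch can route two edges to the same successor; both incoming
    // slots of the new PHI must then carry the same vectorized value, which
    // the VisitedBBs set below guarantees by reusing the first computed one.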
    SmallPtrSet<BasicBlock *, 4> VisitedBBs;

    for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
      ValueList Operands;
      BasicBlock *IBB = PH->getIncomingBlock(i);

      if (!VisitedBBs.insert(IBB).second) {
        NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
        continue;
      }

      Builder.SetInsertPoint(IBB->getTerminator());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      Value *Vec = vectorizeTree(E->getOperand(i));
      NewPhi->addIncoming(Vec, IBB);
    }

    assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
           "Invalid number of incoming values");
    return V;
  }

  case Instruction::ExtractElement: {
    Value *V = E->getSingleOperand(0);
    Builder.SetInsertPoint(VL0);
    ShuffleBuilder.addInversedMask(E->ReorderIndices);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::ExtractValue: {
    auto *LI = cast<LoadInst>(E->getSingleOperand(0));
    Builder.SetInsertPoint(LI);
    auto *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
    Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
    LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign());
    Value *NewV = propagateMetadata(V, E->Scalars);
    ShuffleBuilder.addInversedMask(E->ReorderIndices);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    NewV = ShuffleBuilder.finalize(NewV);
    E->VectorizedValue = NewV;
    return NewV;
  }
  case Instruction::InsertElement: {
    Builder.SetInsertPoint(VL0);
    Value *V = vectorizeTree(E->getOperand(1));

    const unsigned NumElts =
        cast<FixedVectorType>(VL0->getType())->getNumElements();
    const unsigned NumScalars = E->Scalars.size();

    // Create an InsertVector shuffle if necessary.
    Instruction *FirstInsert = nullptr;
    bool IsIdentity = true;
    unsigned Offset = UINT_MAX;
    for (unsigned I = 0; I < NumScalars; ++I) {
      Value *Scalar = E->Scalars[I];
      if (!FirstInsert &&
          !is_contained(E->Scalars, cast<Instruction>(Scalar)->getOperand(0)))
        FirstInsert = cast<Instruction>(Scalar);
      Optional<int> InsertIdx = getInsertIndex(Scalar, 0);
      if (!InsertIdx || *InsertIdx == UndefMaskElem)
        continue;
      unsigned Idx = *InsertIdx;
      if (Idx < Offset) {
        Offset = Idx;
        IsIdentity &= I == 0;
      } else {
        assert(Idx >= Offset && "Failed to find vector index offset");
        IsIdentity &= Idx - Offset == I;
      }
    }
    assert(Offset < NumElts && "Failed to find vector index offset");

    // Create a shuffle to resize the vector.
    SmallVector<int> Mask(NumElts, UndefMaskElem);
    if (!IsIdentity) {
      for (unsigned I = 0; I < NumScalars; ++I) {
        Value *Scalar = E->Scalars[I];
        Optional<int> InsertIdx = getInsertIndex(Scalar, 0);
        if (!InsertIdx || *InsertIdx == UndefMaskElem)
          continue;
        Mask[*InsertIdx - Offset] = I;
      }
    } else {
      std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0);
    }
    if (!IsIdentity || NumElts != NumScalars)
      V = Builder.CreateShuffleVector(V, UndefValue::get(V->getType()), Mask);

    if (NumElts != NumScalars) {
      SmallVector<int> InsertMask(NumElts);
      std::iota(InsertMask.begin(), InsertMask.end(), 0);
      for (unsigned I = 0; I < NumElts; I++) {
        if (Mask[I] != UndefMaskElem)
          InsertMask[Offset + I] = NumElts + I;
      }

      V = Builder.CreateShuffleVector(
          FirstInsert->getOperand(0), V, InsertMask,
          cast<Instruction>(E->Scalars.back())->getName());
    }

    ++NumVectorInstructions;
    E->VectorizedValue = V;
    return V;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    setInsertPointAfterBundle(E);

    Value *InVec = vectorizeTree(E->getOperand(0));

    if (E->VectorizedValue) {
      LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
      return E->VectorizedValue;
    }

    auto *CI = cast<CastInst>(VL0);
    Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);

    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::FCmp:
  case Instruction::ICmp: {
    setInsertPointAfterBundle(E);

    Value *L = vectorizeTree(E->getOperand(0));
    Value *R = vectorizeTree(E->getOperand(1));

    if (E->VectorizedValue) {
      LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
      return E->VectorizedValue;
    }

    CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
    Value *V = Builder.CreateCmp(P0, L, R);
    propagateIRFlags(V, E->Scalars, VL0);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);

    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::Select: {
    setInsertPointAfterBundle(E);

    Value *Cond = vectorizeTree(E->getOperand(0));
    Value *True = vectorizeTree(E->getOperand(1));
    Value *False = vectorizeTree(E->getOperand(2));

    if (E->VectorizedValue) {
      LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
      return E->VectorizedValue;
    }

    Value *V = Builder.CreateSelect(Cond, True, False);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);

    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::FNeg: {
    setInsertPointAfterBundle(E);

    Value *Op = vectorizeTree(E->getOperand(0));

    if (E->VectorizedValue) {
      LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
      return E->VectorizedValue;
    }

    Value *V = Builder.CreateUnOp(
        static_cast<Instruction::UnaryOps>(E->getOpcode()), Op);
    propagateIRFlags(V, E->Scalars, VL0);
    if (auto *I = dyn_cast<Instruction>(V))
      V = propagateMetadata(I, E->Scalars);

    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);

    E->VectorizedValue = V;
    ++NumVectorInstructions;

    return V;
  }
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    setInsertPointAfterBundle(E);

    Value *LHS = vectorizeTree(E->getOperand(0));
    Value *RHS = vectorizeTree(E->getOperand(1));

    if (E->VectorizedValue) {
      LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
      return E->VectorizedValue;
    }

    Value *V = Builder.CreateBinOp(
        static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
    propagateIRFlags(V, E->Scalars, VL0);
    if (auto *I = dyn_cast<Instruction>(V))
      V = propagateMetadata(I, E->Scalars);

    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);

    E->VectorizedValue = V;
    ++NumVectorInstructions;

    return V;
  }
  case Instruction::Load: {
    // Loads are inserted at the head of the tree because we don't want to
    // sink them all the way down past store instructions.
    bool IsReorder = E->updateStateIfReorder();
    if (IsReorder)
      VL0 = E->getMainOp();
    setInsertPointAfterBundle(E);

    LoadInst *LI = cast<LoadInst>(VL0);
    Instruction *NewLI;
    unsigned AS = LI->getPointerAddressSpace();
    Value *PO = LI->getPointerOperand();
    if (E->State == TreeEntry::Vectorize) {
      Value *VecPtr = Builder.CreateBitCast(PO, VecTy->getPointerTo(AS));

      // The pointer operand uses an in-tree scalar so we add the new BitCast
      // to the ExternalUses list to make sure that an extract will be
      // generated in the future.
      if (getTreeEntry(PO))
        ExternalUses.emplace_back(PO, cast<User>(VecPtr), 0);

      NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign());
    } else {
      assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state");
      Value *VecPtr = vectorizeTree(E->getOperand(0));
      // Use the minimum alignment of the gathered loads.
      Align CommonAlignment = LI->getAlign();
      for (Value *V : E->Scalars)
        CommonAlignment =
            commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign());
      NewLI = Builder.CreateMaskedGather(VecPtr, CommonAlignment);
    }
    Value *V = propagateMetadata(NewLI, E->Scalars);

    ShuffleBuilder.addInversedMask(E->ReorderIndices);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);
    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::Store: {
    bool IsReorder = !E->ReorderIndices.empty();
    auto *SI = cast<StoreInst>(
        IsReorder ? E->Scalars[E->ReorderIndices.front()] : VL0);
    unsigned AS = SI->getPointerAddressSpace();

    setInsertPointAfterBundle(E);

    Value *VecValue = vectorizeTree(E->getOperand(0));
    ShuffleBuilder.addMask(E->ReorderIndices);
    VecValue = ShuffleBuilder.finalize(VecValue);

    Value *ScalarPtr = SI->getPointerOperand();
    Value *VecPtr = Builder.CreateBitCast(
        ScalarPtr, VecValue->getType()->getPointerTo(AS));
    StoreInst *ST =
        Builder.CreateAlignedStore(VecValue, VecPtr, SI->getAlign());

    // The pointer operand uses an in-tree scalar, so add the new BitCast to
    // ExternalUses to make sure that an extract will be generated in the
    // future.
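    // (Mirrors the load case above: if the scalar pointer is itself part of
    // the tree it will be erased after vectorization, so without this entry
    // the bitcast would end up referencing a deleted value.)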
    if (getTreeEntry(ScalarPtr))
      ExternalUses.push_back(ExternalUser(ScalarPtr, cast<User>(VecPtr), 0));

    Value *V = propagateMetadata(ST, E->Scalars);

    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::GetElementPtr: {
    setInsertPointAfterBundle(E);

    Value *Op0 = vectorizeTree(E->getOperand(0));

    std::vector<Value *> OpVecs;
    for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
         ++j) {
      ValueList &VL = E->getOperand(j);
      // All elements must be cast to the same type before vectorization to
      // avoid a crash.
      Type *VL0Ty = VL0->getOperand(j)->getType();
      Type *Ty = llvm::all_of(
                     VL, [VL0Ty](Value *V) { return VL0Ty == V->getType(); })
                     ? VL0Ty
                     : DL->getIndexType(cast<GetElementPtrInst>(VL0)
                                            ->getPointerOperandType()
                                            ->getScalarType());
      for (Value *&V : VL) {
        auto *CI = cast<ConstantInt>(V);
        V = ConstantExpr::getIntegerCast(CI, Ty,
                                         CI->getValue().isSignBitSet());
      }
      Value *OpVec = vectorizeTree(VL);
      OpVecs.push_back(OpVec);
    }

    Value *V = Builder.CreateGEP(
        cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs);
    if (Instruction *I = dyn_cast<Instruction>(V))
      V = propagateMetadata(I, E->Scalars);

    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);

    E->VectorizedValue = V;
    ++NumVectorInstructions;

    return V;
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(VL0);
    setInsertPointAfterBundle(E);

    Intrinsic::ID IID = Intrinsic::not_intrinsic;
    if (Function *FI = CI->getCalledFunction())
      IID = FI->getIntrinsicID();

    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

    auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
    bool UseIntrinsic = ID != Intrinsic::not_intrinsic &&
                        VecCallCosts.first <= VecCallCosts.second;

    Value *ScalarArg = nullptr;
    std::vector<Value *> OpVecs;
    for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
      ValueList OpVL;
      // Some intrinsics have scalar arguments. Such arguments should not be
      // vectorized.
      if (UseIntrinsic && hasVectorInstrinsicScalarOpd(IID, j)) {
        CallInst *CEI = cast<CallInst>(VL0);
        ScalarArg = CEI->getArgOperand(j);
        OpVecs.push_back(CEI->getArgOperand(j));
        continue;
      }

      Value *OpVec = vectorizeTree(E->getOperand(j));
      LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
      OpVecs.push_back(OpVec);
    }

    Function *CF;
    if (!UseIntrinsic) {
      VFShape Shape =
          VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>(
                                VecTy->getNumElements())),
                       false /*HasGlobalPred*/);
      CF = VFDatabase(*CI).getVectorizedFunction(Shape);
    } else {
      Type *Tys[] = {FixedVectorType::get(CI->getType(), E->Scalars.size())};
      CF = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
    }

    SmallVector<OperandBundleDef, 1> OpBundles;
    CI->getOperandBundlesAsDefs(OpBundles);
    Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);

    // The scalar argument uses an in-tree scalar so we add the new vectorized
    // call to the ExternalUses list to make sure that an extract will be
    // generated in the future.
    if (ScalarArg && getTreeEntry(ScalarArg))
      ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));

    propagateIRFlags(V, E->Scalars, VL0);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);

    E->VectorizedValue = V;
    ++NumVectorInstructions;
    return V;
  }
  case Instruction::ShuffleVector: {
    assert(E->isAltShuffle() &&
           ((Instruction::isBinaryOp(E->getOpcode()) &&
             Instruction::isBinaryOp(E->getAltOpcode())) ||
            (Instruction::isCast(E->getOpcode()) &&
             Instruction::isCast(E->getAltOpcode()))) &&
           "Invalid Shuffle Vector Operand");

    Value *LHS = nullptr, *RHS = nullptr;
    if (Instruction::isBinaryOp(E->getOpcode())) {
      setInsertPointAfterBundle(E);
      LHS = vectorizeTree(E->getOperand(0));
      RHS = vectorizeTree(E->getOperand(1));
    } else {
      setInsertPointAfterBundle(E);
      LHS = vectorizeTree(E->getOperand(0));
    }

    if (E->VectorizedValue) {
      LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
      return E->VectorizedValue;
    }

    Value *V0, *V1;
    if (Instruction::isBinaryOp(E->getOpcode())) {
      V0 = Builder.CreateBinOp(
          static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
      V1 = Builder.CreateBinOp(
          static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS);
    } else {
      V0 = Builder.CreateCast(
          static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy);
      V1 = Builder.CreateCast(
          static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy);
    }

    // Create a shuffle to take the alternate operations from the vector.
    // Also, gather up the main and alt scalar ops to propagate IR flags to
    // each vector operation.
    ValueList OpScalars, AltScalars;
    unsigned e = E->Scalars.size();
    SmallVector<int, 8> Mask(e);
    for (unsigned i = 0; i < e; ++i) {
      auto *OpInst = cast<Instruction>(E->Scalars[i]);
      assert(E->isOpcodeOrAlt(OpInst) && "Unexpected main/alternate opcode");
      if (OpInst->getOpcode() == E->getAltOpcode()) {
        Mask[i] = e + i;
        AltScalars.push_back(E->Scalars[i]);
      } else {
        Mask[i] = i;
        OpScalars.push_back(E->Scalars[i]);
      }
    }

    propagateIRFlags(V0, OpScalars);
    propagateIRFlags(V1, AltScalars);

    Value *V = Builder.CreateShuffleVector(V0, V1, Mask);
    if (Instruction *I = dyn_cast<Instruction>(V))
      V = propagateMetadata(I, E->Scalars);
    ShuffleBuilder.addMask(E->ReuseShuffleIndices);
    V = ShuffleBuilder.finalize(V);

    E->VectorizedValue = V;
    ++NumVectorInstructions;

    return V;
  }
  default:
    llvm_unreachable("unknown inst");
  }
  return nullptr;
}

Value *BoUpSLP::vectorizeTree() {
  ExtraValueToDebugLocsMap ExternallyUsedValues;
  return vectorizeTree(ExternallyUsedValues);
}

Value *
BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
  // All blocks must be scheduled before any instructions are inserted.
  for (auto &BSIter : BlocksSchedules) {
    scheduleBlock(BSIter.second.get());
  }

  Builder.SetInsertPoint(&F->getEntryBlock().front());
  auto *VectorRoot = vectorizeTree(VectorizableTree[0].get());

  // If the vectorized tree can be rewritten in a smaller type, we truncate the
  // vectorized root. InstCombine will then rewrite the entire expression. We
  // sign extend the extracted values below.
  auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
  if (MinBWs.count(ScalarRoot)) {
    if (auto *I = dyn_cast<Instruction>(VectorRoot)) {
      // If the current instruction is a phi and not the last phi, insert it
      // after the last phi node.
      if (isa<PHINode>(I))
        Builder.SetInsertPoint(&*I->getParent()->getFirstInsertionPt());
      else
        Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
    }
    auto BundleWidth = VectorizableTree[0]->Scalars.size();
    auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
    auto *VecTy = FixedVectorType::get(MinTy, BundleWidth);
    auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
    VectorizableTree[0]->VectorizedValue = Trunc;
  }

  LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
                    << " values.\n");

  // Extract all of the elements with the external uses.
  for (const auto &ExternalUse : ExternalUses) {
    Value *Scalar = ExternalUse.Scalar;
    llvm::User *User = ExternalUse.User;

    // Skip users that we already RAUW. This happens when one instruction
    // has multiple uses of the same value.
    if (User && !is_contained(Scalar->users(), User))
      continue;
    TreeEntry *E = getTreeEntry(Scalar);
    assert(E && "Invalid scalar");
    assert(E->State != TreeEntry::NeedToGather &&
           "Extracting from a gather list");

    Value *Vec = E->VectorizedValue;
    assert(Vec && "Can't find vectorizable value");

    Value *Lane = Builder.getInt32(ExternalUse.Lane);
    auto ExtractAndExtendIfNeeded = [&](Value *Vec) {
      if (Scalar->getType() != Vec->getType()) {
        Value *Ex = Builder.CreateExtractElement(Vec, Lane);
        // If necessary, sign-extend or zero-extend ScalarRoot
        // to the larger type.
        if (!MinBWs.count(ScalarRoot))
          return Ex;
        if (MinBWs[ScalarRoot].second)
          return Builder.CreateSExt(Ex, Scalar->getType());
        return Builder.CreateZExt(Ex, Scalar->getType());
      } else {
        assert(isa<FixedVectorType>(Scalar->getType()) &&
               isa<InsertElementInst>(Scalar) &&
               "In-tree scalar of vector type is not insertelement?");
        return Vec;
      }
    };
    // If User == nullptr, the Scalar is used as an extra argument. Generate
    // an ExtractElement instruction and update the record for this scalar in
    // the ExternallyUsedValues map.
    if (!User) {
      assert(ExternallyUsedValues.count(Scalar) &&
             "Scalar with nullptr as an external user must be registered in "
             "ExternallyUsedValues map");
      if (auto *VecI = dyn_cast<Instruction>(Vec)) {
        Builder.SetInsertPoint(VecI->getParent(),
                               std::next(VecI->getIterator()));
      } else {
        Builder.SetInsertPoint(&F->getEntryBlock().front());
      }
      Value *NewInst = ExtractAndExtendIfNeeded(Vec);
      CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
      auto &Locs = ExternallyUsedValues[Scalar];
      ExternallyUsedValues.insert({NewInst, Locs});
      ExternallyUsedValues.erase(Scalar);
      // Required to update internally referenced instructions.
      Scalar->replaceAllUsesWith(NewInst);
      continue;
    }

    // Generate extracts for out-of-tree users.
    // Find the insertion point for the extractelement lane.
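    // For a PHI user the extract must be emitted in the matching incoming
    // block (a PHI may only be preceded by other PHIs), except when that
    // block ends in a catchswitch, where nothing can be inserted; in that
    // case we fall back to the point right after the vector's definition.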
    if (auto *VecI = dyn_cast<Instruction>(Vec)) {
      if (PHINode *PH = dyn_cast<PHINode>(User)) {
        for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
          if (PH->getIncomingValue(i) == Scalar) {
            Instruction *IncomingTerminator =
                PH->getIncomingBlock(i)->getTerminator();
            if (isa<CatchSwitchInst>(IncomingTerminator)) {
              Builder.SetInsertPoint(VecI->getParent(),
                                     std::next(VecI->getIterator()));
            } else {
              Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
            }
            Value *NewInst = ExtractAndExtendIfNeeded(Vec);
            CSEBlocks.insert(PH->getIncomingBlock(i));
            PH->setOperand(i, NewInst);
          }
        }
      } else {
        Builder.SetInsertPoint(cast<Instruction>(User));
        Value *NewInst = ExtractAndExtendIfNeeded(Vec);
        CSEBlocks.insert(cast<Instruction>(User)->getParent());
        User->replaceUsesOfWith(Scalar, NewInst);
      }
    } else {
      Builder.SetInsertPoint(&F->getEntryBlock().front());
      Value *NewInst = ExtractAndExtendIfNeeded(Vec);
      CSEBlocks.insert(&F->getEntryBlock());
      User->replaceUsesOfWith(Scalar, NewInst);
    }

    LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
  }

  // For each vectorized value:
  for (auto &TEPtr : VectorizableTree) {
    TreeEntry *Entry = TEPtr.get();

    // No need to handle users of gathered values.
    if (Entry->State == TreeEntry::NeedToGather)
      continue;

    assert(Entry->VectorizedValue && "Can't find vectorizable value");

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

#ifndef NDEBUG
      Type *Ty = Scalar->getType();
      if (!Ty->isVoidTy()) {
        for (User *U : Scalar->users()) {
          LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");

          // It is legal to delete users in the ignorelist.
          assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) &&
                 "Deleting out-of-tree value");
        }
      }
#endif
      LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
      eraseInstruction(cast<Instruction>(Scalar));
    }
  }

  Builder.ClearInsertionPoint();
  InstrElementSize.clear();

  return VectorizableTree[0]->VectorizedValue;
}

void BoUpSLP::optimizeGatherSequence() {
  LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
                    << " gather sequence instructions.\n");
  // LICM InsertElementInst sequences.
  for (Instruction *I : GatherSeq) {
    if (isDeleted(I))
      continue;

    // Check if this block is inside a loop.
    Loop *L = LI->getLoopFor(I->getParent());
    if (!L)
      continue;

    // Check if it has a preheader.
    BasicBlock *PreHeader = L->getLoopPreheader();
    if (!PreHeader)
      continue;

    // If the vector or the element that we insert into it is an instruction
    // defined inside the loop, then we can't hoist this instruction.
    auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
    auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
    if (Op0 && L->contains(Op0))
      continue;
    if (Op1 && L->contains(Op1))
      continue;

    // We can hoist this instruction. Move it to the pre-header.
    I->moveBefore(PreHeader->getTerminator());
  }

  // Make a list of all reachable blocks in our CSE queue.
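  // Unreachable blocks are skipped here: DT->getNode() returns null for them,
  // and an instruction in such a block could never serve as a dominating copy.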
  SmallVector<const DomTreeNode *, 8> CSEWorkList;
  CSEWorkList.reserve(CSEBlocks.size());
  for (BasicBlock *BB : CSEBlocks)
    if (DomTreeNode *N = DT->getNode(BB)) {
      assert(DT->isReachableFromEntry(N));
      CSEWorkList.push_back(N);
    }

  // Sort blocks by domination. This ensures we visit a block after all blocks
  // dominating it are visited.
  llvm::stable_sort(CSEWorkList,
                    [this](const DomTreeNode *A, const DomTreeNode *B) {
                      return DT->properlyDominates(A, B);
                    });

  // Perform O(N^2) search over the gather sequences and merge identical
  // instructions. TODO: We can further optimize this scan if we split the
  // instructions into different buckets based on the insert lane.
  SmallVector<Instruction *, 16> Visited;
  for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
    assert(*I &&
           (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
           "Worklist not sorted properly!");
    BasicBlock *BB = (*I)->getBlock();
    // For all instructions in blocks containing gather sequences:
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
      Instruction *In = &*it++;
      if (isDeleted(In))
        continue;
      if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
        continue;

      // Check if we can replace this instruction with any of the
      // visited instructions.
      for (Instruction *v : Visited) {
        if (In->isIdenticalTo(v) &&
            DT->dominates(v->getParent(), In->getParent())) {
          In->replaceAllUsesWith(v);
          eraseInstruction(In);
          In = nullptr;
          break;
        }
      }
      if (In) {
        assert(!is_contained(Visited, In));
        Visited.push_back(In);
      }
    }
  }
  CSEBlocks.clear();
  GatherSeq.clear();
}

// Groups the instructions to a bundle (which is then a single scheduling
// entity) and schedules instructions until the bundle gets ready.
Optional<BoUpSLP::ScheduleData *>
BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
                                            const InstructionsState &S) {
  if (isa<PHINode>(S.OpValue))
    return nullptr;

  // Initialize the instruction bundle.
  Instruction *OldScheduleEnd = ScheduleEnd;
  ScheduleData *PrevInBundle = nullptr;
  ScheduleData *Bundle = nullptr;
  bool ReSchedule = false;
  LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n");

  auto &&TryScheduleBundle = [this, OldScheduleEnd, SLP](bool ReSchedule,
                                                         ScheduleData *Bundle) {
    // The scheduling region got new instructions at the lower end (or it is a
    // new region for the first bundle). This makes it necessary to
    // recalculate all dependencies.
    // It is seldom that this needs to be done a second time after adding the
    // initial bundle to the region.
    if (ScheduleEnd != OldScheduleEnd) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode())
        doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); });
      ReSchedule = true;
    }
    if (ReSchedule) {
      resetSchedule();
      initialFillReadyList(ReadyInsts);
    }
    if (Bundle) {
      LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle
                        << " in block " << BB->getName() << "\n");
      calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP);
    }

    // Now try to schedule the new bundle or (if no bundle) just calculate
    // dependencies.
    // As soon as the bundle is "ready" it means that there are no cyclic
    // dependencies and we can schedule it. Note that it's important that we
    // don't "schedule" the bundle yet (see cancelScheduling).
    while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) &&
           !ReadyInsts.empty()) {
      ScheduleData *Picked = ReadyInsts.pop_back_val();
      if (Picked->isSchedulingEntity() && Picked->isReady())
        schedule(Picked, ReadyInsts);
    }
  };

  // Make sure that the scheduling region contains all
  // instructions of the bundle.
  for (Value *V : VL) {
    if (!extendSchedulingRegion(V, S)) {
      // If the scheduling region got new instructions at the lower end (or it
      // is a new region for the first bundle), it becomes necessary to
      // recalculate all dependencies. Otherwise the compiler may crash trying
      // to incorrectly calculate dependencies and may emit instructions in
      // the wrong order at the actual scheduling.
      TryScheduleBundle(/*ReSchedule=*/false, nullptr);
      return None;
    }
  }

  for (Value *V : VL) {
    ScheduleData *BundleMember = getScheduleData(V);
    assert(BundleMember &&
           "no ScheduleData for bundle member (maybe not in same basic block)");
    if (BundleMember->IsScheduled) {
      // A bundle member was scheduled as a single instruction before and now
      // needs to be scheduled as part of the bundle. We just get rid of the
      // existing schedule.
      LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
                        << " was already scheduled\n");
      ReSchedule = true;
    }
    assert(BundleMember->isSchedulingEntity() &&
           "bundle member already part of other bundle");
    if (PrevInBundle) {
      PrevInBundle->NextInBundle = BundleMember;
    } else {
      Bundle = BundleMember;
    }
    BundleMember->UnscheduledDepsInBundle = 0;
    Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;

    // Group the instructions to a bundle.
    BundleMember->FirstInBundle = Bundle;
    PrevInBundle = BundleMember;
  }
  assert(Bundle && "Failed to find schedule bundle");
  TryScheduleBundle(ReSchedule, Bundle);
  if (!Bundle->isReady()) {
    cancelScheduling(VL, S.OpValue);
    return None;
  }
  return Bundle;
}

void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
                                                Value *OpValue) {
  if (isa<PHINode>(OpValue))
    return;

  ScheduleData *Bundle = getScheduleData(OpValue);
  LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
  assert(!Bundle->IsScheduled &&
         "Can't cancel bundle which is already scheduled");
  assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
         "tried to unbundle something which is not a bundle");

  // Un-bundle: make single instructions out of the bundle.
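  // Each member becomes its own scheduling entity again; members whose
  // dependencies are already fully satisfied go straight back onto the ready
  // list.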
  ScheduleData *BundleMember = Bundle;
  while (BundleMember) {
    assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
    BundleMember->FirstInBundle = BundleMember;
    ScheduleData *Next = BundleMember->NextInBundle;
    BundleMember->NextInBundle = nullptr;
    BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
    if (BundleMember->UnscheduledDepsInBundle == 0) {
      ReadyInsts.insert(BundleMember);
    }
    BundleMember = Next;
  }
}

BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
  // Allocate a new ScheduleData for the instruction.
  if (ChunkPos >= ChunkSize) {
    ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize));
    ChunkPos = 0;
  }
  return &(ScheduleDataChunks.back()[ChunkPos++]);
}

bool BoUpSLP::BlockScheduling::extendSchedulingRegion(
    Value *V, const InstructionsState &S) {
  if (getScheduleData(V, isOneOf(S, V)))
    return true;
  Instruction *I = dyn_cast<Instruction>(V);
  assert(I && "bundle member must be an instruction");
  assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
  auto &&CheckSheduleForI = [this, &S](Instruction *I) -> bool {
    ScheduleData *ISD = getScheduleData(I);
    if (!ISD)
      return false;
    assert(isInSchedulingRegion(ISD) &&
           "ScheduleData not in scheduling region");
    ScheduleData *SD = allocateScheduleDataChunks();
    SD->Inst = I;
    SD->init(SchedulingRegionID, S.OpValue);
    ExtraScheduleDataMap[I][S.OpValue] = SD;
    return true;
  };
  if (CheckSheduleForI(I))
    return true;
  if (!ScheduleStart) {
    // It's the first instruction in the new region.
    initScheduleData(I, I->getNextNode(), nullptr, nullptr);
    ScheduleStart = I;
    ScheduleEnd = I->getNextNode();
    if (isOneOf(S, I) != I)
      CheckSheduleForI(I);
    assert(ScheduleEnd && "tried to vectorize a terminator?");
    LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
    return true;
  }
  // Search up and down at the same time, because we don't know if the new
  // instruction is above or below the existing scheduling region.
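  // Walking both directions in lock-step keeps the number of iterations
  // bounded by the distance between I and the region, whichever side I is on,
  // rather than by the size of the whole block.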
  BasicBlock::reverse_iterator UpIter =
      ++ScheduleStart->getIterator().getReverse();
  BasicBlock::reverse_iterator UpperEnd = BB->rend();
  BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
  BasicBlock::iterator LowerEnd = BB->end();
  while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I &&
         &*DownIter != I) {
    if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
      LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
      return false;
    }

    ++UpIter;
    ++DownIter;
  }
  if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) {
    assert(I->getParent() == ScheduleStart->getParent() &&
           "Instruction is in wrong basic block.");
    initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
    ScheduleStart = I;
    if (isOneOf(S, I) != I)
      CheckSheduleForI(I);
    LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I
                      << "\n");
    return true;
  }
  assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) &&
         "Expected to reach top of the basic block or instruction down the "
         "lower end.");
  assert(I->getParent() == ScheduleEnd->getParent() &&
         "Instruction is in wrong basic block.");
  initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
                   nullptr);
  ScheduleEnd = I->getNextNode();
  if (isOneOf(S, I) != I)
    CheckSheduleForI(I);
  assert(ScheduleEnd && "tried to vectorize a terminator?");
  LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n");
  return true;
}

void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
                                                Instruction *ToI,
                                                ScheduleData *PrevLoadStore,
                                                ScheduleData *NextLoadStore) {
  ScheduleData *CurrentLoadStore = PrevLoadStore;
  for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
    ScheduleData *SD = ScheduleDataMap[I];
    if (!SD) {
      SD = allocateScheduleDataChunks();
      ScheduleDataMap[I] = SD;
      SD->Inst = I;
    }
    assert(!isInSchedulingRegion(SD) &&
           "new ScheduleData already in scheduling region");
    SD->init(SchedulingRegionID, I);

    if (I->mayReadOrWriteMemory() &&
        (!isa<IntrinsicInst>(I) ||
         (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect &&
          cast<IntrinsicInst>(I)->getIntrinsicID() !=
              Intrinsic::pseudoprobe))) {
      // Update the linked list of memory accessing instructions.
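      // (The sideeffect and pseudoprobe intrinsics are excluded above: they
      // formally read or write memory but carry no dependency that matters
      // for reordering.)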
      if (CurrentLoadStore) {
        CurrentLoadStore->NextLoadStore = SD;
      } else {
        FirstLoadStoreInRegion = SD;
      }
      CurrentLoadStore = SD;
    }
  }
  if (NextLoadStore) {
    if (CurrentLoadStore)
      CurrentLoadStore->NextLoadStore = NextLoadStore;
  } else {
    LastLoadStoreInRegion = CurrentLoadStore;
  }
}

void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
                                                     bool InsertInReadyList,
                                                     BoUpSLP *SLP) {
  assert(SD->isSchedulingEntity());

  SmallVector<ScheduleData *, 10> WorkList;
  WorkList.push_back(SD);

  while (!WorkList.empty()) {
    ScheduleData *SD = WorkList.pop_back_val();

    ScheduleData *BundleMember = SD;
    while (BundleMember) {
      assert(isInSchedulingRegion(BundleMember));
      if (!BundleMember->hasValidDependencies()) {

        LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n");
        BundleMember->Dependencies = 0;
        BundleMember->resetUnscheduledDeps();

        // Handle def-use chain dependencies.
        if (BundleMember->OpValue != BundleMember->Inst) {
          ScheduleData *UseSD = getScheduleData(BundleMember->Inst);
          if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
            BundleMember->Dependencies++;
            ScheduleData *DestBundle = UseSD->FirstInBundle;
            if (!DestBundle->IsScheduled)
              BundleMember->incrementUnscheduledDeps(1);
            if (!DestBundle->hasValidDependencies())
              WorkList.push_back(DestBundle);
          }
        } else {
          for (User *U : BundleMember->Inst->users()) {
            if (isa<Instruction>(U)) {
              ScheduleData *UseSD = getScheduleData(U);
              if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle) &&
                  // Ignore inner deps for insertelement.
                  !(UseSD->FirstInBundle == SD &&
                    isa<InsertElementInst>(BundleMember->Inst))) {
                BundleMember->Dependencies++;
                ScheduleData *DestBundle = UseSD->FirstInBundle;
                if (!DestBundle->IsScheduled)
                  BundleMember->incrementUnscheduledDeps(1);
                if (!DestBundle->hasValidDependencies())
                  WorkList.push_back(DestBundle);
              }
            } else {
              // A non-instruction user should never happen here, but we play
              // it safe: record an extra dependency that can never be
              // resolved, so this instruction/bundle is never scheduled and
              // vectorization is eventually disabled.
              BundleMember->Dependencies++;
              BundleMember->incrementUnscheduledDeps(1);
            }
          }
        }

        // Handle the memory dependencies.
        ScheduleData *DepDest = BundleMember->NextLoadStore;
        if (DepDest) {
          Instruction *SrcInst = BundleMember->Inst;
          MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
          bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
          unsigned numAliased = 0;
          unsigned DistToSrc = 1;

          while (DepDest) {
            assert(isInSchedulingRegion(DepDest));

            // We have two limits to reduce the complexity:
            // 1) AliasedCheckLimit: It's a small limit to reduce calls to
            //    SLP->isAliased (which is the expensive part in this loop).
            // 2) MaxMemDepDistance: It's for very large blocks and it aborts
            //    the whole loop (even if the loop is fast, it's quadratic).
            //    It's important for the loop break condition (see below) to
            //    check this limit even between two read-only instructions.
            if (DistToSrc >= MaxMemDepDistance ||
                ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
                 (numAliased >= AliasedCheckLimit ||
                  SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {

              // We increment the counter only if the locations are aliased
              // (instead of counting all alias checks). This gives a better
              // balance between reduced runtime and accurate dependencies.
              numAliased++;

              DepDest->MemoryDependencies.push_back(BundleMember);
              BundleMember->Dependencies++;
              ScheduleData *DestBundle = DepDest->FirstInBundle;
              if (!DestBundle->IsScheduled) {
                BundleMember->incrementUnscheduledDeps(1);
              }
              if (!DestBundle->hasValidDependencies()) {
                WorkList.push_back(DestBundle);
              }
            }
            DepDest = DepDest->NextLoadStore;

            // Example, explaining the loop break condition: Let's assume our
            // starting instruction is i0 and MaxMemDepDistance = 3.
            //
            // +--------v--v--v
            // i0,i1,i2,i3,i4,i5,i6,i7,i8
            // +--------^--^--^
            //
            // MaxMemDepDistance lets us stop alias-checking at i3 and we add
            // dependencies from i0 to i3,i4,.. (even if they are not
            // aliased). Previously we already added dependencies from i3 to
            // i6,i7,i8 (because of MaxMemDepDistance). As we added a
            // dependency from i0 to i3, we have transitive dependencies from
            // i0 to i6,i7,i8 and we can abort this loop at i6.
            if (DistToSrc >= 2 * MaxMemDepDistance)
              break;
            DistToSrc++;
          }
        }
      }
      BundleMember = BundleMember->NextInBundle;
    }
    if (InsertInReadyList && SD->isReady()) {
      ReadyInsts.push_back(SD);
      LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n");
    }
  }
}

void BoUpSLP::BlockScheduling::resetSchedule() {
  assert(ScheduleStart &&
         "tried to reset schedule on block which has not been scheduled");
  for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
    doForAllOpcodes(I, [&](ScheduleData *SD) {
      assert(isInSchedulingRegion(SD) &&
             "ScheduleData not in scheduling region");
      SD->IsScheduled = false;
      SD->resetUnscheduledDeps();
    });
  }
  ReadyInsts.clear();
}

void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
  if (!BS->ScheduleStart)
    return;

  LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");

  BS->resetSchedule();

  // For the real scheduling we use a more sophisticated ready-list: it is
  // sorted by the original instruction location. This lets the final schedule
  // be as close as possible to the original instruction order.
  struct ScheduleDataCompare {
    bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
      return SD2->SchedulingPriority < SD1->SchedulingPriority;
    }
  };
  std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;

  // Ensure that all dependency data is updated and fill the ready-list with
  // initial instructions.
  int Idx = 0;
  int NumToSchedule = 0;
  for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
       I = I->getNextNode()) {
    BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) {
      assert(SD->isPartOfBundle() == (getTreeEntry(SD->Inst) != nullptr) &&
             "scheduler and vectorizer bundle mismatch");
      SD->FirstInBundle->SchedulingPriority = Idx++;
      if (SD->isSchedulingEntity()) {
        BS->calculateDependencies(SD, false, this);
        NumToSchedule++;
      }
    });
  }
  BS->initialFillReadyList(ReadyInsts);

  Instruction *LastScheduledInst = BS->ScheduleEnd;

  // Do the "real" scheduling.
  while (!ReadyInsts.empty()) {
    ScheduleData *picked = *ReadyInsts.begin();
    ReadyInsts.erase(ReadyInsts.begin());

    // Move the scheduled instruction(s) to their dedicated places, if not
    // there yet.
    ScheduleData *BundleMember = picked;
    while (BundleMember) {
      Instruction *pickedInst = BundleMember->Inst;
      if (LastScheduledInst->getNextNode() != pickedInst) {
        BS->BB->getInstList().remove(pickedInst);
        BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
                                     pickedInst);
      }
      LastScheduledInst = pickedInst;
      BundleMember = BundleMember->NextInBundle;
    }

    BS->schedule(picked, ReadyInsts);
    NumToSchedule--;
  }
  assert(NumToSchedule == 0 && "could not schedule all instructions");

  // Avoid duplicate scheduling of the block.
  BS->ScheduleStart = nullptr;
}

unsigned BoUpSLP::getVectorElementSize(Value *V) {
  // If V is a store, just return the width of the stored value (or the value
  // truncated just before storing) without traversing the expression tree.
  // This is the common case.
  if (auto *Store = dyn_cast<StoreInst>(V)) {
    if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand()))
      return DL->getTypeSizeInBits(Trunc->getSrcTy());
    return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
  }

  if (auto *IEI = dyn_cast<InsertElementInst>(V))
    return getVectorElementSize(IEI->getOperand(1));

  auto E = InstrElementSize.find(V);
  if (E != InstrElementSize.end())
    return E->second;

  // If V is not a store, we can traverse the expression tree to find loads
  // that feed it. The type of the loaded value may indicate a more suitable
  // width than V's type. We want to base the vector element size on the width
  // of memory operations where possible.
  SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist;
  SmallPtrSet<Instruction *, 16> Visited;
  if (auto *I = dyn_cast<Instruction>(V)) {
    Worklist.emplace_back(I, I->getParent());
    Visited.insert(I);
  }

  // Traverse the expression tree in bottom-up order looking for loads. If we
  // encounter an instruction we don't yet handle, we give up.
  auto Width = 0u;
  while (!Worklist.empty()) {
    Instruction *I;
    BasicBlock *Parent;
    std::tie(I, Parent) = Worklist.pop_back_val();

    // We should only be looking at scalar instructions here. If the current
    // instruction has a vector type, skip.
    auto *Ty = I->getType();
    if (isa<VectorType>(Ty))
      continue;

    // If the current instruction is a load, update Width to reflect the
    // width of the loaded value.
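    // For example (illustrative IR), in the chain
    //   %x = load i8, i8* %p
    //   %e = zext i8 %x to i32
    // visiting %e reaches the load and yields a width of 8 rather than the 32
    // suggested by the type of %e, allowing more lanes per vector register.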
6167 if (isa<LoadInst>(I) || isa<ExtractElementInst>(I) || 6168 isa<ExtractValueInst>(I)) 6169 Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty)); 6170 6171 // Otherwise, we need to visit the operands of the instruction. We only 6172 // handle the interesting cases from buildTree here. If an operand is an 6173 // instruction we haven't yet visited and from the same basic block as the 6174 // user or the use is a PHI node, we add it to the worklist. 6175 else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 6176 isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I) || 6177 isa<UnaryOperator>(I)) { 6178 for (Use &U : I->operands()) 6179 if (auto *J = dyn_cast<Instruction>(U.get())) 6180 if (Visited.insert(J).second && 6181 (isa<PHINode>(I) || J->getParent() == Parent)) 6182 Worklist.emplace_back(J, J->getParent()); 6183 } else { 6184 break; 6185 } 6186 } 6187 6188 // If we didn't encounter a memory access in the expression tree, or if we 6189 // gave up for some reason, just return the width of V. Otherwise, return the 6190 // maximum width we found. 6191 if (!Width) { 6192 if (auto *CI = dyn_cast<CmpInst>(V)) 6193 V = CI->getOperand(0); 6194 Width = DL->getTypeSizeInBits(V->getType()); 6195 } 6196 6197 for (Instruction *I : Visited) 6198 InstrElementSize[I] = Width; 6199 6200 return Width; 6201 } 6202 6203 // Determine if a value V in a vectorizable expression Expr can be demoted to a 6204 // smaller type with a truncation. We collect the values that will be demoted 6205 // in ToDemote and additional roots that require investigating in Roots. 6206 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr, 6207 SmallVectorImpl<Value *> &ToDemote, 6208 SmallVectorImpl<Value *> &Roots) { 6209 // We can always demote constants. 6210 if (isa<Constant>(V)) { 6211 ToDemote.push_back(V); 6212 return true; 6213 } 6214 6215 // If the value is not an instruction in the expression with only one use, it 6216 // cannot be demoted. 6217 auto *I = dyn_cast<Instruction>(V); 6218 if (!I || !I->hasOneUse() || !Expr.count(I)) 6219 return false; 6220 6221 switch (I->getOpcode()) { 6222 6223 // We can always demote truncations and extensions. Since truncations can 6224 // seed additional demotion, we save the truncated value. 6225 case Instruction::Trunc: 6226 Roots.push_back(I->getOperand(0)); 6227 break; 6228 case Instruction::ZExt: 6229 case Instruction::SExt: 6230 if (isa<ExtractElementInst>(I->getOperand(0)) || 6231 isa<InsertElementInst>(I->getOperand(0))) 6232 return false; 6233 break; 6234 6235 // We can demote certain binary operations if we can demote both of their 6236 // operands. 6237 case Instruction::Add: 6238 case Instruction::Sub: 6239 case Instruction::Mul: 6240 case Instruction::And: 6241 case Instruction::Or: 6242 case Instruction::Xor: 6243 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) || 6244 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots)) 6245 return false; 6246 break; 6247 6248 // We can demote selects if we can demote their true and false values. 6249 case Instruction::Select: { 6250 SelectInst *SI = cast<SelectInst>(I); 6251 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) || 6252 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots)) 6253 return false; 6254 break; 6255 } 6256 6257 // We can demote phis if we can demote all their incoming operands. Note that 6258 // we don't need to worry about cycles since we ensure single use above. 
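  // (For instance, a phi whose incoming values are a constant and a
  //  single-use trunc inside the expression can itself be demoted, since
  //  both incoming values satisfy the rules above.)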
6259 case Instruction::PHI: { 6260 PHINode *PN = cast<PHINode>(I); 6261 for (Value *IncValue : PN->incoming_values()) 6262 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots)) 6263 return false; 6264 break; 6265 } 6266 6267 // Otherwise, conservatively give up. 6268 default: 6269 return false; 6270 } 6271 6272 // Record the value that we can demote. 6273 ToDemote.push_back(V); 6274 return true; 6275 } 6276 6277 void BoUpSLP::computeMinimumValueSizes() { 6278 // If there are no external uses, the expression tree must be rooted by a 6279 // store. We can't demote in-memory values, so there is nothing to do here. 6280 if (ExternalUses.empty()) 6281 return; 6282 6283 // We only attempt to truncate integer expressions. 6284 auto &TreeRoot = VectorizableTree[0]->Scalars; 6285 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 6286 if (!TreeRootIT) 6287 return; 6288 6289 // If the expression is not rooted by a store, these roots should have 6290 // external uses. We will rely on InstCombine to rewrite the expression in 6291 // the narrower type. However, InstCombine only rewrites single-use values. 6292 // This means that if a tree entry other than a root is used externally, it 6293 // must have multiple uses and InstCombine will not rewrite it. The code 6294 // below ensures that only the roots are used externally. 6295 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end()); 6296 for (auto &EU : ExternalUses) 6297 if (!Expr.erase(EU.Scalar)) 6298 return; 6299 if (!Expr.empty()) 6300 return; 6301 6302 // Collect the scalar values of the vectorizable expression. We will use this 6303 // context to determine which values can be demoted. If we see a truncation, 6304 // we mark it as seeding another demotion. 6305 for (auto &EntryPtr : VectorizableTree) 6306 Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end()); 6307 6308 // Ensure the roots of the vectorizable tree don't form a cycle. They must 6309 // have a single external user that is not in the vectorizable tree. 6310 for (auto *Root : TreeRoot) 6311 if (!Root->hasOneUse() || Expr.count(*Root->user_begin())) 6312 return; 6313 6314 // Conservatively determine if we can actually truncate the roots of the 6315 // expression. Collect the values that can be demoted in ToDemote and 6316 // additional roots that require investigating in Roots. 6317 SmallVector<Value *, 32> ToDemote; 6318 SmallVector<Value *, 4> Roots; 6319 for (auto *Root : TreeRoot) 6320 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots)) 6321 return; 6322 6323 // The maximum bit width required to represent all the values that can be 6324 // demoted without loss of precision. It would be safe to truncate the roots 6325 // of the expression to this width. 6326 auto MaxBitWidth = 8u; 6327 6328 // We first check if all the bits of the roots are demanded. If they're not, 6329 // we can truncate the roots to this narrower type. 6330 for (auto *Root : TreeRoot) { 6331 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 6332 MaxBitWidth = std::max<unsigned>( 6333 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth); 6334 } 6335 6336 // True if the roots can be zero-extended back to their original type, rather 6337 // than sign-extended. We know that if the leading bits are not demanded, we 6338 // can safely zero-extend. So we initialize IsKnownPositive to True. 6339 bool IsKnownPositive = true; 6340 6341 // If all the bits of the roots are demanded, we can try a little harder to 6342 // compute a narrower type. 
This can happen, for example, if the roots are
  // getelementptr indices. InstCombine promotes these indices to the
  // pointer width. Thus, all their bits are technically demanded even
  // though the address computation might be vectorized in a smaller type.
  //
  // We start by looking at each entry that can be demoted. We compute the
  // maximum bit width required to store the scalar by using ValueTracking
  // to compute the number of high-order bits we can truncate.
  if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
      llvm::all_of(TreeRoot, [](Value *R) {
        assert(R->hasOneUse() && "Root should have only one use!");
        return isa<GetElementPtrInst>(R->user_back());
      })) {
    MaxBitWidth = 8u;

    // Determine if the sign bit of all the roots is known to be zero. If
    // not, IsKnownPositive is set to False.
    IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
      KnownBits Known = computeKnownBits(R, *DL);
      return Known.isNonNegative();
    });

    // Determine the maximum number of bits required to store the scalar
    // values.
    for (auto *Scalar : ToDemote) {
      auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
      auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
      MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
    }

    // If we can't prove that the sign bit is zero, we must add one to the
    // maximum bit width to account for the unknown sign bit. This preserves
    // the existing sign bit so we can safely sign-extend the root back to
    // the original type. Otherwise, if we know the sign bit is zero, we will
    // zero-extend the root instead.
    //
    // FIXME: This is somewhat suboptimal, as there will be cases where
    //        adding one to the maximum bit width will yield a
    //        larger-than-necessary type. In general, we need to add an
    //        extra bit only if we can't prove that the upper bit of the
    //        original type is equal to the upper bit of the proposed
    //        smaller type. If these two bits are the same (either zero or
    //        one) we know that sign-extending from the smaller type will
    //        result in the same value. Here, since we can't yet prove this,
    //        we are just making the proposed smaller type larger to ensure
    //        correctness.
    if (!IsKnownPositive)
      ++MaxBitWidth;
  }

  // Round MaxBitWidth up to the next power-of-two.
  if (!isPowerOf2_64(MaxBitWidth))
    MaxBitWidth = NextPowerOf2(MaxBitWidth);

  // If the maximum bit width we compute is less than the width of the roots'
  // type, we can proceed with the narrowing. Otherwise, do nothing.
  if (MaxBitWidth >= TreeRootIT->getBitWidth())
    return;

  // If we can truncate the root, we must collect additional values that
  // might be demoted as a result. That is, those seeded by truncations we
  // will modify.
  while (!Roots.empty())
    collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);

  // Finally, map the values we can demote to the maximum bit width we
  // computed.
  for (auto *Scalar : ToDemote)
    MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
}

namespace {

/// The SLPVectorizer Pass.
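/// This is a thin legacy-pass-manager wrapper: the actual logic lives in
/// SLPVectorizerPass (see run() below), which the new pass manager invokes
/// directly.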
6414 struct SLPVectorizer : public FunctionPass { 6415 SLPVectorizerPass Impl; 6416 6417 /// Pass identification, replacement for typeid 6418 static char ID; 6419 6420 explicit SLPVectorizer() : FunctionPass(ID) { 6421 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry()); 6422 } 6423 6424 bool doInitialization(Module &M) override { 6425 return false; 6426 } 6427 6428 bool runOnFunction(Function &F) override { 6429 if (skipFunction(F)) 6430 return false; 6431 6432 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 6433 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 6434 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 6435 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr; 6436 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 6437 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 6438 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 6439 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 6440 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 6441 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 6442 6443 return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 6444 } 6445 6446 void getAnalysisUsage(AnalysisUsage &AU) const override { 6447 FunctionPass::getAnalysisUsage(AU); 6448 AU.addRequired<AssumptionCacheTracker>(); 6449 AU.addRequired<ScalarEvolutionWrapperPass>(); 6450 AU.addRequired<AAResultsWrapperPass>(); 6451 AU.addRequired<TargetTransformInfoWrapperPass>(); 6452 AU.addRequired<LoopInfoWrapperPass>(); 6453 AU.addRequired<DominatorTreeWrapperPass>(); 6454 AU.addRequired<DemandedBitsWrapperPass>(); 6455 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 6456 AU.addRequired<InjectTLIMappingsLegacy>(); 6457 AU.addPreserved<LoopInfoWrapperPass>(); 6458 AU.addPreserved<DominatorTreeWrapperPass>(); 6459 AU.addPreserved<AAResultsWrapperPass>(); 6460 AU.addPreserved<GlobalsAAWrapperPass>(); 6461 AU.setPreservesCFG(); 6462 } 6463 }; 6464 6465 } // end anonymous namespace 6466 6467 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) { 6468 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); 6469 auto *TTI = &AM.getResult<TargetIRAnalysis>(F); 6470 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); 6471 auto *AA = &AM.getResult<AAManager>(F); 6472 auto *LI = &AM.getResult<LoopAnalysis>(F); 6473 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 6474 auto *AC = &AM.getResult<AssumptionAnalysis>(F); 6475 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F); 6476 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 6477 6478 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 6479 if (!Changed) 6480 return PreservedAnalyses::all(); 6481 6482 PreservedAnalyses PA; 6483 PA.preserveSet<CFGAnalyses>(); 6484 return PA; 6485 } 6486 6487 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, 6488 TargetTransformInfo *TTI_, 6489 TargetLibraryInfo *TLI_, AAResults *AA_, 6490 LoopInfo *LI_, DominatorTree *DT_, 6491 AssumptionCache *AC_, DemandedBits *DB_, 6492 OptimizationRemarkEmitter *ORE_) { 6493 if (!RunSLPVectorization) 6494 return false; 6495 SE = SE_; 6496 TTI = TTI_; 6497 TLI = TLI_; 6498 AA = AA_; 6499 LI = LI_; 6500 DT = DT_; 6501 AC = AC_; 6502 DB = DB_; 6503 DL = &F.getParent()->getDataLayout(); 6504 6505 Stores.clear(); 6506 GEPs.clear(); 6507 bool Changed = false; 6508 6509 // If the target claims to have no vector registers don't 
attempt
  // vectorization.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)))
    return false;

  // Don't vectorize when the attribute NoImplicitFloat is used.
  if (F.hasFnAttribute(Attribute::NoImplicitFloat))
    return false;

  LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");

  // Use the bottom-up SLP vectorizer to construct chains that start with
  // store instructions.
  BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);

  // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
  // delete instructions.

  // Scan the blocks in the function in post order.
  for (auto *BB : post_order(&F.getEntryBlock())) {
    collectSeedInstructions(BB);

    // Vectorize trees that end at stores.
    if (!Stores.empty()) {
      LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
                        << " underlying objects.\n");
      Changed |= vectorizeStoreChains(R);
    }

    // Vectorize trees that end at reductions.
    Changed |= vectorizeChainsInBlock(BB, R);

    // Vectorize the index computations of getelementptr instructions. This
    // is primarily intended to catch gather-like idioms ending at
    // non-consecutive loads.
    if (!GEPs.empty()) {
      LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
                        << " underlying objects.\n");
      Changed |= vectorizeGEPIndices(BB, R);
    }
  }

  if (Changed) {
    R.optimizeGatherSequence();
    LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
                                            unsigned Idx) {
  LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size()
                    << "\n");
  const unsigned Sz = R.getVectorElementSize(Chain[0]);
  const unsigned MinVF = R.getMinVecRegSize() / Sz;
  unsigned VF = Chain.size();

  if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF)
    return false;

  LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx
                    << "\n");

  R.buildTree(Chain);
  Optional<ArrayRef<unsigned>> Order = R.bestOrder();
  // TODO: Handle orders of size less than number of elements in the vector.
  if (Order && Order->size() == Chain.size()) {
    // TODO: reorder tree nodes without tree rebuilding.
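    // (For example, if bestOrder() returned {3, 2, 1, 0}, the tree would be
    //  rebuilt with the stores taken in reversed order. Illustrative only;
    //  the actual permutation comes from the tree analysis.)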
    SmallVector<Value *, 4> ReorderedOps(Chain.rbegin(), Chain.rend());
    llvm::transform(*Order, ReorderedOps.begin(),
                    [Chain](const unsigned Idx) { return Chain[Idx]; });
    R.buildTree(ReorderedOps);
  }
  if (R.isTreeTinyAndNotFullyVectorizable())
    return false;
  if (R.isLoadCombineCandidate())
    return false;

  R.computeMinimumValueSizes();

  InstructionCost Cost = R.getTreeCost();

  LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF = " << VF
                    << "\n");
  if (Cost < -SLPCostThreshold) {
    LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n");

    using namespace ore;

    R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
                                        cast<StoreInst>(Chain[0]))
                     << "Stores SLP vectorized with cost " << NV("Cost", Cost)
                     << " and with tree size "
                     << NV("TreeSize", R.getTreeSize()));

    R.vectorizeTree();
    return true;
  }

  return false;
}

bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
                                        BoUpSLP &R) {
  // We may run into multiple chains that merge into a single chain. We mark
  // the stores that we vectorized so that we don't visit the same store
  // twice.
  BoUpSLP::ValueSet VectorizedStores;
  bool Changed = false;

  int E = Stores.size();
  SmallBitVector Tails(E, false);
  int MaxIter = MaxStoreLookup.getValue();
  SmallVector<std::pair<int, int>, 16> ConsecutiveChain(
      E, std::make_pair(E, INT_MAX));
  SmallVector<SmallBitVector, 4> CheckedPairs(E, SmallBitVector(E, false));
  int IterCnt;
  auto &&FindConsecutiveAccess = [this, &Stores, &Tails, &IterCnt, MaxIter,
                                  &CheckedPairs,
                                  &ConsecutiveChain](int K, int Idx) {
    if (IterCnt >= MaxIter)
      return true;
    if (CheckedPairs[Idx].test(K))
      return ConsecutiveChain[K].second == 1 &&
             ConsecutiveChain[K].first == Idx;
    ++IterCnt;
    CheckedPairs[Idx].set(K);
    CheckedPairs[K].set(Idx);
    Optional<int> Diff = getPointersDiff(Stores[K]->getPointerOperand(),
                                         Stores[Idx]->getPointerOperand(), *DL,
                                         *SE, /*StrictCheck=*/true);
    if (!Diff || *Diff == 0)
      return false;
    int Val = *Diff;
    if (Val < 0) {
      if (ConsecutiveChain[Idx].second > -Val) {
        Tails.set(K);
        ConsecutiveChain[Idx] = std::make_pair(K, -Val);
      }
      return false;
    }
    if (ConsecutiveChain[K].second <= Val)
      return false;

    Tails.set(Idx);
    ConsecutiveChain[K] = std::make_pair(Idx, Val);
    return Val == 1;
  };
  // Do a quadratic search on all of the given stores in reverse order and
  // find all of the pairs of stores that follow each other.
  for (int Idx = E - 1; Idx >= 0; --Idx) {
    // If a store has multiple consecutive store candidates, search according
    // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ...
    // This is because usually pairing with the immediately succeeding or
    // preceding candidate creates the best chance to find an SLP
    // vectorization opportunity.
    const int MaxLookDepth = std::max(E - Idx, Idx + 1);
    IterCnt = 0;
    for (int Offset = 1, F = MaxLookDepth; Offset < F; ++Offset)
      if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) ||
          (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx)))
        break;
  }

  // Tracks if we tried to vectorize stores starting from the given tail
  // already.
  SmallBitVector TriedTails(E, false);
  // For stores that start but don't end a link in the chain:
  for (int Cnt = E; Cnt > 0; --Cnt) {
    int I = Cnt - 1;
    if (ConsecutiveChain[I].first == E || Tails.test(I))
      continue;
    // We found a store instr that starts a chain. Now follow the chain and
    // try to vectorize it.
    BoUpSLP::ValueList Operands;
    // Collect the chain into a list.
    while (I != E && !VectorizedStores.count(Stores[I])) {
      Operands.push_back(Stores[I]);
      Tails.set(I);
      if (ConsecutiveChain[I].second != 1) {
        // Mark the new end in the chain and go back, if required. It might
        // be required if the original stores come in reversed order, for
        // example.
        if (ConsecutiveChain[I].first != E &&
            Tails.test(ConsecutiveChain[I].first) && !TriedTails.test(I) &&
            !VectorizedStores.count(Stores[ConsecutiveChain[I].first])) {
          TriedTails.set(I);
          Tails.reset(ConsecutiveChain[I].first);
          if (Cnt < ConsecutiveChain[I].first + 2)
            Cnt = ConsecutiveChain[I].first + 2;
        }
        break;
      }
      // Move to the next value in the chain.
      I = ConsecutiveChain[I].first;
    }
    assert(!Operands.empty() && "Expected non-empty list of stores.");

    unsigned MaxVecRegSize = R.getMaxVecRegSize();
    unsigned EltSize = R.getVectorElementSize(Operands[0]);
    unsigned MaxElts = llvm::PowerOf2Floor(MaxVecRegSize / EltSize);

    unsigned MinVF = std::max(2U, R.getMinVecRegSize() / EltSize);
    unsigned MaxVF = std::min(R.getMaximumVF(EltSize, Instruction::Store),
                              MaxElts);

    // FIXME: Is division-by-2 the correct step? Should we assert that the
    // register size is a power-of-2?
    unsigned StartIdx = 0;
    for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) {
      for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) {
        ArrayRef<Value *> Slice = makeArrayRef(Operands).slice(Cnt, Size);
        if (!VectorizedStores.count(Slice.front()) &&
            !VectorizedStores.count(Slice.back()) &&
            vectorizeStoreChain(Slice, R, Cnt)) {
          // Mark the vectorized stores so that we don't vectorize them
          // again.
          VectorizedStores.insert(Slice.begin(), Slice.end());
          Changed = true;
          // If we vectorized the initial block, there is no need to try to
          // vectorize it again.
          if (Cnt == StartIdx)
            StartIdx += Size;
          Cnt += Size;
          continue;
        }
        ++Cnt;
      }
      // Check if the whole array was vectorized already - exit.
      if (StartIdx >= Operands.size())
        break;
    }
  }

  return Changed;
}

void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
  // Initialize the collections. We will make a single pass over the block.
  Stores.clear();
  GEPs.clear();

  // Visit the store and getelementptr instructions in BB and organize them in
  // Stores and GEPs according to the underlying objects of their pointer
  // operands.
  for (Instruction &I : *BB) {
    // Ignore store instructions that are volatile or have a pointer operand
    // that doesn't point to a scalar type.
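    // (For instance, both of these are rejected as seeds:
    //    store volatile i32 %v, i32* %p           ; not a simple store
    //    store {i32, i32} %agg, {i32, i32}* %q    ; not a valid element type
    //  )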
    if (auto *SI = dyn_cast<StoreInst>(&I)) {
      if (!SI->isSimple())
        continue;
      if (!isValidElementType(SI->getValueOperand()->getType()))
        continue;
      Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI);
    }

    // Ignore getelementptr instructions that have more than one index, a
    // constant index, or a pointer operand that doesn't point to a scalar
    // type.
    else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
      auto Idx = GEP->idx_begin()->get();
      if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
        continue;
      if (!isValidElementType(Idx->getType()))
        continue;
      if (GEP->getType()->isVectorTy())
        continue;
      GEPs[GEP->getPointerOperand()].push_back(GEP);
    }
  }
}

bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
  if (!A || !B)
    return false;
  Value *VL[] = {A, B};
  return tryToVectorizeList(VL, R, /*AllowReorder=*/true);
}

bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
                                           bool AllowReorder) {
  if (VL.size() < 2)
    return false;

  LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = "
                    << VL.size() << ".\n");

  // Check that all of the parts are instructions of the same type; we permit
  // an alternate opcode via InstructionsState.
  InstructionsState S = getSameOpcode(VL);
  if (!S.getOpcode())
    return false;

  Instruction *I0 = cast<Instruction>(S.OpValue);
  // Make sure invalid types (including vector types) are rejected before
  // determining the vectorization factor for scalar instructions.
  for (Value *V : VL) {
    Type *Ty = V->getType();
    if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) {
      // NOTE: the following will give the user an internal LLVM type name,
      // which may not be useful.
      R.getORE()->emit([&]() {
        std::string type_str;
        llvm::raw_string_ostream rso(type_str);
        Ty->print(rso);
        return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0)
               << "Cannot SLP vectorize list: type "
               << rso.str() + " is unsupported by vectorizer";
      });
      return false;
    }
  }

  unsigned Sz = R.getVectorElementSize(I0);
  unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz);
  unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
  MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF);
  if (MaxVF < 2) {
    R.getORE()->emit([&]() {
      return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0)
             << "Cannot SLP vectorize list: vectorization factor "
             << "less than 2 is not supported";
    });
    return false;
  }

  bool Changed = false;
  bool CandidateFound = false;
  InstructionCost MinCost = SLPCostThreshold.getValue();
  Type *ScalarTy = VL[0]->getType();
  if (auto *IE = dyn_cast<InsertElementInst>(VL[0]))
    ScalarTy = IE->getOperand(1)->getType();

  unsigned NextInst = 0, MaxInst = VL.size();
  for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) {
    // No actual vectorization should happen if the number of parts is the
    // same as the provided vectorization factor (i.e. the scalar type is
    // used for vector code during codegen).
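    // (Illustrative check: on a target whose widest vector register is 128
    //  bits, a <4 x i64> candidate legalizes into 2 parts, 2 != 4, so it is
    //  kept; a <2 x i64> candidate on a 64-bit-vector target splits into 2
    //  scalar-sized parts, 2 == 2, so that factor is skipped.)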
    auto *VecTy = FixedVectorType::get(ScalarTy, VF);
    if (TTI->getNumberOfParts(VecTy) == VF)
      continue;
    for (unsigned I = NextInst; I < MaxInst; ++I) {
      unsigned OpsWidth = 0;

      if (I + VF > MaxInst)
        OpsWidth = MaxInst - I;
      else
        OpsWidth = VF;

      if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
        break;

      ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);
      // Check that a previous iteration of this loop did not delete the
      // Value.
      if (llvm::any_of(Ops, [&R](Value *V) {
            auto *I = dyn_cast<Instruction>(V);
            return I && R.isDeleted(I);
          }))
        continue;

      LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
                        << "\n");

      R.buildTree(Ops);
      Optional<ArrayRef<unsigned>> Order = R.bestOrder();
      // TODO: check if we can allow reordering for more cases.
      if (AllowReorder && Order) {
        // TODO: reorder tree nodes without tree rebuilding.
        // Conceptually, there is nothing actually preventing us from trying
        // to reorder a larger list. In fact, we do exactly this when
        // vectorizing reductions. However, at this point, we only expect to
        // get here when there are exactly two operations.
        assert(Ops.size() == 2);
        Value *ReorderedOps[] = {Ops[1], Ops[0]};
        R.buildTree(ReorderedOps, None);
      }
      if (R.isTreeTinyAndNotFullyVectorizable())
        continue;

      R.computeMinimumValueSizes();
      InstructionCost Cost = R.getTreeCost();
      CandidateFound = true;
      MinCost = std::min(MinCost, Cost);

      if (Cost < -SLPCostThreshold) {
        LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost: " << Cost
                          << ".\n");
        R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
                                            cast<Instruction>(Ops[0]))
                         << "SLP vectorized with cost " << ore::NV("Cost", Cost)
                         << " and with tree size "
                         << ore::NV("TreeSize", R.getTreeSize()));

        R.vectorizeTree();
        // Move to the next bundle.
        I += VF - 1;
        NextInst = I + 1;
        Changed = true;
      }
    }
  }

  if (!Changed && CandidateFound) {
    R.getORE()->emit([&]() {
      return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0)
             << "List vectorization was possible but not beneficial with cost "
             << ore::NV("Cost", MinCost) << " >= "
             << ore::NV("Threshold", -SLPCostThreshold);
    });
  } else if (!Changed) {
    R.getORE()->emit([&]() {
      return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0)
             << "Cannot SLP vectorize list: vectorization was impossible"
             << " with available vectorization factors";
    });
  }
  return Changed;
}

bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
  if (!I)
    return false;

  if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I))
    return false;

  Value *P = I->getParent();

  // Vectorize in current basic block only.
  auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
  auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
  if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
    return false;

  // Try to vectorize V.
  if (tryToVectorizePair(Op0, Op1, R))
    return true;

  auto *A = dyn_cast<BinaryOperator>(Op0);
  auto *B = dyn_cast<BinaryOperator>(Op1);
  // Try to skip B.
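  // (I.e. for a root of the form A op (B0 op B1), also try the pairs
  //  (A, B0) and (A, B1) when the direct pair (Op0, Op1) did not
  //  vectorize.)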
6944 if (B && B->hasOneUse()) { 6945 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); 6946 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); 6947 if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R)) 6948 return true; 6949 if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R)) 6950 return true; 6951 } 6952 6953 // Try to skip A. 6954 if (A && A->hasOneUse()) { 6955 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 6956 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 6957 if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R)) 6958 return true; 6959 if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R)) 6960 return true; 6961 } 6962 return false; 6963 } 6964 6965 namespace { 6966 6967 /// Model horizontal reductions. 6968 /// 6969 /// A horizontal reduction is a tree of reduction instructions that has values 6970 /// that can be put into a vector as its leaves. For example: 6971 /// 6972 /// mul mul mul mul 6973 /// \ / \ / 6974 /// + + 6975 /// \ / 6976 /// + 6977 /// This tree has "mul" as its leaf values and "+" as its reduction 6978 /// instructions. A reduction can feed into a store or a binary operation 6979 /// feeding a phi. 6980 /// ... 6981 /// \ / 6982 /// + 6983 /// | 6984 /// phi += 6985 /// 6986 /// Or: 6987 /// ... 6988 /// \ / 6989 /// + 6990 /// | 6991 /// *p = 6992 /// 6993 class HorizontalReduction { 6994 using ReductionOpsType = SmallVector<Value *, 16>; 6995 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>; 6996 ReductionOpsListType ReductionOps; 6997 SmallVector<Value *, 32> ReducedVals; 6998 // Use map vector to make stable output. 6999 MapVector<Instruction *, Value *> ExtraArgs; 7000 WeakTrackingVH ReductionRoot; 7001 /// The type of reduction operation. 7002 RecurKind RdxKind; 7003 7004 /// Checks if instruction is associative and can be vectorized. 7005 static bool isVectorizable(RecurKind Kind, Instruction *I) { 7006 if (Kind == RecurKind::None) 7007 return false; 7008 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind)) 7009 return true; 7010 7011 if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) { 7012 // FP min/max are associative except for NaN and -0.0. We do not 7013 // have to rule out -0.0 here because the intrinsic semantics do not 7014 // specify a fixed result for it. 7015 return I->getFastMathFlags().noNaNs(); 7016 } 7017 7018 return I->isAssociative(); 7019 } 7020 7021 /// Checks if the ParentStackElem.first should be marked as a reduction 7022 /// operation with an extra argument or as extra argument itself. 7023 void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem, 7024 Value *ExtraArg) { 7025 if (ExtraArgs.count(ParentStackElem.first)) { 7026 ExtraArgs[ParentStackElem.first] = nullptr; 7027 // We ran into something like: 7028 // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg. 7029 // The whole ParentStackElem.first should be considered as an extra value 7030 // in this case. 7031 // Do not perform analysis of remaining operands of ParentStackElem.first 7032 // instruction, this whole instruction is an extra argument. 7033 ParentStackElem.second = getNumberOfOperands(ParentStackElem.first); 7034 } else { 7035 // We ran into something like: 7036 // ParentStackElem.first += ... + ExtraArg + ... 7037 ExtraArgs[ParentStackElem.first] = ExtraArg; 7038 } 7039 } 7040 7041 /// Creates reduction operation with the current opcode. 
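  /// For integer min/max kinds this emits either a single intrinsic call or,
  /// when \p UseSelect is set, an explicit compare + select pair; e.g. a
  /// signed-max step may be materialized as either
  ///   %r = call i32 @llvm.smax.i32(i32 %lhs, i32 %rhs)
  /// or
  ///   %c = icmp sgt i32 %lhs, %rhs
  ///   %r = select i1 %c, i32 %lhs, i32 %rhs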
7042 static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS, 7043 Value *RHS, const Twine &Name, bool UseSelect) { 7044 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind); 7045 switch (Kind) { 7046 case RecurKind::Add: 7047 case RecurKind::Mul: 7048 case RecurKind::Or: 7049 case RecurKind::And: 7050 case RecurKind::Xor: 7051 case RecurKind::FAdd: 7052 case RecurKind::FMul: 7053 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 7054 Name); 7055 case RecurKind::FMax: 7056 return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS); 7057 case RecurKind::FMin: 7058 return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS); 7059 case RecurKind::SMax: 7060 if (UseSelect) { 7061 Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name); 7062 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 7063 } 7064 return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS); 7065 case RecurKind::SMin: 7066 if (UseSelect) { 7067 Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name); 7068 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 7069 } 7070 return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS); 7071 case RecurKind::UMax: 7072 if (UseSelect) { 7073 Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name); 7074 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 7075 } 7076 return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS); 7077 case RecurKind::UMin: 7078 if (UseSelect) { 7079 Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name); 7080 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 7081 } 7082 return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS); 7083 default: 7084 llvm_unreachable("Unknown reduction operation."); 7085 } 7086 } 7087 7088 /// Creates reduction operation with the current opcode with the IR flags 7089 /// from \p ReductionOps. 7090 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 7091 Value *RHS, const Twine &Name, 7092 const ReductionOpsListType &ReductionOps) { 7093 bool UseSelect = ReductionOps.size() == 2; 7094 assert((!UseSelect || isa<SelectInst>(ReductionOps[1][0])) && 7095 "Expected cmp + select pairs for reduction"); 7096 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect); 7097 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 7098 if (auto *Sel = dyn_cast<SelectInst>(Op)) { 7099 propagateIRFlags(Sel->getCondition(), ReductionOps[0]); 7100 propagateIRFlags(Op, ReductionOps[1]); 7101 return Op; 7102 } 7103 } 7104 propagateIRFlags(Op, ReductionOps[0]); 7105 return Op; 7106 } 7107 7108 /// Creates reduction operation with the current opcode with the IR flags 7109 /// from \p I. 
7110 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 7111 Value *RHS, const Twine &Name, Instruction *I) { 7112 auto *SelI = dyn_cast<SelectInst>(I); 7113 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, SelI != nullptr); 7114 if (SelI && RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 7115 if (auto *Sel = dyn_cast<SelectInst>(Op)) 7116 propagateIRFlags(Sel->getCondition(), SelI->getCondition()); 7117 } 7118 propagateIRFlags(Op, I); 7119 return Op; 7120 } 7121 7122 static RecurKind getRdxKind(Instruction *I) { 7123 assert(I && "Expected instruction for reduction matching"); 7124 TargetTransformInfo::ReductionFlags RdxFlags; 7125 if (match(I, m_Add(m_Value(), m_Value()))) 7126 return RecurKind::Add; 7127 if (match(I, m_Mul(m_Value(), m_Value()))) 7128 return RecurKind::Mul; 7129 if (match(I, m_And(m_Value(), m_Value()))) 7130 return RecurKind::And; 7131 if (match(I, m_Or(m_Value(), m_Value()))) 7132 return RecurKind::Or; 7133 if (match(I, m_Xor(m_Value(), m_Value()))) 7134 return RecurKind::Xor; 7135 if (match(I, m_FAdd(m_Value(), m_Value()))) 7136 return RecurKind::FAdd; 7137 if (match(I, m_FMul(m_Value(), m_Value()))) 7138 return RecurKind::FMul; 7139 7140 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value()))) 7141 return RecurKind::FMax; 7142 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value()))) 7143 return RecurKind::FMin; 7144 7145 // This matches either cmp+select or intrinsics. SLP is expected to handle 7146 // either form. 7147 // TODO: If we are canonicalizing to intrinsics, we can remove several 7148 // special-case paths that deal with selects. 7149 if (match(I, m_SMax(m_Value(), m_Value()))) 7150 return RecurKind::SMax; 7151 if (match(I, m_SMin(m_Value(), m_Value()))) 7152 return RecurKind::SMin; 7153 if (match(I, m_UMax(m_Value(), m_Value()))) 7154 return RecurKind::UMax; 7155 if (match(I, m_UMin(m_Value(), m_Value()))) 7156 return RecurKind::UMin; 7157 7158 if (auto *Select = dyn_cast<SelectInst>(I)) { 7159 // Try harder: look for min/max pattern based on instructions producing 7160 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2). 7161 // During the intermediate stages of SLP, it's very common to have 7162 // pattern like this (since optimizeGatherSequence is run only once 7163 // at the end): 7164 // %1 = extractelement <2 x i32> %a, i32 0 7165 // %2 = extractelement <2 x i32> %a, i32 1 7166 // %cond = icmp sgt i32 %1, %2 7167 // %3 = extractelement <2 x i32> %a, i32 0 7168 // %4 = extractelement <2 x i32> %a, i32 1 7169 // %select = select i1 %cond, i32 %3, i32 %4 7170 CmpInst::Predicate Pred; 7171 Instruction *L1; 7172 Instruction *L2; 7173 7174 Value *LHS = Select->getTrueValue(); 7175 Value *RHS = Select->getFalseValue(); 7176 Value *Cond = Select->getCondition(); 7177 7178 // TODO: Support inverse predicates. 
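      // (E.g. the equivalent inverse form
      //    %cond = icmp slt i32 %a, %b
      //    %m = select i1 %cond, i32 %b, i32 %a
      //  is also a signed max, but is not matched by the checks below.)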
7179 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) { 7180 if (!isa<ExtractElementInst>(RHS) || 7181 !L2->isIdenticalTo(cast<Instruction>(RHS))) 7182 return RecurKind::None; 7183 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) { 7184 if (!isa<ExtractElementInst>(LHS) || 7185 !L1->isIdenticalTo(cast<Instruction>(LHS))) 7186 return RecurKind::None; 7187 } else { 7188 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS)) 7189 return RecurKind::None; 7190 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) || 7191 !L1->isIdenticalTo(cast<Instruction>(LHS)) || 7192 !L2->isIdenticalTo(cast<Instruction>(RHS))) 7193 return RecurKind::None; 7194 } 7195 7196 TargetTransformInfo::ReductionFlags RdxFlags; 7197 switch (Pred) { 7198 default: 7199 return RecurKind::None; 7200 case CmpInst::ICMP_SGT: 7201 case CmpInst::ICMP_SGE: 7202 return RecurKind::SMax; 7203 case CmpInst::ICMP_SLT: 7204 case CmpInst::ICMP_SLE: 7205 return RecurKind::SMin; 7206 case CmpInst::ICMP_UGT: 7207 case CmpInst::ICMP_UGE: 7208 return RecurKind::UMax; 7209 case CmpInst::ICMP_ULT: 7210 case CmpInst::ICMP_ULE: 7211 return RecurKind::UMin; 7212 } 7213 } 7214 return RecurKind::None; 7215 } 7216 7217 /// Get the index of the first operand. 7218 static unsigned getFirstOperandIndex(Instruction *I) { 7219 return isa<SelectInst>(I) ? 1 : 0; 7220 } 7221 7222 /// Total number of operands in the reduction operation. 7223 static unsigned getNumberOfOperands(Instruction *I) { 7224 return isa<SelectInst>(I) ? 3 : 2; 7225 } 7226 7227 /// Checks if the instruction is in basic block \p BB. 7228 /// For a min/max reduction check that both compare and select are in \p BB. 7229 static bool hasSameParent(Instruction *I, BasicBlock *BB, bool IsRedOp) { 7230 auto *Sel = dyn_cast<SelectInst>(I); 7231 if (IsRedOp && Sel) { 7232 auto *Cmp = cast<Instruction>(Sel->getCondition()); 7233 return Sel->getParent() == BB && Cmp->getParent() == BB; 7234 } 7235 return I->getParent() == BB; 7236 } 7237 7238 /// Expected number of uses for reduction operations/reduced values. 7239 static bool hasRequiredNumberOfUses(bool MatchCmpSel, Instruction *I) { 7240 // SelectInst must be used twice while the condition op must have single 7241 // use only. 7242 if (MatchCmpSel) { 7243 if (auto *Sel = dyn_cast<SelectInst>(I)) 7244 return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse(); 7245 return I->hasNUses(2); 7246 } 7247 7248 // Arithmetic reduction operation must be used once only. 7249 return I->hasOneUse(); 7250 } 7251 7252 /// Initializes the list of reduction operations. 7253 void initReductionOps(Instruction *I) { 7254 if (isa<SelectInst>(I)) 7255 ReductionOps.assign(2, ReductionOpsType()); 7256 else 7257 ReductionOps.assign(1, ReductionOpsType()); 7258 } 7259 7260 /// Add all reduction operations for the reduction instruction \p I. 
  void addReductionOps(Instruction *I) {
    if (auto *Sel = dyn_cast<SelectInst>(I)) {
      ReductionOps[0].emplace_back(Sel->getCondition());
      ReductionOps[1].emplace_back(Sel);
    } else {
      ReductionOps[0].emplace_back(I);
    }
  }

  static Value *getLHS(RecurKind Kind, Instruction *I) {
    if (Kind == RecurKind::None)
      return nullptr;
    return I->getOperand(getFirstOperandIndex(I));
  }
  static Value *getRHS(RecurKind Kind, Instruction *I) {
    if (Kind == RecurKind::None)
      return nullptr;
    return I->getOperand(getFirstOperandIndex(I) + 1);
  }

public:
  HorizontalReduction() = default;

  /// Try to find a reduction tree.
  bool matchAssociativeReduction(PHINode *Phi, Instruction *B) {
    assert((!Phi || is_contained(Phi->operands(), B)) &&
           "Phi needs to use the binary operator");

    RdxKind = getRdxKind(B);

    // We may have an initial reduction that is not an add, e.g.
    //   r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted in the first '+'.
    if (Phi) {
      if (getLHS(RdxKind, B) == Phi) {
        Phi = nullptr;
        B = dyn_cast<Instruction>(getRHS(RdxKind, B));
        if (!B)
          return false;
        RdxKind = getRdxKind(B);
      } else if (getRHS(RdxKind, B) == Phi) {
        Phi = nullptr;
        B = dyn_cast<Instruction>(getLHS(RdxKind, B));
        if (!B)
          return false;
        RdxKind = getRdxKind(B);
      }
    }

    if (!isVectorizable(RdxKind, B))
      return false;

    // Analyze "regular" integer/FP types for reductions - no target-specific
    // types or pointers.
    Type *Ty = B->getType();
    if (!isValidElementType(Ty) || Ty->isPointerTy())
      return false;

    // Though the ultimate reduction may have multiple uses, its condition
    // must have only a single use.
    if (auto *SI = dyn_cast<SelectInst>(B))
      if (!SI->getCondition()->hasOneUse())
        return false;

    ReductionRoot = B;

    // The opcode for leaf values that we perform a reduction on.
    // For example: load(x) + load(y) + load(z) + fptoui(w)
    // The leaf opcode for 'w' does not match, so we don't include it as a
    // potential candidate for the reduction.
    unsigned LeafOpcode = 0;

    // Post-order traverse the reduction tree starting at B. We only handle
    // true trees containing only binary operators.
    SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
    Stack.push_back(std::make_pair(B, getFirstOperandIndex(B)));
    initReductionOps(B);
    while (!Stack.empty()) {
      Instruction *TreeN = Stack.back().first;
      unsigned EdgeToVisit = Stack.back().second++;
      const RecurKind TreeRdxKind = getRdxKind(TreeN);
      bool IsReducedValue = TreeRdxKind != RdxKind;

      // Postorder visit.
      if (IsReducedValue || EdgeToVisit == getNumberOfOperands(TreeN)) {
        if (IsReducedValue)
          ReducedVals.push_back(TreeN);
        else {
          auto ExtraArgsIter = ExtraArgs.find(TreeN);
          if (ExtraArgsIter != ExtraArgs.end() && !ExtraArgsIter->second) {
            // Check if TreeN is an extra argument of its parent operation.
            if (Stack.size() <= 1) {
              // TreeN can't be an extra argument as it is a root reduction
              // operation.
              return false;
            }
            // Yes, TreeN is an extra argument, do not add it to a list of
            // reduction operations.
            // Stack[Stack.size() - 2] always points to the parent operation.
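            // (This happens when e.g. both operands of an inner '+' turned
            //  out to be non-reduction values; the whole inner '+' is then
            //  treated as a single extra argument of its parent.)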
7360 markExtraArg(Stack[Stack.size() - 2], TreeN); 7361 ExtraArgs.erase(TreeN); 7362 } else 7363 addReductionOps(TreeN); 7364 } 7365 // Retract. 7366 Stack.pop_back(); 7367 continue; 7368 } 7369 7370 // Visit left or right. 7371 Value *EdgeVal = TreeN->getOperand(EdgeToVisit); 7372 auto *EdgeInst = dyn_cast<Instruction>(EdgeVal); 7373 if (!EdgeInst) { 7374 // Edge value is not a reduction instruction or a leaf instruction. 7375 // (It may be a constant, function argument, or something else.) 7376 markExtraArg(Stack.back(), EdgeVal); 7377 continue; 7378 } 7379 RecurKind EdgeRdxKind = getRdxKind(EdgeInst); 7380 // Continue analysis if the next operand is a reduction operation or 7381 // (possibly) a leaf value. If the leaf value opcode is not set, 7382 // the first met operation != reduction operation is considered as the 7383 // leaf opcode. 7384 // Only handle trees in the current basic block. 7385 // Each tree node needs to have minimal number of users except for the 7386 // ultimate reduction. 7387 const bool IsRdxInst = EdgeRdxKind == RdxKind; 7388 if (EdgeInst != Phi && EdgeInst != B && 7389 hasSameParent(EdgeInst, B->getParent(), IsRdxInst) && 7390 hasRequiredNumberOfUses(isa<SelectInst>(B), EdgeInst) && 7391 (!LeafOpcode || LeafOpcode == EdgeInst->getOpcode() || IsRdxInst)) { 7392 if (IsRdxInst) { 7393 // We need to be able to reassociate the reduction operations. 7394 if (!isVectorizable(EdgeRdxKind, EdgeInst)) { 7395 // I is an extra argument for TreeN (its parent operation). 7396 markExtraArg(Stack.back(), EdgeInst); 7397 continue; 7398 } 7399 } else if (!LeafOpcode) { 7400 LeafOpcode = EdgeInst->getOpcode(); 7401 } 7402 Stack.push_back( 7403 std::make_pair(EdgeInst, getFirstOperandIndex(EdgeInst))); 7404 continue; 7405 } 7406 // I is an extra argument for TreeN (its parent operation). 7407 markExtraArg(Stack.back(), EdgeInst); 7408 } 7409 return true; 7410 } 7411 7412 /// Attempt to vectorize the tree found by matchAssociativeReduction. 7413 bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) { 7414 // If there are a sufficient number of reduction values, reduce 7415 // to a nearby power-of-2. We can safely generate oversized 7416 // vectors and rely on the backend to split them to legal sizes. 7417 unsigned NumReducedVals = ReducedVals.size(); 7418 if (NumReducedVals < 4) 7419 return false; 7420 7421 // Intersect the fast-math-flags from all reduction operations. 7422 FastMathFlags RdxFMF; 7423 RdxFMF.set(); 7424 for (ReductionOpsType &RdxOp : ReductionOps) { 7425 for (Value *RdxVal : RdxOp) { 7426 if (auto *FPMO = dyn_cast<FPMathOperator>(RdxVal)) 7427 RdxFMF &= FPMO->getFastMathFlags(); 7428 } 7429 } 7430 7431 IRBuilder<> Builder(cast<Instruction>(ReductionRoot)); 7432 Builder.setFastMathFlags(RdxFMF); 7433 7434 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues; 7435 // The same extra argument may be used several times, so log each attempt 7436 // to use it. 7437 for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) { 7438 assert(Pair.first && "DebugLoc must be set."); 7439 ExternallyUsedValues[Pair.second].push_back(Pair.first); 7440 } 7441 7442 // The compare instruction of a min/max is the insertion point for new 7443 // instructions and may be replaced with a new compare instruction. 
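    // (E.g. for a min/max root
    //    %cmp = icmp slt i32 %x, %acc
    //    %sel = select i1 %cmp, i32 %x, i32 %acc
    //  new instructions are inserted before %cmp rather than before %sel.)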
7444 auto getCmpForMinMaxReduction = [](Instruction *RdxRootInst) { 7445 assert(isa<SelectInst>(RdxRootInst) && 7446 "Expected min/max reduction to have select root instruction"); 7447 Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition(); 7448 assert(isa<Instruction>(ScalarCond) && 7449 "Expected min/max reduction to have compare condition"); 7450 return cast<Instruction>(ScalarCond); 7451 }; 7452 7453 // The reduction root is used as the insertion point for new instructions, 7454 // so set it as externally used to prevent it from being deleted. 7455 ExternallyUsedValues[ReductionRoot]; 7456 SmallVector<Value *, 16> IgnoreList; 7457 for (ReductionOpsType &RdxOp : ReductionOps) 7458 IgnoreList.append(RdxOp.begin(), RdxOp.end()); 7459 7460 unsigned ReduxWidth = PowerOf2Floor(NumReducedVals); 7461 if (NumReducedVals > ReduxWidth) { 7462 // In the loop below, we are building a tree based on a window of 7463 // 'ReduxWidth' values. 7464 // If the operands of those values have common traits (compare predicate, 7465 // constant operand, etc), then we want to group those together to 7466 // minimize the cost of the reduction. 7467 7468 // TODO: This should be extended to count common operands for 7469 // compares and binops. 7470 7471 // Step 1: Count the number of times each compare predicate occurs. 7472 SmallDenseMap<unsigned, unsigned> PredCountMap; 7473 for (Value *RdxVal : ReducedVals) { 7474 CmpInst::Predicate Pred; 7475 if (match(RdxVal, m_Cmp(Pred, m_Value(), m_Value()))) 7476 ++PredCountMap[Pred]; 7477 } 7478 // Step 2: Sort the values so the most common predicates come first. 7479 stable_sort(ReducedVals, [&PredCountMap](Value *A, Value *B) { 7480 CmpInst::Predicate PredA, PredB; 7481 if (match(A, m_Cmp(PredA, m_Value(), m_Value())) && 7482 match(B, m_Cmp(PredB, m_Value(), m_Value()))) { 7483 return PredCountMap[PredA] > PredCountMap[PredB]; 7484 } 7485 return false; 7486 }); 7487 } 7488 7489 Value *VectorizedTree = nullptr; 7490 unsigned i = 0; 7491 while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) { 7492 ArrayRef<Value *> VL(&ReducedVals[i], ReduxWidth); 7493 V.buildTree(VL, ExternallyUsedValues, IgnoreList); 7494 Optional<ArrayRef<unsigned>> Order = V.bestOrder(); 7495 if (Order) { 7496 assert(Order->size() == VL.size() && 7497 "Order size must be the same as number of vectorized " 7498 "instructions."); 7499 // TODO: reorder tree nodes without tree rebuilding. 7500 SmallVector<Value *, 4> ReorderedOps(VL.size()); 7501 llvm::transform(*Order, ReorderedOps.begin(), 7502 [VL](const unsigned Idx) { return VL[Idx]; }); 7503 V.buildTree(ReorderedOps, ExternallyUsedValues, IgnoreList); 7504 } 7505 if (V.isTreeTinyAndNotFullyVectorizable()) 7506 break; 7507 if (V.isLoadCombineReductionCandidate(RdxKind)) 7508 break; 7509 7510 V.computeMinimumValueSizes(); 7511 7512 // Estimate cost. 
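      // The candidate is kept only when TreeCost + ReductionCost beats the
      // threshold below, i.e. (illustratively) when the vector tree plus one
      // horizontal reduction is cheaper than the ReduxWidth scalar
      // operations it replaces.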
      InstructionCost TreeCost = V.getTreeCost();
      InstructionCost ReductionCost =
          getReductionCost(TTI, ReducedVals[i], ReduxWidth);
      InstructionCost Cost = TreeCost + ReductionCost;
      if (!Cost.isValid()) {
        LLVM_DEBUG(dbgs() << "Encountered invalid baseline cost.\n");
        return false;
      }
      if (Cost >= -SLPCostThreshold) {
        V.getORE()->emit([&]() {
          return OptimizationRemarkMissed(SV_NAME, "HorSLPNotBeneficial",
                                          cast<Instruction>(VL[0]))
                 << "Vectorizing horizontal reduction is possible"
                 << " but not beneficial with cost " << ore::NV("Cost", Cost)
                 << " and threshold "
                 << ore::NV("Threshold", -SLPCostThreshold);
        });
        break;
      }

      LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost: "
                        << Cost << ". (HorRdx)\n");
      V.getORE()->emit([&]() {
        return OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction",
                                  cast<Instruction>(VL[0]))
               << "Vectorized horizontal reduction with cost "
               << ore::NV("Cost", Cost) << " and with tree size "
               << ore::NV("TreeSize", V.getTreeSize());
      });

      // Vectorize a tree.
      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
      Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);

      // Emit a reduction. If the root is a select (min/max idiom), the insert
      // point is the compare condition of that select.
      Instruction *RdxRootInst = cast<Instruction>(ReductionRoot);
      if (isa<SelectInst>(RdxRootInst))
        Builder.SetInsertPoint(getCmpForMinMaxReduction(RdxRootInst));
      else
        Builder.SetInsertPoint(RdxRootInst);

      Value *ReducedSubTree =
          emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);

      if (!VectorizedTree) {
        // Initialize the final value in the reduction.
        VectorizedTree = ReducedSubTree;
      } else {
        // Update the final value in the reduction.
        Builder.SetCurrentDebugLocation(Loc);
        VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
                                  ReducedSubTree, "op.rdx", ReductionOps);
      }
      i += ReduxWidth;
      ReduxWidth = PowerOf2Floor(NumReducedVals - i);
    }

    if (VectorizedTree) {
      // Finish the reduction.
      for (; i < NumReducedVals; ++i) {
        auto *I = cast<Instruction>(ReducedVals[i]);
        Builder.SetCurrentDebugLocation(I->getDebugLoc());
        VectorizedTree =
            createOp(Builder, RdxKind, VectorizedTree, I, "", ReductionOps);
      }
      for (auto &Pair : ExternallyUsedValues) {
        // Add each externally used value to the final reduction.
        for (auto *I : Pair.second) {
          Builder.SetCurrentDebugLocation(I->getDebugLoc());
          VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
                                    Pair.first, "op.extra", I);
        }
      }

      ReductionRoot->replaceAllUsesWith(VectorizedTree);

      // Mark all scalar reduction ops for deletion; they are replaced by the
      // vector reductions.
      V.eraseInstructions(IgnoreList);
    }
    return VectorizedTree != nullptr;
  }

  unsigned numReductionValues() const { return ReducedVals.size(); }

private:
  /// Calculate the cost of a reduction.
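  /// The returned value is the net change, i.e. (illustratively)
  ///   VectorCost(reducing one <ReduxWidth x Ty> value)
  ///     - (ReduxWidth - 1) * ScalarCost(one scalar reduction step),
  /// so a negative result means the vector form is the cheaper one.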
7601 InstructionCost getReductionCost(TargetTransformInfo *TTI, 7602 Value *FirstReducedVal, 7603 unsigned ReduxWidth) { 7604 Type *ScalarTy = FirstReducedVal->getType(); 7605 FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth); 7606 InstructionCost VectorCost, ScalarCost; 7607 switch (RdxKind) { 7608 case RecurKind::Add: 7609 case RecurKind::Mul: 7610 case RecurKind::Or: 7611 case RecurKind::And: 7612 case RecurKind::Xor: 7613 case RecurKind::FAdd: 7614 case RecurKind::FMul: { 7615 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind); 7616 VectorCost = TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, 7617 /*IsPairwiseForm=*/false); 7618 ScalarCost = TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy); 7619 break; 7620 } 7621 case RecurKind::FMax: 7622 case RecurKind::FMin: { 7623 auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy)); 7624 VectorCost = 7625 TTI->getMinMaxReductionCost(VectorTy, VecCondTy, 7626 /*pairwise=*/false, /*unsigned=*/false); 7627 ScalarCost = 7628 TTI->getCmpSelInstrCost(Instruction::FCmp, ScalarTy) + 7629 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 7630 CmpInst::makeCmpResultType(ScalarTy)); 7631 break; 7632 } 7633 case RecurKind::SMax: 7634 case RecurKind::SMin: 7635 case RecurKind::UMax: 7636 case RecurKind::UMin: { 7637 auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy)); 7638 bool IsUnsigned = 7639 RdxKind == RecurKind::UMax || RdxKind == RecurKind::UMin; 7640 VectorCost = 7641 TTI->getMinMaxReductionCost(VectorTy, VecCondTy, 7642 /*IsPairwiseForm=*/false, IsUnsigned); 7643 ScalarCost = 7644 TTI->getCmpSelInstrCost(Instruction::ICmp, ScalarTy) + 7645 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 7646 CmpInst::makeCmpResultType(ScalarTy)); 7647 break; 7648 } 7649 default: 7650 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 7651 } 7652 7653 // Scalar cost is repeated for N-1 elements. 7654 ScalarCost *= (ReduxWidth - 1); 7655 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost 7656 << " for reduction that starts with " << *FirstReducedVal 7657 << " (It is a splitting reduction)\n"); 7658 return VectorCost - ScalarCost; 7659 } 7660 7661 /// Emit a horizontal reduction of the vectorized value. 
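  /// E.g. an integer add reduction of an <8 x i32> value would typically be
  /// lowered through the llvm.vector.reduce.add intrinsic; the exact
  /// sequence is chosen by createSimpleTargetReduction and the target.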
7662 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 7663 unsigned ReduxWidth, const TargetTransformInfo *TTI) { 7664 assert(VectorizedValue && "Need to have a vectorized tree node"); 7665 assert(isPowerOf2_32(ReduxWidth) && 7666 "We only handle power-of-two reductions for now"); 7667 7668 return createSimpleTargetReduction(Builder, TTI, VectorizedValue, RdxKind, 7669 ReductionOps.back()); 7670 } 7671 }; 7672 7673 } // end anonymous namespace 7674 7675 static Optional<unsigned> getAggregateSize(Instruction *InsertInst) { 7676 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) 7677 return cast<FixedVectorType>(IE->getType())->getNumElements(); 7678 7679 unsigned AggregateSize = 1; 7680 auto *IV = cast<InsertValueInst>(InsertInst); 7681 Type *CurrentType = IV->getType(); 7682 do { 7683 if (auto *ST = dyn_cast<StructType>(CurrentType)) { 7684 for (auto *Elt : ST->elements()) 7685 if (Elt != ST->getElementType(0)) // check homogeneity 7686 return None; 7687 AggregateSize *= ST->getNumElements(); 7688 CurrentType = ST->getElementType(0); 7689 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) { 7690 AggregateSize *= AT->getNumElements(); 7691 CurrentType = AT->getElementType(); 7692 } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) { 7693 AggregateSize *= VT->getNumElements(); 7694 return AggregateSize; 7695 } else if (CurrentType->isSingleValueType()) { 7696 return AggregateSize; 7697 } else { 7698 return None; 7699 } 7700 } while (true); 7701 } 7702 7703 static bool findBuildAggregate_rec(Instruction *LastInsertInst, 7704 TargetTransformInfo *TTI, 7705 SmallVectorImpl<Value *> &BuildVectorOpds, 7706 SmallVectorImpl<Value *> &InsertElts, 7707 unsigned OperandOffset) { 7708 do { 7709 Value *InsertedOperand = LastInsertInst->getOperand(1); 7710 Optional<int> OperandIndex = getInsertIndex(LastInsertInst, OperandOffset); 7711 if (!OperandIndex) 7712 return false; 7713 if (isa<InsertElementInst>(InsertedOperand) || 7714 isa<InsertValueInst>(InsertedOperand)) { 7715 if (!findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI, 7716 BuildVectorOpds, InsertElts, *OperandIndex)) 7717 return false; 7718 } else { 7719 BuildVectorOpds[*OperandIndex] = InsertedOperand; 7720 InsertElts[*OperandIndex] = LastInsertInst; 7721 } 7722 LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0)); 7723 } while (LastInsertInst != nullptr && 7724 (isa<InsertValueInst>(LastInsertInst) || 7725 isa<InsertElementInst>(LastInsertInst)) && 7726 LastInsertInst->hasOneUse()); 7727 return true; 7728 } 7729 7730 /// Recognize construction of vectors like 7731 /// %ra = insertelement <4 x float> poison, float %s0, i32 0 7732 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 7733 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 7734 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 7735 /// starting from the last insertelement or insertvalue instruction. 7736 /// 7737 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>}, 7738 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on. 7739 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples. 7740 /// 7741 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type. 7742 /// 7743 /// \return true if it matches. 
/// Recognize construction of vectors like
///  %ra = insertelement <4 x float> poison, float %s0, i32 0
///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
/// starting from the last insertelement or insertvalue instruction.
///
/// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>},
/// {{float, float}, {float, float}}, [2 x {float, float}] and so on.
/// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples.
///
/// Assume LastInsertInst is of InsertElementInst or InsertValueInst type.
///
/// \return true if it matches.
static bool findBuildAggregate(Instruction *LastInsertInst,
                               TargetTransformInfo *TTI,
                               SmallVectorImpl<Value *> &BuildVectorOpds,
                               SmallVectorImpl<Value *> &InsertElts) {

  assert((isa<InsertElementInst>(LastInsertInst) ||
          isa<InsertValueInst>(LastInsertInst)) &&
         "Expected insertelement or insertvalue instruction!");

  assert((BuildVectorOpds.empty() && InsertElts.empty()) &&
         "Expected empty result vectors!");

  Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst);
  if (!AggregateSize)
    return false;
  BuildVectorOpds.resize(*AggregateSize);
  InsertElts.resize(*AggregateSize);

  if (findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts,
                             0)) {
    llvm::erase_value(BuildVectorOpds, nullptr);
    llvm::erase_value(InsertElts, nullptr);
    if (BuildVectorOpds.size() >= 2)
      return true;
  }

  return false;
}

static bool PhiTypeSorterFunc(Value *V, Value *V2) {
  return V->getType() < V2->getType();
}
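
// Editor's note with a hypothetical example: getReductionValue below looks
// for the shape
//   loop:
//     %red = phi float [ 0.0, %entry ], [ %red.next, %loop ]
//     ...
//     %red.next = fadd fast float %red, %val
// where the incoming value from the current block (or from the loop latch),
// here %red.next, is the candidate reduction value.
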
/// Try to get a reduction value from a phi node.
///
/// Given a phi node \p P in a block \p ParentBB, consider possible reductions
/// if they come from either \p ParentBB or a containing loop latch.
///
/// \returns A candidate reduction value if possible, or \code nullptr \endcode
/// if not possible.
static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
                                BasicBlock *ParentBB, LoopInfo *LI) {
  // There are situations where the reduction value is not dominated by the
  // reduction phi. Vectorizing such cases has been reported to cause
  // miscompiles. See PR25787.
  auto DominatedReduxValue = [&](Value *R) {
    return isa<Instruction>(R) &&
           DT->dominates(P->getParent(), cast<Instruction>(R)->getParent());
  };

  Value *Rdx = nullptr;

  // Return the incoming value if it comes from the same BB as the phi node.
  if (P->getIncomingBlock(0) == ParentBB) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == ParentBB) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  // Otherwise, check whether we have a loop latch to look at.
  Loop *BBL = LI->getLoopFor(ParentBB);
  if (!BBL)
    return nullptr;
  BasicBlock *BBLatch = BBL->getLoopLatch();
  if (!BBLatch)
    return nullptr;

  // There is a loop latch, return the incoming value if it comes from
  // that. This reduction pattern occasionally turns up.
  if (P->getIncomingBlock(0) == BBLatch) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == BBLatch) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  return nullptr;
}

static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) {
  if (match(I, m_BinOp(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1))))
    return true;
  if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1))))
    return true;
  return false;
}
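
// Editor's illustration (hypothetical IR): matchRdxBop binds the two operands
// of either a plain binary operator or one of the min/max intrinsics, so both
//   %r = fadd float %a, %b
// and
//   %r = call float @llvm.maxnum.f32(float %a, float %b)
// yield V0 = %a and V1 = %b.
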
/// Attempt to reduce a horizontal reduction.
/// If it is legal to match a horizontal reduction feeding the phi node \a P
/// with reduction operators \a Root (or one of its operands) in a basic block
/// \a BB, then check if it can be done. If a horizontal reduction is not found
/// and the root instruction is a binary operation, vectorization of its
/// operands is attempted.
/// \returns true if a horizontal reduction was matched and reduced, or the
/// operands of one of the binary instructions were vectorized.
/// \returns false if a horizontal reduction was not matched (or not possible)
/// or no vectorization of any binary operation feeding the \a Root instruction
/// was performed.
static bool tryToVectorizeHorReductionOrInstOperands(
    PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
    TargetTransformInfo *TTI,
    const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
  if (!ShouldVectorizeHor)
    return false;

  if (!Root)
    return false;

  if (Root->getParent() != BB || isa<PHINode>(Root))
    return false;
  // Start analysis from the Root instruction. If a horizontal reduction is
  // found, try to vectorize it. If it is not a horizontal reduction, or
  // vectorization is not possible or not effective, and the currently analyzed
  // instruction is a binary operation, try to vectorize the operands, using
  // pre-order DFS traversal order. If the operands were not vectorized, repeat
  // the same procedure considering each operand as a possible root of the
  // horizontal reduction.
  // Interrupt the process if the Root instruction itself was vectorized or all
  // sub-trees no deeper than RecursionMaxDepth were analyzed/vectorized.
  // Skip the analysis of CmpInsts: the compiler performs a post-analysis of
  // CmpInsts, so we can skip extra attempts in
  // tryToVectorizeHorReductionOrInstOperands and save compile time.
  SmallVector<std::pair<Instruction *, unsigned>, 8> Stack(1, {Root, 0});
  SmallPtrSet<Value *, 8> VisitedInstrs;
  bool Res = false;
  while (!Stack.empty()) {
    Instruction *Inst;
    unsigned Level;
    std::tie(Inst, Level) = Stack.pop_back_val();
    Value *B0, *B1;
    bool IsBinop = matchRdxBop(Inst, B0, B1);
    bool IsSelect = match(Inst, m_Select(m_Value(), m_Value(), m_Value()));
    if (IsBinop || IsSelect) {
      HorizontalReduction HorRdx;
      if (HorRdx.matchAssociativeReduction(P, Inst)) {
        if (HorRdx.tryToReduce(R, TTI)) {
          Res = true;
          // Set P to nullptr to avoid re-analysis of the phi node in
          // matchAssociativeReduction unless this is the root node.
          P = nullptr;
          continue;
        }
      }
      if (P && IsBinop) {
        Inst = dyn_cast<Instruction>(B0);
        if (Inst == P)
          Inst = dyn_cast<Instruction>(B1);
        if (!Inst) {
          // Set P to nullptr to avoid re-analysis of the phi node in
          // matchAssociativeReduction unless this is the root node.
          P = nullptr;
          continue;
        }
      }
    }
    // Set P to nullptr to avoid re-analysis of the phi node in
    // matchAssociativeReduction unless this is the root node.
    P = nullptr;
    // Do not try to vectorize CmpInst operands; this is done separately.
    if (!isa<CmpInst>(Inst) && Vectorize(Inst, R)) {
      Res = true;
      continue;
    }

    // Try to vectorize operands.
    // Continue analysis only for instructions from the same basic block, to
    // save compile time.
    if (++Level < RecursionMaxDepth)
      for (auto *Op : Inst->operand_values())
        if (VisitedInstrs.insert(Op).second)
          if (auto *I = dyn_cast<Instruction>(Op))
            // Do not try to vectorize CmpInst operands; this is done
            // separately.
            if (!isa<PHINode>(I) && !isa<CmpInst>(I) && !R.isDeleted(I) &&
                I->getParent() == BB)
              Stack.emplace_back(I, Level);
  }
  return Res;
}

bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
                                                 BasicBlock *BB, BoUpSLP &R,
                                                 TargetTransformInfo *TTI) {
  auto *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return false;

  if (!isa<BinaryOperator>(I))
    P = nullptr;
  // Try to match and vectorize a horizontal reduction.
  auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool {
    return tryToVectorize(I, R);
  };
  return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI,
                                                  ExtraVectorization);
}
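
// Editor's illustration (hypothetical IR): the kind of tree
// vectorizeRootInstruction tries to match as a horizontal reduction is
//   %a01 = fadd fast float %a0, %a1
//   %a23 = fadd fast float %a2, %a3
//   %sum = fadd fast float %a01, %a23
// which, when profitable, is rewritten to gather the leaves into a vector
// and reduce it with a single target reduction (see emitReduction above).
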
bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
                                                 BasicBlock *BB, BoUpSLP &R) {
  const DataLayout &DL = BB->getModule()->getDataLayout();
  if (!R.canMapToVector(IVI->getType(), DL))
    return false;

  SmallVector<Value *, 16> BuildVectorOpds;
  SmallVector<Value *, 16> BuildVectorInsts;
  if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts))
    return false;

  LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
  // The aggregate value is unlikely to live in a vector register, so the
  // vectorized elements will need to be extracted back into scalar registers.
  return tryToVectorizeList(BuildVectorOpds, R, /*AllowReorder=*/false);
}

bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
                                                   BasicBlock *BB, BoUpSLP &R) {
  SmallVector<Value *, 16> BuildVectorInsts;
  SmallVector<Value *, 16> BuildVectorOpds;
  SmallVector<int> Mask;
  if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) ||
      (llvm::all_of(BuildVectorOpds,
                    [](Value *V) { return isa<ExtractElementInst>(V); }) &&
       isShuffle(BuildVectorOpds, Mask)))
    return false;

  LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
  return tryToVectorizeList(BuildVectorInsts, R, /*AllowReorder=*/false);
}

bool SLPVectorizerPass::vectorizeSimpleInstructions(
    SmallVectorImpl<Instruction *> &Instructions, BasicBlock *BB, BoUpSLP &R,
    bool AtTerminator) {
  bool OpsChanged = false;
  SmallVector<Instruction *, 4> PostponedCmps;
  for (auto *I : reverse(Instructions)) {
    if (R.isDeleted(I))
      continue;
    if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I))
      OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
    else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I))
      OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
    else if (isa<CmpInst>(I))
      PostponedCmps.push_back(I);
  }
  if (AtTerminator) {
    // Try to find reductions first.
    for (Instruction *I : PostponedCmps) {
      if (R.isDeleted(I))
        continue;
      for (Value *Op : I->operands())
        OpsChanged |= vectorizeRootInstruction(nullptr, Op, BB, R, TTI);
    }
    // Try to vectorize operands as vector bundles.
    for (Instruction *I : PostponedCmps) {
      if (R.isDeleted(I))
        continue;
      OpsChanged |= tryToVectorize(I, R);
    }
    Instructions.clear();
  } else {
    // Insert in reverse order since the PostponedCmps vector was filled in
    // reverse order.
    Instructions.assign(PostponedCmps.rbegin(), PostponedCmps.rend());
  }
  return OpsChanged;
}
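
// Editor's note (illustrative description of the flow above, not new logic):
// compares are deliberately postponed. An fcmp/icmp encountered mid-block is
// queued in PostponedCmps and only revisited once the block terminator is
// reached, first as a potential reduction root and then as a plain bundle,
// which avoids repeated failed vectorization attempts earlier in the block.
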
bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallPtrSet<Value *, 16> VisitedInstrs;

  bool HaveVectorizedPhiNodes = true;
  while (HaveVectorizedPhiNodes) {
    HaveVectorizedPhiNodes = false;

    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (Instruction &I : *BB) {
      PHINode *P = dyn_cast<PHINode>(&I);
      if (!P)
        break;

      if (!VisitedInstrs.count(P) && !R.isDeleted(P))
        Incoming.push_back(P);
    }

    // Sort by type.
    llvm::stable_sort(Incoming, PhiTypeSorterFunc);

    // Try to vectorize elements based on their type.
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
                                           E = Incoming.end();
         IncIt != E;) {

      // Look for the next elements with the same type.
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs ("
                        << NumElts << ")\n");
      // The order in which the phi nodes appear in the program does not
      // matter, so allow tryToVectorizeList to reorder them if it is
      // beneficial. This is done only when there are exactly two elements,
      // since tryToVectorizeList asserts that there are only two values when
      // AllowReorder is true.
      bool AllowReorder = NumElts == 2;
      if (NumElts > 1 &&
          tryToVectorizeList(makeArrayRef(IncIt, NumElts), R, AllowReorder)) {
        // Success. Start over because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  SmallVector<Instruction *, 8> PostProcessInstructions;
  SmallDenseSet<Instruction *, 4> KeyNodes;
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // Skip instructions with a scalable type; their number of elements is
    // unknown at compile time.
    if (isa<ScalableVectorType>(it->getType()))
      continue;

    // Skip instructions marked for deletion.
    if (R.isDeleted(&*it))
      continue;
    // We may go through BB multiple times, so skip instructions we have
    // already checked.
    if (!VisitedInstrs.insert(&*it).second) {
      if (it->use_empty() && KeyNodes.contains(&*it) &&
          vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
                                      it->isTerminator())) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() == 2) {
        // Try to match and vectorize a horizontal reduction.
        if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB,
                                     R, TTI)) {
          Changed = true;
          it = BB->begin();
          e = BB->end();
          continue;
        }
      }
      // Try to vectorize the incoming values of the PHI, to catch reductions
      // that feed into PHIs.
      for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) {
        // Skip if the incoming block is the current BB for now. Also, bypass
        // unreachable IR for efficiency and to avoid crashing.
        // TODO: Collect the skipped incoming values and try to vectorize them
        // after processing BB.
        if (BB == P->getIncomingBlock(I) ||
            !DT->isReachableFromEntry(P->getIncomingBlock(I)))
          continue;

        Changed |= vectorizeRootInstruction(nullptr, P->getIncomingValue(I),
                                            P->getIncomingBlock(I), R, TTI);
      }
      continue;
    }

    // We ran into an instruction without users, such as a terminator, a store,
    // or a function call with an ignored return value. Ignore unused
    // instructions (based on the instruction type, except for CallInst and
    // InvokeInst).
    if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
                            isa<InvokeInst>(it))) {
      KeyNodes.insert(&*it);
      bool OpsChanged = false;
      if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
        for (auto *V : it->operand_values()) {
          // Try to match and vectorize a horizontal reduction.
          OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
        }
      }
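      // Editor's note: at this point PostProcessInstructions holds the
      // insertelement/insertvalue/cmp instructions collected so far in this
      // block (see the push_back at the bottom of the loop); the key node
      // recorded above acts as the point at which they are flushed and
      // retried (illustrative description, not new logic).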
      // Start vectorization of the post-process list of instructions from the
      // top-tree instructions, to try to vectorize as many instructions as
      // possible.
      OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
                                                it->isTerminator());
      if (OpsChanged) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
    }

    if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
        isa<InsertValueInst>(it))
      PostProcessInstructions.push_back(&*it);
  }

  return Changed;
}

bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
  auto Changed = false;
  for (auto &Entry : GEPs) {
    // If the getelementptr list has fewer than two elements, there's nothing
    // to do.
    if (Entry.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
                      << Entry.second.size() << ".\n");

    // Process the GEP list in chunks suitable for the target's supported
    // vector size. If a vector register can't hold 1 element, we are done. We
    // are trying to vectorize the index computations, so the maximum number of
    // elements is based on the size of the index expression, rather than the
    // size of the GEP itself (the target's pointer size).
    unsigned MaxVecRegSize = R.getMaxVecRegSize();
    unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin());
    if (MaxVecRegSize < EltSize)
      continue;

    unsigned MaxElts = MaxVecRegSize / EltSize;
    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
      auto Len = std::min<unsigned>(BE - BI, MaxElts);
      ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);

      // Initialize a set of candidate getelementptrs. Note that we use a
      // SetVector here to preserve program order. If the index computations
      // are vectorizable and begin with loads, we want to minimize the chance
      // of having to reorder them later.
      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

      // Some of the candidates may have already been vectorized after we
      // initially collected them. If so, they are marked as deleted, so remove
      // them from the set of candidates.
      Candidates.remove_if(
          [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); });

      // Remove from the set of candidates all pairs of getelementptrs with
      // constant differences. Such getelementptrs are likely not good
      // candidates for vectorization in a bottom-up phase since one can be
      // computed from the other. We also ensure all candidate getelementptr
      // indices are unique.
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
        auto *GEPI = GEPList[I];
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = GEPList[J];
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPI);
            Candidates.remove(GEPJ);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPJ);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;
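      // Editor's illustration (hypothetical IR): given
      //   %g1 = getelementptr inbounds i32, i32* %p, i64 4
      //   %g2 = getelementptr inbounds i32, i32* %p, i64 8
      // the SCEV difference is the constant 4, so both are dropped above;
      // two GEPs indexed by unrelated loaded values have a non-constant
      // difference and remain candidates.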
      // Add the single, non-constant index of each candidate to the bundle. We
      // ensured the indices met these constraints when we originally collected
      // the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try to vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}

bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e;
       ++it) {
    if (it->second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                      << it->second.size() << ".\n");

    Changed |= vectorizeStores(it->second, R);
  }
  return Changed;
}

char SLPVectorizer::ID = 0;

static const char lv_name[] = "SLP Vectorizer";

INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); }
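
// Editor's usage note (illustrative): the pass registered above can be run
// standalone with, e.g.,
//   opt -passes=slp-vectorizer -S input.ll
// under the new pass manager; the legacy pass manager flag derived from
// SV_NAME is -slp-vectorizer.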