//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

cl::opt<bool> RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
                                  cl::desc("Run the SLP vectorization passes"));

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned>
MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden,
            cl::desc("Maximum SLP vectorization factor (0=unlimited)"));

static cl::opt<int>
MaxStoreLookup("slp-max-store-lookup", cl::init(32), cl::Hidden,
               cl::desc("Maximum depth of the lookup for consecutive stores."));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
    cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

// The maximum depth that the look-ahead score heuristic will explore.
// The higher this value, the higher the compilation time overhead.
static cl::opt<int> LookAheadMaxDepth(
    "slp-max-look-ahead-depth", cl::init(2), cl::Hidden,
    cl::desc("The maximum look-ahead depth for operand reordering scores"));

// The look-ahead heuristic goes through the users of the bundle to calculate
// the users cost in getExternalUsesCost(). To avoid compilation time increase
// we limit the number of users visited to this value.
static cl::opt<unsigned> LookAheadUsersBudget(
    "slp-look-ahead-users-budget", cl::init(2), cl::Hidden,
    cl::desc("The maximum number of users to visit while visiting the "
             "predecessors. This prevents compilation time increase."));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;

/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important things to filter here are types which are invalid in
/// LLVM vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_fp128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

/// \returns True if the value is a constant (but not globals/constant
/// expressions).
static bool isConstant(Value *V) {
  return isa<Constant>(V) && !isa<ConstantExpr>(V) && !isa<GlobalValue>(V);
}

/// Checks if \p V is one of vector-like instructions, i.e. undef,
/// insertelement/extractelement with constant indices for fixed vector type or
/// extractvalue instruction.
static bool isVectorLikeInstWithConstOps(Value *V) {
  if (!isa<InsertElementInst, ExtractElementInst>(V) &&
      !isa<ExtractValueInst, UndefValue>(V))
    return false;
  auto *I = dyn_cast<Instruction>(V);
  if (!I || isa<ExtractValueInst>(I))
    return true;
  if (!isa<FixedVectorType>(I->getOperand(0)->getType()))
    return false;
  if (isa<ExtractElementInst>(I))
    return isConstant(I->getOperand(1));
  assert(isa<InsertElementInst>(V) && "Expected only insertelement.");
  return isConstant(I->getOperand(2));
}

/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  if (all_of(VL, isVectorLikeInstWithConstOps))
    return true;

  BasicBlock *BB = I0->getParent();
  for (int I = 1, E = VL.size(); I < E; I++) {
    auto *II = dyn_cast<Instruction>(VL[I]);
    if (!II)
      return false;

    if (BB != II->getParent())
      return false;
  }
  return true;
}

/// \returns True if all of the values in \p VL are constants (but not
/// globals/constant expressions).
static bool allConstant(ArrayRef<Value *> VL) {
  // Constant expressions and globals can't be vectorized like normal
  // integer/FP constants.
  return all_of(VL, isConstant);
}

/// \returns True if all of the values in \p VL are identical or some of them
/// are UndefValue.
static bool isSplat(ArrayRef<Value *> VL) {
  Value *FirstNonUndef = nullptr;
  for (Value *V : VL) {
    if (isa<UndefValue>(V))
      continue;
    if (!FirstNonUndef) {
      FirstNonUndef = V;
      continue;
    }
    if (V != FirstNonUndef)
      return false;
  }
  return FirstNonUndef != nullptr;
}

/// \returns True if \p I is commutative, handles CmpInst and BinaryOperator.
static bool isCommutative(Instruction *I) {
  if (auto *Cmp = dyn_cast<CmpInst>(I))
    return Cmp->isCommutative();
  if (auto *BO = dyn_cast<BinaryOperator>(I))
    return BO->isCommutative();
  // TODO: This should check for generic Instruction::isCommutative(), but
  // we need to confirm that the caller code correctly handles Intrinsics
  // for example (does not have 2 operands).
  return false;
}

/// Checks if the given value is actually an undefined constant vector.
static bool isUndefVector(const Value *V) {
  if (isa<UndefValue>(V))
    return true;
  auto *C = dyn_cast<Constant>(V);
  if (!C)
    return false;
  if (!C->containsUndefOrPoisonElement())
    return false;
  auto *VecTy = dyn_cast<FixedVectorType>(C->getType());
  if (!VecTy)
    return false;
  for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I) {
    if (Constant *Elem = C->getAggregateElement(I))
      if (!isa<UndefValue>(Elem))
        return false;
  }
  return true;
}

/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
/// ret <4 x i8> %2
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> poison, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> poison, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul
/// Mask will return the Shuffle Mask equivalent to the extracted elements.
/// TODO: Can we split off and reuse the shuffle mask detection from
/// TargetTransformInfo::getInstructionThroughput?
static Optional<TargetTransformInfo::ShuffleKind>
isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
  const auto *It =
      find_if(VL, [](Value *V) { return isa<ExtractElementInst>(V); });
  if (It == VL.end())
    return None;
  auto *EI0 = cast<ExtractElementInst>(*It);
  if (isa<ScalableVectorType>(EI0->getVectorOperandType()))
    return None;
  unsigned Size =
      cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, Select, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  Mask.assign(VL.size(), UndefMaskElem);
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    // Undef can be represented as an undef element in a vector.
    if (isa<UndefValue>(VL[I]))
      continue;
    auto *EI = cast<ExtractElementInst>(VL[I]);
    if (isa<ScalableVectorType>(EI->getVectorOperandType()))
      return None;
    auto *Vec = EI->getVectorOperand();
    // We can extractelement from undef or poison vector.
    if (isUndefVector(Vec))
      continue;
    // All vector operands must have the same number of vector elements.
    if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size)
      return None;
    if (isa<UndefValue>(EI->getIndexOperand()))
      continue;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size))
      continue;
    unsigned IntIdx = Idx->getValue().getZExtValue();
    Mask[I] = IntIdx;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
    if (!Vec1 || Vec1 == Vec) {
      Vec1 = Vec;
    } else if (!Vec2 || Vec2 == Vec) {
      Vec2 = Vec;
      Mask[I] += Size;
    } else {
      return None;
    }
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector, otherwise
  // we have permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}
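
// A concrete example of the mask this helper produces: for VL consisting of
// extractelements {%x[0], %x[1], %y[2], %y[3]} from two <4 x i8> vectors, the
// result is SK_Select with Mask = {0, 1, 6, 7}; every lane keeps its position
// and lanes 2-3 come from the second source, so the shuffle is a blend.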

namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
  bool isAltShuffle() const { return AltOp != MainOp; }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace

/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}

/// \returns true if \p Opcode is allowed as part of the main/alternate
/// instruction for SLP vectorization.
///
/// An example of an unsupported opcode is SDIV, which can potentially cause UB
/// if the "shuffled out" lane would result in division by zero.
static bool isValidForAlternation(unsigned Opcode) {
  if (Instruction::isIntDivRem(Opcode))
    return false;

  return true;
}

/// \returns analysis of the Instructions in \p VL described in
/// InstructionsState, i.e. the opcode with which we assume the whole list
/// could be vectorized even if its structure is diverse.
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex = 0) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
  // TODO - generalize to support all operators (types, calls etc.).
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
    if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
      if (InstOpcode == Opcode || InstOpcode == AltOpcode)
        continue;
      if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
          isValidForAlternation(Opcode)) {
        AltOpcode = InstOpcode;
        AltIndex = Cnt;
        continue;
      }
    } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
      Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
      Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        if (InstOpcode == Opcode || InstOpcode == AltOpcode)
          continue;
        if (Opcode == AltOpcode) {
          assert(isValidForAlternation(Opcode) &&
                 isValidForAlternation(InstOpcode) &&
                 "Cast isn't safe for alternation, logic needs to be updated!");
          AltOpcode = InstOpcode;
          AltIndex = Cnt;
          continue;
        }
      }
    } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
      continue;
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }

  return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
                           cast<Instruction>(VL[AltIndex]));
}
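
// For example, for VL = {add, sub, add, sub} getSameOpcode() returns a state
// with MainOp being the first add and AltOp the first sub (so isAltShuffle()
// is true). A list such as {add, sdiv, add, sdiv} produces a failed state
// instead, because integer div/rem is not valid for alternation: a "shuffled
// out" divisor lane could introduce division by zero.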

/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;

  return true;
}

/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static Optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return None;
    return CI->getZExtValue();
  }
  ExtractValueInst *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return None;
  return *EI->idx_begin();
}

/// \returns True if in-tree use also needs extract. This refers to
/// possible scalar operand in vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) {
      if (hasVectorInstrinsicScalarOpd(ID, i))
        return (CI->getArgOperand(i) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AAResults *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

/// Shuffles \p Mask in accordance with the given \p SubMask.
static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask) {
  if (SubMask.empty())
    return;
  if (Mask.empty()) {
    Mask.append(SubMask.begin(), SubMask.end());
    return;
  }
  SmallVector<int> NewMask(SubMask.size(), UndefMaskElem);
  int TermValue = std::min(Mask.size(), SubMask.size());
  for (int I = 0, E = SubMask.size(); I < E; ++I) {
    if (SubMask[I] >= TermValue || SubMask[I] == UndefMaskElem ||
        Mask[SubMask[I]] >= TermValue)
      continue;
    NewMask[I] = Mask[SubMask[I]];
  }
  Mask.swap(NewMask);
}
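
// In other words, the resulting mask is the composition NewMask[I] =
// Mask[SubMask[I]]. For example, Mask = {1, 0, 3, 2} combined with
// SubMask = {2, 3, 0, 1} yields {3, 2, 1, 0}; out-of-range or undef submask
// elements stay UndefMaskElem.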

/// \p Order may have elements assigned a special value (the vector size),
/// which is out of bounds. Such indices only appear at positions that
/// correspond to undef values (see canReuseExtract for details) and are used
/// to keep undef values from affecting the ordering of the operands.
/// The first loop below simply finds all unused indices and then the next loop
/// nest assigns these indices to the positions of the undef values.
/// In the example below, Order has two undef positions and they get assigned
/// the values 3 and 7 respectively:
/// before:  6 9 5 4 9 2 1 0
/// after:   6 3 5 4 7 2 1 0
static void fixupOrderingIndices(SmallVectorImpl<unsigned> &Order) {
  const unsigned Sz = Order.size();
  SmallBitVector UnusedIndices(Sz, /*t=*/true);
  SmallBitVector MaskedIndices(Sz);
  for (unsigned I = 0; I < Sz; ++I) {
    if (Order[I] < Sz)
      UnusedIndices.reset(Order[I]);
    else
      MaskedIndices.set(I);
  }
  if (MaskedIndices.none())
    return;
  assert(UnusedIndices.count() == MaskedIndices.count() &&
         "Non-synced masked/available indices.");
  int Idx = UnusedIndices.find_first();
  int MIdx = MaskedIndices.find_first();
  while (MIdx >= 0) {
    assert(Idx >= 0 && "Indices must be synced.");
    Order[MIdx] = Idx;
    Idx = UnusedIndices.find_next(Idx);
    MIdx = MaskedIndices.find_next(MIdx);
  }
}

namespace llvm {

static void inversePermutation(ArrayRef<unsigned> Indices,
                               SmallVectorImpl<int> &Mask) {
  Mask.clear();
  const unsigned E = Indices.size();
  Mask.resize(E, UndefMaskElem);
  for (unsigned I = 0; I < E; ++I)
    Mask[Indices[I]] = I;
}

/// \returns inserting index of InsertElement or InsertValue instruction,
/// using Offset as base offset for index.
static Optional<int> getInsertIndex(Value *InsertInst, unsigned Offset) {
  int Index = Offset;
  if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) {
    if (auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2))) {
      auto *VT = cast<FixedVectorType>(IE->getType());
      if (CI->getValue().uge(VT->getNumElements()))
        return UndefMaskElem;
      Index *= VT->getNumElements();
      Index += CI->getZExtValue();
      return Index;
    }
    if (isa<UndefValue>(IE->getOperand(2)))
      return UndefMaskElem;
    return None;
  }

  auto *IV = cast<InsertValueInst>(InsertInst);
  Type *CurrentType = IV->getType();
  for (unsigned I : IV->indices()) {
    if (auto *ST = dyn_cast<StructType>(CurrentType)) {
      Index *= ST->getNumElements();
      CurrentType = ST->getElementType(I);
    } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
      Index *= AT->getNumElements();
      CurrentType = AT->getElementType();
    } else {
      return None;
    }
    Index += I;
  }
  return Index;
}

/// Reorders the list of scalars in accordance with the given \p Mask: the
/// scalar at position I is moved to position Mask[I]; positions that no
/// scalar maps to are left as undef.
static void reorderScalars(SmallVectorImpl<Value *> &Scalars,
                           ArrayRef<int> Mask) {
  assert(!Mask.empty() && "Expected non-empty mask.");
  SmallVector<Value *> Prev(Scalars.size(),
                            UndefValue::get(Scalars.front()->getType()));
  Prev.swap(Scalars);
  for (unsigned I = 0, E = Prev.size(); I < E; ++I)
    if (Mask[I] != UndefMaskElem)
      Scalars[Mask[I]] = Prev[I];
}
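
// Note how the two helpers above compose: for a desired order
// Order = {2, 0, 1}, inversePermutation() produces Mask = {1, 2, 0}, and
// reorderScalars() with that mask turns {a, b, c} into {c, a, b}, i.e.
// element I of the result is the original element Order[I].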

namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  using OrdersType = SmallVector<unsigned, 4>;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC),
        DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    // data type rather than just register size. For example, x86 AVX has
    // 256-bit registers, but it does not support integer operations
    // at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize =
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize();

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }

  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractvalue instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  InstructionCost getSpillCost() const;

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = None);

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Builds external uses of the vectorized scalars, i.e. the list of
  /// vectorized scalars to be extracted, their lanes and their scalar users.
  /// \p ExternallyUsedValues contains an additional list of external uses to
  /// handle vectorization of reductions.
  void
  buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {});
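
  // A rough sketch of how a caller typically drives these entry points (the
  // actual drivers live in the pass implementation further down in this
  // file): build the tree for a bundle of scalars, query the cost, and only
  // then emit vector code:
  //   BoUpSLP &R = ...;
  //   R.buildTree(VL);
  //   R.buildExternalUses();
  //   R.computeMinimumValueSizes();
  //   if (R.getTreeCost() < -SLPCostThreshold)
  //     R.vectorizeTree();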

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
    MinBWs.clear();
    InstrElementSize.clear();
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }

  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// Checks if the specified gather tree entry \p TE can be represented as a
  /// shuffled vector entry + (possibly) permutation with other gathers. It
  /// implements the checks only for possibly ordered scalars (Loads,
  /// ExtractElement, ExtractValue), which can be part of the graph.
  Optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE);

  /// Gets reordering data for the given tree entry. If the entry is vectorized
  /// - just return ReorderIndices, otherwise check if the scalars can be
  /// reordered and return the most optimal order.
  /// \param TopToBottom If true, include the order of vectorized stores and
  /// insertelement nodes, otherwise skip them.
  Optional<OrdersType> getReorderingData(const TreeEntry &TE, bool TopToBottom);

  /// Reorders the current graph to the most profitable order starting from the
  /// root node to the leaf nodes. The best order is chosen only from the nodes
  /// of the same size (vectorization factor). Smaller nodes are considered
  /// parts of a subgraph with a smaller VF and they are reordered
  /// independently. We can do this because we still need to extend smaller
  /// nodes to the wider VF and we can merge reordering shuffles with the
  /// widening shuffles.
  void reorderTopToBottom();

  /// Reorders the current graph to the most profitable order starting from
  /// the leaves to the root. It allows rotating small subgraphs and reducing
  /// the number of reshuffles if the leaf nodes use the same order. In this
  /// case we can merge the orders and just shuffle the user node instead of
  /// shuffling its operands. Plus, even if the leaf nodes have different
  /// orders, it allows sinking the reordering in the graph closer to the root
  /// node and merging it later during analysis.
  void reorderBottomToTop(bool IgnoreReorder = false);

  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V);

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();

  // \returns maximum vector register size as set by TTI or overridden by
  // cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }

  unsigned getMinVF(unsigned Sz) const {
    return std::max(2U, getMinVecRegSize() / Sz);
  }
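  // For example, with the default MinVecRegSize of 128 bits, getMinVF(32)
  // returns 4, i.e. at least four 32-bit lanes are required to form a
  // vectorizable bundle; getMinVF() never returns less than 2.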

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
    unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
      MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
    return MaxVF ? MaxVF : UINT_MAX;
  }

  /// Check if homogeneous aggregate is isomorphic to some VectorType.
  /// Accepts homogeneous multidimensional aggregate of scalars/vectors like
  /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
  /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable(bool ForReduction = false) const;

  /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  ///       may not be necessary.
  bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;

  /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
  /// can be load combined in the backend. Load combining may not be allowed in
  /// the IR optimizer, so we do not want to alter the pattern. For example,
  /// partially transforming a scalar bswap() pattern into vector code is
  /// effectively impossible for the backend to undo.
  /// TODO: If load combining is allowed in the IR optimizer, this analysis
  ///       may not be necessary.
  bool isLoadCombineCandidate() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }

  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
#ifndef NDEBUG
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
#endif
  };

  /// A helper data structure to hold the operands of a vector of instructions.
  /// This supports a fixed vector length for all operand vectors.
  class VLOperands {
    /// For each operand we need (i) the value, and (ii) the opcode that it
    /// would be attached to if the expression was in a left-linearized form.
    /// This is required to avoid illegal operand reordering.
    /// For example:
    /// \verbatim
    ///                         0 Op1
    ///                         |/
    /// Op1 Op2   Linearized    + Op2
    ///   \ /     ---------->   |/
    ///    -                    -
    ///
    /// Op1 - Op2            (0 + Op1) - Op2
    /// \endverbatim
    ///
    /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
    ///
    /// Another way to think of this is to track all the operations across the
    /// path from the operand all the way to the root of the tree and to
    /// calculate the operation that corresponds to this path. For example, the
    /// path from Op2 to the root crosses the RHS of the '-', therefore the
    /// corresponding operation is a '-' (which matches the one in the
    /// linearized tree, as shown above).
    ///
    /// For lack of a better term, we refer to this operation as Accumulated
    /// Path Operation (APO).
    struct OperandData {
      OperandData() = default;
      OperandData(Value *V, bool APO, bool IsUsed)
          : V(V), APO(APO), IsUsed(IsUsed) {}
      /// The operand value.
      Value *V = nullptr;
      /// TreeEntries only allow a single opcode, or an alternate sequence of
      /// them (e.g, +, -). Therefore, we can safely use a boolean value for the
      /// APO. It is set to 'true' if 'V' is attached to an inverse operation
      /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise
      /// (e.g., Add/Mul)
      bool APO = false;
      /// Helper data for the reordering function.
      bool IsUsed = false;
    };

    /// During operand reordering, we are trying to select the operand at lane
    /// that matches best with the operand at the neighboring lane. Our
    /// selection is based on the type of value we are looking for. For example,
    /// if the neighboring lane has a load, we need to look for a load that is
    /// accessing a consecutive address. These strategies are summarized in the
    /// 'ReorderingMode' enumerator.
    enum class ReorderingMode {
      Load,     ///< Matching loads to consecutive memory addresses
      Opcode,   ///< Matching instructions based on opcode (same or alternate)
      Constant, ///< Matching constants
      Splat,    ///< Matching the same instruction multiple times (broadcast)
      Failed,   ///< We failed to create a vectorizable group
    };

    using OperandDataVec = SmallVector<OperandData, 2>;

    /// A vector of operand vectors.
    SmallVector<OperandDataVec, 4> OpsVec;

    const DataLayout &DL;
    ScalarEvolution &SE;
    const BoUpSLP &R;

    /// \returns the operand data at \p OpIdx and \p Lane.
    OperandData &getData(unsigned OpIdx, unsigned Lane) {
      return OpsVec[OpIdx][Lane];
    }

    /// \returns the operand data at \p OpIdx and \p Lane. Const version.
    const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
      return OpsVec[OpIdx][Lane];
    }

    /// Clears the used flag for all entries.
    void clearUsed() {
      for (unsigned OpIdx = 0, NumOperands = getNumOperands();
           OpIdx != NumOperands; ++OpIdx)
        for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
             ++Lane)
          OpsVec[OpIdx][Lane].IsUsed = false;
    }

    /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
    void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
      std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
    }

    // The hard-coded scores listed here are not very important, though it shall
    // be higher for better matches to improve the resulting cost. When
    // computing the scores of matching one sub-tree with another, we are
    // basically counting the number of values that are matching. So even if all
    // scores are set to 1, we would still get a decent matching result.
    // However, sometimes we have to break ties. For example we may have to
    // choose between matching loads vs matching opcodes. This is what these
    // scores are helping us with: they provide the order of preference. Also,
    // this is important if the scalar is externally used or used in another
    // tree entry node in the different lane.

    /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
    static const int ScoreConsecutiveLoads = 4;
    /// Loads from reversed memory addresses, e.g. load(A[i+1]), load(A[i]).
    static const int ScoreReversedLoads = 3;
    /// ExtractElementInst from same vector and consecutive indexes.
    static const int ScoreConsecutiveExtracts = 4;
    /// ExtractElementInst from same vector and reversed indices.
    static const int ScoreReversedExtracts = 3;
    /// Constants.
    static const int ScoreConstants = 2;
    /// Instructions with the same opcode.
    static const int ScoreSameOpcode = 2;
    /// Instructions with alt opcodes (e.g, add + sub).
    static const int ScoreAltOpcodes = 1;
    /// Identical instructions (a.k.a. splat or broadcast).
    static const int ScoreSplat = 1;
    /// Matching with an undef is preferable to failing.
    static const int ScoreUndef = 1;
    /// Score for failing to find a decent match.
    static const int ScoreFail = 0;
    /// User external to the vectorized code.
    static const int ExternalUseCost = 1;
    /// The user is internal but in a different lane.
    static const int UserInDiffLaneCost = ExternalUseCost;

    /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
    static int getShallowScore(Value *V1, Value *V2, const DataLayout &DL,
                               ScalarEvolution &SE, int NumLanes) {
      if (V1 == V2)
        return VLOperands::ScoreSplat;

      auto *LI1 = dyn_cast<LoadInst>(V1);
      auto *LI2 = dyn_cast<LoadInst>(V2);
      if (LI1 && LI2) {
        if (LI1->getParent() != LI2->getParent())
          return VLOperands::ScoreFail;

        Optional<int> Dist = getPointersDiff(
            LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
            LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
        if (!Dist)
          return VLOperands::ScoreFail;
        // The distance is too large - still may be profitable to use masked
        // loads/gathers.
        if (std::abs(*Dist) > NumLanes / 2)
          return VLOperands::ScoreAltOpcodes;
        // This still will detect consecutive loads, but we might have "holes"
        // in some cases. It is ok for non-power-2 vectorization and may produce
        // better results. It should not affect current vectorization.
        return (*Dist > 0) ? VLOperands::ScoreConsecutiveLoads
                           : VLOperands::ScoreReversedLoads;
      }

      auto *C1 = dyn_cast<Constant>(V1);
      auto *C2 = dyn_cast<Constant>(V2);
      if (C1 && C2)
        return VLOperands::ScoreConstants;

      // Extracts from consecutive indexes of the same vector score better, as
      // the extracts could be optimized away.
      Value *EV1;
      ConstantInt *Ex1Idx;
      if (match(V1, m_ExtractElt(m_Value(EV1), m_ConstantInt(Ex1Idx)))) {
        // Undefs are always profitable for extractelements.
        if (isa<UndefValue>(V2))
          return VLOperands::ScoreConsecutiveExtracts;
        Value *EV2 = nullptr;
        ConstantInt *Ex2Idx = nullptr;
        if (match(V2,
                  m_ExtractElt(m_Value(EV2), m_CombineOr(m_ConstantInt(Ex2Idx),
                                                         m_Undef())))) {
          // Undefs are always profitable for extractelements.
          if (!Ex2Idx)
            return VLOperands::ScoreConsecutiveExtracts;
          if (isUndefVector(EV2) && EV2->getType() == EV1->getType())
            return VLOperands::ScoreConsecutiveExtracts;
          if (EV2 == EV1) {
            int Idx1 = Ex1Idx->getZExtValue();
            int Idx2 = Ex2Idx->getZExtValue();
            int Dist = Idx2 - Idx1;
            // The distance is too large - still may be profitable to use
            // shuffles.
            if (std::abs(Dist) > NumLanes / 2)
              return VLOperands::ScoreAltOpcodes;
            return (Dist > 0) ? VLOperands::ScoreConsecutiveExtracts
                              : VLOperands::ScoreReversedExtracts;
          }
        }
      }

      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (I1 && I2) {
        if (I1->getParent() != I2->getParent())
          return VLOperands::ScoreFail;
        InstructionsState S = getSameOpcode({I1, I2});
        // Note: Only consider instructions with <= 2 operands to avoid
        // complexity explosion.
        if (S.getOpcode() && S.MainOp->getNumOperands() <= 2)
          return S.isAltShuffle() ? VLOperands::ScoreAltOpcodes
                                  : VLOperands::ScoreSameOpcode;
      }

      if (isa<UndefValue>(V2))
        return VLOperands::ScoreUndef;

      return VLOperands::ScoreFail;
    }

    /// Holds the values and their lanes that are taking part in the look-ahead
    /// score calculation. This is used in the external uses cost calculation.
    /// Need to hold all the lanes in case of splat/broadcast at least to
    /// correctly check for the use in the different lane.
    SmallDenseMap<Value *, SmallSet<int, 4>> InLookAheadValues;

    /// \returns the additional cost due to uses of \p LHS and \p RHS that are
    /// either external to the vectorized code, or require shuffling.
    int getExternalUsesCost(const std::pair<Value *, int> &LHS,
                            const std::pair<Value *, int> &RHS) {
      int Cost = 0;
      std::array<std::pair<Value *, int>, 2> Values = {{LHS, RHS}};
      for (int Idx = 0, IdxE = Values.size(); Idx != IdxE; ++Idx) {
        Value *V = Values[Idx].first;
        if (isa<Constant>(V)) {
          // Since this is a function pass, it doesn't make semantic sense to
          // walk the users of a subclass of Constant. The users could be in
          // another function, or even another module that happens to be in
          // the same LLVMContext.
          continue;
        }

        // Calculate the absolute lane, using the minimum relative lane of LHS
        // and RHS as base and Idx as the offset.
        int Ln = std::min(LHS.second, RHS.second) + Idx;
        assert(Ln >= 0 && "Bad lane calculation");
        unsigned UsersBudget = LookAheadUsersBudget;
        for (User *U : V->users()) {
          if (const TreeEntry *UserTE = R.getTreeEntry(U)) {
            // The user is in the VectorizableTree. Check if we need to insert.
            int UserLn = UserTE->findLaneForValue(U);
            assert(UserLn >= 0 && "Bad lane");
            // If the values are different, check just the lane of the current
            // value. If the values are the same, we need to add
            // UserInDiffLaneCost only if UserLn does not match either lane.
            if ((LHS.first != RHS.first && UserLn != Ln) ||
                (LHS.first == RHS.first && UserLn != LHS.second &&
                 UserLn != RHS.second)) {
              Cost += UserInDiffLaneCost;
              break;
            }
          } else {
            // Check if the user is in the look-ahead code.
            auto It2 = InLookAheadValues.find(U);
            if (It2 != InLookAheadValues.end()) {
              // The user is in the look-ahead code. Check the lane.
              if (!It2->getSecond().contains(Ln)) {
                Cost += UserInDiffLaneCost;
                break;
              }
            } else {
              // The user is neither in SLP tree nor in the look-ahead code.
              Cost += ExternalUseCost;
              break;
            }
          }
          // Limit the number of visited uses to cap compilation time.
          if (--UsersBudget == 0)
            break;
        }
      }
      return Cost;
    }

    /// Go through the operands of \p LHS and \p RHS recursively until \p
    /// MaxLevel, and return the cumulative score. For example:
    /// \verbatim
    ///  A[0]  B[0]  A[1]  B[1]  C[0] D[0]  B[1] A[1]
    ///     \ /        \ /        \ /        \ /
    ///      +          +          +          +
    ///      G1         G2         G3         G4
    /// \endverbatim
    /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
    /// each level recursively, accumulating the score. It starts from matching
    /// the additions at level 0, then moves on to the loads (level 1). The
    /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
    /// {B[0],B[1]} match with VLOperands::ScoreConsecutiveLoads, while
    /// {A[0],C[0]} has a score of VLOperands::ScoreFail.
    /// Please note that the order of the operands does not matter, as we
    /// evaluate the score of all profitable combinations of operands. In
    /// other words the score of G1 and G4 is the same as G1 and G2. This
    /// heuristic is based on ideas described in:
    ///   Look-ahead SLP: Auto-vectorization in the presence of commutative
    ///   operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
    ///   Luís F. W. Góes
    int getScoreAtLevelRec(const std::pair<Value *, int> &LHS,
                           const std::pair<Value *, int> &RHS, int CurrLevel,
                           int MaxLevel) {

      Value *V1 = LHS.first;
      Value *V2 = RHS.first;
      // Get the shallow score of V1 and V2.
      int ShallowScoreAtThisLevel = std::max(
          (int)ScoreFail, getShallowScore(V1, V2, DL, SE, getNumLanes()) -
                              getExternalUsesCost(LHS, RHS));
      int Lane1 = LHS.second;
      int Lane2 = RHS.second;

      // If reached MaxLevel,
      //  or if V1 and V2 are not instructions,
      //  or if they are SPLAT,
      //  or if they are not consecutive,
      //  or if profitable to vectorize loads or extractelements, early return
      //  the current cost.
      auto *I1 = dyn_cast<Instruction>(V1);
      auto *I2 = dyn_cast<Instruction>(V2);
      if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
          ShallowScoreAtThisLevel == VLOperands::ScoreFail ||
          (((isa<LoadInst>(I1) && isa<LoadInst>(I2)) ||
            (isa<ExtractElementInst>(I1) && isa<ExtractElementInst>(I2))) &&
           ShallowScoreAtThisLevel))
        return ShallowScoreAtThisLevel;
      assert(I1 && I2 && "Should have early exited.");

      // Keep track of in-tree values for determining the external-use cost.
      InLookAheadValues[V1].insert(Lane1);
      InLookAheadValues[V2].insert(Lane2);

      // Contains the I2 operand indexes that got matched with I1 operands.
      SmallSet<unsigned, 4> Op2Used;

      // Recursion towards the operands of I1 and I2. We are trying all possible
      // operand pairs, and keeping track of the best score.
      for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
           OpIdx1 != NumOperands1; ++OpIdx1) {
        // Try to pair I1's operand at OpIdx1 with the best operand of I2.
        int MaxTmpScore = 0;
        unsigned MaxOpIdx2 = 0;
        bool FoundBest = false;
        // If I2 is commutative try all combinations.
        unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
        unsigned ToIdx = isCommutative(I2)
                             ? I2->getNumOperands()
                             : std::min(I2->getNumOperands(), OpIdx1 + 1);
        assert(FromIdx <= ToIdx && "Bad index");
        for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
          // Skip operands already paired with OpIdx1.
          if (Op2Used.count(OpIdx2))
            continue;
          // Recursively calculate the cost at each level.
          int TmpScore = getScoreAtLevelRec({I1->getOperand(OpIdx1), Lane1},
                                            {I2->getOperand(OpIdx2), Lane2},
                                            CurrLevel + 1, MaxLevel);
          // Look for the best score.
          if (TmpScore > VLOperands::ScoreFail && TmpScore > MaxTmpScore) {
            MaxTmpScore = TmpScore;
            MaxOpIdx2 = OpIdx2;
            FoundBest = true;
          }
        }
        if (FoundBest) {
          // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
          Op2Used.insert(MaxOpIdx2);
          ShallowScoreAtThisLevel += MaxTmpScore;
        }
      }
      return ShallowScoreAtThisLevel;
    }

    /// \Returns the look-ahead score, which tells us how much the sub-trees
    /// rooted at \p LHS and \p RHS match, the more they match the higher the
    /// score. This helps break ties in an informed way when we cannot decide on
    /// the order of the operands by just considering the immediate
    /// predecessors.
    int getLookAheadScore(const std::pair<Value *, int> &LHS,
                          const std::pair<Value *, int> &RHS) {
      InLookAheadValues.clear();
      return getScoreAtLevelRec(LHS, RHS, 1, LookAheadMaxDepth);
    }

    // Search all operands in Ops[*][Lane] for the one that matches best
    // Ops[OpIdx][LastLane] and return its operand index.
    // If no good match can be found, return None.
    Optional<unsigned>
    getBestOperand(unsigned OpIdx, int Lane, int LastLane,
                   ArrayRef<ReorderingMode> ReorderingModes) {
      unsigned NumOperands = getNumOperands();

      // The operand of the previous lane at OpIdx.
      Value *OpLastLane = getData(OpIdx, LastLane).V;

      // Our strategy mode for OpIdx.
      ReorderingMode RMode = ReorderingModes[OpIdx];

      // The linearized opcode of the operand at OpIdx, Lane.
      bool OpIdxAPO = getData(OpIdx, Lane).APO;

      // The best operand index and its score.
      // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
      // are using the score to differentiate between the two.
      struct BestOpData {
        Optional<unsigned> Idx = None;
        unsigned Score = 0;
      } BestOp;

      // Iterate through all unused operands and look for the best.
      for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
        // Get the operand at Idx and Lane.
        OperandData &OpData = getData(Idx, Lane);
        Value *Op = OpData.V;
        bool OpAPO = OpData.APO;

        // Skip already selected operands.
        if (OpData.IsUsed)
          continue;

        // Skip if we are trying to move the operand to a position with a
        // different opcode in the linearized tree form. This would break the
        // semantics.
        if (OpAPO != OpIdxAPO)
          continue;

        // Look for an operand that matches the current mode.
        switch (RMode) {
        case ReorderingMode::Load:
        case ReorderingMode::Constant:
        case ReorderingMode::Opcode: {
          bool LeftToRight = Lane > LastLane;
          Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
          Value *OpRight = (LeftToRight) ? Op : OpLastLane;
          unsigned Score =
              getLookAheadScore({OpLeft, LastLane}, {OpRight, Lane});
          if (Score > BestOp.Score) {
            BestOp.Idx = Idx;
            BestOp.Score = Score;
          }
          break;
        }
        case ReorderingMode::Splat:
          if (Op == OpLastLane)
            BestOp.Idx = Idx;
          break;
        case ReorderingMode::Failed:
          return None;
        }
      }

      if (BestOp.Idx) {
        getData(BestOp.Idx.getValue(), Lane).IsUsed = true;
        return BestOp.Idx;
      }
      // If we could not find a good match return None.
      return None;
    }

    /// Helper for reorderOperandVecs.
    /// \returns the lane that we should start reordering from. This is the one
    /// which has the least number of operands that can freely move about or
    /// less profitable because it already has the most optimal set of operands.
    unsigned getBestLaneToStartReordering() const {
      unsigned Min = UINT_MAX;
      unsigned SameOpNumber = 0;
      // std::pair<unsigned, unsigned> is used to implement a simple voting
      // algorithm and choose the lane with the least number of operands that
      // can freely move about or less profitable because it already has the
      // most optimal set of operands. The first unsigned is a counter for
      // voting, the second unsigned is the counter of lanes with instructions
      // with same/alternate opcodes and same parent basic block.
      MapVector<unsigned, std::pair<unsigned, unsigned>> HashMap;
      // Try to be closer to the original results, if we have multiple lanes
      // with same cost. If 2 lanes have the same cost, use the one with the
      // lowest index.
      for (int I = getNumLanes(); I > 0; --I) {
        unsigned Lane = I - 1;
        OperandsOrderData NumFreeOpsHash =
            getMaxNumOperandsThatCanBeReordered(Lane);
        // Compare the number of operands that can move and choose the one with
        // the least number.
        if (NumFreeOpsHash.NumOfAPOs < Min) {
          Min = NumFreeOpsHash.NumOfAPOs;
          SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent;
          HashMap.clear();
          HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
        } else if (NumFreeOpsHash.NumOfAPOs == Min &&
                   NumFreeOpsHash.NumOpsWithSameOpcodeParent < SameOpNumber) {
          // Select the most optimal lane in terms of number of operands that
          // should be moved around.
          SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent;
          HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
        } else if (NumFreeOpsHash.NumOfAPOs == Min &&
                   NumFreeOpsHash.NumOpsWithSameOpcodeParent == SameOpNumber) {
          ++HashMap[NumFreeOpsHash.Hash].first;
        }
      }
      // Select the lane with the minimum counter.
      unsigned BestLane = 0;
      unsigned CntMin = UINT_MAX;
      for (const auto &Data : reverse(HashMap)) {
        if (Data.second.first < CntMin) {
          CntMin = Data.second.first;
          BestLane = Data.second.second;
        }
      }
      return BestLane;
    }

    /// Data structure that helps to reorder operands.
    struct OperandsOrderData {
      /// The best number of operands with the same APOs, which can be
      /// reordered.
      unsigned NumOfAPOs = UINT_MAX;
      /// Number of operands with the same/alternate instruction opcode and
      /// parent.
      unsigned NumOpsWithSameOpcodeParent = 0;
      /// Hash for the actual operands ordering.
      /// Used to count operands, actually their position id and opcode
      /// value. It is used in the voting mechanism to find the lane with the
It is used in the voting mechanism to find the lane with the 1446 /// least number of operands that can freely move about or less profitable 1447 /// because it already has the most optimal set of operands. Can be 1448 /// replaced with SmallVector<unsigned> instead but hash code is faster 1449 /// and requires less memory. 1450 unsigned Hash = 0; 1451 }; 1452 /// \returns the maximum number of operands that are allowed to be reordered 1453 /// for \p Lane and the number of compatible instructions(with the same 1454 /// parent/opcode). This is used as a heuristic for selecting the first lane 1455 /// to start operand reordering. 1456 OperandsOrderData getMaxNumOperandsThatCanBeReordered(unsigned Lane) const { 1457 unsigned CntTrue = 0; 1458 unsigned NumOperands = getNumOperands(); 1459 // Operands with the same APO can be reordered. We therefore need to count 1460 // how many of them we have for each APO, like this: Cnt[APO] = x. 1461 // Since we only have two APOs, namely true and false, we can avoid using 1462 // a map. Instead we can simply count the number of operands that 1463 // correspond to one of them (in this case the 'true' APO), and calculate 1464 // the other by subtracting it from the total number of operands. 1465 // Operands with the same instruction opcode and parent are more 1466 // profitable since we don't need to move them in many cases, with a high 1467 // probability such lane already can be vectorized effectively. 1468 bool AllUndefs = true; 1469 unsigned NumOpsWithSameOpcodeParent = 0; 1470 Instruction *OpcodeI = nullptr; 1471 BasicBlock *Parent = nullptr; 1472 unsigned Hash = 0; 1473 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1474 const OperandData &OpData = getData(OpIdx, Lane); 1475 if (OpData.APO) 1476 ++CntTrue; 1477 // Use Boyer-Moore majority voting for finding the majority opcode and 1478 // the number of times it occurs. 1479 if (auto *I = dyn_cast<Instruction>(OpData.V)) { 1480 if (!OpcodeI || !getSameOpcode({OpcodeI, I}).getOpcode() || 1481 I->getParent() != Parent) { 1482 if (NumOpsWithSameOpcodeParent == 0) { 1483 NumOpsWithSameOpcodeParent = 1; 1484 OpcodeI = I; 1485 Parent = I->getParent(); 1486 } else { 1487 --NumOpsWithSameOpcodeParent; 1488 } 1489 } else { 1490 ++NumOpsWithSameOpcodeParent; 1491 } 1492 } 1493 Hash = hash_combine( 1494 Hash, hash_value((OpIdx + 1) * (OpData.V->getValueID() + 1))); 1495 AllUndefs = AllUndefs && isa<UndefValue>(OpData.V); 1496 } 1497 if (AllUndefs) 1498 return {}; 1499 OperandsOrderData Data; 1500 Data.NumOfAPOs = std::max(CntTrue, NumOperands - CntTrue); 1501 Data.NumOpsWithSameOpcodeParent = NumOpsWithSameOpcodeParent; 1502 Data.Hash = Hash; 1503 return Data; 1504 } 1505 1506 /// Go through the instructions in VL and append their operands. 1507 void appendOperandsOfVL(ArrayRef<Value *> VL) { 1508 assert(!VL.empty() && "Bad VL"); 1509 assert((empty() || VL.size() == getNumLanes()) && 1510 "Expected same number of lanes"); 1511 assert(isa<Instruction>(VL[0]) && "Expected instruction"); 1512 unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands(); 1513 OpsVec.resize(NumOperands); 1514 unsigned NumLanes = VL.size(); 1515 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1516 OpsVec[OpIdx].resize(NumLanes); 1517 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { 1518 assert(isa<Instruction>(VL[Lane]) && "Expected instruction"); 1519 // Our tree has just 3 nodes: the root and two operands. 1520 // It is therefore trivial to get the APO. 
We only need to check the 1521 // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or 1522 // RHS operand. The LHS operand of both add and sub is never attached 1523 // to an inverse operation in the linearized form, therefore its APO 1524 // is false. The RHS is true only if VL[Lane] is an inverse operation. 1525 1526 // Since operand reordering is performed on groups of commutative 1527 // operations or alternating sequences (e.g., +, -), we can safely 1528 // tell the inverse operations by checking commutativity. 1529 bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane])); 1530 bool APO = (OpIdx == 0) ? false : IsInverseOperation; 1531 OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx), 1532 APO, false}; 1533 } 1534 } 1535 } 1536 1537 /// \returns the number of operands. 1538 unsigned getNumOperands() const { return OpsVec.size(); } 1539 1540 /// \returns the number of lanes. 1541 unsigned getNumLanes() const { return OpsVec[0].size(); } 1542 1543 /// \returns the operand value at \p OpIdx and \p Lane. 1544 Value *getValue(unsigned OpIdx, unsigned Lane) const { 1545 return getData(OpIdx, Lane).V; 1546 } 1547 1548 /// \returns true if the data structure is empty. 1549 bool empty() const { return OpsVec.empty(); } 1550 1551 /// Clears the data. 1552 void clear() { OpsVec.clear(); } 1553 1554 /// \returns true if there are enough operands identical to \p Op to fill 1555 /// the whole vector. 1556 /// Note: This modifies the 'IsUsed' flag, so a clearUsed() must follow. 1557 bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) { 1558 bool OpAPO = getData(OpIdx, Lane).APO; 1559 for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) { 1560 if (Ln == Lane) 1561 continue; 1562 // This is set to true if we found a candidate for broadcast at Lane. 1563 bool FoundCandidate = false; 1564 for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) { 1565 OperandData &Data = getData(OpI, Ln); 1566 if (Data.APO != OpAPO || Data.IsUsed) 1567 continue; 1568 if (Data.V == Op) { 1569 FoundCandidate = true; 1570 Data.IsUsed = true; 1571 break; 1572 } 1573 } 1574 if (!FoundCandidate) 1575 return false; 1576 } 1577 return true; 1578 } 1579 1580 public: 1581 /// Initialize with all the operands of the instruction vector \p RootVL. 1582 VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL, 1583 ScalarEvolution &SE, const BoUpSLP &R) 1584 : DL(DL), SE(SE), R(R) { 1585 // Append all the operands of RootVL. 1586 appendOperandsOfVL(RootVL); 1587 } 1588 1589 /// \returns a value vector with the operands across all lanes for the 1590 /// operand at \p OpIdx. 1591 ValueList getVL(unsigned OpIdx) const { 1592 ValueList OpVL(OpsVec[OpIdx].size()); 1593 assert(OpsVec[OpIdx].size() == getNumLanes() && 1594 "Expected same num of lanes across all operands"); 1595 for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane) 1596 OpVL[Lane] = OpsVec[OpIdx][Lane].V; 1597 return OpVL; 1598 } 1599 1600 // Performs operand reordering for 2 or more operands. 1601 // The original operands are in OpsVec[OpIdx][Lane]; the reordering is done 1602 // in place, so the reordered operands end up in the same structure. 1603 void reorder() { 1604 unsigned NumOperands = getNumOperands(); 1605 unsigned NumLanes = getNumLanes(); 1606 // Each operand has its own mode. We are using this mode to help us select 1607 // the instructions for each lane, so that they match best with the ones 1608 // we have selected so far.
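// A hypothetical sketch of what this achieves (the IR names below are made
// up for illustration): for a two-lane bundle
//   Lane 0: %a0 = add i32 %ld0, %c0
//   Lane 1: %a1 = add i32 %c1, %ld1
// the operands start out as OpsVec[0] = {%ld0, %c1}, OpsVec[1] = {%c0, %ld1}.
// If %ld0/%ld1 are loads and %c0/%c1 are constants, the modes chosen below
// from the first lane would typically be ReorderingMode::Load for operand 0
// and ReorderingMode::Constant for operand 1, and getBestOperand() would
// then swap %c1 and %ld1 in lane 1 so that the loads end up in one operand
// vector and the constants in the other.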
1609 SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands); 1610 1611 // This is a greedy single-pass algorithm. We are going over each lane 1612 // once and deciding on the best order right away with no back-tracking. 1613 // However, in order to increase its effectiveness, we start with the lane 1614 // that has operands that can move the least. For example, given the 1615 // following lanes: 1616 // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd 1617 // Lane 1 : A[1] = C[1] - B[1] // Visited 1st 1618 // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd 1619 // Lane 3 : A[3] = C[3] - B[3] // Visited 4th 1620 // we will start at Lane 1, since the operands of the subtraction cannot 1621 // be reordered. Then we will visit the rest of the lanes in a circular 1622 // fashion. That is, Lane 2, then Lane 0, and finally Lane 3. 1623 1624 // Find the first lane that we will start our search from. 1625 unsigned FirstLane = getBestLaneToStartReordering(); 1626 1627 // Initialize the modes. 1628 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1629 Value *OpLane0 = getValue(OpIdx, FirstLane); 1630 // Keep track if we have instructions with all the same opcode on one 1631 // side. 1632 if (isa<LoadInst>(OpLane0)) 1633 ReorderingModes[OpIdx] = ReorderingMode::Load; 1634 else if (isa<Instruction>(OpLane0)) { 1635 // Check if OpLane0 should be broadcast. 1636 if (shouldBroadcast(OpLane0, OpIdx, FirstLane)) 1637 ReorderingModes[OpIdx] = ReorderingMode::Splat; 1638 else 1639 ReorderingModes[OpIdx] = ReorderingMode::Opcode; 1640 } 1641 else if (isa<Constant>(OpLane0)) 1642 ReorderingModes[OpIdx] = ReorderingMode::Constant; 1643 else if (isa<Argument>(OpLane0)) 1644 // Our best hope is a Splat. It may save some cost in some cases. 1645 ReorderingModes[OpIdx] = ReorderingMode::Splat; 1646 else 1647 // NOTE: This should be unreachable. 1648 ReorderingModes[OpIdx] = ReorderingMode::Failed; 1649 } 1650 1651 // Check that we don't have the same operands. No need to reorder if the 1652 // operands are just a perfect or shuffled diamond match. Do not skip the 1653 // reordering for possible broadcasts or a non-power-of-2 number of scalars 1654 // (just for now). 1655 auto &&SkipReordering = [this]() { 1656 SmallPtrSet<Value *, 4> UniqueValues; 1657 ArrayRef<OperandData> Op0 = OpsVec.front(); 1658 for (const OperandData &Data : Op0) 1659 UniqueValues.insert(Data.V); 1660 for (ArrayRef<OperandData> Op : drop_begin(OpsVec, 1)) { 1661 if (any_of(Op, [&UniqueValues](const OperandData &Data) { 1662 return !UniqueValues.contains(Data.V); 1663 })) 1664 return false; 1665 } 1666 // TODO: Check if we can remove a check for non-power-2 number of 1667 // scalars after full support of non-power-2 vectorization. 1668 return UniqueValues.size() != 2 && isPowerOf2_32(UniqueValues.size()); 1669 }; 1670 1671 // If the initial strategy fails for any of the operand indexes, then we 1672 // perform reordering again in a second pass. This helps avoid assigning 1673 // high priority to the failed strategy, and should improve reordering for 1674 // the non-failed operand indexes. 1675 for (int Pass = 0; Pass != 2; ++Pass) { 1676 // Check if there is no need to reorder the operands because they are a 1677 // perfect or shuffled diamond match. 1678 // Need to do it to avoid extra external use cost counting for 1679 // shuffled matches, which may cause regressions. 1680 if (SkipReordering()) 1681 break; 1682 // Skip the second pass if the first pass did not fail. 1683 bool StrategyFailed = false; 1684 // Mark all operand data as free to use.
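// (clearUsed() below is expected to reset the IsUsed flags that
// getBestOperand() and shouldBroadcast() set while probing candidates, so
// that every pass starts from a clean slate.)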
1685 clearUsed(); 1686 // We keep the original operand order for the FirstLane, so reorder the 1687 // rest of the lanes. We are visiting the nodes in a circular fashion, 1688 // using FirstLane as the center point and increasing the radius 1689 // distance. 1690 for (unsigned Distance = 1; Distance != NumLanes; ++Distance) { 1691 // Visit the lane on the right and then the lane on the left. 1692 for (int Direction : {+1, -1}) { 1693 int Lane = FirstLane + Direction * Distance; 1694 if (Lane < 0 || Lane >= (int)NumLanes) 1695 continue; 1696 int LastLane = Lane - Direction; 1697 assert(LastLane >= 0 && LastLane < (int)NumLanes && 1698 "Out of bounds"); 1699 // Look for a good match for each operand. 1700 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) { 1701 // Search for the operand that matches SortedOps[OpIdx][Lane-1]. 1702 Optional<unsigned> BestIdx = 1703 getBestOperand(OpIdx, Lane, LastLane, ReorderingModes); 1704 // By not selecting a value, we allow the operands that follow to 1705 // select a better matching value. We will get a non-null value in 1706 // the next run of getBestOperand(). 1707 if (BestIdx) { 1708 // Swap the current operand with the one returned by 1709 // getBestOperand(). 1710 swap(OpIdx, BestIdx.getValue(), Lane); 1711 } else { 1712 // We failed to find a best operand, set mode to 'Failed'. 1713 ReorderingModes[OpIdx] = ReorderingMode::Failed; 1714 // Enable the second pass. 1715 StrategyFailed = true; 1716 } 1717 } 1718 } 1719 } 1720 // Skip second pass if the strategy did not fail. 1721 if (!StrategyFailed) 1722 break; 1723 } 1724 } 1725 1726 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1727 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) { 1728 switch (RMode) { 1729 case ReorderingMode::Load: 1730 return "Load"; 1731 case ReorderingMode::Opcode: 1732 return "Opcode"; 1733 case ReorderingMode::Constant: 1734 return "Constant"; 1735 case ReorderingMode::Splat: 1736 return "Splat"; 1737 case ReorderingMode::Failed: 1738 return "Failed"; 1739 } 1740 llvm_unreachable("Unimplemented Reordering Type"); 1741 } 1742 1743 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode, 1744 raw_ostream &OS) { 1745 return OS << getModeStr(RMode); 1746 } 1747 1748 /// Debug print. 1749 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) { 1750 printMode(RMode, dbgs()); 1751 } 1752 1753 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) { 1754 return printMode(RMode, OS); 1755 } 1756 1757 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const { 1758 const unsigned Indent = 2; 1759 unsigned Cnt = 0; 1760 for (const OperandDataVec &OpDataVec : OpsVec) { 1761 OS << "Operand " << Cnt++ << "\n"; 1762 for (const OperandData &OpData : OpDataVec) { 1763 OS.indent(Indent) << "{"; 1764 if (Value *V = OpData.V) 1765 OS << *V; 1766 else 1767 OS << "null"; 1768 OS << ", APO:" << OpData.APO << "}\n"; 1769 } 1770 OS << "\n"; 1771 } 1772 return OS; 1773 } 1774 1775 /// Debug print. 1776 LLVM_DUMP_METHOD void dump() const { print(dbgs()); } 1777 #endif 1778 }; 1779 1780 /// Checks if the instruction is marked for deletion. 1781 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); } 1782 1783 /// Marks values operands for later deletion by replacing them with Undefs. 1784 void eraseInstructions(ArrayRef<Value *> AV); 1785 1786 ~BoUpSLP(); 1787 1788 private: 1789 /// Checks if all users of \p I are the part of the vectorization tree. 
1790 bool areAllUsersVectorized(Instruction *I, 1791 ArrayRef<Value *> VectorizedVals) const; 1792 1793 /// \returns the cost of the vectorizable entry. 1794 InstructionCost getEntryCost(const TreeEntry *E, 1795 ArrayRef<Value *> VectorizedVals); 1796 1797 /// This is the recursive part of buildTree. 1798 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, 1799 const EdgeInfo &EI); 1800 1801 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can 1802 /// be vectorized to use the original vector (or aggregate "bitcast" to a 1803 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise 1804 /// returns false, setting \p CurrentOrder to either an empty vector or a 1805 /// non-identity permutation that allows to reuse extract instructions. 1806 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 1807 SmallVectorImpl<unsigned> &CurrentOrder) const; 1808 1809 /// Vectorize a single entry in the tree. 1810 Value *vectorizeTree(TreeEntry *E); 1811 1812 /// Vectorize a single entry in the tree, starting in \p VL. 1813 Value *vectorizeTree(ArrayRef<Value *> VL); 1814 1815 /// \returns the scalarization cost for this type. Scalarization in this 1816 /// context means the creation of vectors from a group of scalars. If \p 1817 /// NeedToShuffle is true, need to add a cost of reshuffling some of the 1818 /// vector elements. 1819 InstructionCost getGatherCost(FixedVectorType *Ty, 1820 const DenseSet<unsigned> &ShuffledIndices, 1821 bool NeedToShuffle) const; 1822 1823 /// Checks if the gathered \p VL can be represented as shuffle(s) of previous 1824 /// tree entries. 1825 /// \returns ShuffleKind, if gathered values can be represented as shuffles of 1826 /// previous tree entries. \p Mask is filled with the shuffle mask. 1827 Optional<TargetTransformInfo::ShuffleKind> 1828 isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask, 1829 SmallVectorImpl<const TreeEntry *> &Entries); 1830 1831 /// \returns the scalarization cost for this list of values. Assuming that 1832 /// this subtree gets vectorized, we may need to extract the values from the 1833 /// roots. This method calculates the cost of extracting the values. 1834 InstructionCost getGatherCost(ArrayRef<Value *> VL) const; 1835 1836 /// Set the Builder insert point to one after the last instruction in 1837 /// the bundle 1838 void setInsertPointAfterBundle(const TreeEntry *E); 1839 1840 /// \returns a vector from a collection of scalars in \p VL. 1841 Value *gather(ArrayRef<Value *> VL); 1842 1843 /// \returns whether the VectorizableTree is fully vectorizable and will 1844 /// be beneficial even the tree height is tiny. 1845 bool isFullyVectorizableTinyTree(bool ForReduction) const; 1846 1847 /// Reorder commutative or alt operands to get better probability of 1848 /// generating vectorized code. 1849 static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, 1850 SmallVectorImpl<Value *> &Left, 1851 SmallVectorImpl<Value *> &Right, 1852 const DataLayout &DL, 1853 ScalarEvolution &SE, 1854 const BoUpSLP &R); 1855 struct TreeEntry { 1856 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>; 1857 TreeEntry(VecTreeTy &Container) : Container(Container) {} 1858 1859 /// \returns true if the scalars in VL are equal to this entry. 
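/// A hypothetical example: with Scalars = {%a, %b} and
/// ReuseShuffleIndices = {0, 1, 1, 0}, isSame({%a, %b, %b, %a}) would be
/// expected to return true, because VL matches the scalars once the reuse
/// mask is applied.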
1860 bool isSame(ArrayRef<Value *> VL) const { 1861 auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) { 1862 if (Mask.size() != VL.size() && VL.size() == Scalars.size()) 1863 return std::equal(VL.begin(), VL.end(), Scalars.begin()); 1864 return VL.size() == Mask.size() && 1865 std::equal(VL.begin(), VL.end(), Mask.begin(), 1866 [Scalars](Value *V, int Idx) { 1867 return (isa<UndefValue>(V) && 1868 Idx == UndefMaskElem) || 1869 (Idx != UndefMaskElem && V == Scalars[Idx]); 1870 }); 1871 }; 1872 if (!ReorderIndices.empty()) { 1873 // TODO: implement matching if the nodes are just reordered, still can 1874 // treat the vector as the same if the list of scalars matches VL 1875 // directly, without reordering. 1876 SmallVector<int> Mask; 1877 inversePermutation(ReorderIndices, Mask); 1878 if (VL.size() == Scalars.size()) 1879 return IsSame(Scalars, Mask); 1880 if (VL.size() == ReuseShuffleIndices.size()) { 1881 ::addMask(Mask, ReuseShuffleIndices); 1882 return IsSame(Scalars, Mask); 1883 } 1884 return false; 1885 } 1886 return IsSame(Scalars, ReuseShuffleIndices); 1887 } 1888 1889 /// \returns true if current entry has same operands as \p TE. 1890 bool hasEqualOperands(const TreeEntry &TE) const { 1891 if (TE.getNumOperands() != getNumOperands()) 1892 return false; 1893 SmallBitVector Used(getNumOperands()); 1894 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) { 1895 unsigned PrevCount = Used.count(); 1896 for (unsigned K = 0; K < E; ++K) { 1897 if (Used.test(K)) 1898 continue; 1899 if (getOperand(K) == TE.getOperand(I)) { 1900 Used.set(K); 1901 break; 1902 } 1903 } 1904 // Check if we actually found the matching operand. 1905 if (PrevCount == Used.count()) 1906 return false; 1907 } 1908 return true; 1909 } 1910 1911 /// \return Final vectorization factor for the node. Defined by the total 1912 /// number of vectorized scalars, including those, used several times in the 1913 /// entry and counted in the \a ReuseShuffleIndices, if any. 1914 unsigned getVectorFactor() const { 1915 if (!ReuseShuffleIndices.empty()) 1916 return ReuseShuffleIndices.size(); 1917 return Scalars.size(); 1918 }; 1919 1920 /// A vector of scalars. 1921 ValueList Scalars; 1922 1923 /// The Scalars are vectorized into this value. It is initialized to Null. 1924 Value *VectorizedValue = nullptr; 1925 1926 /// Do we need to gather this sequence or vectorize it 1927 /// (either with vector instruction or with scatter/gather 1928 /// intrinsics for store/load)? 1929 enum EntryState { Vectorize, ScatterVectorize, NeedToGather }; 1930 EntryState State; 1931 1932 /// Does this sequence require some shuffling? 1933 SmallVector<int, 4> ReuseShuffleIndices; 1934 1935 /// Does this entry require reordering? 1936 SmallVector<unsigned, 4> ReorderIndices; 1937 1938 /// Points back to the VectorizableTree. 1939 /// 1940 /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has 1941 /// to be a pointer and needs to be able to initialize the child iterator. 1942 /// Thus we need a reference back to the container to translate the indices 1943 /// to entries. 1944 VecTreeTy &Container; 1945 1946 /// The TreeEntry index containing the user of this entry. We can actually 1947 /// have multiple users so the data structure is not truly a tree. 1948 SmallVector<EdgeInfo, 1> UserTreeIndices; 1949 1950 /// The index of this treeEntry in VectorizableTree. 1951 int Idx = -1; 1952 1953 private: 1954 /// The operands of each instruction in each lane Operands[op_index][lane]. 
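/// For example (a minimal sketch), for Scalars = {add %x0, %y0; add %x1, %y1}
/// this would hold Operands[0] = {%x0, %x1} and Operands[1] = {%y0, %y1},
/// possibly permuted per lane by the operand reordering.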
1955 /// Note: This helps avoid the replication of the code that performs the 1956 /// reordering of operands during buildTree_rec() and vectorizeTree(). 1957 SmallVector<ValueList, 2> Operands; 1958 1959 /// The main/alternate instruction. 1960 Instruction *MainOp = nullptr; 1961 Instruction *AltOp = nullptr; 1962 1963 public: 1964 /// Set this bundle's \p OpIdx'th operand to \p OpVL. 1965 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) { 1966 if (Operands.size() < OpIdx + 1) 1967 Operands.resize(OpIdx + 1); 1968 assert(Operands[OpIdx].empty() && "Already resized?"); 1969 assert(OpVL.size() <= Scalars.size() && 1970 "Number of operands is greater than the number of scalars."); 1971 Operands[OpIdx].resize(OpVL.size()); 1972 copy(OpVL, Operands[OpIdx].begin()); 1973 } 1974 1975 /// Set the operands of this bundle in their original order. 1976 void setOperandsInOrder() { 1977 assert(Operands.empty() && "Already initialized?"); 1978 auto *I0 = cast<Instruction>(Scalars[0]); 1979 Operands.resize(I0->getNumOperands()); 1980 unsigned NumLanes = Scalars.size(); 1981 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands(); 1982 OpIdx != NumOperands; ++OpIdx) { 1983 Operands[OpIdx].resize(NumLanes); 1984 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { 1985 auto *I = cast<Instruction>(Scalars[Lane]); 1986 assert(I->getNumOperands() == NumOperands && 1987 "Expected same number of operands"); 1988 Operands[OpIdx][Lane] = I->getOperand(OpIdx); 1989 } 1990 } 1991 } 1992 1993 /// Reorders operands of the node to the given mask \p Mask. 1994 void reorderOperands(ArrayRef<int> Mask) { 1995 for (ValueList &Operand : Operands) 1996 reorderScalars(Operand, Mask); 1997 } 1998 1999 /// \returns the \p OpIdx operand of this TreeEntry. 2000 ValueList &getOperand(unsigned OpIdx) { 2001 assert(OpIdx < Operands.size() && "Off bounds"); 2002 return Operands[OpIdx]; 2003 } 2004 2005 /// \returns the \p OpIdx operand of this TreeEntry. 2006 ArrayRef<Value *> getOperand(unsigned OpIdx) const { 2007 assert(OpIdx < Operands.size() && "Off bounds"); 2008 return Operands[OpIdx]; 2009 } 2010 2011 /// \returns the number of operands. 2012 unsigned getNumOperands() const { return Operands.size(); } 2013 2014 /// \return the single \p OpIdx operand. 2015 Value *getSingleOperand(unsigned OpIdx) const { 2016 assert(OpIdx < Operands.size() && "Off bounds"); 2017 assert(!Operands[OpIdx].empty() && "No operand available"); 2018 return Operands[OpIdx][0]; 2019 } 2020 2021 /// Some of the instructions in the list have alternate opcodes. 2022 bool isAltShuffle() const { return MainOp != AltOp; } 2023 2024 bool isOpcodeOrAlt(Instruction *I) const { 2025 unsigned CheckedOpcode = I->getOpcode(); 2026 return (getOpcode() == CheckedOpcode || 2027 getAltOpcode() == CheckedOpcode); 2028 } 2029 2030 /// Chooses the correct key for scheduling data. If \p Op has the same (or 2031 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is 2032 /// \p OpValue. 2033 Value *isOneOf(Value *Op) const { 2034 auto *I = dyn_cast<Instruction>(Op); 2035 if (I && isOpcodeOrAlt(I)) 2036 return Op; 2037 return MainOp; 2038 } 2039 2040 void setOperations(const InstructionsState &S) { 2041 MainOp = S.MainOp; 2042 AltOp = S.AltOp; 2043 } 2044 2045 Instruction *getMainOp() const { 2046 return MainOp; 2047 } 2048 2049 Instruction *getAltOp() const { 2050 return AltOp; 2051 } 2052 2053 /// The main/alternate opcodes for the list of instructions. 2054 unsigned getOpcode() const { 2055 return MainOp ? 
MainOp->getOpcode() : 0; 2056 } 2057 2058 unsigned getAltOpcode() const { 2059 return AltOp ? AltOp->getOpcode() : 0; 2060 } 2061 2062 /// When ReuseReorderShuffleIndices is empty it just returns position of \p 2063 /// V within vector of Scalars. Otherwise, try to remap on its reuse index. 2064 int findLaneForValue(Value *V) const { 2065 unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V)); 2066 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 2067 if (!ReorderIndices.empty()) 2068 FoundLane = ReorderIndices[FoundLane]; 2069 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 2070 if (!ReuseShuffleIndices.empty()) { 2071 FoundLane = std::distance(ReuseShuffleIndices.begin(), 2072 find(ReuseShuffleIndices, FoundLane)); 2073 } 2074 return FoundLane; 2075 } 2076 2077 #ifndef NDEBUG 2078 /// Debug printer. 2079 LLVM_DUMP_METHOD void dump() const { 2080 dbgs() << Idx << ".\n"; 2081 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) { 2082 dbgs() << "Operand " << OpI << ":\n"; 2083 for (const Value *V : Operands[OpI]) 2084 dbgs().indent(2) << *V << "\n"; 2085 } 2086 dbgs() << "Scalars: \n"; 2087 for (Value *V : Scalars) 2088 dbgs().indent(2) << *V << "\n"; 2089 dbgs() << "State: "; 2090 switch (State) { 2091 case Vectorize: 2092 dbgs() << "Vectorize\n"; 2093 break; 2094 case ScatterVectorize: 2095 dbgs() << "ScatterVectorize\n"; 2096 break; 2097 case NeedToGather: 2098 dbgs() << "NeedToGather\n"; 2099 break; 2100 } 2101 dbgs() << "MainOp: "; 2102 if (MainOp) 2103 dbgs() << *MainOp << "\n"; 2104 else 2105 dbgs() << "NULL\n"; 2106 dbgs() << "AltOp: "; 2107 if (AltOp) 2108 dbgs() << *AltOp << "\n"; 2109 else 2110 dbgs() << "NULL\n"; 2111 dbgs() << "VectorizedValue: "; 2112 if (VectorizedValue) 2113 dbgs() << *VectorizedValue << "\n"; 2114 else 2115 dbgs() << "NULL\n"; 2116 dbgs() << "ReuseShuffleIndices: "; 2117 if (ReuseShuffleIndices.empty()) 2118 dbgs() << "Empty"; 2119 else 2120 for (int ReuseIdx : ReuseShuffleIndices) 2121 dbgs() << ReuseIdx << ", "; 2122 dbgs() << "\n"; 2123 dbgs() << "ReorderIndices: "; 2124 for (unsigned ReorderIdx : ReorderIndices) 2125 dbgs() << ReorderIdx << ", "; 2126 dbgs() << "\n"; 2127 dbgs() << "UserTreeIndices: "; 2128 for (const auto &EInfo : UserTreeIndices) 2129 dbgs() << EInfo << ", "; 2130 dbgs() << "\n"; 2131 } 2132 #endif 2133 }; 2134 2135 #ifndef NDEBUG 2136 void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost, 2137 InstructionCost VecCost, 2138 InstructionCost ScalarCost) const { 2139 dbgs() << "SLP: Calculated costs for Tree:\n"; E->dump(); 2140 dbgs() << "SLP: Costs:\n"; 2141 dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n"; 2142 dbgs() << "SLP: VectorCost = " << VecCost << "\n"; 2143 dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n"; 2144 dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = " << 2145 ReuseShuffleCost + VecCost - ScalarCost << "\n"; 2146 } 2147 #endif 2148 2149 /// Create a new VectorizableTree entry. 2150 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, Optional<ScheduleData *> Bundle, 2151 const InstructionsState &S, 2152 const EdgeInfo &UserTreeIdx, 2153 ArrayRef<int> ReuseShuffleIndices = None, 2154 ArrayRef<unsigned> ReorderIndices = None) { 2155 TreeEntry::EntryState EntryState = 2156 Bundle ? 
TreeEntry::Vectorize : TreeEntry::NeedToGather; 2157 return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx, 2158 ReuseShuffleIndices, ReorderIndices); 2159 } 2160 2161 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, 2162 TreeEntry::EntryState EntryState, 2163 Optional<ScheduleData *> Bundle, 2164 const InstructionsState &S, 2165 const EdgeInfo &UserTreeIdx, 2166 ArrayRef<int> ReuseShuffleIndices = None, 2167 ArrayRef<unsigned> ReorderIndices = None) { 2168 assert(((!Bundle && EntryState == TreeEntry::NeedToGather) || 2169 (Bundle && EntryState != TreeEntry::NeedToGather)) && 2170 "Need to vectorize gather entry?"); 2171 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree)); 2172 TreeEntry *Last = VectorizableTree.back().get(); 2173 Last->Idx = VectorizableTree.size() - 1; 2174 Last->State = EntryState; 2175 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(), 2176 ReuseShuffleIndices.end()); 2177 if (ReorderIndices.empty()) { 2178 Last->Scalars.assign(VL.begin(), VL.end()); 2179 Last->setOperations(S); 2180 } else { 2181 // Reorder scalars and build final mask. 2182 Last->Scalars.assign(VL.size(), nullptr); 2183 transform(ReorderIndices, Last->Scalars.begin(), 2184 [VL](unsigned Idx) -> Value * { 2185 if (Idx >= VL.size()) 2186 return UndefValue::get(VL.front()->getType()); 2187 return VL[Idx]; 2188 }); 2189 InstructionsState S = getSameOpcode(Last->Scalars); 2190 Last->setOperations(S); 2191 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end()); 2192 } 2193 if (Last->State != TreeEntry::NeedToGather) { 2194 for (Value *V : VL) { 2195 assert(!getTreeEntry(V) && "Scalar already in tree!"); 2196 ScalarToTreeEntry[V] = Last; 2197 } 2198 // Update the scheduler bundle to point to this TreeEntry. 2199 unsigned Lane = 0; 2200 for (ScheduleData *BundleMember = Bundle.getValue(); BundleMember; 2201 BundleMember = BundleMember->NextInBundle) { 2202 BundleMember->TE = Last; 2203 BundleMember->Lane = Lane; 2204 ++Lane; 2205 } 2206 assert((!Bundle.getValue() || Lane == VL.size()) && 2207 "Bundle and VL out of sync"); 2208 } else { 2209 MustGather.insert(VL.begin(), VL.end()); 2210 } 2211 2212 if (UserTreeIdx.UserTE) 2213 Last->UserTreeIndices.push_back(UserTreeIdx); 2214 2215 return Last; 2216 } 2217 2218 /// -- Vectorization State -- 2219 /// Holds all of the tree entries. 2220 TreeEntry::VecTreeTy VectorizableTree; 2221 2222 #ifndef NDEBUG 2223 /// Debug printer. 2224 LLVM_DUMP_METHOD void dumpVectorizableTree() const { 2225 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) { 2226 VectorizableTree[Id]->dump(); 2227 dbgs() << "\n"; 2228 } 2229 } 2230 #endif 2231 2232 TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); } 2233 2234 const TreeEntry *getTreeEntry(Value *V) const { 2235 return ScalarToTreeEntry.lookup(V); 2236 } 2237 2238 /// Maps a specific scalar to its tree entry. 2239 SmallDenseMap<Value*, TreeEntry *> ScalarToTreeEntry; 2240 2241 /// Maps a value to the proposed vectorizable size. 2242 SmallDenseMap<Value *, unsigned> InstrElementSize; 2243 2244 /// A list of scalars that we found that we need to keep as scalars. 2245 ValueSet MustGather; 2246 2247 /// This POD struct describes one external user in the vectorized tree. 2248 struct ExternalUser { 2249 ExternalUser(Value *S, llvm::User *U, int L) 2250 : Scalar(S), User(U), Lane(L) {} 2251 2252 // Which scalar in our function. 2253 Value *Scalar; 2254 2255 // Which user that uses the scalar. 
2256 llvm::User *User; 2257 2258 // Which lane does the scalar belong to. 2259 int Lane; 2260 }; 2261 using UserList = SmallVector<ExternalUser, 16>; 2262 2263 /// Checks if two instructions may access the same memory. 2264 /// 2265 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it 2266 /// is invariant in the calling loop. 2267 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1, 2268 Instruction *Inst2) { 2269 // First check if the result is already in the cache. 2270 AliasCacheKey key = std::make_pair(Inst1, Inst2); 2271 Optional<bool> &result = AliasCache[key]; 2272 if (result.hasValue()) { 2273 return result.getValue(); 2274 } 2275 bool aliased = true; 2276 if (Loc1.Ptr && isSimple(Inst1)) 2277 aliased = isModOrRefSet(AA->getModRefInfo(Inst2, Loc1)); 2278 // Store the result in the cache. 2279 result = aliased; 2280 return aliased; 2281 } 2282 2283 using AliasCacheKey = std::pair<Instruction *, Instruction *>; 2284 2285 /// Cache for alias results. 2286 /// TODO: consider moving this to the AliasAnalysis itself. 2287 DenseMap<AliasCacheKey, Optional<bool>> AliasCache; 2288 2289 /// Removes an instruction from its block and eventually deletes it. 2290 /// It's like Instruction::eraseFromParent() except that the actual deletion 2291 /// is delayed until BoUpSLP is destructed. 2292 /// This is required to ensure that there are no incorrect collisions in the 2293 /// AliasCache, which can happen if a new instruction is allocated at the 2294 /// same address as a previously deleted instruction. 2295 void eraseInstruction(Instruction *I, bool ReplaceOpsWithUndef = false) { 2296 auto It = DeletedInstructions.try_emplace(I, ReplaceOpsWithUndef).first; 2297 It->getSecond() = It->getSecond() && ReplaceOpsWithUndef; 2298 } 2299 2300 /// Temporary store for deleted instructions. Instructions will be deleted 2301 /// eventually when the BoUpSLP is destructed. 2302 DenseMap<Instruction *, bool> DeletedInstructions; 2303 2304 /// A list of values that need to extracted out of the tree. 2305 /// This list holds pairs of (Internal Scalar : External User). External User 2306 /// can be nullptr, it means that this Internal Scalar will be used later, 2307 /// after vectorization. 2308 UserList ExternalUses; 2309 2310 /// Values used only by @llvm.assume calls. 2311 SmallPtrSet<const Value *, 32> EphValues; 2312 2313 /// Holds all of the instructions that we gathered. 2314 SetVector<Instruction *> GatherShuffleSeq; 2315 2316 /// A list of blocks that we are going to CSE. 2317 SetVector<BasicBlock *> CSEBlocks; 2318 2319 /// Contains all scheduling relevant data for an instruction. 2320 /// A ScheduleData either represents a single instruction or a member of an 2321 /// instruction bundle (= a group of instructions which is combined into a 2322 /// vector instruction). 2323 struct ScheduleData { 2324 // The initial value for the dependency counters. It means that the 2325 // dependencies are not calculated yet. 2326 enum { InvalidDeps = -1 }; 2327 2328 ScheduleData() = default; 2329 2330 void init(int BlockSchedulingRegionID, Value *OpVal) { 2331 FirstInBundle = this; 2332 NextInBundle = nullptr; 2333 NextLoadStore = nullptr; 2334 IsScheduled = false; 2335 SchedulingRegionID = BlockSchedulingRegionID; 2336 UnscheduledDepsInBundle = UnscheduledDeps; 2337 clearDependencies(); 2338 OpValue = OpVal; 2339 TE = nullptr; 2340 Lane = -1; 2341 } 2342 2343 /// Returns true if the dependency information has been calculated. 
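/// (Right after init()/clearDependencies() the counter is still InvalidDeps,
/// so this returns false until calculateDependencies() has run.)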
2344 bool hasValidDependencies() const { return Dependencies != InvalidDeps; } 2345 2346 /// Returns true for single instructions and for bundle representatives 2347 /// (= the head of a bundle). 2348 bool isSchedulingEntity() const { return FirstInBundle == this; } 2349 2350 /// Returns true if it represents an instruction bundle and not only a 2351 /// single instruction. 2352 bool isPartOfBundle() const { 2353 return NextInBundle != nullptr || FirstInBundle != this; 2354 } 2355 2356 /// Returns true if it is ready for scheduling, i.e. it has no more 2357 /// unscheduled depending instructions/bundles. 2358 bool isReady() const { 2359 assert(isSchedulingEntity() && 2360 "can't consider non-scheduling entity for ready list"); 2361 return UnscheduledDepsInBundle == 0 && !IsScheduled; 2362 } 2363 2364 /// Modifies the number of unscheduled dependencies, also updating it for 2365 /// the whole bundle. 2366 int incrementUnscheduledDeps(int Incr) { 2367 UnscheduledDeps += Incr; 2368 return FirstInBundle->UnscheduledDepsInBundle += Incr; 2369 } 2370 2371 /// Sets the number of unscheduled dependencies to the number of 2372 /// dependencies. 2373 void resetUnscheduledDeps() { 2374 incrementUnscheduledDeps(Dependencies - UnscheduledDeps); 2375 } 2376 2377 /// Clears all dependency information. 2378 void clearDependencies() { 2379 Dependencies = InvalidDeps; 2380 resetUnscheduledDeps(); 2381 MemoryDependencies.clear(); 2382 } 2383 2384 void dump(raw_ostream &os) const { 2385 if (!isSchedulingEntity()) { 2386 os << "/ " << *Inst; 2387 } else if (NextInBundle) { 2388 os << '[' << *Inst; 2389 ScheduleData *SD = NextInBundle; 2390 while (SD) { 2391 os << ';' << *SD->Inst; 2392 SD = SD->NextInBundle; 2393 } 2394 os << ']'; 2395 } else { 2396 os << *Inst; 2397 } 2398 } 2399 2400 Instruction *Inst = nullptr; 2401 2402 /// Points to the head in an instruction bundle (and always to this for 2403 /// single instructions). 2404 ScheduleData *FirstInBundle = nullptr; 2405 2406 /// Single linked list of all instructions in a bundle. Null if it is a 2407 /// single instruction. 2408 ScheduleData *NextInBundle = nullptr; 2409 2410 /// Single linked list of all memory instructions (e.g. load, store, call) 2411 /// in the block - until the end of the scheduling region. 2412 ScheduleData *NextLoadStore = nullptr; 2413 2414 /// The dependent memory instructions. 2415 /// This list is derived on demand in calculateDependencies(). 2416 SmallVector<ScheduleData *, 4> MemoryDependencies; 2417 2418 /// This ScheduleData is in the current scheduling region if this matches 2419 /// the current SchedulingRegionID of BlockScheduling. 2420 int SchedulingRegionID = 0; 2421 2422 /// Used for getting a "good" final ordering of instructions. 2423 int SchedulingPriority = 0; 2424 2425 /// The number of dependencies. Constitutes of the number of users of the 2426 /// instruction plus the number of dependent memory instructions (if any). 2427 /// This value is calculated on demand. 2428 /// If InvalidDeps, the number of dependencies is not calculated yet. 2429 int Dependencies = InvalidDeps; 2430 2431 /// The number of dependencies minus the number of dependencies of scheduled 2432 /// instructions. As soon as this is zero, the instruction/bundle gets ready 2433 /// for scheduling. 2434 /// Note that this is negative as long as Dependencies is not calculated. 2435 int UnscheduledDeps = InvalidDeps; 2436 2437 /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for 2438 /// single instructions. 
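/// A hypothetical example: a two-instruction bundle whose members have 3 and
/// 2 unscheduled dependencies carries UnscheduledDepsInBundle = 5 on the
/// bundle head; every incrementUnscheduledDeps(-1) on a member also
/// decrements this sum, and once it reaches 0 isReady() reports the whole
/// bundle as schedulable.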
2439 int UnscheduledDepsInBundle = InvalidDeps; 2440 2441 /// True if this instruction is scheduled (or considered as scheduled in the 2442 /// dry-run). 2443 bool IsScheduled = false; 2444 2445 /// Opcode of the current instruction in the schedule data. 2446 Value *OpValue = nullptr; 2447 2448 /// The TreeEntry that this instruction corresponds to. 2449 TreeEntry *TE = nullptr; 2450 2451 /// The lane of this node in the TreeEntry. 2452 int Lane = -1; 2453 }; 2454 2455 #ifndef NDEBUG 2456 friend inline raw_ostream &operator<<(raw_ostream &os, 2457 const BoUpSLP::ScheduleData &SD) { 2458 SD.dump(os); 2459 return os; 2460 } 2461 #endif 2462 2463 friend struct GraphTraits<BoUpSLP *>; 2464 friend struct DOTGraphTraits<BoUpSLP *>; 2465 2466 /// Contains all scheduling data for a basic block. 2467 struct BlockScheduling { 2468 BlockScheduling(BasicBlock *BB) 2469 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {} 2470 2471 void clear() { 2472 ReadyInsts.clear(); 2473 ScheduleStart = nullptr; 2474 ScheduleEnd = nullptr; 2475 FirstLoadStoreInRegion = nullptr; 2476 LastLoadStoreInRegion = nullptr; 2477 2478 // Reduce the maximum schedule region size by the size of the 2479 // previous scheduling run. 2480 ScheduleRegionSizeLimit -= ScheduleRegionSize; 2481 if (ScheduleRegionSizeLimit < MinScheduleRegionSize) 2482 ScheduleRegionSizeLimit = MinScheduleRegionSize; 2483 ScheduleRegionSize = 0; 2484 2485 // Make a new scheduling region, i.e. all existing ScheduleData is not 2486 // in the new region yet. 2487 ++SchedulingRegionID; 2488 } 2489 2490 ScheduleData *getScheduleData(Value *V) { 2491 ScheduleData *SD = ScheduleDataMap[V]; 2492 if (SD && SD->SchedulingRegionID == SchedulingRegionID) 2493 return SD; 2494 return nullptr; 2495 } 2496 2497 ScheduleData *getScheduleData(Value *V, Value *Key) { 2498 if (V == Key) 2499 return getScheduleData(V); 2500 auto I = ExtraScheduleDataMap.find(V); 2501 if (I != ExtraScheduleDataMap.end()) { 2502 ScheduleData *SD = I->second[Key]; 2503 if (SD && SD->SchedulingRegionID == SchedulingRegionID) 2504 return SD; 2505 } 2506 return nullptr; 2507 } 2508 2509 bool isInSchedulingRegion(ScheduleData *SD) const { 2510 return SD->SchedulingRegionID == SchedulingRegionID; 2511 } 2512 2513 /// Marks an instruction as scheduled and puts all dependent ready 2514 /// instructions into the ready-list. 2515 template <typename ReadyListType> 2516 void schedule(ScheduleData *SD, ReadyListType &ReadyList) { 2517 SD->IsScheduled = true; 2518 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); 2519 2520 ScheduleData *BundleMember = SD; 2521 while (BundleMember) { 2522 if (BundleMember->Inst != BundleMember->OpValue) { 2523 BundleMember = BundleMember->NextInBundle; 2524 continue; 2525 } 2526 // Handle the def-use chain dependencies. 2527 2528 // Decrement the unscheduled counter and insert to ready list if ready. 2529 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) { 2530 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) { 2531 if (OpDef && OpDef->hasValidDependencies() && 2532 OpDef->incrementUnscheduledDeps(-1) == 0) { 2533 // There are no more unscheduled dependencies after 2534 // decrementing, so we can put the dependent instruction 2535 // into the ready list. 
2536 ScheduleData *DepBundle = OpDef->FirstInBundle; 2537 assert(!DepBundle->IsScheduled && 2538 "already scheduled bundle gets ready"); 2539 ReadyList.insert(DepBundle); 2540 LLVM_DEBUG(dbgs() 2541 << "SLP: gets ready (def): " << *DepBundle << "\n"); 2542 } 2543 }); 2544 }; 2545 2546 // If BundleMember is a vector bundle, its operands may have been 2547 // reordered duiring buildTree(). We therefore need to get its operands 2548 // through the TreeEntry. 2549 if (TreeEntry *TE = BundleMember->TE) { 2550 int Lane = BundleMember->Lane; 2551 assert(Lane >= 0 && "Lane not set"); 2552 2553 // Since vectorization tree is being built recursively this assertion 2554 // ensures that the tree entry has all operands set before reaching 2555 // this code. Couple of exceptions known at the moment are extracts 2556 // where their second (immediate) operand is not added. Since 2557 // immediates do not affect scheduler behavior this is considered 2558 // okay. 2559 auto *In = TE->getMainOp(); 2560 assert(In && 2561 (isa<ExtractValueInst>(In) || isa<ExtractElementInst>(In) || 2562 In->getNumOperands() == TE->getNumOperands()) && 2563 "Missed TreeEntry operands?"); 2564 (void)In; // fake use to avoid build failure when assertions disabled 2565 2566 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands(); 2567 OpIdx != NumOperands; ++OpIdx) 2568 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane])) 2569 DecrUnsched(I); 2570 } else { 2571 // If BundleMember is a stand-alone instruction, no operand reordering 2572 // has taken place, so we directly access its operands. 2573 for (Use &U : BundleMember->Inst->operands()) 2574 if (auto *I = dyn_cast<Instruction>(U.get())) 2575 DecrUnsched(I); 2576 } 2577 // Handle the memory dependencies. 2578 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) { 2579 if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) { 2580 // There are no more unscheduled dependencies after decrementing, 2581 // so we can put the dependent instruction into the ready list. 2582 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle; 2583 assert(!DepBundle->IsScheduled && 2584 "already scheduled bundle gets ready"); 2585 ReadyList.insert(DepBundle); 2586 LLVM_DEBUG(dbgs() 2587 << "SLP: gets ready (mem): " << *DepBundle << "\n"); 2588 } 2589 } 2590 BundleMember = BundleMember->NextInBundle; 2591 } 2592 } 2593 2594 void doForAllOpcodes(Value *V, 2595 function_ref<void(ScheduleData *SD)> Action) { 2596 if (ScheduleData *SD = getScheduleData(V)) 2597 Action(SD); 2598 auto I = ExtraScheduleDataMap.find(V); 2599 if (I != ExtraScheduleDataMap.end()) 2600 for (auto &P : I->second) 2601 if (P.second->SchedulingRegionID == SchedulingRegionID) 2602 Action(P.second); 2603 } 2604 2605 /// Put all instructions into the ReadyList which are ready for scheduling. 2606 template <typename ReadyListType> 2607 void initialFillReadyList(ReadyListType &ReadyList) { 2608 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 2609 doForAllOpcodes(I, [&](ScheduleData *SD) { 2610 if (SD->isSchedulingEntity() && SD->isReady()) { 2611 ReadyList.insert(SD); 2612 LLVM_DEBUG(dbgs() 2613 << "SLP: initially in ready list: " << *I << "\n"); 2614 } 2615 }); 2616 } 2617 } 2618 2619 /// Checks if a bundle of instructions can be scheduled, i.e. has no 2620 /// cyclic dependencies. This is only a dry-run, no instructions are 2621 /// actually moved at this stage. 2622 /// \returns the scheduling bundle. The returned Optional value is non-None 2623 /// if \p VL is allowed to be scheduled. 
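/// A sketched (not verbatim) use from the tree builder:
///   Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S);
///   if (!Bundle) {
///     // Cyclic dependency or scheduling-region limit: gather instead.
///     newTreeEntry(VL, None, S, UserTreeIdx);
///     return;
///   }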
2624 Optional<ScheduleData *> 2625 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 2626 const InstructionsState &S); 2627 2628 /// Un-bundles a group of instructions. 2629 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue); 2630 2631 /// Allocates schedule data chunk. 2632 ScheduleData *allocateScheduleDataChunks(); 2633 2634 /// Extends the scheduling region so that V is inside the region. 2635 /// \returns true if the region size is within the limit. 2636 bool extendSchedulingRegion(Value *V, const InstructionsState &S); 2637 2638 /// Initialize the ScheduleData structures for new instructions in the 2639 /// scheduling region. 2640 void initScheduleData(Instruction *FromI, Instruction *ToI, 2641 ScheduleData *PrevLoadStore, 2642 ScheduleData *NextLoadStore); 2643 2644 /// Updates the dependency information of a bundle and of all instructions/ 2645 /// bundles which depend on the original bundle. 2646 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList, 2647 BoUpSLP *SLP); 2648 2649 /// Sets all instruction in the scheduling region to un-scheduled. 2650 void resetSchedule(); 2651 2652 BasicBlock *BB; 2653 2654 /// Simple memory allocation for ScheduleData. 2655 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks; 2656 2657 /// The size of a ScheduleData array in ScheduleDataChunks. 2658 int ChunkSize; 2659 2660 /// The allocator position in the current chunk, which is the last entry 2661 /// of ScheduleDataChunks. 2662 int ChunkPos; 2663 2664 /// Attaches ScheduleData to Instruction. 2665 /// Note that the mapping survives during all vectorization iterations, i.e. 2666 /// ScheduleData structures are recycled. 2667 DenseMap<Value *, ScheduleData *> ScheduleDataMap; 2668 2669 /// Attaches ScheduleData to Instruction with the leading key. 2670 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>> 2671 ExtraScheduleDataMap; 2672 2673 struct ReadyList : SmallVector<ScheduleData *, 8> { 2674 void insert(ScheduleData *SD) { push_back(SD); } 2675 }; 2676 2677 /// The ready-list for scheduling (only used for the dry-run). 2678 ReadyList ReadyInsts; 2679 2680 /// The first instruction of the scheduling region. 2681 Instruction *ScheduleStart = nullptr; 2682 2683 /// The first instruction _after_ the scheduling region. 2684 Instruction *ScheduleEnd = nullptr; 2685 2686 /// The first memory accessing instruction in the scheduling region 2687 /// (can be null). 2688 ScheduleData *FirstLoadStoreInRegion = nullptr; 2689 2690 /// The last memory accessing instruction in the scheduling region 2691 /// (can be null). 2692 ScheduleData *LastLoadStoreInRegion = nullptr; 2693 2694 /// The current size of the scheduling region. 2695 int ScheduleRegionSize = 0; 2696 2697 /// The maximum size allowed for the scheduling region. 2698 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget; 2699 2700 /// The ID of the scheduling region. For a new vectorization iteration this 2701 /// is incremented which "removes" all ScheduleData from the region. 2702 // Make sure that the initial SchedulingRegionID is greater than the 2703 // initial SchedulingRegionID in ScheduleData (which is 0). 2704 int SchedulingRegionID = 1; 2705 }; 2706 2707 /// Attaches the BlockScheduling structures to basic blocks. 2708 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules; 2709 2710 /// Performs the "real" scheduling. Done before vectorization is actually 2711 /// performed in a basic block. 
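/// (Expected flow, roughly: resetSchedule() clears the IsScheduled flags,
/// initialFillReadyList() seeds the ready list, and then ready bundles are
/// repeatedly popped by priority, their instructions moved into place, and
/// schedule() is called so that newly ready dependents get queued.)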
2712 void scheduleBlock(BlockScheduling *BS); 2713 2714 /// List of users to ignore during scheduling and that don't need extracting. 2715 ArrayRef<Value *> UserIgnoreList; 2716 2717 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of 2718 /// sorted SmallVectors of unsigned. 2719 struct OrdersTypeDenseMapInfo { 2720 static OrdersType getEmptyKey() { 2721 OrdersType V; 2722 V.push_back(~1U); 2723 return V; 2724 } 2725 2726 static OrdersType getTombstoneKey() { 2727 OrdersType V; 2728 V.push_back(~2U); 2729 return V; 2730 } 2731 2732 static unsigned getHashValue(const OrdersType &V) { 2733 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); 2734 } 2735 2736 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) { 2737 return LHS == RHS; 2738 } 2739 }; 2740 2741 // Analysis and block reference. 2742 Function *F; 2743 ScalarEvolution *SE; 2744 TargetTransformInfo *TTI; 2745 TargetLibraryInfo *TLI; 2746 AAResults *AA; 2747 LoopInfo *LI; 2748 DominatorTree *DT; 2749 AssumptionCache *AC; 2750 DemandedBits *DB; 2751 const DataLayout *DL; 2752 OptimizationRemarkEmitter *ORE; 2753 2754 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. 2755 unsigned MinVecRegSize; // Set by cl::opt (default: 128). 2756 2757 /// Instruction builder to construct the vectorized tree. 2758 IRBuilder<> Builder; 2759 2760 /// A map of scalar integer values to the smallest bit width with which they 2761 /// can legally be represented. The values map to (width, signed) pairs, 2762 /// where "width" indicates the minimum bit width and "signed" is True if the 2763 /// value must be signed-extended, rather than zero-extended, back to its 2764 /// original width. 2765 MapVector<Value *, std::pair<uint64_t, bool>> MinBWs; 2766 }; 2767 2768 } // end namespace slpvectorizer 2769 2770 template <> struct GraphTraits<BoUpSLP *> { 2771 using TreeEntry = BoUpSLP::TreeEntry; 2772 2773 /// NodeRef has to be a pointer per the GraphWriter. 2774 using NodeRef = TreeEntry *; 2775 2776 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy; 2777 2778 /// Add the VectorizableTree to the index iterator to be able to return 2779 /// TreeEntry pointers. 2780 struct ChildIteratorType 2781 : public iterator_adaptor_base< 2782 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> { 2783 ContainerTy &VectorizableTree; 2784 2785 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W, 2786 ContainerTy &VT) 2787 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {} 2788 2789 NodeRef operator*() { return I->UserTE; } 2790 }; 2791 2792 static NodeRef getEntryNode(BoUpSLP &R) { 2793 return R.VectorizableTree[0].get(); 2794 } 2795 2796 static ChildIteratorType child_begin(NodeRef N) { 2797 return {N->UserTreeIndices.begin(), N->Container}; 2798 } 2799 2800 static ChildIteratorType child_end(NodeRef N) { 2801 return {N->UserTreeIndices.end(), N->Container}; 2802 } 2803 2804 /// For the node iterator we just need to turn the TreeEntry iterator into a 2805 /// TreeEntry* iterator so that it dereferences to NodeRef. 
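/// (With these traits, the generic graph utilities apply to the whole tree,
/// e.g. a hypothetical llvm::ViewGraph(&R, "slp-tree") render or iteration
/// via nodes_begin()/nodes_end(); DOTGraphTraits below provides the labels.)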
2806 class nodes_iterator { 2807 using ItTy = ContainerTy::iterator; 2808 ItTy It; 2809 2810 public: 2811 nodes_iterator(const ItTy &It2) : It(It2) {} 2812 NodeRef operator*() { return It->get(); } 2813 nodes_iterator operator++() { 2814 ++It; 2815 return *this; 2816 } 2817 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; } 2818 }; 2819 2820 static nodes_iterator nodes_begin(BoUpSLP *R) { 2821 return nodes_iterator(R->VectorizableTree.begin()); 2822 } 2823 2824 static nodes_iterator nodes_end(BoUpSLP *R) { 2825 return nodes_iterator(R->VectorizableTree.end()); 2826 } 2827 2828 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); } 2829 }; 2830 2831 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits { 2832 using TreeEntry = BoUpSLP::TreeEntry; 2833 2834 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {} 2835 2836 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) { 2837 std::string Str; 2838 raw_string_ostream OS(Str); 2839 if (isSplat(Entry->Scalars)) 2840 OS << "<splat> "; 2841 for (auto V : Entry->Scalars) { 2842 OS << *V; 2843 if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) { 2844 return EU.Scalar == V; 2845 })) 2846 OS << " <extract>"; 2847 OS << "\n"; 2848 } 2849 return Str; 2850 } 2851 2852 static std::string getNodeAttributes(const TreeEntry *Entry, 2853 const BoUpSLP *) { 2854 if (Entry->State == TreeEntry::NeedToGather) 2855 return "color=red"; 2856 return ""; 2857 } 2858 }; 2859 2860 } // end namespace llvm 2861 2862 BoUpSLP::~BoUpSLP() { 2863 for (const auto &Pair : DeletedInstructions) { 2864 // Replace operands of ignored instructions with Undefs in case if they were 2865 // marked for deletion. 2866 if (Pair.getSecond()) { 2867 Value *Undef = UndefValue::get(Pair.getFirst()->getType()); 2868 Pair.getFirst()->replaceAllUsesWith(Undef); 2869 } 2870 Pair.getFirst()->dropAllReferences(); 2871 } 2872 for (const auto &Pair : DeletedInstructions) { 2873 assert(Pair.getFirst()->use_empty() && 2874 "trying to erase instruction with users."); 2875 Pair.getFirst()->eraseFromParent(); 2876 } 2877 #ifdef EXPENSIVE_CHECKS 2878 // If we could guarantee that this call is not extremely slow, we could 2879 // remove the ifdef limitation (see PR47712). 2880 assert(!verifyFunction(*F, &dbgs())); 2881 #endif 2882 } 2883 2884 void BoUpSLP::eraseInstructions(ArrayRef<Value *> AV) { 2885 for (auto *V : AV) { 2886 if (auto *I = dyn_cast<Instruction>(V)) 2887 eraseInstruction(I, /*ReplaceOpsWithUndef=*/true); 2888 }; 2889 } 2890 2891 /// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses 2892 /// contains original mask for the scalars reused in the node. Procedure 2893 /// transform this mask in accordance with the given \p Mask. 2894 static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) { 2895 assert(!Mask.empty() && Reuses.size() == Mask.size() && 2896 "Expected non-empty mask."); 2897 SmallVector<int> Prev(Reuses.begin(), Reuses.end()); 2898 Prev.swap(Reuses); 2899 for (unsigned I = 0, E = Prev.size(); I < E; ++I) 2900 if (Mask[I] != UndefMaskElem) 2901 Reuses[Mask[I]] = Prev[I]; 2902 } 2903 2904 /// Reorders the given \p Order according to the given \p Mask. \p Order - is 2905 /// the original order of the scalars. Procedure transforms the provided order 2906 /// in accordance with the given \p Mask. If the resulting \p Order is just an 2907 /// identity order, \p Order is cleared. 
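/// A worked sketch: Order = {2, 0, 1} with Mask = {1, 2, 0} (the inverse
/// permutation) composes to the identity, so Order is cleared; with
/// Mask = {1, 0, 2} the intermediate MaskOrder becomes {2, 1, 0} and the
/// resulting Order is {2, 1, 0}.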
2908 static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) { 2909 assert(!Mask.empty() && "Expected non-empty mask."); 2910 SmallVector<int> MaskOrder; 2911 if (Order.empty()) { 2912 MaskOrder.resize(Mask.size()); 2913 std::iota(MaskOrder.begin(), MaskOrder.end(), 0); 2914 } else { 2915 inversePermutation(Order, MaskOrder); 2916 } 2917 reorderReuses(MaskOrder, Mask); 2918 if (ShuffleVectorInst::isIdentityMask(MaskOrder)) { 2919 Order.clear(); 2920 return; 2921 } 2922 Order.assign(Mask.size(), Mask.size()); 2923 for (unsigned I = 0, E = Mask.size(); I < E; ++I) 2924 if (MaskOrder[I] != UndefMaskElem) 2925 Order[MaskOrder[I]] = I; 2926 fixupOrderingIndices(Order); 2927 } 2928 2929 Optional<BoUpSLP::OrdersType> 2930 BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) { 2931 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only."); 2932 unsigned NumScalars = TE.Scalars.size(); 2933 OrdersType CurrentOrder(NumScalars, NumScalars); 2934 SmallVector<int> Positions; 2935 SmallBitVector UsedPositions(NumScalars); 2936 const TreeEntry *STE = nullptr; 2937 // Try to find all gathered scalars that are gets vectorized in other 2938 // vectorize node. Here we can have only one single tree vector node to 2939 // correctly identify order of the gathered scalars. 2940 for (unsigned I = 0; I < NumScalars; ++I) { 2941 Value *V = TE.Scalars[I]; 2942 if (!isa<LoadInst, ExtractElementInst, ExtractValueInst>(V)) 2943 continue; 2944 if (const auto *LocalSTE = getTreeEntry(V)) { 2945 if (!STE) 2946 STE = LocalSTE; 2947 else if (STE != LocalSTE) 2948 // Take the order only from the single vector node. 2949 return None; 2950 unsigned Lane = 2951 std::distance(STE->Scalars.begin(), find(STE->Scalars, V)); 2952 if (Lane >= NumScalars) 2953 return None; 2954 if (CurrentOrder[Lane] != NumScalars) { 2955 if (Lane != I) 2956 continue; 2957 UsedPositions.reset(CurrentOrder[Lane]); 2958 } 2959 // The partial identity (where only some elements of the gather node are 2960 // in the identity order) is good. 2961 CurrentOrder[Lane] = I; 2962 UsedPositions.set(I); 2963 } 2964 } 2965 // Need to keep the order if we have a vector entry and at least 2 scalars or 2966 // the vectorized entry has just 2 scalars. 2967 if (STE && (UsedPositions.count() > 1 || STE->Scalars.size() == 2)) { 2968 auto &&IsIdentityOrder = [NumScalars](ArrayRef<unsigned> CurrentOrder) { 2969 for (unsigned I = 0; I < NumScalars; ++I) 2970 if (CurrentOrder[I] != I && CurrentOrder[I] != NumScalars) 2971 return false; 2972 return true; 2973 }; 2974 if (IsIdentityOrder(CurrentOrder)) { 2975 CurrentOrder.clear(); 2976 return CurrentOrder; 2977 } 2978 auto *It = CurrentOrder.begin(); 2979 for (unsigned I = 0; I < NumScalars;) { 2980 if (UsedPositions.test(I)) { 2981 ++I; 2982 continue; 2983 } 2984 if (*It == NumScalars) { 2985 *It = I; 2986 ++I; 2987 } 2988 ++It; 2989 } 2990 return CurrentOrder; 2991 } 2992 return None; 2993 } 2994 2995 Optional<BoUpSLP::OrdersType> BoUpSLP::getReorderingData(const TreeEntry &TE, 2996 bool TopToBottom) { 2997 // No need to reorder if need to shuffle reuses, still need to shuffle the 2998 // node. 
2999 if (!TE.ReuseShuffleIndices.empty()) 3000 return None; 3001 if (TE.State == TreeEntry::Vectorize && 3002 (isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE.getMainOp()) || 3003 (TopToBottom && isa<StoreInst, InsertElementInst>(TE.getMainOp()))) && 3004 !TE.isAltShuffle()) 3005 return TE.ReorderIndices; 3006 if (TE.State == TreeEntry::NeedToGather) { 3007 // TODO: add analysis of other gather nodes with extractelement 3008 // instructions and other values/instructions, not only undefs. 3009 if (((TE.getOpcode() == Instruction::ExtractElement && 3010 !TE.isAltShuffle()) || 3011 (all_of(TE.Scalars, 3012 [](Value *V) { 3013 return isa<UndefValue, ExtractElementInst>(V); 3014 }) && 3015 any_of(TE.Scalars, 3016 [](Value *V) { return isa<ExtractElementInst>(V); }))) && 3017 all_of(TE.Scalars, 3018 [](Value *V) { 3019 auto *EE = dyn_cast<ExtractElementInst>(V); 3020 return !EE || isa<FixedVectorType>(EE->getVectorOperandType()); 3021 }) && 3022 allSameType(TE.Scalars)) { 3023 // Check that gather of extractelements can be represented as 3024 // just a shuffle of a single vector. 3025 OrdersType CurrentOrder; 3026 bool Reuse = canReuseExtract(TE.Scalars, TE.getMainOp(), CurrentOrder); 3027 if (Reuse || !CurrentOrder.empty()) { 3028 if (!CurrentOrder.empty()) 3029 fixupOrderingIndices(CurrentOrder); 3030 return CurrentOrder; 3031 } 3032 } 3033 if (Optional<OrdersType> CurrentOrder = findReusedOrderedScalars(TE)) 3034 return CurrentOrder; 3035 } 3036 return None; 3037 } 3038 3039 void BoUpSLP::reorderTopToBottom() { 3040 // Maps VF to the graph nodes. 3041 DenseMap<unsigned, SetVector<TreeEntry *>> VFToOrderedEntries; 3042 // ExtractElement gather nodes which can be vectorized and need to handle 3043 // their ordering. 3044 DenseMap<const TreeEntry *, OrdersType> GathersToOrders; 3045 // Find all reorderable nodes with the given VF. 3046 // Currently the are vectorized stores,loads,extracts + some gathering of 3047 // extracts. 3048 for_each(VectorizableTree, [this, &VFToOrderedEntries, &GathersToOrders]( 3049 const std::unique_ptr<TreeEntry> &TE) { 3050 if (Optional<OrdersType> CurrentOrder = 3051 getReorderingData(*TE.get(), /*TopToBottom=*/true)) { 3052 VFToOrderedEntries[TE->Scalars.size()].insert(TE.get()); 3053 if (TE->State != TreeEntry::Vectorize) 3054 GathersToOrders.try_emplace(TE.get(), *CurrentOrder); 3055 } 3056 }); 3057 3058 // Reorder the graph nodes according to their vectorization factor. 3059 for (unsigned VF = VectorizableTree.front()->Scalars.size(); VF > 1; 3060 VF /= 2) { 3061 auto It = VFToOrderedEntries.find(VF); 3062 if (It == VFToOrderedEntries.end()) 3063 continue; 3064 // Try to find the most profitable order. We just are looking for the most 3065 // used order and reorder scalar elements in the nodes according to this 3066 // mostly used order. 3067 ArrayRef<TreeEntry *> OrderedEntries = It->second.getArrayRef(); 3068 // All operands are reordered and used only in this node - propagate the 3069 // most used order to the user node. 3070 MapVector<OrdersType, unsigned, 3071 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>> 3072 OrdersUses; 3073 SmallPtrSet<const TreeEntry *, 4> VisitedOps; 3074 for (const TreeEntry *OpTE : OrderedEntries) { 3075 // No need to reorder this nodes, still need to extend and to use shuffle, 3076 // just need to merge reordering shuffle and the reuse shuffle. 3077 if (!OpTE->ReuseShuffleIndices.empty()) 3078 continue; 3079 // Count number of orders uses. 
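// Roughly: each operand entry votes for its preferred order below; e.g. if
// three entries of this VF come with order {1, 0, 3, 2} and one with
// {2, 3, 0, 1}, the first order wins and is applied to all nodes of the VF.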
3080 const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & { 3081 if (OpTE->State == TreeEntry::NeedToGather) 3082 return GathersToOrders.find(OpTE)->second; 3083 return OpTE->ReorderIndices; 3084 }(); 3085 // Stores actually store the mask, not the order, need to invert. 3086 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && 3087 OpTE->getOpcode() == Instruction::Store && !Order.empty()) { 3088 SmallVector<int> Mask; 3089 inversePermutation(Order, Mask); 3090 unsigned E = Order.size(); 3091 OrdersType CurrentOrder(E, E); 3092 transform(Mask, CurrentOrder.begin(), [E](int Idx) { 3093 return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx); 3094 }); 3095 fixupOrderingIndices(CurrentOrder); 3096 ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second; 3097 } else { 3098 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second; 3099 } 3100 } 3101 // Set order of the user node. 3102 if (OrdersUses.empty()) 3103 continue; 3104 // Choose the most used order. 3105 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 3106 unsigned Cnt = OrdersUses.front().second; 3107 for (const auto &Pair : drop_begin(OrdersUses)) { 3108 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 3109 BestOrder = Pair.first; 3110 Cnt = Pair.second; 3111 } 3112 } 3113 // Set order of the user node. 3114 if (BestOrder.empty()) 3115 continue; 3116 SmallVector<int> Mask; 3117 inversePermutation(BestOrder, Mask); 3118 SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem); 3119 unsigned E = BestOrder.size(); 3120 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 3121 return I < E ? static_cast<int>(I) : UndefMaskElem; 3122 }); 3123 // Do an actual reordering, if profitable. 3124 for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 3125 // Just do the reordering for the nodes with the given VF. 3126 if (TE->Scalars.size() != VF) { 3127 if (TE->ReuseShuffleIndices.size() == VF) { 3128 // Need to reorder the reuses masks of the operands with smaller VF to 3129 // be able to find the match between the graph nodes and scalar 3130 // operands of the given node during vectorization/cost estimation. 3131 assert(all_of(TE->UserTreeIndices, 3132 [VF, &TE](const EdgeInfo &EI) { 3133 return EI.UserTE->Scalars.size() == VF || 3134 EI.UserTE->Scalars.size() == 3135 TE->Scalars.size(); 3136 }) && 3137 "All users must be of VF size."); 3138 // Update ordering of the operands with the smaller VF than the given 3139 // one. 3140 reorderReuses(TE->ReuseShuffleIndices, Mask); 3141 } 3142 continue; 3143 } 3144 if (TE->State == TreeEntry::Vectorize && 3145 isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst, 3146 InsertElementInst>(TE->getMainOp()) && 3147 !TE->isAltShuffle()) { 3148 // Build correct orders for extract{element,value}, loads and 3149 // stores. 3150 reorderOrder(TE->ReorderIndices, Mask); 3151 if (isa<InsertElementInst, StoreInst>(TE->getMainOp())) 3152 TE->reorderOperands(Mask); 3153 } else { 3154 // Reorder the node and its operands. 3155 TE->reorderOperands(Mask); 3156 assert(TE->ReorderIndices.empty() && 3157 "Expected empty reorder sequence."); 3158 reorderScalars(TE->Scalars, Mask); 3159 } 3160 if (!TE->ReuseShuffleIndices.empty()) { 3161 // Apply reversed order to keep the original ordering of the reused 3162 // elements to avoid extra reorder indices shuffling. 
3163 OrdersType CurrentOrder; 3164 reorderOrder(CurrentOrder, MaskOrder); 3165 SmallVector<int> NewReuses; 3166 inversePermutation(CurrentOrder, NewReuses); 3167 addMask(NewReuses, TE->ReuseShuffleIndices); 3168 TE->ReuseShuffleIndices.swap(NewReuses); 3169 } 3170 } 3171 } 3172 } 3173 3174 void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) { 3175 SetVector<TreeEntry *> OrderedEntries; 3176 DenseMap<const TreeEntry *, OrdersType> GathersToOrders; 3177 // Find all reorderable leaf nodes with the given VF. 3178 // Currently the are vectorized loads,extracts without alternate operands + 3179 // some gathering of extracts. 3180 SmallVector<TreeEntry *> NonVectorized; 3181 for_each(VectorizableTree, [this, &OrderedEntries, &GathersToOrders, 3182 &NonVectorized]( 3183 const std::unique_ptr<TreeEntry> &TE) { 3184 if (TE->State != TreeEntry::Vectorize) 3185 NonVectorized.push_back(TE.get()); 3186 if (Optional<OrdersType> CurrentOrder = 3187 getReorderingData(*TE.get(), /*TopToBottom=*/false)) { 3188 OrderedEntries.insert(TE.get()); 3189 if (TE->State != TreeEntry::Vectorize) 3190 GathersToOrders.try_emplace(TE.get(), *CurrentOrder); 3191 } 3192 }); 3193 3194 // Checks if the operands of the users are reordarable and have only single 3195 // use. 3196 auto &&CheckOperands = 3197 [this, &NonVectorized](const auto &Data, 3198 SmallVectorImpl<TreeEntry *> &GatherOps) { 3199 for (unsigned I = 0, E = Data.first->getNumOperands(); I < E; ++I) { 3200 if (any_of(Data.second, 3201 [I](const std::pair<unsigned, TreeEntry *> &OpData) { 3202 return OpData.first == I && 3203 OpData.second->State == TreeEntry::Vectorize; 3204 })) 3205 continue; 3206 ArrayRef<Value *> VL = Data.first->getOperand(I); 3207 const TreeEntry *TE = nullptr; 3208 const auto *It = find_if(VL, [this, &TE](Value *V) { 3209 TE = getTreeEntry(V); 3210 return TE; 3211 }); 3212 if (It != VL.end() && TE->isSame(VL)) 3213 return false; 3214 TreeEntry *Gather = nullptr; 3215 if (count_if(NonVectorized, [VL, &Gather](TreeEntry *TE) { 3216 assert(TE->State != TreeEntry::Vectorize && 3217 "Only non-vectorized nodes are expected."); 3218 if (TE->isSame(VL)) { 3219 Gather = TE; 3220 return true; 3221 } 3222 return false; 3223 }) > 1) 3224 return false; 3225 if (Gather) 3226 GatherOps.push_back(Gather); 3227 } 3228 return true; 3229 }; 3230 // 1. Propagate order to the graph nodes, which use only reordered nodes. 3231 // I.e., if the node has operands, that are reordered, try to make at least 3232 // one operand order in the natural order and reorder others + reorder the 3233 // user node itself. 3234 SmallPtrSet<const TreeEntry *, 4> Visited; 3235 while (!OrderedEntries.empty()) { 3236 // 1. Filter out only reordered nodes. 3237 // 2. If the entry has multiple uses - skip it and jump to the next node. 3238 MapVector<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users; 3239 SmallVector<TreeEntry *> Filtered; 3240 for (TreeEntry *TE : OrderedEntries) { 3241 if (!(TE->State == TreeEntry::Vectorize || 3242 (TE->State == TreeEntry::NeedToGather && 3243 GathersToOrders.count(TE))) || 3244 TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() || 3245 !all_of(drop_begin(TE->UserTreeIndices), 3246 [TE](const EdgeInfo &EI) { 3247 return EI.UserTE == TE->UserTreeIndices.front().UserTE; 3248 }) || 3249 !Visited.insert(TE).second) { 3250 Filtered.push_back(TE); 3251 continue; 3252 } 3253 // Build a map between user nodes and their operands order to speedup 3254 // search. The graph currently does not provide this dependency directly. 
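// The map built below has the shape: user entry -> list of
// (operand index, operand entry) pairs, so all operands of one user node
// can be processed together.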
3255 for (EdgeInfo &EI : TE->UserTreeIndices) { 3256 TreeEntry *UserTE = EI.UserTE; 3257 auto It = Users.find(UserTE); 3258 if (It == Users.end()) 3259 It = Users.insert({UserTE, {}}).first; 3260 It->second.emplace_back(EI.EdgeIdx, TE); 3261 } 3262 } 3263 // Erase filtered entries. 3264 for_each(Filtered, 3265 [&OrderedEntries](TreeEntry *TE) { OrderedEntries.remove(TE); }); 3266 for (const auto &Data : Users) { 3267 // Check that operands are used only in the User node. 3268 SmallVector<TreeEntry *> GatherOps; 3269 if (!CheckOperands(Data, GatherOps)) { 3270 for_each(Data.second, 3271 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { 3272 OrderedEntries.remove(Op.second); 3273 }); 3274 continue; 3275 } 3276 // All operands are reordered and used only in this node - propagate the 3277 // most used order to the user node. 3278 MapVector<OrdersType, unsigned, 3279 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>> 3280 OrdersUses; 3281 SmallPtrSet<const TreeEntry *, 4> VisitedOps; 3282 for (const auto &Op : Data.second) { 3283 TreeEntry *OpTE = Op.second; 3284 if (!OpTE->ReuseShuffleIndices.empty() || 3285 (IgnoreReorder && OpTE == VectorizableTree.front().get())) 3286 continue; 3287 const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & { 3288 if (OpTE->State == TreeEntry::NeedToGather) 3289 return GathersToOrders.find(OpTE)->second; 3290 return OpTE->ReorderIndices; 3291 }(); 3292 // Stores actually store the mask, not the order, need to invert. 3293 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && 3294 OpTE->getOpcode() == Instruction::Store && !Order.empty()) { 3295 SmallVector<int> Mask; 3296 inversePermutation(Order, Mask); 3297 unsigned E = Order.size(); 3298 OrdersType CurrentOrder(E, E); 3299 transform(Mask, CurrentOrder.begin(), [E](int Idx) { 3300 return Idx == UndefMaskElem ? E : static_cast<unsigned>(Idx); 3301 }); 3302 fixupOrderingIndices(CurrentOrder); 3303 ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second; 3304 } else { 3305 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second; 3306 } 3307 if (VisitedOps.insert(OpTE).second) 3308 OrdersUses.insert(std::make_pair(OrdersType(), 0)).first->second += 3309 OpTE->UserTreeIndices.size(); 3310 assert(OrdersUses[{}] > 0 && "Counter cannot be less than 0."); 3311 --OrdersUses[{}]; 3312 } 3313 // If no orders - skip current nodes and jump to the next one, if any. 3314 if (OrdersUses.empty()) { 3315 for_each(Data.second, 3316 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { 3317 OrderedEntries.remove(Op.second); 3318 }); 3319 continue; 3320 } 3321 // Choose the best order. 3322 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 3323 unsigned Cnt = OrdersUses.front().second; 3324 for (const auto &Pair : drop_begin(OrdersUses)) { 3325 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 3326 BestOrder = Pair.first; 3327 Cnt = Pair.second; 3328 } 3329 } 3330 // Set order of the user node (reordering of operands and user nodes). 3331 if (BestOrder.empty()) { 3332 for_each(Data.second, 3333 [&OrderedEntries](const std::pair<unsigned, TreeEntry *> &Op) { 3334 OrderedEntries.remove(Op.second); 3335 }); 3336 continue; 3337 } 3338 // Erase operands from OrderedEntries list and adjust their orders. 
3339 VisitedOps.clear(); 3340 SmallVector<int> Mask; 3341 inversePermutation(BestOrder, Mask); 3342 SmallVector<int> MaskOrder(BestOrder.size(), UndefMaskElem); 3343 unsigned E = BestOrder.size(); 3344 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 3345 return I < E ? static_cast<int>(I) : UndefMaskElem; 3346 }); 3347 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) { 3348 TreeEntry *TE = Op.second; 3349 OrderedEntries.remove(TE); 3350 if (!VisitedOps.insert(TE).second) 3351 continue; 3352 if (!TE->ReuseShuffleIndices.empty() && TE->ReorderIndices.empty()) { 3353 // Just reorder reuses indices. 3354 reorderReuses(TE->ReuseShuffleIndices, Mask); 3355 continue; 3356 } 3357 // Gathers are processed separately. 3358 if (TE->State != TreeEntry::Vectorize) 3359 continue; 3360 assert((BestOrder.size() == TE->ReorderIndices.size() || 3361 TE->ReorderIndices.empty()) && 3362 "Non-matching sizes of user/operand entries."); 3363 reorderOrder(TE->ReorderIndices, Mask); 3364 } 3365 // For gathers just need to reorder its scalars. 3366 for (TreeEntry *Gather : GatherOps) { 3367 assert(Gather->ReorderIndices.empty() && 3368 "Unexpected reordering of gathers."); 3369 if (!Gather->ReuseShuffleIndices.empty()) { 3370 // Just reorder reuses indices. 3371 reorderReuses(Gather->ReuseShuffleIndices, Mask); 3372 continue; 3373 } 3374 reorderScalars(Gather->Scalars, Mask); 3375 OrderedEntries.remove(Gather); 3376 } 3377 // Reorder operands of the user node and set the ordering for the user 3378 // node itself. 3379 if (Data.first->State != TreeEntry::Vectorize || 3380 !isa<ExtractElementInst, ExtractValueInst, LoadInst>( 3381 Data.first->getMainOp()) || 3382 Data.first->isAltShuffle()) 3383 Data.first->reorderOperands(Mask); 3384 if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) || 3385 Data.first->isAltShuffle()) { 3386 reorderScalars(Data.first->Scalars, Mask); 3387 reorderOrder(Data.first->ReorderIndices, MaskOrder); 3388 if (Data.first->ReuseShuffleIndices.empty() && 3389 !Data.first->ReorderIndices.empty() && 3390 !Data.first->isAltShuffle()) { 3391 // Insert user node to the list to try to sink reordering deeper in 3392 // the graph. 3393 OrderedEntries.insert(Data.first); 3394 } 3395 } else { 3396 reorderOrder(Data.first->ReorderIndices, Mask); 3397 } 3398 } 3399 } 3400 // If the reordering is unnecessary, just remove the reorder. 3401 if (IgnoreReorder && !VectorizableTree.front()->ReorderIndices.empty() && 3402 VectorizableTree.front()->ReuseShuffleIndices.empty()) 3403 VectorizableTree.front()->ReorderIndices.clear(); 3404 } 3405 3406 void BoUpSLP::buildExternalUses( 3407 const ExtraValueToDebugLocsMap &ExternallyUsedValues) { 3408 // Collect the values that we need to extract from the tree. 3409 for (auto &TEPtr : VectorizableTree) { 3410 TreeEntry *Entry = TEPtr.get(); 3411 3412 // No need to handle users of gathered values. 3413 if (Entry->State == TreeEntry::NeedToGather) 3414 continue; 3415 3416 // For each lane: 3417 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 3418 Value *Scalar = Entry->Scalars[Lane]; 3419 int FoundLane = Entry->findLaneForValue(Scalar); 3420 3421 // Check if the scalar is externally used as an extra arg. 
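// (Extra args are values the caller of buildTree() asked to keep
// extractable, e.g. the extra operands of a horizontal reduction; they have
// no User instruction to record, hence the nullptr below.)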
3422 auto ExtI = ExternallyUsedValues.find(Scalar); 3423 if (ExtI != ExternallyUsedValues.end()) { 3424 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " 3425 << Lane << " from " << *Scalar << ".\n"); 3426 ExternalUses.emplace_back(Scalar, nullptr, FoundLane); 3427 } 3428 for (User *U : Scalar->users()) { 3429 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); 3430 3431 Instruction *UserInst = dyn_cast<Instruction>(U); 3432 if (!UserInst) 3433 continue; 3434 3435 if (isDeleted(UserInst)) 3436 continue; 3437 3438 // Skip in-tree scalars that become vectors 3439 if (TreeEntry *UseEntry = getTreeEntry(U)) { 3440 Value *UseScalar = UseEntry->Scalars[0]; 3441 // Some in-tree scalars will remain as scalar in vectorized 3442 // instructions. If that is the case, the one in Lane 0 will 3443 // be used. 3444 if (UseScalar != U || 3445 UseEntry->State == TreeEntry::ScatterVectorize || 3446 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) { 3447 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U 3448 << ".\n"); 3449 assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state"); 3450 continue; 3451 } 3452 } 3453 3454 // Ignore users in the user ignore list. 3455 if (is_contained(UserIgnoreList, UserInst)) 3456 continue; 3457 3458 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " 3459 << Lane << " from " << *Scalar << ".\n"); 3460 ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane)); 3461 } 3462 } 3463 } 3464 } 3465 3466 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 3467 ArrayRef<Value *> UserIgnoreLst) { 3468 deleteTree(); 3469 UserIgnoreList = UserIgnoreLst; 3470 if (!allSameType(Roots)) 3471 return; 3472 buildTree_rec(Roots, 0, EdgeInfo()); 3473 } 3474 3475 namespace { 3476 /// Tracks the state we can represent the loads in the given sequence. 3477 enum class LoadsState { Gather, Vectorize, ScatterVectorize }; 3478 } // anonymous namespace 3479 3480 /// Checks if the given array of loads can be represented as a vectorized, 3481 /// scatter or just simple gather. 3482 static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0, 3483 const TargetTransformInfo &TTI, 3484 const DataLayout &DL, ScalarEvolution &SE, 3485 SmallVectorImpl<unsigned> &Order, 3486 SmallVectorImpl<Value *> &PointerOps) { 3487 // Check that a vectorized load would load the same memory as a scalar 3488 // load. For example, we don't want to vectorize loads that are smaller 3489 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 3490 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 3491 // from such a struct, we read/write packed bits disagreeing with the 3492 // unvectorized version. 3493 Type *ScalarTy = VL0->getType(); 3494 3495 if (DL.getTypeSizeInBits(ScalarTy) != DL.getTypeAllocSizeInBits(ScalarTy)) 3496 return LoadsState::Gather; 3497 3498 // Make sure all loads in the bundle are simple - we can't vectorize 3499 // atomic or volatile loads. 3500 PointerOps.clear(); 3501 PointerOps.resize(VL.size()); 3502 auto *POIter = PointerOps.begin(); 3503 for (Value *V : VL) { 3504 auto *L = cast<LoadInst>(V); 3505 if (!L->isSimple()) 3506 return LoadsState::Gather; 3507 *POIter = L->getPointerOperand(); 3508 ++POIter; 3509 } 3510 3511 Order.clear(); 3512 // Check the order of pointer operands. 
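// Roughly: loads from p+2, p+0, p+3, p+1 are sorted to Order = {1, 3, 0, 2};
// the distance between the first and last sorted pointer is then 3, i.e.
// VL.size() - 1, so the bundle is treated as a jumbled but consecutive load.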
3513 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, DL, SE, Order)) { 3514 Value *Ptr0; 3515 Value *PtrN; 3516 if (Order.empty()) { 3517 Ptr0 = PointerOps.front(); 3518 PtrN = PointerOps.back(); 3519 } else { 3520 Ptr0 = PointerOps[Order.front()]; 3521 PtrN = PointerOps[Order.back()]; 3522 } 3523 Optional<int> Diff = 3524 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE); 3525 // Check that the sorted loads are consecutive. 3526 if (static_cast<unsigned>(*Diff) == VL.size() - 1) 3527 return LoadsState::Vectorize; 3528 Align CommonAlignment = cast<LoadInst>(VL0)->getAlign(); 3529 for (Value *V : VL) 3530 CommonAlignment = 3531 commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign()); 3532 if (TTI.isLegalMaskedGather(FixedVectorType::get(ScalarTy, VL.size()), 3533 CommonAlignment)) 3534 return LoadsState::ScatterVectorize; 3535 } 3536 3537 return LoadsState::Gather; 3538 } 3539 3540 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, 3541 const EdgeInfo &UserTreeIdx) { 3542 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!"); 3543 3544 SmallVector<int> ReuseShuffleIndicies; 3545 SmallVector<Value *> UniqueValues; 3546 auto &&TryToFindDuplicates = [&VL, &ReuseShuffleIndicies, &UniqueValues, 3547 &UserTreeIdx, 3548 this](const InstructionsState &S) { 3549 // Check that every instruction appears once in this bundle. 3550 DenseMap<Value *, unsigned> UniquePositions; 3551 for (Value *V : VL) { 3552 if (isConstant(V)) { 3553 ReuseShuffleIndicies.emplace_back( 3554 isa<UndefValue>(V) ? UndefMaskElem : UniqueValues.size()); 3555 UniqueValues.emplace_back(V); 3556 continue; 3557 } 3558 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 3559 ReuseShuffleIndicies.emplace_back(Res.first->second); 3560 if (Res.second) 3561 UniqueValues.emplace_back(V); 3562 } 3563 size_t NumUniqueScalarValues = UniqueValues.size(); 3564 if (NumUniqueScalarValues == VL.size()) { 3565 ReuseShuffleIndicies.clear(); 3566 } else { 3567 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); 3568 if (NumUniqueScalarValues <= 1 || 3569 (UniquePositions.size() == 1 && all_of(UniqueValues, 3570 [](Value *V) { 3571 return isa<UndefValue>(V) || 3572 !isConstant(V); 3573 })) || 3574 !llvm::isPowerOf2_32(NumUniqueScalarValues)) { 3575 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 3576 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3577 return false; 3578 } 3579 VL = UniqueValues; 3580 } 3581 return true; 3582 }; 3583 3584 InstructionsState S = getSameOpcode(VL); 3585 if (Depth == RecursionMaxDepth) { 3586 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); 3587 if (TryToFindDuplicates(S)) 3588 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3589 ReuseShuffleIndicies); 3590 return; 3591 } 3592 3593 // Don't handle scalable vectors 3594 if (S.getOpcode() == Instruction::ExtractElement && 3595 isa<ScalableVectorType>( 3596 cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) { 3597 LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n"); 3598 if (TryToFindDuplicates(S)) 3599 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3600 ReuseShuffleIndicies); 3601 return; 3602 } 3603 3604 // Don't handle vectors. 
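// (Vector-typed scalars are gathered; the one exception is insertelement,
// which builds a vector out of scalars and is handled as its own bundle
// kind below.)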
3605 if (S.OpValue->getType()->isVectorTy() && 3606 !isa<InsertElementInst>(S.OpValue)) { 3607 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); 3608 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3609 return; 3610 } 3611 3612 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 3613 if (SI->getValueOperand()->getType()->isVectorTy()) { 3614 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); 3615 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3616 return; 3617 } 3618 3619 // If all of the operands are identical or constant we have a simple solution. 3620 // If we deal with insert/extract instructions, they all must have constant 3621 // indices, otherwise we should gather them, not try to vectorize. 3622 if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode() || 3623 (isa<InsertElementInst, ExtractValueInst, ExtractElementInst>(S.MainOp) && 3624 !all_of(VL, isVectorLikeInstWithConstOps))) { 3625 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n"); 3626 if (TryToFindDuplicates(S)) 3627 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3628 ReuseShuffleIndicies); 3629 return; 3630 } 3631 3632 // We now know that this is a vector of instructions of the same type from 3633 // the same block. 3634 3635 // Don't vectorize ephemeral values. 3636 for (Value *V : VL) { 3637 if (EphValues.count(V)) { 3638 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 3639 << ") is ephemeral.\n"); 3640 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3641 return; 3642 } 3643 } 3644 3645 // Check if this is a duplicate of another entry. 3646 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 3647 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); 3648 if (!E->isSame(VL)) { 3649 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); 3650 if (TryToFindDuplicates(S)) 3651 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3652 ReuseShuffleIndicies); 3653 return; 3654 } 3655 // Record the reuse of the tree node. FIXME, currently this is only used to 3656 // properly draw the graph rather than for the actual vectorization. 3657 E->UserTreeIndices.push_back(UserTreeIdx); 3658 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue 3659 << ".\n"); 3660 return; 3661 } 3662 3663 // Check that none of the instructions in the bundle are already in the tree. 3664 for (Value *V : VL) { 3665 auto *I = dyn_cast<Instruction>(V); 3666 if (!I) 3667 continue; 3668 if (getTreeEntry(I)) { 3669 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 3670 << ") is already in tree.\n"); 3671 if (TryToFindDuplicates(S)) 3672 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3673 ReuseShuffleIndicies); 3674 return; 3675 } 3676 } 3677 3678 // The reduction nodes (stored in UserIgnoreList) also should stay scalar. 3679 for (Value *V : VL) { 3680 if (is_contained(UserIgnoreList, V)) { 3681 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); 3682 if (TryToFindDuplicates(S)) 3683 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3684 ReuseShuffleIndicies); 3685 return; 3686 } 3687 } 3688 3689 // Check that all of the users of the scalars that we want to vectorize are 3690 // schedulable. 3691 auto *VL0 = cast<Instruction>(S.OpValue); 3692 BasicBlock *BB = VL0->getParent(); 3693 3694 if (!DT->isReachableFromEntry(BB)) { 3695 // Don't go into unreachable blocks. They may contain instructions with 3696 // dependency cycles which confuse the final scheduling. 
3697 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); 3698 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3699 return; 3700 } 3701 3702 // Check that every instruction appears once in this bundle. 3703 if (!TryToFindDuplicates(S)) 3704 return; 3705 3706 auto &BSRef = BlocksSchedules[BB]; 3707 if (!BSRef) 3708 BSRef = std::make_unique<BlockScheduling>(BB); 3709 3710 BlockScheduling &BS = *BSRef.get(); 3711 3712 Optional<ScheduleData *> Bundle = BS.tryScheduleBundle(VL, this, S); 3713 if (!Bundle) { 3714 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); 3715 assert((!BS.getScheduleData(VL0) || 3716 !BS.getScheduleData(VL0)->isPartOfBundle()) && 3717 "tryScheduleBundle should cancelScheduling on failure"); 3718 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3719 ReuseShuffleIndicies); 3720 return; 3721 } 3722 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 3723 3724 unsigned ShuffleOrOp = S.isAltShuffle() ? 3725 (unsigned) Instruction::ShuffleVector : S.getOpcode(); 3726 switch (ShuffleOrOp) { 3727 case Instruction::PHI: { 3728 auto *PH = cast<PHINode>(VL0); 3729 3730 // Check for terminator values (e.g. invoke). 3731 for (Value *V : VL) 3732 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { 3733 Instruction *Term = dyn_cast<Instruction>( 3734 cast<PHINode>(V)->getIncomingValueForBlock( 3735 PH->getIncomingBlock(I))); 3736 if (Term && Term->isTerminator()) { 3737 LLVM_DEBUG(dbgs() 3738 << "SLP: Need to swizzle PHINodes (terminator use).\n"); 3739 BS.cancelScheduling(VL, VL0); 3740 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3741 ReuseShuffleIndicies); 3742 return; 3743 } 3744 } 3745 3746 TreeEntry *TE = 3747 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies); 3748 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 3749 3750 // Keeps the reordered operands to avoid code duplication. 3751 SmallVector<ValueList, 2> OperandsVec; 3752 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { 3753 if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) { 3754 ValueList Operands(VL.size(), PoisonValue::get(PH->getType())); 3755 TE->setOperand(I, Operands); 3756 OperandsVec.push_back(Operands); 3757 continue; 3758 } 3759 ValueList Operands; 3760 // Prepare the operand vector. 3761 for (Value *V : VL) 3762 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock( 3763 PH->getIncomingBlock(I))); 3764 TE->setOperand(I, Operands); 3765 OperandsVec.push_back(Operands); 3766 } 3767 for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx) 3768 buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx}); 3769 return; 3770 } 3771 case Instruction::ExtractValue: 3772 case Instruction::ExtractElement: { 3773 OrdersType CurrentOrder; 3774 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder); 3775 if (Reuse) { 3776 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n"); 3777 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3778 ReuseShuffleIndicies); 3779 // This is a special case, as it does not gather, but at the same time 3780 // we are not extending buildTree_rec() towards the operands. 
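// Instead, the single source vector is recorded as operand 0 of the new
// entry so that the cost model and codegen can still find it.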
3781 ValueList Op0; 3782 Op0.assign(VL.size(), VL0->getOperand(0)); 3783 VectorizableTree.back()->setOperand(0, Op0); 3784 return; 3785 } 3786 if (!CurrentOrder.empty()) { 3787 LLVM_DEBUG({ 3788 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence " 3789 "with order"; 3790 for (unsigned Idx : CurrentOrder) 3791 dbgs() << " " << Idx; 3792 dbgs() << "\n"; 3793 }); 3794 fixupOrderingIndices(CurrentOrder); 3795 // Insert new order with initial value 0, if it does not exist, 3796 // otherwise return the iterator to the existing one. 3797 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3798 ReuseShuffleIndicies, CurrentOrder); 3799 // This is a special case, as it does not gather, but at the same time 3800 // we are not extending buildTree_rec() towards the operands. 3801 ValueList Op0; 3802 Op0.assign(VL.size(), VL0->getOperand(0)); 3803 VectorizableTree.back()->setOperand(0, Op0); 3804 return; 3805 } 3806 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n"); 3807 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3808 ReuseShuffleIndicies); 3809 BS.cancelScheduling(VL, VL0); 3810 return; 3811 } 3812 case Instruction::InsertElement: { 3813 assert(ReuseShuffleIndicies.empty() && "All inserts should be unique"); 3814 3815 // Check that we have a buildvector and not a shuffle of 2 or more 3816 // different vectors. 3817 ValueSet SourceVectors; 3818 int MinIdx = std::numeric_limits<int>::max(); 3819 for (Value *V : VL) { 3820 SourceVectors.insert(cast<Instruction>(V)->getOperand(0)); 3821 Optional<int> Idx = *getInsertIndex(V, 0); 3822 if (!Idx || *Idx == UndefMaskElem) 3823 continue; 3824 MinIdx = std::min(MinIdx, *Idx); 3825 } 3826 3827 if (count_if(VL, [&SourceVectors](Value *V) { 3828 return !SourceVectors.contains(V); 3829 }) >= 2) { 3830 // Found 2nd source vector - cancel. 3831 LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with " 3832 "different source vectors.\n"); 3833 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx); 3834 BS.cancelScheduling(VL, VL0); 3835 return; 3836 } 3837 3838 auto OrdCompare = [](const std::pair<int, int> &P1, 3839 const std::pair<int, int> &P2) { 3840 return P1.first > P2.first; 3841 }; 3842 PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>, 3843 decltype(OrdCompare)> 3844 Indices(OrdCompare); 3845 for (int I = 0, E = VL.size(); I < E; ++I) { 3846 Optional<int> Idx = *getInsertIndex(VL[I], 0); 3847 if (!Idx || *Idx == UndefMaskElem) 3848 continue; 3849 Indices.emplace(*Idx, I); 3850 } 3851 OrdersType CurrentOrder(VL.size(), VL.size()); 3852 bool IsIdentity = true; 3853 for (int I = 0, E = VL.size(); I < E; ++I) { 3854 CurrentOrder[Indices.top().second] = I; 3855 IsIdentity &= Indices.top().second == I; 3856 Indices.pop(); 3857 } 3858 if (IsIdentity) 3859 CurrentOrder.clear(); 3860 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3861 None, CurrentOrder); 3862 LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n"); 3863 3864 constexpr int NumOps = 2; 3865 ValueList VectorOperands[NumOps]; 3866 for (int I = 0; I < NumOps; ++I) { 3867 for (Value *V : VL) 3868 VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I)); 3869 3870 TE->setOperand(I, VectorOperands[I]); 3871 } 3872 buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1}); 3873 return; 3874 } 3875 case Instruction::Load: { 3876 // Check that a vectorized load would load the same memory as a scalar 3877 // load. For example, we don't want to vectorize loads that are smaller 3878 // than 8-bit. 
Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 3879 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 3880 // from such a struct, we read/write packed bits disagreeing with the 3881 // unvectorized version. 3882 SmallVector<Value *> PointerOps; 3883 OrdersType CurrentOrder; 3884 TreeEntry *TE = nullptr; 3885 switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, CurrentOrder, 3886 PointerOps)) { 3887 case LoadsState::Vectorize: 3888 if (CurrentOrder.empty()) { 3889 // Original loads are consecutive and does not require reordering. 3890 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3891 ReuseShuffleIndicies); 3892 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n"); 3893 } else { 3894 fixupOrderingIndices(CurrentOrder); 3895 // Need to reorder. 3896 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3897 ReuseShuffleIndicies, CurrentOrder); 3898 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n"); 3899 } 3900 TE->setOperandsInOrder(); 3901 break; 3902 case LoadsState::ScatterVectorize: 3903 // Vectorizing non-consecutive loads with `llvm.masked.gather`. 3904 TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S, 3905 UserTreeIdx, ReuseShuffleIndicies); 3906 TE->setOperandsInOrder(); 3907 buildTree_rec(PointerOps, Depth + 1, {TE, 0}); 3908 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); 3909 break; 3910 case LoadsState::Gather: 3911 BS.cancelScheduling(VL, VL0); 3912 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3913 ReuseShuffleIndicies); 3914 #ifndef NDEBUG 3915 Type *ScalarTy = VL0->getType(); 3916 if (DL->getTypeSizeInBits(ScalarTy) != 3917 DL->getTypeAllocSizeInBits(ScalarTy)) 3918 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); 3919 else if (any_of(VL, [](Value *V) { 3920 return !cast<LoadInst>(V)->isSimple(); 3921 })) 3922 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); 3923 else 3924 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); 3925 #endif // NDEBUG 3926 break; 3927 } 3928 return; 3929 } 3930 case Instruction::ZExt: 3931 case Instruction::SExt: 3932 case Instruction::FPToUI: 3933 case Instruction::FPToSI: 3934 case Instruction::FPExt: 3935 case Instruction::PtrToInt: 3936 case Instruction::IntToPtr: 3937 case Instruction::SIToFP: 3938 case Instruction::UIToFP: 3939 case Instruction::Trunc: 3940 case Instruction::FPTrunc: 3941 case Instruction::BitCast: { 3942 Type *SrcTy = VL0->getOperand(0)->getType(); 3943 for (Value *V : VL) { 3944 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType(); 3945 if (Ty != SrcTy || !isValidElementType(Ty)) { 3946 BS.cancelScheduling(VL, VL0); 3947 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3948 ReuseShuffleIndicies); 3949 LLVM_DEBUG(dbgs() 3950 << "SLP: Gathering casts with different src types.\n"); 3951 return; 3952 } 3953 } 3954 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3955 ReuseShuffleIndicies); 3956 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); 3957 3958 TE->setOperandsInOrder(); 3959 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 3960 ValueList Operands; 3961 // Prepare the operand vector. 3962 for (Value *V : VL) 3963 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 3964 3965 buildTree_rec(Operands, Depth + 1, {TE, i}); 3966 } 3967 return; 3968 } 3969 case Instruction::ICmp: 3970 case Instruction::FCmp: { 3971 // Check that all of the compares have the same predicate. 
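// A compare using the swapped predicate is also accepted, e.g. both
//   %c0 = icmp sgt i32 %a, %b
//   %c1 = icmp slt i32 %d, %c
// can go into one bundle; the operands of the swapped one are commuted
// below.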
3972 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 3973 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0); 3974 Type *ComparedTy = VL0->getOperand(0)->getType(); 3975 for (Value *V : VL) { 3976 CmpInst *Cmp = cast<CmpInst>(V); 3977 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) || 3978 Cmp->getOperand(0)->getType() != ComparedTy) { 3979 BS.cancelScheduling(VL, VL0); 3980 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 3981 ReuseShuffleIndicies); 3982 LLVM_DEBUG(dbgs() 3983 << "SLP: Gathering cmp with different predicate.\n"); 3984 return; 3985 } 3986 } 3987 3988 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 3989 ReuseShuffleIndicies); 3990 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 3991 3992 ValueList Left, Right; 3993 if (cast<CmpInst>(VL0)->isCommutative()) { 3994 // Commutative predicate - collect + sort operands of the instructions 3995 // so that each side is more likely to have the same opcode. 3996 assert(P0 == SwapP0 && "Commutative Predicate mismatch"); 3997 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 3998 } else { 3999 // Collect operands - commute if it uses the swapped predicate. 4000 for (Value *V : VL) { 4001 auto *Cmp = cast<CmpInst>(V); 4002 Value *LHS = Cmp->getOperand(0); 4003 Value *RHS = Cmp->getOperand(1); 4004 if (Cmp->getPredicate() != P0) 4005 std::swap(LHS, RHS); 4006 Left.push_back(LHS); 4007 Right.push_back(RHS); 4008 } 4009 } 4010 TE->setOperand(0, Left); 4011 TE->setOperand(1, Right); 4012 buildTree_rec(Left, Depth + 1, {TE, 0}); 4013 buildTree_rec(Right, Depth + 1, {TE, 1}); 4014 return; 4015 } 4016 case Instruction::Select: 4017 case Instruction::FNeg: 4018 case Instruction::Add: 4019 case Instruction::FAdd: 4020 case Instruction::Sub: 4021 case Instruction::FSub: 4022 case Instruction::Mul: 4023 case Instruction::FMul: 4024 case Instruction::UDiv: 4025 case Instruction::SDiv: 4026 case Instruction::FDiv: 4027 case Instruction::URem: 4028 case Instruction::SRem: 4029 case Instruction::FRem: 4030 case Instruction::Shl: 4031 case Instruction::LShr: 4032 case Instruction::AShr: 4033 case Instruction::And: 4034 case Instruction::Or: 4035 case Instruction::Xor: { 4036 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 4037 ReuseShuffleIndicies); 4038 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n"); 4039 4040 // Sort operands of the instructions so that each side is more likely to 4041 // have the same opcode. 4042 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 4043 ValueList Left, Right; 4044 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 4045 TE->setOperand(0, Left); 4046 TE->setOperand(1, Right); 4047 buildTree_rec(Left, Depth + 1, {TE, 0}); 4048 buildTree_rec(Right, Depth + 1, {TE, 1}); 4049 return; 4050 } 4051 4052 TE->setOperandsInOrder(); 4053 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 4054 ValueList Operands; 4055 // Prepare the operand vector. 4056 for (Value *V : VL) 4057 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 4058 4059 buildTree_rec(Operands, Depth + 1, {TE, i}); 4060 } 4061 return; 4062 } 4063 case Instruction::GetElementPtr: { 4064 // We don't combine GEPs with complicated (nested) indexing. 
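// Only single-index GEPs are handled, e.g.
//   %g = getelementptr inbounds i32, i32* %base, i64 7
// anything with more than one index operand is rejected just below.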
4065 for (Value *V : VL) { 4066 if (cast<Instruction>(V)->getNumOperands() != 2) { 4067 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 4068 BS.cancelScheduling(VL, VL0); 4069 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4070 ReuseShuffleIndicies); 4071 return; 4072 } 4073 } 4074 4075 // We can't combine several GEPs into one vector if they operate on 4076 // different types. 4077 Type *Ty0 = VL0->getOperand(0)->getType(); 4078 for (Value *V : VL) { 4079 Type *CurTy = cast<Instruction>(V)->getOperand(0)->getType(); 4080 if (Ty0 != CurTy) { 4081 LLVM_DEBUG(dbgs() 4082 << "SLP: not-vectorizable GEP (different types).\n"); 4083 BS.cancelScheduling(VL, VL0); 4084 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4085 ReuseShuffleIndicies); 4086 return; 4087 } 4088 } 4089 4090 // We don't combine GEPs with non-constant indexes. 4091 Type *Ty1 = VL0->getOperand(1)->getType(); 4092 for (Value *V : VL) { 4093 auto Op = cast<Instruction>(V)->getOperand(1); 4094 if (!isa<ConstantInt>(Op) || 4095 (Op->getType() != Ty1 && 4096 Op->getType()->getScalarSizeInBits() > 4097 DL->getIndexSizeInBits( 4098 V->getType()->getPointerAddressSpace()))) { 4099 LLVM_DEBUG(dbgs() 4100 << "SLP: not-vectorizable GEP (non-constant indexes).\n"); 4101 BS.cancelScheduling(VL, VL0); 4102 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4103 ReuseShuffleIndicies); 4104 return; 4105 } 4106 } 4107 4108 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 4109 ReuseShuffleIndicies); 4110 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n"); 4111 SmallVector<ValueList, 2> Operands(2); 4112 // Prepare the operand vector for pointer operands. 4113 for (Value *V : VL) 4114 Operands.front().push_back( 4115 cast<GetElementPtrInst>(V)->getPointerOperand()); 4116 TE->setOperand(0, Operands.front()); 4117 // Need to cast all indices to the same type before vectorization to 4118 // avoid crash. 4119 // Required to be able to find correct matches between different gather 4120 // nodes and reuse the vectorized values rather than trying to gather them 4121 // again. 4122 int IndexIdx = 1; 4123 Type *VL0Ty = VL0->getOperand(IndexIdx)->getType(); 4124 Type *Ty = all_of(VL, 4125 [VL0Ty, IndexIdx](Value *V) { 4126 return VL0Ty == cast<GetElementPtrInst>(V) 4127 ->getOperand(IndexIdx) 4128 ->getType(); 4129 }) 4130 ? VL0Ty 4131 : DL->getIndexType(cast<GetElementPtrInst>(VL0) 4132 ->getPointerOperandType() 4133 ->getScalarType()); 4134 // Prepare the operand vector. 4135 for (Value *V : VL) { 4136 auto *Op = cast<Instruction>(V)->getOperand(IndexIdx); 4137 auto *CI = cast<ConstantInt>(Op); 4138 Operands.back().push_back(ConstantExpr::getIntegerCast( 4139 CI, Ty, CI->getValue().isSignBitSet())); 4140 } 4141 TE->setOperand(IndexIdx, Operands.back()); 4142 4143 for (unsigned I = 0, Ops = Operands.size(); I < Ops; ++I) 4144 buildTree_rec(Operands[I], Depth + 1, {TE, I}); 4145 return; 4146 } 4147 case Instruction::Store: { 4148 // Check if the stores are consecutive or if we need to swizzle them. 4149 llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType(); 4150 // Avoid types that are padded when being allocated as scalars, while 4151 // being packed together in a vector (such as i1). 
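// (An i1 store, for instance, occupies a whole byte in scalar form but only
// a single bit inside an <N x i1> vector, so the sizes compared below
// disagree and the bundle is gathered.)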
4152 if (DL->getTypeSizeInBits(ScalarTy) != 4153 DL->getTypeAllocSizeInBits(ScalarTy)) { 4154 BS.cancelScheduling(VL, VL0); 4155 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4156 ReuseShuffleIndicies); 4157 LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n"); 4158 return; 4159 } 4160 // Make sure all stores in the bundle are simple - we can't vectorize 4161 // atomic or volatile stores. 4162 SmallVector<Value *, 4> PointerOps(VL.size()); 4163 ValueList Operands(VL.size()); 4164 auto POIter = PointerOps.begin(); 4165 auto OIter = Operands.begin(); 4166 for (Value *V : VL) { 4167 auto *SI = cast<StoreInst>(V); 4168 if (!SI->isSimple()) { 4169 BS.cancelScheduling(VL, VL0); 4170 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4171 ReuseShuffleIndicies); 4172 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n"); 4173 return; 4174 } 4175 *POIter = SI->getPointerOperand(); 4176 *OIter = SI->getValueOperand(); 4177 ++POIter; 4178 ++OIter; 4179 } 4180 4181 OrdersType CurrentOrder; 4182 // Check the order of pointer operands. 4183 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) { 4184 Value *Ptr0; 4185 Value *PtrN; 4186 if (CurrentOrder.empty()) { 4187 Ptr0 = PointerOps.front(); 4188 PtrN = PointerOps.back(); 4189 } else { 4190 Ptr0 = PointerOps[CurrentOrder.front()]; 4191 PtrN = PointerOps[CurrentOrder.back()]; 4192 } 4193 Optional<int> Dist = 4194 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE); 4195 // Check that the sorted pointer operands are consecutive. 4196 if (static_cast<unsigned>(*Dist) == VL.size() - 1) { 4197 if (CurrentOrder.empty()) { 4198 // Original stores are consecutive and does not require reordering. 4199 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, 4200 UserTreeIdx, ReuseShuffleIndicies); 4201 TE->setOperandsInOrder(); 4202 buildTree_rec(Operands, Depth + 1, {TE, 0}); 4203 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n"); 4204 } else { 4205 fixupOrderingIndices(CurrentOrder); 4206 TreeEntry *TE = 4207 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 4208 ReuseShuffleIndicies, CurrentOrder); 4209 TE->setOperandsInOrder(); 4210 buildTree_rec(Operands, Depth + 1, {TE, 0}); 4211 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n"); 4212 } 4213 return; 4214 } 4215 } 4216 4217 BS.cancelScheduling(VL, VL0); 4218 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4219 ReuseShuffleIndicies); 4220 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); 4221 return; 4222 } 4223 case Instruction::Call: { 4224 // Check if the calls are all to the same vectorizable intrinsic or 4225 // library function. 
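// For instance, four calls to llvm.fabs.f32 may become a single call to
// llvm.fabs.v4f32, provided every call agrees on the callee, the operand
// bundles and any operands that must stay scalar.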
4226 CallInst *CI = cast<CallInst>(VL0); 4227 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4228 4229 VFShape Shape = VFShape::get( 4230 *CI, ElementCount::getFixed(static_cast<unsigned int>(VL.size())), 4231 false /*HasGlobalPred*/); 4232 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 4233 4234 if (!VecFunc && !isTriviallyVectorizable(ID)) { 4235 BS.cancelScheduling(VL, VL0); 4236 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4237 ReuseShuffleIndicies); 4238 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n"); 4239 return; 4240 } 4241 Function *F = CI->getCalledFunction(); 4242 unsigned NumArgs = CI->arg_size(); 4243 SmallVector<Value*, 4> ScalarArgs(NumArgs, nullptr); 4244 for (unsigned j = 0; j != NumArgs; ++j) 4245 if (hasVectorInstrinsicScalarOpd(ID, j)) 4246 ScalarArgs[j] = CI->getArgOperand(j); 4247 for (Value *V : VL) { 4248 CallInst *CI2 = dyn_cast<CallInst>(V); 4249 if (!CI2 || CI2->getCalledFunction() != F || 4250 getVectorIntrinsicIDForCall(CI2, TLI) != ID || 4251 (VecFunc && 4252 VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) || 4253 !CI->hasIdenticalOperandBundleSchema(*CI2)) { 4254 BS.cancelScheduling(VL, VL0); 4255 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4256 ReuseShuffleIndicies); 4257 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V 4258 << "\n"); 4259 return; 4260 } 4261 // Some intrinsics have scalar arguments and should be same in order for 4262 // them to be vectorized. 4263 for (unsigned j = 0; j != NumArgs; ++j) { 4264 if (hasVectorInstrinsicScalarOpd(ID, j)) { 4265 Value *A1J = CI2->getArgOperand(j); 4266 if (ScalarArgs[j] != A1J) { 4267 BS.cancelScheduling(VL, VL0); 4268 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4269 ReuseShuffleIndicies); 4270 LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI 4271 << " argument " << ScalarArgs[j] << "!=" << A1J 4272 << "\n"); 4273 return; 4274 } 4275 } 4276 } 4277 // Verify that the bundle operands are identical between the two calls. 4278 if (CI->hasOperandBundles() && 4279 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(), 4280 CI->op_begin() + CI->getBundleOperandsEndIndex(), 4281 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) { 4282 BS.cancelScheduling(VL, VL0); 4283 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4284 ReuseShuffleIndicies); 4285 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" 4286 << *CI << "!=" << *V << '\n'); 4287 return; 4288 } 4289 } 4290 4291 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 4292 ReuseShuffleIndicies); 4293 TE->setOperandsInOrder(); 4294 for (unsigned i = 0, e = CI->arg_size(); i != e; ++i) { 4295 // For scalar operands no need to to create an entry since no need to 4296 // vectorize it. 4297 if (hasVectorInstrinsicScalarOpd(ID, i)) 4298 continue; 4299 ValueList Operands; 4300 // Prepare the operand vector. 4301 for (Value *V : VL) { 4302 auto *CI2 = cast<CallInst>(V); 4303 Operands.push_back(CI2->getArgOperand(i)); 4304 } 4305 buildTree_rec(Operands, Depth + 1, {TE, i}); 4306 } 4307 return; 4308 } 4309 case Instruction::ShuffleVector: { 4310 // If this is not an alternate sequence of opcode like add-sub 4311 // then do not vectorize this instruction. 
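// An alternate sequence is something like {a0+b0, a1-b1, a2+b2, a3-b3},
// which is emitted as a vector add, a vector sub and one blending
// shufflevector of the two results.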
4312 if (!S.isAltShuffle()) { 4313 BS.cancelScheduling(VL, VL0); 4314 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4315 ReuseShuffleIndicies); 4316 LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n"); 4317 return; 4318 } 4319 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 4320 ReuseShuffleIndicies); 4321 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n"); 4322 4323 // Reorder operands if reordering would enable vectorization. 4324 if (isa<BinaryOperator>(VL0)) { 4325 ValueList Left, Right; 4326 reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE, *this); 4327 TE->setOperand(0, Left); 4328 TE->setOperand(1, Right); 4329 buildTree_rec(Left, Depth + 1, {TE, 0}); 4330 buildTree_rec(Right, Depth + 1, {TE, 1}); 4331 return; 4332 } 4333 4334 TE->setOperandsInOrder(); 4335 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) { 4336 ValueList Operands; 4337 // Prepare the operand vector. 4338 for (Value *V : VL) 4339 Operands.push_back(cast<Instruction>(V)->getOperand(i)); 4340 4341 buildTree_rec(Operands, Depth + 1, {TE, i}); 4342 } 4343 return; 4344 } 4345 default: 4346 BS.cancelScheduling(VL, VL0); 4347 newTreeEntry(VL, None /*not vectorized*/, S, UserTreeIdx, 4348 ReuseShuffleIndicies); 4349 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); 4350 return; 4351 } 4352 } 4353 4354 unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const { 4355 unsigned N = 1; 4356 Type *EltTy = T; 4357 4358 while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) || 4359 isa<VectorType>(EltTy)) { 4360 if (auto *ST = dyn_cast<StructType>(EltTy)) { 4361 // Check that struct is homogeneous. 4362 for (const auto *Ty : ST->elements()) 4363 if (Ty != *ST->element_begin()) 4364 return 0; 4365 N *= ST->getNumElements(); 4366 EltTy = *ST->element_begin(); 4367 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) { 4368 N *= AT->getNumElements(); 4369 EltTy = AT->getElementType(); 4370 } else { 4371 auto *VT = cast<FixedVectorType>(EltTy); 4372 N *= VT->getNumElements(); 4373 EltTy = VT->getElementType(); 4374 } 4375 } 4376 4377 if (!isValidElementType(EltTy)) 4378 return 0; 4379 uint64_t VTSize = DL.getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N)); 4380 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || VTSize != DL.getTypeStoreSizeInBits(T)) 4381 return 0; 4382 return N; 4383 } 4384 4385 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 4386 SmallVectorImpl<unsigned> &CurrentOrder) const { 4387 const auto *It = find_if(VL, [](Value *V) { 4388 return isa<ExtractElementInst, ExtractValueInst>(V); 4389 }); 4390 assert(It != VL.end() && "Expected at least one extract instruction."); 4391 auto *E0 = cast<Instruction>(*It); 4392 assert(all_of(VL, 4393 [](Value *V) { 4394 return isa<UndefValue, ExtractElementInst, ExtractValueInst>( 4395 V); 4396 }) && 4397 "Invalid opcode"); 4398 // Check if all of the extracts come from the same vector and from the 4399 // correct offset. 4400 Value *Vec = E0->getOperand(0); 4401 4402 CurrentOrder.clear(); 4403 4404 // We have to extract from a vector/aggregate with the same number of elements. 4405 unsigned NElts; 4406 if (E0->getOpcode() == Instruction::ExtractValue) { 4407 const DataLayout &DL = E0->getModule()->getDataLayout(); 4408 NElts = canMapToVector(Vec->getType(), DL); 4409 if (!NElts) 4410 return false; 4411 // Check if load can be rewritten as load of vector. 
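// (The aggregate source must be a simple load whose use count matches the
// number of extracts in the bundle; canMapToVector() above already checked
// that the aggregate has a vector-compatible layout.)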
4412 LoadInst *LI = dyn_cast<LoadInst>(Vec); 4413 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 4414 return false; 4415 } else { 4416 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); 4417 } 4418 4419 if (NElts != VL.size()) 4420 return false; 4421 4422 // Check that all of the indices extract from the correct offset. 4423 bool ShouldKeepOrder = true; 4424 unsigned E = VL.size(); 4425 // Assign to all items the initial value E + 1 so we can check if the extract 4426 // instruction index was used already. 4427 // Also, later we can check that all the indices are used and we have a 4428 // consecutive access in the extract instructions, by checking that no 4429 // element of CurrentOrder still has value E + 1. 4430 CurrentOrder.assign(E, E); 4431 unsigned I = 0; 4432 for (; I < E; ++I) { 4433 auto *Inst = dyn_cast<Instruction>(VL[I]); 4434 if (!Inst) 4435 continue; 4436 if (Inst->getOperand(0) != Vec) 4437 break; 4438 if (auto *EE = dyn_cast<ExtractElementInst>(Inst)) 4439 if (isa<UndefValue>(EE->getIndexOperand())) 4440 continue; 4441 Optional<unsigned> Idx = getExtractIndex(Inst); 4442 if (!Idx) 4443 break; 4444 const unsigned ExtIdx = *Idx; 4445 if (ExtIdx != I) { 4446 if (ExtIdx >= E || CurrentOrder[ExtIdx] != E) 4447 break; 4448 ShouldKeepOrder = false; 4449 CurrentOrder[ExtIdx] = I; 4450 } else { 4451 if (CurrentOrder[I] != E) 4452 break; 4453 CurrentOrder[I] = I; 4454 } 4455 } 4456 if (I < E) { 4457 CurrentOrder.clear(); 4458 return false; 4459 } 4460 if (ShouldKeepOrder) 4461 CurrentOrder.clear(); 4462 4463 return ShouldKeepOrder; 4464 } 4465 4466 bool BoUpSLP::areAllUsersVectorized(Instruction *I, 4467 ArrayRef<Value *> VectorizedVals) const { 4468 return (I->hasOneUse() && is_contained(VectorizedVals, I)) || 4469 all_of(I->users(), [this](User *U) { 4470 return ScalarToTreeEntry.count(U) > 0 || MustGather.contains(U); 4471 }); 4472 } 4473 4474 static std::pair<InstructionCost, InstructionCost> 4475 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy, 4476 TargetTransformInfo *TTI, TargetLibraryInfo *TLI) { 4477 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4478 4479 // Calculate the cost of the scalar and vector calls. 4480 SmallVector<Type *, 4> VecTys; 4481 for (Use &Arg : CI->args()) 4482 VecTys.push_back( 4483 FixedVectorType::get(Arg->getType(), VecTy->getNumElements())); 4484 FastMathFlags FMF; 4485 if (auto *FPCI = dyn_cast<FPMathOperator>(CI)) 4486 FMF = FPCI->getFastMathFlags(); 4487 SmallVector<const Value *> Arguments(CI->args()); 4488 IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF, 4489 dyn_cast<IntrinsicInst>(CI)); 4490 auto IntrinsicCost = 4491 TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput); 4492 4493 auto Shape = VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>( 4494 VecTy->getNumElements())), 4495 false /*HasGlobalPred*/); 4496 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 4497 auto LibCost = IntrinsicCost; 4498 if (!CI->isNoBuiltin() && VecFunc) { 4499 // Calculate the cost of the vector library call. 4500 // If the corresponding vector call is cheaper, return its cost. 4501 LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys, 4502 TTI::TCK_RecipThroughput); 4503 } 4504 return {IntrinsicCost, LibCost}; 4505 } 4506 4507 /// Compute the cost of creating a vector of type \p VecTy containing the 4508 /// extracted values from \p VL. 
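/// Extracts that already sit in consecutive lanes of a single source
/// register contribute nothing; otherwise a per-register
/// SK_PermuteSingleSrc shuffle cost is added.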
4509 static InstructionCost 4510 computeExtractCost(ArrayRef<Value *> VL, FixedVectorType *VecTy, 4511 TargetTransformInfo::ShuffleKind ShuffleKind, 4512 ArrayRef<int> Mask, TargetTransformInfo &TTI) { 4513 unsigned NumOfParts = TTI.getNumberOfParts(VecTy); 4514 4515 if (ShuffleKind != TargetTransformInfo::SK_PermuteSingleSrc || !NumOfParts || 4516 VecTy->getNumElements() < NumOfParts) 4517 return TTI.getShuffleCost(ShuffleKind, VecTy, Mask); 4518 4519 bool AllConsecutive = true; 4520 unsigned EltsPerVector = VecTy->getNumElements() / NumOfParts; 4521 unsigned Idx = -1; 4522 InstructionCost Cost = 0; 4523 4524 // Process extracts in blocks of EltsPerVector to check if the source vector 4525 // operand can be re-used directly. If not, add the cost of creating a shuffle 4526 // to extract the values into a vector register. 4527 for (auto *V : VL) { 4528 ++Idx; 4529 4530 // Need to exclude undefs from analysis. 4531 if (isa<UndefValue>(V) || Mask[Idx] == UndefMaskElem) 4532 continue; 4533 4534 // Reached the start of a new vector registers. 4535 if (Idx % EltsPerVector == 0) { 4536 AllConsecutive = true; 4537 continue; 4538 } 4539 4540 // Check all extracts for a vector register on the target directly 4541 // extract values in order. 4542 unsigned CurrentIdx = *getExtractIndex(cast<Instruction>(V)); 4543 if (!isa<UndefValue>(VL[Idx - 1]) && Mask[Idx - 1] != UndefMaskElem) { 4544 unsigned PrevIdx = *getExtractIndex(cast<Instruction>(VL[Idx - 1])); 4545 AllConsecutive &= PrevIdx + 1 == CurrentIdx && 4546 CurrentIdx % EltsPerVector == Idx % EltsPerVector; 4547 } 4548 4549 if (AllConsecutive) 4550 continue; 4551 4552 // Skip all indices, except for the last index per vector block. 4553 if ((Idx + 1) % EltsPerVector != 0 && Idx + 1 != VL.size()) 4554 continue; 4555 4556 // If we have a series of extracts which are not consecutive and hence 4557 // cannot re-use the source vector register directly, compute the shuffle 4558 // cost to extract the a vector with EltsPerVector elements. 4559 Cost += TTI.getShuffleCost( 4560 TargetTransformInfo::SK_PermuteSingleSrc, 4561 FixedVectorType::get(VecTy->getElementType(), EltsPerVector)); 4562 } 4563 return Cost; 4564 } 4565 4566 /// Build shuffle mask for shuffle graph entries and lists of main and alternate 4567 /// operations operands. 4568 static void 4569 buildSuffleEntryMask(ArrayRef<Value *> VL, ArrayRef<unsigned> ReorderIndices, 4570 ArrayRef<int> ReusesIndices, 4571 const function_ref<bool(Instruction *)> IsAltOp, 4572 SmallVectorImpl<int> &Mask, 4573 SmallVectorImpl<Value *> *OpScalars = nullptr, 4574 SmallVectorImpl<Value *> *AltScalars = nullptr) { 4575 unsigned Sz = VL.size(); 4576 Mask.assign(Sz, UndefMaskElem); 4577 SmallVector<int> OrderMask; 4578 if (!ReorderIndices.empty()) 4579 inversePermutation(ReorderIndices, OrderMask); 4580 for (unsigned I = 0; I < Sz; ++I) { 4581 unsigned Idx = I; 4582 if (!ReorderIndices.empty()) 4583 Idx = OrderMask[I]; 4584 auto *OpInst = cast<Instruction>(VL[Idx]); 4585 if (IsAltOp(OpInst)) { 4586 Mask[I] = Sz + Idx; 4587 if (AltScalars) 4588 AltScalars->push_back(OpInst); 4589 } else { 4590 Mask[I] = Idx; 4591 if (OpScalars) 4592 OpScalars->push_back(OpInst); 4593 } 4594 } 4595 if (!ReusesIndices.empty()) { 4596 SmallVector<int> NewMask(ReusesIndices.size(), UndefMaskElem); 4597 transform(ReusesIndices, NewMask.begin(), [&Mask](int Idx) { 4598 return Idx != UndefMaskElem ? 
Mask[Idx] : UndefMaskElem; 4599 }); 4600 Mask.swap(NewMask); 4601 } 4602 } 4603 4604 InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E, 4605 ArrayRef<Value *> VectorizedVals) { 4606 ArrayRef<Value*> VL = E->Scalars; 4607 4608 Type *ScalarTy = VL[0]->getType(); 4609 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 4610 ScalarTy = SI->getValueOperand()->getType(); 4611 else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0])) 4612 ScalarTy = CI->getOperand(0)->getType(); 4613 else if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 4614 ScalarTy = IE->getOperand(1)->getType(); 4615 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 4616 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 4617 4618 // If we have computed a smaller type for the expression, update VecTy so 4619 // that the costs will be accurate. 4620 if (MinBWs.count(VL[0])) 4621 VecTy = FixedVectorType::get( 4622 IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size()); 4623 unsigned EntryVF = E->getVectorFactor(); 4624 auto *FinalVecTy = FixedVectorType::get(VecTy->getElementType(), EntryVF); 4625 4626 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 4627 // FIXME: it tries to fix a problem with MSVC buildbots. 4628 TargetTransformInfo &TTIRef = *TTI; 4629 auto &&AdjustExtractsCost = [this, &TTIRef, CostKind, VL, VecTy, 4630 VectorizedVals, E](InstructionCost &Cost) { 4631 DenseMap<Value *, int> ExtractVectorsTys; 4632 SmallPtrSet<Value *, 4> CheckedExtracts; 4633 for (auto *V : VL) { 4634 if (isa<UndefValue>(V)) 4635 continue; 4636 // If all users of instruction are going to be vectorized and this 4637 // instruction itself is not going to be vectorized, consider this 4638 // instruction as dead and remove its cost from the final cost of the 4639 // vectorized tree. 4640 // Also, avoid adjusting the cost for extractelements with multiple uses 4641 // in different graph entries. 4642 const TreeEntry *VE = getTreeEntry(V); 4643 if (!CheckedExtracts.insert(V).second || 4644 !areAllUsersVectorized(cast<Instruction>(V), VectorizedVals) || 4645 (VE && VE != E)) 4646 continue; 4647 auto *EE = cast<ExtractElementInst>(V); 4648 Optional<unsigned> EEIdx = getExtractIndex(EE); 4649 if (!EEIdx) 4650 continue; 4651 unsigned Idx = *EEIdx; 4652 if (TTIRef.getNumberOfParts(VecTy) != 4653 TTIRef.getNumberOfParts(EE->getVectorOperandType())) { 4654 auto It = 4655 ExtractVectorsTys.try_emplace(EE->getVectorOperand(), Idx).first; 4656 It->getSecond() = std::min<int>(It->second, Idx); 4657 } 4658 // Take credit for instruction that will become dead. 4659 if (EE->hasOneUse()) { 4660 Instruction *Ext = EE->user_back(); 4661 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && 4662 all_of(Ext->users(), 4663 [](User *U) { return isa<GetElementPtrInst>(U); })) { 4664 // Use getExtractWithExtendCost() to calculate the cost of 4665 // extractelement/ext pair. 4666 Cost -= 4667 TTIRef.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(), 4668 EE->getVectorOperandType(), Idx); 4669 // Add back the cost of s|zext which is subtracted separately. 4670 Cost += TTIRef.getCastInstrCost( 4671 Ext->getOpcode(), Ext->getType(), EE->getType(), 4672 TTI::getCastContextHint(Ext), CostKind, Ext); 4673 continue; 4674 } 4675 } 4676 Cost -= TTIRef.getVectorInstrCost(Instruction::ExtractElement, 4677 EE->getVectorOperandType(), Idx); 4678 } 4679 // Add a cost for subvector extracts/inserts if required. 
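    // (Sketch of the intent of the loop below: if the vector the elements are
    // extracted from spans more target registers than VecTy, we pay an
    // SK_ExtractSubvector shuffle to pull out the slice that is actually
    // used; if it spans fewer, we pay an SK_InsertSubvector shuffle to place
    // it into the wider build vector.)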
4680 for (const auto &Data : ExtractVectorsTys) { 4681 auto *EEVTy = cast<FixedVectorType>(Data.first->getType()); 4682 unsigned NumElts = VecTy->getNumElements(); 4683 if (Data.second % NumElts == 0) 4684 continue; 4685 if (TTIRef.getNumberOfParts(EEVTy) > TTIRef.getNumberOfParts(VecTy)) { 4686 unsigned Idx = (Data.second / NumElts) * NumElts; 4687 unsigned EENumElts = EEVTy->getNumElements(); 4688 if (Idx + NumElts <= EENumElts) { 4689 Cost += 4690 TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 4691 EEVTy, None, Idx, VecTy); 4692 } else { 4693 // Need to round up the subvector type vectorization factor to avoid a 4694 // crash in cost model functions. Make SubVT so that Idx + VF of SubVT 4695 // <= EENumElts. 4696 auto *SubVT = 4697 FixedVectorType::get(VecTy->getElementType(), EENumElts - Idx); 4698 Cost += 4699 TTIRef.getShuffleCost(TargetTransformInfo::SK_ExtractSubvector, 4700 EEVTy, None, Idx, SubVT); 4701 } 4702 } else { 4703 Cost += TTIRef.getShuffleCost(TargetTransformInfo::SK_InsertSubvector, 4704 VecTy, None, 0, EEVTy); 4705 } 4706 } 4707 }; 4708 if (E->State == TreeEntry::NeedToGather) { 4709 if (allConstant(VL)) 4710 return 0; 4711 if (isa<InsertElementInst>(VL[0])) 4712 return InstructionCost::getInvalid(); 4713 SmallVector<int> Mask; 4714 SmallVector<const TreeEntry *> Entries; 4715 Optional<TargetTransformInfo::ShuffleKind> Shuffle = 4716 isGatherShuffledEntry(E, Mask, Entries); 4717 if (Shuffle.hasValue()) { 4718 InstructionCost GatherCost = 0; 4719 if (ShuffleVectorInst::isIdentityMask(Mask)) { 4720 // Perfect match in the graph, will reuse the previously vectorized 4721 // node. Cost is 0. 4722 LLVM_DEBUG( 4723 dbgs() 4724 << "SLP: perfect diamond match for gather bundle that starts with " 4725 << *VL.front() << ".\n"); 4726 if (NeedToShuffleReuses) 4727 GatherCost = 4728 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, 4729 FinalVecTy, E->ReuseShuffleIndices); 4730 } else { 4731 LLVM_DEBUG(dbgs() << "SLP: shuffled " << Entries.size() 4732 << " entries for bundle that starts with " 4733 << *VL.front() << ".\n"); 4734 // Detected that instead of gather we can emit a shuffle of single/two 4735 // previously vectorized nodes. Add the cost of the permutation rather 4736 // than gather. 4737 ::addMask(Mask, E->ReuseShuffleIndices); 4738 GatherCost = TTI->getShuffleCost(*Shuffle, FinalVecTy, Mask); 4739 } 4740 return GatherCost; 4741 } 4742 if ((E->getOpcode() == Instruction::ExtractElement || 4743 all_of(E->Scalars, 4744 [](Value *V) { 4745 return isa<ExtractElementInst, UndefValue>(V); 4746 })) && 4747 allSameType(VL)) { 4748 // Check that gather of extractelements can be represented as just a 4749 // shuffle of a single/two vectors the scalars are extracted from. 4750 SmallVector<int> Mask; 4751 Optional<TargetTransformInfo::ShuffleKind> ShuffleKind = 4752 isFixedVectorShuffle(VL, Mask); 4753 if (ShuffleKind.hasValue()) { 4754 // Found the bunch of extractelement instructions that must be gathered 4755 // into a vector and can be represented as a permutation elements in a 4756 // single input vector or of 2 input vectors. 4757 InstructionCost Cost = 4758 computeExtractCost(VL, VecTy, *ShuffleKind, Mask, *TTI); 4759 AdjustExtractsCost(Cost); 4760 if (NeedToShuffleReuses) 4761 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, 4762 FinalVecTy, E->ReuseShuffleIndices); 4763 return Cost; 4764 } 4765 } 4766 if (isSplat(VL)) { 4767 // Found the broadcasting of the single scalar, calculate the cost as the 4768 // broadcast. 
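      // For example, a gather of {%x, %x, %x, %x} is costed as a single
      // SK_Broadcast shuffle rather than as four separate insertelement
      // instructions.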
4769 assert(VecTy == FinalVecTy && 4770 "No reused scalars expected for broadcast."); 4771 return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy); 4772 } 4773 InstructionCost ReuseShuffleCost = 0; 4774 if (NeedToShuffleReuses) 4775 ReuseShuffleCost = TTI->getShuffleCost( 4776 TTI::SK_PermuteSingleSrc, FinalVecTy, E->ReuseShuffleIndices); 4777 // Improve gather cost for gather of loads, if we can group some of the 4778 // loads into vector loads. 4779 if (VL.size() > 2 && E->getOpcode() == Instruction::Load && 4780 !E->isAltShuffle()) { 4781 BoUpSLP::ValueSet VectorizedLoads; 4782 unsigned StartIdx = 0; 4783 unsigned VF = VL.size() / 2; 4784 unsigned VectorizedCnt = 0; 4785 unsigned ScatterVectorizeCnt = 0; 4786 const unsigned Sz = DL->getTypeSizeInBits(E->getMainOp()->getType()); 4787 for (unsigned MinVF = getMinVF(2 * Sz); VF >= MinVF; VF /= 2) { 4788 for (unsigned Cnt = StartIdx, End = VL.size(); Cnt + VF <= End; 4789 Cnt += VF) { 4790 ArrayRef<Value *> Slice = VL.slice(Cnt, VF); 4791 if (!VectorizedLoads.count(Slice.front()) && 4792 !VectorizedLoads.count(Slice.back()) && allSameBlock(Slice)) { 4793 SmallVector<Value *> PointerOps; 4794 OrdersType CurrentOrder; 4795 LoadsState LS = canVectorizeLoads(Slice, Slice.front(), *TTI, *DL, 4796 *SE, CurrentOrder, PointerOps); 4797 switch (LS) { 4798 case LoadsState::Vectorize: 4799 case LoadsState::ScatterVectorize: 4800 // Mark the vectorized loads so that we don't vectorize them 4801 // again. 4802 if (LS == LoadsState::Vectorize) 4803 ++VectorizedCnt; 4804 else 4805 ++ScatterVectorizeCnt; 4806 VectorizedLoads.insert(Slice.begin(), Slice.end()); 4807 // If we vectorized initial block, no need to try to vectorize it 4808 // again. 4809 if (Cnt == StartIdx) 4810 StartIdx += VF; 4811 break; 4812 case LoadsState::Gather: 4813 break; 4814 } 4815 } 4816 } 4817 // Check if the whole array was vectorized already - exit. 4818 if (StartIdx >= VL.size()) 4819 break; 4820 // Found vectorizable parts - exit. 4821 if (!VectorizedLoads.empty()) 4822 break; 4823 } 4824 if (!VectorizedLoads.empty()) { 4825 InstructionCost GatherCost = 0; 4826 unsigned NumParts = TTI->getNumberOfParts(VecTy); 4827 bool NeedInsertSubvectorAnalysis = 4828 !NumParts || (VL.size() / VF) > NumParts; 4829 // Get the cost for gathered loads. 4830 for (unsigned I = 0, End = VL.size(); I < End; I += VF) { 4831 if (VectorizedLoads.contains(VL[I])) 4832 continue; 4833 GatherCost += getGatherCost(VL.slice(I, VF)); 4834 } 4835 // The cost for vectorized loads. 4836 InstructionCost ScalarsCost = 0; 4837 for (Value *V : VectorizedLoads) { 4838 auto *LI = cast<LoadInst>(V); 4839 ScalarsCost += TTI->getMemoryOpCost( 4840 Instruction::Load, LI->getType(), LI->getAlign(), 4841 LI->getPointerAddressSpace(), CostKind, LI); 4842 } 4843 auto *LI = cast<LoadInst>(E->getMainOp()); 4844 auto *LoadTy = FixedVectorType::get(LI->getType(), VF); 4845 Align Alignment = LI->getAlign(); 4846 GatherCost += 4847 VectorizedCnt * 4848 TTI->getMemoryOpCost(Instruction::Load, LoadTy, Alignment, 4849 LI->getPointerAddressSpace(), CostKind, LI); 4850 GatherCost += ScatterVectorizeCnt * 4851 TTI->getGatherScatterOpCost( 4852 Instruction::Load, LoadTy, LI->getPointerOperand(), 4853 /*VariableMask=*/false, Alignment, CostKind, LI); 4854 if (NeedInsertSubvectorAnalysis) { 4855 // Add the cost for the subvectors insert. 
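          // E.g. (assumed sizes) with VL.size() == 8 loads vectorized as two
          // VF == 4 slices, one SK_InsertSubvector shuffle at offset 4 is
          // added below to combine the second <4 x ...> load into the wide
          // vector.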
4856       for (int I = VF, E = VL.size(); I < E; I += VF)
4857         GatherCost += TTI->getShuffleCost(TTI::SK_InsertSubvector, VecTy,
4858                                           None, I, LoadTy);
4859     }
4860     return ReuseShuffleCost + GatherCost - ScalarsCost;
4861   }
4862   }
4863   return ReuseShuffleCost + getGatherCost(VL);
4864 }
4865   InstructionCost CommonCost = 0;
4866   SmallVector<int> Mask;
4867   if (!E->ReorderIndices.empty()) {
4868     SmallVector<int> NewMask;
4869     if (E->getOpcode() == Instruction::Store) {
4870       // For stores the order is actually a mask.
4871       NewMask.resize(E->ReorderIndices.size());
4872       copy(E->ReorderIndices, NewMask.begin());
4873     } else {
4874       inversePermutation(E->ReorderIndices, NewMask);
4875     }
4876     ::addMask(Mask, NewMask);
4877   }
4878   if (NeedToShuffleReuses)
4879     ::addMask(Mask, E->ReuseShuffleIndices);
4880   if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask))
4881     CommonCost =
4882         TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask);
4883   assert((E->State == TreeEntry::Vectorize ||
4884           E->State == TreeEntry::ScatterVectorize) &&
4885          "Unhandled state");
4886   assert(E->getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
4887   Instruction *VL0 = E->getMainOp();
4888   unsigned ShuffleOrOp =
4889       E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
4890   switch (ShuffleOrOp) {
4891   case Instruction::PHI:
4892     return 0;
4893 
4894   case Instruction::ExtractValue:
4895   case Instruction::ExtractElement: {
4896     // The common cost of removing ExtractElement/ExtractValue instructions +
4897     // the cost of shuffles, if required to reshuffle the original vector.
4898     if (NeedToShuffleReuses) {
4899       unsigned Idx = 0;
4900       for (unsigned I : E->ReuseShuffleIndices) {
4901         if (ShuffleOrOp == Instruction::ExtractElement) {
4902           auto *EE = cast<ExtractElementInst>(VL[I]);
4903           CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
4904                                                 EE->getVectorOperandType(),
4905                                                 *getExtractIndex(EE));
4906         } else {
4907           CommonCost -= TTI->getVectorInstrCost(Instruction::ExtractElement,
4908                                                 VecTy, Idx);
4909           ++Idx;
4910         }
4911       }
4912       Idx = EntryVF;
4913       for (Value *V : VL) {
4914         if (ShuffleOrOp == Instruction::ExtractElement) {
4915           auto *EE = cast<ExtractElementInst>(V);
4916           CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
4917                                                 EE->getVectorOperandType(),
4918                                                 *getExtractIndex(EE));
4919         } else {
4920           --Idx;
4921           CommonCost += TTI->getVectorInstrCost(Instruction::ExtractElement,
4922                                                 VecTy, Idx);
4923         }
4924       }
4925     }
4926     if (ShuffleOrOp == Instruction::ExtractValue) {
4927       for (unsigned I = 0, E = VL.size(); I < E; ++I) {
4928         auto *EI = cast<Instruction>(VL[I]);
4929         // Take credit for instruction that will become dead.
4930         if (EI->hasOneUse()) {
4931           Instruction *Ext = EI->user_back();
4932           if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
4933               all_of(Ext->users(),
4934                      [](User *U) { return isa<GetElementPtrInst>(U); })) {
4935             // Use getExtractWithExtendCost() to calculate the cost of
4936             // extractelement/ext pair.
4937             CommonCost -= TTI->getExtractWithExtendCost(
4938                 Ext->getOpcode(), Ext->getType(), VecTy, I);
4939             // Add back the cost of s|zext which is subtracted separately.
4940 CommonCost += TTI->getCastInstrCost( 4941 Ext->getOpcode(), Ext->getType(), EI->getType(), 4942 TTI::getCastContextHint(Ext), CostKind, Ext); 4943 continue; 4944 } 4945 } 4946 CommonCost -= 4947 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, I); 4948 } 4949 } else { 4950 AdjustExtractsCost(CommonCost); 4951 } 4952 return CommonCost; 4953 } 4954 case Instruction::InsertElement: { 4955 assert(E->ReuseShuffleIndices.empty() && 4956 "Unique insertelements only are expected."); 4957 auto *SrcVecTy = cast<FixedVectorType>(VL0->getType()); 4958 4959 unsigned const NumElts = SrcVecTy->getNumElements(); 4960 unsigned const NumScalars = VL.size(); 4961 APInt DemandedElts = APInt::getZero(NumElts); 4962 // TODO: Add support for Instruction::InsertValue. 4963 SmallVector<int> Mask; 4964 if (!E->ReorderIndices.empty()) { 4965 inversePermutation(E->ReorderIndices, Mask); 4966 Mask.append(NumElts - NumScalars, UndefMaskElem); 4967 } else { 4968 Mask.assign(NumElts, UndefMaskElem); 4969 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0); 4970 } 4971 unsigned Offset = *getInsertIndex(VL0, 0); 4972 bool IsIdentity = true; 4973 SmallVector<int> PrevMask(NumElts, UndefMaskElem); 4974 Mask.swap(PrevMask); 4975 for (unsigned I = 0; I < NumScalars; ++I) { 4976 Optional<int> InsertIdx = getInsertIndex(VL[PrevMask[I]], 0); 4977 if (!InsertIdx || *InsertIdx == UndefMaskElem) 4978 continue; 4979 DemandedElts.setBit(*InsertIdx); 4980 IsIdentity &= *InsertIdx - Offset == I; 4981 Mask[*InsertIdx - Offset] = I; 4982 } 4983 assert(Offset < NumElts && "Failed to find vector index offset"); 4984 4985 InstructionCost Cost = 0; 4986 Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts, 4987 /*Insert*/ true, /*Extract*/ false); 4988 4989 if (IsIdentity && NumElts != NumScalars && Offset % NumScalars != 0) { 4990 // FIXME: Replace with SK_InsertSubvector once it is properly supported. 4991 unsigned Sz = PowerOf2Ceil(Offset + NumScalars); 4992 Cost += TTI->getShuffleCost( 4993 TargetTransformInfo::SK_PermuteSingleSrc, 4994 FixedVectorType::get(SrcVecTy->getElementType(), Sz)); 4995 } else if (!IsIdentity) { 4996 auto *FirstInsert = 4997 cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 4998 return !is_contained(E->Scalars, 4999 cast<Instruction>(V)->getOperand(0)); 5000 })); 5001 if (isUndefVector(FirstInsert->getOperand(0))) { 5002 Cost += TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, SrcVecTy, Mask); 5003 } else { 5004 SmallVector<int> InsertMask(NumElts); 5005 std::iota(InsertMask.begin(), InsertMask.end(), 0); 5006 for (unsigned I = 0; I < NumElts; I++) { 5007 if (Mask[I] != UndefMaskElem) 5008 InsertMask[Offset + I] = NumElts + I; 5009 } 5010 Cost += 5011 TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVecTy, InsertMask); 5012 } 5013 } 5014 5015 return Cost; 5016 } 5017 case Instruction::ZExt: 5018 case Instruction::SExt: 5019 case Instruction::FPToUI: 5020 case Instruction::FPToSI: 5021 case Instruction::FPExt: 5022 case Instruction::PtrToInt: 5023 case Instruction::IntToPtr: 5024 case Instruction::SIToFP: 5025 case Instruction::UIToFP: 5026 case Instruction::Trunc: 5027 case Instruction::FPTrunc: 5028 case Instruction::BitCast: { 5029 Type *SrcTy = VL0->getOperand(0)->getType(); 5030 InstructionCost ScalarEltCost = 5031 TTI->getCastInstrCost(E->getOpcode(), ScalarTy, SrcTy, 5032 TTI::getCastContextHint(VL0), CostKind, VL0); 5033 if (NeedToShuffleReuses) { 5034 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 5035 } 5036 5037 // Calculate the cost of this instruction. 
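    // E.g. for four scalar 'sext i8 %x to i32' instructions, ScalarCost below
    // is 4x the scalar sext cost, while VecCost is a single
    // 'sext <4 x i8> to <4 x i32>' (plus CommonCost); if the expression was
    // demoted so that VecTy == SrcVecTy, the vector cast is free.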
5038 InstructionCost ScalarCost = VL.size() * ScalarEltCost; 5039 5040 auto *SrcVecTy = FixedVectorType::get(SrcTy, VL.size()); 5041 InstructionCost VecCost = 0; 5042 // Check if the values are candidates to demote. 5043 if (!MinBWs.count(VL0) || VecTy != SrcVecTy) { 5044 VecCost = CommonCost + TTI->getCastInstrCost( 5045 E->getOpcode(), VecTy, SrcVecTy, 5046 TTI::getCastContextHint(VL0), CostKind, VL0); 5047 } 5048 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 5049 return VecCost - ScalarCost; 5050 } 5051 case Instruction::FCmp: 5052 case Instruction::ICmp: 5053 case Instruction::Select: { 5054 // Calculate the cost of this instruction. 5055 InstructionCost ScalarEltCost = 5056 TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy, Builder.getInt1Ty(), 5057 CmpInst::BAD_ICMP_PREDICATE, CostKind, VL0); 5058 if (NeedToShuffleReuses) { 5059 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 5060 } 5061 auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size()); 5062 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; 5063 5064 // Check if all entries in VL are either compares or selects with compares 5065 // as condition that have the same predicates. 5066 CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE; 5067 bool First = true; 5068 for (auto *V : VL) { 5069 CmpInst::Predicate CurrentPred; 5070 auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value()); 5071 if ((!match(V, m_Select(MatchCmp, m_Value(), m_Value())) && 5072 !match(V, MatchCmp)) || 5073 (!First && VecPred != CurrentPred)) { 5074 VecPred = CmpInst::BAD_ICMP_PREDICATE; 5075 break; 5076 } 5077 First = false; 5078 VecPred = CurrentPred; 5079 } 5080 5081 InstructionCost VecCost = TTI->getCmpSelInstrCost( 5082 E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0); 5083 // Check if it is possible and profitable to use min/max for selects in 5084 // VL. 5085 // 5086 auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL); 5087 if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) { 5088 IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy, 5089 {VecTy, VecTy}); 5090 InstructionCost IntrinsicCost = 5091 TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 5092 // If the selects are the only uses of the compares, they will be dead 5093 // and we can adjust the cost by removing their cost. 5094 if (IntrinsicAndUse.second) 5095 IntrinsicCost -= 5096 TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy, MaskTy, 5097 CmpInst::BAD_ICMP_PREDICATE, CostKind); 5098 VecCost = std::min(VecCost, IntrinsicCost); 5099 } 5100 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 5101 return CommonCost + VecCost - ScalarCost; 5102 } 5103 case Instruction::FNeg: 5104 case Instruction::Add: 5105 case Instruction::FAdd: 5106 case Instruction::Sub: 5107 case Instruction::FSub: 5108 case Instruction::Mul: 5109 case Instruction::FMul: 5110 case Instruction::UDiv: 5111 case Instruction::SDiv: 5112 case Instruction::FDiv: 5113 case Instruction::URem: 5114 case Instruction::SRem: 5115 case Instruction::FRem: 5116 case Instruction::Shl: 5117 case Instruction::LShr: 5118 case Instruction::AShr: 5119 case Instruction::And: 5120 case Instruction::Or: 5121 case Instruction::Xor: { 5122 // Certain instructions can be cheaper to vectorize if they have a 5123 // constant second vector operand. 
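    // E.g. a bundle of shifts that all use the same power-of-two constant
    // amount keeps Op2VK == OK_UniformConstantValue and Op2VP == OP_PowerOf2,
    // which many cost models rate cheaper than a shift by a variable or
    // non-uniform amount.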
5124 TargetTransformInfo::OperandValueKind Op1VK = 5125 TargetTransformInfo::OK_AnyValue; 5126 TargetTransformInfo::OperandValueKind Op2VK = 5127 TargetTransformInfo::OK_UniformConstantValue; 5128 TargetTransformInfo::OperandValueProperties Op1VP = 5129 TargetTransformInfo::OP_None; 5130 TargetTransformInfo::OperandValueProperties Op2VP = 5131 TargetTransformInfo::OP_PowerOf2; 5132 5133 // If all operands are exactly the same ConstantInt then set the 5134 // operand kind to OK_UniformConstantValue. 5135 // If instead not all operands are constants, then set the operand kind 5136 // to OK_AnyValue. If all operands are constants but not the same, 5137 // then set the operand kind to OK_NonUniformConstantValue. 5138 ConstantInt *CInt0 = nullptr; 5139 for (unsigned i = 0, e = VL.size(); i < e; ++i) { 5140 const Instruction *I = cast<Instruction>(VL[i]); 5141 unsigned OpIdx = isa<BinaryOperator>(I) ? 1 : 0; 5142 ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx)); 5143 if (!CInt) { 5144 Op2VK = TargetTransformInfo::OK_AnyValue; 5145 Op2VP = TargetTransformInfo::OP_None; 5146 break; 5147 } 5148 if (Op2VP == TargetTransformInfo::OP_PowerOf2 && 5149 !CInt->getValue().isPowerOf2()) 5150 Op2VP = TargetTransformInfo::OP_None; 5151 if (i == 0) { 5152 CInt0 = CInt; 5153 continue; 5154 } 5155 if (CInt0 != CInt) 5156 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; 5157 } 5158 5159 SmallVector<const Value *, 4> Operands(VL0->operand_values()); 5160 InstructionCost ScalarEltCost = 5161 TTI->getArithmeticInstrCost(E->getOpcode(), ScalarTy, CostKind, Op1VK, 5162 Op2VK, Op1VP, Op2VP, Operands, VL0); 5163 if (NeedToShuffleReuses) { 5164 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 5165 } 5166 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; 5167 InstructionCost VecCost = 5168 TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind, Op1VK, 5169 Op2VK, Op1VP, Op2VP, Operands, VL0); 5170 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 5171 return CommonCost + VecCost - ScalarCost; 5172 } 5173 case Instruction::GetElementPtr: { 5174 TargetTransformInfo::OperandValueKind Op1VK = 5175 TargetTransformInfo::OK_AnyValue; 5176 TargetTransformInfo::OperandValueKind Op2VK = 5177 TargetTransformInfo::OK_UniformConstantValue; 5178 5179 InstructionCost ScalarEltCost = TTI->getArithmeticInstrCost( 5180 Instruction::Add, ScalarTy, CostKind, Op1VK, Op2VK); 5181 if (NeedToShuffleReuses) { 5182 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 5183 } 5184 InstructionCost ScalarCost = VecTy->getNumElements() * ScalarEltCost; 5185 InstructionCost VecCost = TTI->getArithmeticInstrCost( 5186 Instruction::Add, VecTy, CostKind, Op1VK, Op2VK); 5187 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 5188 return CommonCost + VecCost - ScalarCost; 5189 } 5190 case Instruction::Load: { 5191 // Cost of wide load - cost of scalar loads. 
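    // E.g. four consecutive scalar i32 loads are replaced by one <4 x i32>
    // load; for ScatterVectorize nodes the vector side is costed as a
    // gather-style load using the common (minimum) alignment of the scalars.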
5192 Align Alignment = cast<LoadInst>(VL0)->getAlign(); 5193 InstructionCost ScalarEltCost = TTI->getMemoryOpCost( 5194 Instruction::Load, ScalarTy, Alignment, 0, CostKind, VL0); 5195 if (NeedToShuffleReuses) { 5196 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 5197 } 5198 InstructionCost ScalarLdCost = VecTy->getNumElements() * ScalarEltCost; 5199 InstructionCost VecLdCost; 5200 if (E->State == TreeEntry::Vectorize) { 5201 VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, Alignment, 0, 5202 CostKind, VL0); 5203 } else { 5204 assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState"); 5205 Align CommonAlignment = Alignment; 5206 for (Value *V : VL) 5207 CommonAlignment = 5208 commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign()); 5209 VecLdCost = TTI->getGatherScatterOpCost( 5210 Instruction::Load, VecTy, cast<LoadInst>(VL0)->getPointerOperand(), 5211 /*VariableMask=*/false, CommonAlignment, CostKind, VL0); 5212 } 5213 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecLdCost, ScalarLdCost)); 5214 return CommonCost + VecLdCost - ScalarLdCost; 5215 } 5216 case Instruction::Store: { 5217 // We know that we can merge the stores. Calculate the cost. 5218 bool IsReorder = !E->ReorderIndices.empty(); 5219 auto *SI = 5220 cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0); 5221 Align Alignment = SI->getAlign(); 5222 InstructionCost ScalarEltCost = TTI->getMemoryOpCost( 5223 Instruction::Store, ScalarTy, Alignment, 0, CostKind, VL0); 5224 InstructionCost ScalarStCost = VecTy->getNumElements() * ScalarEltCost; 5225 InstructionCost VecStCost = TTI->getMemoryOpCost( 5226 Instruction::Store, VecTy, Alignment, 0, CostKind, VL0); 5227 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecStCost, ScalarStCost)); 5228 return CommonCost + VecStCost - ScalarStCost; 5229 } 5230 case Instruction::Call: { 5231 CallInst *CI = cast<CallInst>(VL0); 5232 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 5233 5234 // Calculate the cost of the scalar and vector calls. 
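    // The scalar side is the per-lane intrinsic cost times the number of
    // lanes; the vector side takes the cheaper of the vector intrinsic and a
    // vectorized library call (see getVectorCallCosts above). E.g. llvm.fabs
    // over 4 floats may become either one <4 x float> intrinsic or one
    // vectorized libm call, whichever the target reports as cheaper.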
5235 IntrinsicCostAttributes CostAttrs(ID, *CI, 1); 5236 InstructionCost ScalarEltCost = 5237 TTI->getIntrinsicInstrCost(CostAttrs, CostKind); 5238 if (NeedToShuffleReuses) { 5239 CommonCost -= (EntryVF - VL.size()) * ScalarEltCost; 5240 } 5241 InstructionCost ScalarCallCost = VecTy->getNumElements() * ScalarEltCost; 5242 5243 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 5244 InstructionCost VecCallCost = 5245 std::min(VecCallCosts.first, VecCallCosts.second); 5246 5247 LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost 5248 << " (" << VecCallCost << "-" << ScalarCallCost << ")" 5249 << " for " << *CI << "\n"); 5250 5251 return CommonCost + VecCallCost - ScalarCallCost; 5252 } 5253 case Instruction::ShuffleVector: { 5254 assert(E->isAltShuffle() && 5255 ((Instruction::isBinaryOp(E->getOpcode()) && 5256 Instruction::isBinaryOp(E->getAltOpcode())) || 5257 (Instruction::isCast(E->getOpcode()) && 5258 Instruction::isCast(E->getAltOpcode()))) && 5259 "Invalid Shuffle Vector Operand"); 5260 InstructionCost ScalarCost = 0; 5261 if (NeedToShuffleReuses) { 5262 for (unsigned Idx : E->ReuseShuffleIndices) { 5263 Instruction *I = cast<Instruction>(VL[Idx]); 5264 CommonCost -= TTI->getInstructionCost(I, CostKind); 5265 } 5266 for (Value *V : VL) { 5267 Instruction *I = cast<Instruction>(V); 5268 CommonCost += TTI->getInstructionCost(I, CostKind); 5269 } 5270 } 5271 for (Value *V : VL) { 5272 Instruction *I = cast<Instruction>(V); 5273 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 5274 ScalarCost += TTI->getInstructionCost(I, CostKind); 5275 } 5276 // VecCost is equal to sum of the cost of creating 2 vectors 5277 // and the cost of creating shuffle. 5278 InstructionCost VecCost = 0; 5279 // Try to find the previous shuffle node with the same operands and same 5280 // main/alternate ops. 5281 auto &&TryFindNodeWithEqualOperands = [this, E]() { 5282 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 5283 if (TE.get() == E) 5284 break; 5285 if (TE->isAltShuffle() && 5286 ((TE->getOpcode() == E->getOpcode() && 5287 TE->getAltOpcode() == E->getAltOpcode()) || 5288 (TE->getOpcode() == E->getAltOpcode() && 5289 TE->getAltOpcode() == E->getOpcode())) && 5290 TE->hasEqualOperands(*E)) 5291 return true; 5292 } 5293 return false; 5294 }; 5295 if (TryFindNodeWithEqualOperands()) { 5296 LLVM_DEBUG({ 5297 dbgs() << "SLP: diamond match for alternate node found.\n"; 5298 E->dump(); 5299 }); 5300 // No need to add new vector costs here since we're going to reuse 5301 // same main/alternate vector ops, just do different shuffling. 
5302 } else if (Instruction::isBinaryOp(E->getOpcode())) { 5303 VecCost = TTI->getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind); 5304 VecCost += TTI->getArithmeticInstrCost(E->getAltOpcode(), VecTy, 5305 CostKind); 5306 } else { 5307 Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType(); 5308 Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType(); 5309 auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size()); 5310 auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size()); 5311 VecCost = TTI->getCastInstrCost(E->getOpcode(), VecTy, Src0Ty, 5312 TTI::CastContextHint::None, CostKind); 5313 VecCost += TTI->getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty, 5314 TTI::CastContextHint::None, CostKind); 5315 } 5316 5317 SmallVector<int> Mask; 5318 buildSuffleEntryMask( 5319 E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices, 5320 [E](Instruction *I) { 5321 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 5322 return I->getOpcode() == E->getAltOpcode(); 5323 }, 5324 Mask); 5325 CommonCost = 5326 TTI->getShuffleCost(TargetTransformInfo::SK_Select, FinalVecTy, Mask); 5327 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost, ScalarCost)); 5328 return CommonCost + VecCost - ScalarCost; 5329 } 5330 default: 5331 llvm_unreachable("Unknown instruction"); 5332 } 5333 } 5334 5335 bool BoUpSLP::isFullyVectorizableTinyTree(bool ForReduction) const { 5336 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height " 5337 << VectorizableTree.size() << " is fully vectorizable .\n"); 5338 5339 auto &&AreVectorizableGathers = [this](const TreeEntry *TE, unsigned Limit) { 5340 SmallVector<int> Mask; 5341 return TE->State == TreeEntry::NeedToGather && 5342 !any_of(TE->Scalars, 5343 [this](Value *V) { return EphValues.contains(V); }) && 5344 (allConstant(TE->Scalars) || isSplat(TE->Scalars) || 5345 TE->Scalars.size() < Limit || 5346 ((TE->getOpcode() == Instruction::ExtractElement || 5347 all_of(TE->Scalars, 5348 [](Value *V) { 5349 return isa<ExtractElementInst, UndefValue>(V); 5350 })) && 5351 isFixedVectorShuffle(TE->Scalars, Mask)) || 5352 (TE->State == TreeEntry::NeedToGather && 5353 TE->getOpcode() == Instruction::Load && !TE->isAltShuffle())); 5354 }; 5355 5356 // We only handle trees of heights 1 and 2. 5357 if (VectorizableTree.size() == 1 && 5358 (VectorizableTree[0]->State == TreeEntry::Vectorize || 5359 (ForReduction && 5360 AreVectorizableGathers(VectorizableTree[0].get(), 5361 VectorizableTree[0]->Scalars.size()) && 5362 VectorizableTree[0]->getVectorFactor() > 2))) 5363 return true; 5364 5365 if (VectorizableTree.size() != 2) 5366 return false; 5367 5368 // Handle splat and all-constants stores. Also try to vectorize tiny trees 5369 // with the second gather nodes if they have less scalar operands rather than 5370 // the initial tree element (may be profitable to shuffle the second gather) 5371 // or they are extractelements, which form shuffle. 5372 SmallVector<int> Mask; 5373 if (VectorizableTree[0]->State == TreeEntry::Vectorize && 5374 AreVectorizableGathers(VectorizableTree[1].get(), 5375 VectorizableTree[0]->Scalars.size())) 5376 return true; 5377 5378 // Gathering cost would be too much for tiny trees. 
5379 if (VectorizableTree[0]->State == TreeEntry::NeedToGather || 5380 (VectorizableTree[1]->State == TreeEntry::NeedToGather && 5381 VectorizableTree[0]->State != TreeEntry::ScatterVectorize)) 5382 return false; 5383 5384 return true; 5385 } 5386 5387 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts, 5388 TargetTransformInfo *TTI, 5389 bool MustMatchOrInst) { 5390 // Look past the root to find a source value. Arbitrarily follow the 5391 // path through operand 0 of any 'or'. Also, peek through optional 5392 // shift-left-by-multiple-of-8-bits. 5393 Value *ZextLoad = Root; 5394 const APInt *ShAmtC; 5395 bool FoundOr = false; 5396 while (!isa<ConstantExpr>(ZextLoad) && 5397 (match(ZextLoad, m_Or(m_Value(), m_Value())) || 5398 (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) && 5399 ShAmtC->urem(8) == 0))) { 5400 auto *BinOp = cast<BinaryOperator>(ZextLoad); 5401 ZextLoad = BinOp->getOperand(0); 5402 if (BinOp->getOpcode() == Instruction::Or) 5403 FoundOr = true; 5404 } 5405 // Check if the input is an extended load of the required or/shift expression. 5406 Value *Load; 5407 if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root || 5408 !match(ZextLoad, m_ZExt(m_Value(Load))) || !isa<LoadInst>(Load)) 5409 return false; 5410 5411 // Require that the total load bit width is a legal integer type. 5412 // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target. 5413 // But <16 x i8> --> i128 is not, so the backend probably can't reduce it. 5414 Type *SrcTy = Load->getType(); 5415 unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts; 5416 if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth))) 5417 return false; 5418 5419 // Everything matched - assume that we can fold the whole sequence using 5420 // load combining. 5421 LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at " 5422 << *(cast<Instruction>(Root)) << "\n"); 5423 5424 return true; 5425 } 5426 5427 bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const { 5428 if (RdxKind != RecurKind::Or) 5429 return false; 5430 5431 unsigned NumElts = VectorizableTree[0]->Scalars.size(); 5432 Value *FirstReduced = VectorizableTree[0]->Scalars[0]; 5433 return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI, 5434 /* MatchOr */ false); 5435 } 5436 5437 bool BoUpSLP::isLoadCombineCandidate() const { 5438 // Peek through a final sequence of stores and check if all operations are 5439 // likely to be load-combined. 5440 unsigned NumElts = VectorizableTree[0]->Scalars.size(); 5441 for (Value *Scalar : VectorizableTree[0]->Scalars) { 5442 Value *X; 5443 if (!match(Scalar, m_Store(m_Value(X), m_Value())) || 5444 !isLoadCombineCandidateImpl(X, NumElts, TTI, /* MatchOr */ true)) 5445 return false; 5446 } 5447 return true; 5448 } 5449 5450 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const { 5451 // No need to vectorize inserts of gathered values. 5452 if (VectorizableTree.size() == 2 && 5453 isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) && 5454 VectorizableTree[1]->State == TreeEntry::NeedToGather) 5455 return true; 5456 5457 // We can vectorize the tree if its size is greater than or equal to the 5458 // minimum size specified by the MinTreeSize command line option. 5459 if (VectorizableTree.size() >= MinTreeSize) 5460 return false; 5461 5462 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we 5463 // can vectorize it if we can prove it fully vectorizable. 
5464 if (isFullyVectorizableTinyTree(ForReduction)) 5465 return false; 5466 5467 assert(VectorizableTree.empty() 5468 ? ExternalUses.empty() 5469 : true && "We shouldn't have any external users"); 5470 5471 // Otherwise, we can't vectorize the tree. It is both tiny and not fully 5472 // vectorizable. 5473 return true; 5474 } 5475 5476 InstructionCost BoUpSLP::getSpillCost() const { 5477 // Walk from the bottom of the tree to the top, tracking which values are 5478 // live. When we see a call instruction that is not part of our tree, 5479 // query TTI to see if there is a cost to keeping values live over it 5480 // (for example, if spills and fills are required). 5481 unsigned BundleWidth = VectorizableTree.front()->Scalars.size(); 5482 InstructionCost Cost = 0; 5483 5484 SmallPtrSet<Instruction*, 4> LiveValues; 5485 Instruction *PrevInst = nullptr; 5486 5487 // The entries in VectorizableTree are not necessarily ordered by their 5488 // position in basic blocks. Collect them and order them by dominance so later 5489 // instructions are guaranteed to be visited first. For instructions in 5490 // different basic blocks, we only scan to the beginning of the block, so 5491 // their order does not matter, as long as all instructions in a basic block 5492 // are grouped together. Using dominance ensures a deterministic order. 5493 SmallVector<Instruction *, 16> OrderedScalars; 5494 for (const auto &TEPtr : VectorizableTree) { 5495 Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]); 5496 if (!Inst) 5497 continue; 5498 OrderedScalars.push_back(Inst); 5499 } 5500 llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) { 5501 auto *NodeA = DT->getNode(A->getParent()); 5502 auto *NodeB = DT->getNode(B->getParent()); 5503 assert(NodeA && "Should only process reachable instructions"); 5504 assert(NodeB && "Should only process reachable instructions"); 5505 assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 5506 "Different nodes should have different DFS numbers"); 5507 if (NodeA != NodeB) 5508 return NodeA->getDFSNumIn() < NodeB->getDFSNumIn(); 5509 return B->comesBefore(A); 5510 }); 5511 5512 for (Instruction *Inst : OrderedScalars) { 5513 if (!PrevInst) { 5514 PrevInst = Inst; 5515 continue; 5516 } 5517 5518 // Update LiveValues. 5519 LiveValues.erase(PrevInst); 5520 for (auto &J : PrevInst->operands()) { 5521 if (isa<Instruction>(&*J) && getTreeEntry(&*J)) 5522 LiveValues.insert(cast<Instruction>(&*J)); 5523 } 5524 5525 LLVM_DEBUG({ 5526 dbgs() << "SLP: #LV: " << LiveValues.size(); 5527 for (auto *X : LiveValues) 5528 dbgs() << " " << X->getName(); 5529 dbgs() << ", Looking at "; 5530 Inst->dump(); 5531 }); 5532 5533 // Now find the sequence of instructions between PrevInst and Inst. 5534 unsigned NumCalls = 0; 5535 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(), 5536 PrevInstIt = 5537 PrevInst->getIterator().getReverse(); 5538 while (InstIt != PrevInstIt) { 5539 if (PrevInstIt == PrevInst->getParent()->rend()) { 5540 PrevInstIt = Inst->getParent()->rbegin(); 5541 continue; 5542 } 5543 5544 // Debug information does not impact spill cost. 
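      // E.g. a plain function call sitting between two bundle members may
      // force the values that are live across it to be spilled and refilled,
      // so below we add NumCalls * getCostOfKeepingLiveOverCall() over the
      // live vector types; llvm.dbg.* intrinsics are skipped and add nothing.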
5545 if ((isa<CallInst>(&*PrevInstIt) && 5546 !isa<DbgInfoIntrinsic>(&*PrevInstIt)) && 5547 &*PrevInstIt != PrevInst) 5548 NumCalls++; 5549 5550 ++PrevInstIt; 5551 } 5552 5553 if (NumCalls) { 5554 SmallVector<Type*, 4> V; 5555 for (auto *II : LiveValues) { 5556 auto *ScalarTy = II->getType(); 5557 if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy)) 5558 ScalarTy = VectorTy->getElementType(); 5559 V.push_back(FixedVectorType::get(ScalarTy, BundleWidth)); 5560 } 5561 Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V); 5562 } 5563 5564 PrevInst = Inst; 5565 } 5566 5567 return Cost; 5568 } 5569 5570 /// Check if two insertelement instructions are from the same buildvector. 5571 static bool areTwoInsertFromSameBuildVector(InsertElementInst *VU, 5572 InsertElementInst *V) { 5573 // Instructions must be from the same basic blocks. 5574 if (VU->getParent() != V->getParent()) 5575 return false; 5576 // Checks if 2 insertelements are from the same buildvector. 5577 if (VU->getType() != V->getType()) 5578 return false; 5579 // Multiple used inserts are separate nodes. 5580 if (!VU->hasOneUse() && !V->hasOneUse()) 5581 return false; 5582 auto *IE1 = VU; 5583 auto *IE2 = V; 5584 // Go through the vector operand of insertelement instructions trying to find 5585 // either VU as the original vector for IE2 or V as the original vector for 5586 // IE1. 5587 do { 5588 if (IE2 == VU || IE1 == V) 5589 return true; 5590 if (IE1) { 5591 if (IE1 != VU && !IE1->hasOneUse()) 5592 IE1 = nullptr; 5593 else 5594 IE1 = dyn_cast<InsertElementInst>(IE1->getOperand(0)); 5595 } 5596 if (IE2) { 5597 if (IE2 != V && !IE2->hasOneUse()) 5598 IE2 = nullptr; 5599 else 5600 IE2 = dyn_cast<InsertElementInst>(IE2->getOperand(0)); 5601 } 5602 } while (IE1 || IE2); 5603 return false; 5604 } 5605 5606 InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) { 5607 InstructionCost Cost = 0; 5608 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size " 5609 << VectorizableTree.size() << ".\n"); 5610 5611 unsigned BundleWidth = VectorizableTree[0]->Scalars.size(); 5612 5613 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) { 5614 TreeEntry &TE = *VectorizableTree[I].get(); 5615 5616 InstructionCost C = getEntryCost(&TE, VectorizedVals); 5617 Cost += C; 5618 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 5619 << " for bundle that starts with " << *TE.Scalars[0] 5620 << ".\n" 5621 << "SLP: Current total cost = " << Cost << "\n"); 5622 } 5623 5624 SmallPtrSet<Value *, 16> ExtractCostCalculated; 5625 InstructionCost ExtractCost = 0; 5626 SmallVector<unsigned> VF; 5627 SmallVector<SmallVector<int>> ShuffleMask; 5628 SmallVector<Value *> FirstUsers; 5629 SmallVector<APInt> DemandedElts; 5630 for (ExternalUser &EU : ExternalUses) { 5631 // We only add extract cost once for the same scalar. 5632 if (!isa_and_nonnull<InsertElementInst>(EU.User) && 5633 !ExtractCostCalculated.insert(EU.Scalar).second) 5634 continue; 5635 5636 // Uses by ephemeral values are free (because the ephemeral value will be 5637 // removed prior to code generation, and so the extraction will be 5638 // removed as well). 5639 if (EphValues.count(EU.User)) 5640 continue; 5641 5642 // No extract cost for vector "scalar" 5643 if (isa<FixedVectorType>(EU.Scalar->getType())) 5644 continue; 5645 5646 // Already counted the cost for external uses when tried to adjust the cost 5647 // for extractelements, no need to add it again. 
5648 if (isa<ExtractElementInst>(EU.Scalar)) 5649 continue; 5650 5651 // If found user is an insertelement, do not calculate extract cost but try 5652 // to detect it as a final shuffled/identity match. 5653 if (auto *VU = dyn_cast_or_null<InsertElementInst>(EU.User)) { 5654 if (auto *FTy = dyn_cast<FixedVectorType>(VU->getType())) { 5655 Optional<int> InsertIdx = getInsertIndex(VU, 0); 5656 if (!InsertIdx || *InsertIdx == UndefMaskElem) 5657 continue; 5658 auto *It = find_if(FirstUsers, [VU](Value *V) { 5659 return areTwoInsertFromSameBuildVector(VU, 5660 cast<InsertElementInst>(V)); 5661 }); 5662 int VecId = -1; 5663 if (It == FirstUsers.end()) { 5664 VF.push_back(FTy->getNumElements()); 5665 ShuffleMask.emplace_back(VF.back(), UndefMaskElem); 5666 // Find the insertvector, vectorized in tree, if any. 5667 Value *Base = VU; 5668 while (isa<InsertElementInst>(Base)) { 5669 // Build the mask for the vectorized insertelement instructions. 5670 if (const TreeEntry *E = getTreeEntry(Base)) { 5671 VU = cast<InsertElementInst>(Base); 5672 do { 5673 int Idx = E->findLaneForValue(Base); 5674 ShuffleMask.back()[Idx] = Idx; 5675 Base = cast<InsertElementInst>(Base)->getOperand(0); 5676 } while (E == getTreeEntry(Base)); 5677 break; 5678 } 5679 Base = cast<InsertElementInst>(Base)->getOperand(0); 5680 } 5681 FirstUsers.push_back(VU); 5682 DemandedElts.push_back(APInt::getZero(VF.back())); 5683 VecId = FirstUsers.size() - 1; 5684 } else { 5685 VecId = std::distance(FirstUsers.begin(), It); 5686 } 5687 int Idx = *InsertIdx; 5688 ShuffleMask[VecId][Idx] = EU.Lane; 5689 DemandedElts[VecId].setBit(Idx); 5690 continue; 5691 } 5692 } 5693 5694 // If we plan to rewrite the tree in a smaller type, we will need to sign 5695 // extend the extracted value back to the original type. Here, we account 5696 // for the extract and the added cost of the sign extend if needed. 5697 auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth); 5698 auto *ScalarRoot = VectorizableTree[0]->Scalars[0]; 5699 if (MinBWs.count(ScalarRoot)) { 5700 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 5701 auto Extend = 5702 MinBWs[ScalarRoot].second ? 
Instruction::SExt : Instruction::ZExt; 5703 VecTy = FixedVectorType::get(MinTy, BundleWidth); 5704 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), 5705 VecTy, EU.Lane); 5706 } else { 5707 ExtractCost += 5708 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane); 5709 } 5710 } 5711 5712 InstructionCost SpillCost = getSpillCost(); 5713 Cost += SpillCost + ExtractCost; 5714 if (FirstUsers.size() == 1) { 5715 int Limit = ShuffleMask.front().size() * 2; 5716 if (all_of(ShuffleMask.front(), [Limit](int Idx) { return Idx < Limit; }) && 5717 !ShuffleVectorInst::isIdentityMask(ShuffleMask.front())) { 5718 InstructionCost C = TTI->getShuffleCost( 5719 TTI::SK_PermuteSingleSrc, 5720 cast<FixedVectorType>(FirstUsers.front()->getType()), 5721 ShuffleMask.front()); 5722 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 5723 << " for final shuffle of insertelement external users " 5724 << *VectorizableTree.front()->Scalars.front() << ".\n" 5725 << "SLP: Current total cost = " << Cost << "\n"); 5726 Cost += C; 5727 } 5728 InstructionCost InsertCost = TTI->getScalarizationOverhead( 5729 cast<FixedVectorType>(FirstUsers.front()->getType()), 5730 DemandedElts.front(), /*Insert*/ true, /*Extract*/ false); 5731 LLVM_DEBUG(dbgs() << "SLP: subtracting the cost " << InsertCost 5732 << " for insertelements gather.\n" 5733 << "SLP: Current total cost = " << Cost << "\n"); 5734 Cost -= InsertCost; 5735 } else if (FirstUsers.size() >= 2) { 5736 unsigned MaxVF = *std::max_element(VF.begin(), VF.end()); 5737 // Combined masks of the first 2 vectors. 5738 SmallVector<int> CombinedMask(MaxVF, UndefMaskElem); 5739 copy(ShuffleMask.front(), CombinedMask.begin()); 5740 APInt CombinedDemandedElts = DemandedElts.front().zextOrSelf(MaxVF); 5741 auto *VecTy = FixedVectorType::get( 5742 cast<VectorType>(FirstUsers.front()->getType())->getElementType(), 5743 MaxVF); 5744 for (int I = 0, E = ShuffleMask[1].size(); I < E; ++I) { 5745 if (ShuffleMask[1][I] != UndefMaskElem) { 5746 CombinedMask[I] = ShuffleMask[1][I] + MaxVF; 5747 CombinedDemandedElts.setBit(I); 5748 } 5749 } 5750 InstructionCost C = 5751 TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, VecTy, CombinedMask); 5752 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 5753 << " for final shuffle of vector node and external " 5754 "insertelement users " 5755 << *VectorizableTree.front()->Scalars.front() << ".\n" 5756 << "SLP: Current total cost = " << Cost << "\n"); 5757 Cost += C; 5758 InstructionCost InsertCost = TTI->getScalarizationOverhead( 5759 VecTy, CombinedDemandedElts, /*Insert*/ true, /*Extract*/ false); 5760 LLVM_DEBUG(dbgs() << "SLP: subtracting the cost " << InsertCost 5761 << " for insertelements gather.\n" 5762 << "SLP: Current total cost = " << Cost << "\n"); 5763 Cost -= InsertCost; 5764 for (int I = 2, E = FirstUsers.size(); I < E; ++I) { 5765 // Other elements - permutation of 2 vectors (the initial one and the 5766 // next Ith incoming vector). 
5767 unsigned VF = ShuffleMask[I].size(); 5768 for (unsigned Idx = 0; Idx < VF; ++Idx) { 5769 int Mask = ShuffleMask[I][Idx]; 5770 if (Mask != UndefMaskElem) 5771 CombinedMask[Idx] = MaxVF + Mask; 5772 else if (CombinedMask[Idx] != UndefMaskElem) 5773 CombinedMask[Idx] = Idx; 5774 } 5775 for (unsigned Idx = VF; Idx < MaxVF; ++Idx) 5776 if (CombinedMask[Idx] != UndefMaskElem) 5777 CombinedMask[Idx] = Idx; 5778 InstructionCost C = 5779 TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, VecTy, CombinedMask); 5780 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 5781 << " for final shuffle of vector node and external " 5782 "insertelement users " 5783 << *VectorizableTree.front()->Scalars.front() << ".\n" 5784 << "SLP: Current total cost = " << Cost << "\n"); 5785 Cost += C; 5786 InstructionCost InsertCost = TTI->getScalarizationOverhead( 5787 cast<FixedVectorType>(FirstUsers[I]->getType()), DemandedElts[I], 5788 /*Insert*/ true, /*Extract*/ false); 5789 LLVM_DEBUG(dbgs() << "SLP: subtracting the cost " << InsertCost 5790 << " for insertelements gather.\n" 5791 << "SLP: Current total cost = " << Cost << "\n"); 5792 Cost -= InsertCost; 5793 } 5794 } 5795 5796 #ifndef NDEBUG 5797 SmallString<256> Str; 5798 { 5799 raw_svector_ostream OS(Str); 5800 OS << "SLP: Spill Cost = " << SpillCost << ".\n" 5801 << "SLP: Extract Cost = " << ExtractCost << ".\n" 5802 << "SLP: Total Cost = " << Cost << ".\n"; 5803 } 5804 LLVM_DEBUG(dbgs() << Str); 5805 if (ViewSLPTree) 5806 ViewGraph(this, "SLP" + F->getName(), false, Str); 5807 #endif 5808 5809 return Cost; 5810 } 5811 5812 Optional<TargetTransformInfo::ShuffleKind> 5813 BoUpSLP::isGatherShuffledEntry(const TreeEntry *TE, SmallVectorImpl<int> &Mask, 5814 SmallVectorImpl<const TreeEntry *> &Entries) { 5815 // TODO: currently checking only for Scalars in the tree entry, need to count 5816 // reused elements too for better cost estimation. 5817 Mask.assign(TE->Scalars.size(), UndefMaskElem); 5818 Entries.clear(); 5819 // Build a lists of values to tree entries. 5820 DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>> ValueToTEs; 5821 for (const std::unique_ptr<TreeEntry> &EntryPtr : VectorizableTree) { 5822 if (EntryPtr.get() == TE) 5823 break; 5824 if (EntryPtr->State != TreeEntry::NeedToGather) 5825 continue; 5826 for (Value *V : EntryPtr->Scalars) 5827 ValueToTEs.try_emplace(V).first->getSecond().insert(EntryPtr.get()); 5828 } 5829 // Find all tree entries used by the gathered values. If no common entries 5830 // found - not a shuffle. 5831 // Here we build a set of tree nodes for each gathered value and trying to 5832 // find the intersection between these sets. If we have at least one common 5833 // tree node for each gathered value - we have just a permutation of the 5834 // single vector. If we have 2 different sets, we're in situation where we 5835 // have a permutation of 2 input vectors. 5836 SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs; 5837 DenseMap<Value *, int> UsedValuesEntry; 5838 for (Value *V : TE->Scalars) { 5839 if (isa<UndefValue>(V)) 5840 continue; 5841 // Build a list of tree entries where V is used. 5842 SmallPtrSet<const TreeEntry *, 4> VToTEs; 5843 auto It = ValueToTEs.find(V); 5844 if (It != ValueToTEs.end()) 5845 VToTEs = It->second; 5846 if (const TreeEntry *VTE = getTreeEntry(V)) 5847 VToTEs.insert(VTE); 5848 if (VToTEs.empty()) 5849 return None; 5850 if (UsedTEs.empty()) { 5851 // The first iteration, just insert the list of nodes to vector. 
5852 UsedTEs.push_back(VToTEs); 5853 } else { 5854 // Need to check if there are any previously used tree nodes which use V. 5855 // If there are no such nodes, consider that we have another one input 5856 // vector. 5857 SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs); 5858 unsigned Idx = 0; 5859 for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) { 5860 // Do we have a non-empty intersection of previously listed tree entries 5861 // and tree entries using current V? 5862 set_intersect(VToTEs, Set); 5863 if (!VToTEs.empty()) { 5864 // Yes, write the new subset and continue analysis for the next 5865 // scalar. 5866 Set.swap(VToTEs); 5867 break; 5868 } 5869 VToTEs = SavedVToTEs; 5870 ++Idx; 5871 } 5872 // No non-empty intersection found - need to add a second set of possible 5873 // source vectors. 5874 if (Idx == UsedTEs.size()) { 5875 // If the number of input vectors is greater than 2 - not a permutation, 5876 // fallback to the regular gather. 5877 if (UsedTEs.size() == 2) 5878 return None; 5879 UsedTEs.push_back(SavedVToTEs); 5880 Idx = UsedTEs.size() - 1; 5881 } 5882 UsedValuesEntry.try_emplace(V, Idx); 5883 } 5884 } 5885 5886 unsigned VF = 0; 5887 if (UsedTEs.size() == 1) { 5888 // Try to find the perfect match in another gather node at first. 5889 auto It = find_if(UsedTEs.front(), [TE](const TreeEntry *EntryPtr) { 5890 return EntryPtr->isSame(TE->Scalars); 5891 }); 5892 if (It != UsedTEs.front().end()) { 5893 Entries.push_back(*It); 5894 std::iota(Mask.begin(), Mask.end(), 0); 5895 return TargetTransformInfo::SK_PermuteSingleSrc; 5896 } 5897 // No perfect match, just shuffle, so choose the first tree node. 5898 Entries.push_back(*UsedTEs.front().begin()); 5899 } else { 5900 // Try to find nodes with the same vector factor. 5901 assert(UsedTEs.size() == 2 && "Expected at max 2 permuted entries."); 5902 DenseMap<int, const TreeEntry *> VFToTE; 5903 for (const TreeEntry *TE : UsedTEs.front()) 5904 VFToTE.try_emplace(TE->getVectorFactor(), TE); 5905 for (const TreeEntry *TE : UsedTEs.back()) { 5906 auto It = VFToTE.find(TE->getVectorFactor()); 5907 if (It != VFToTE.end()) { 5908 VF = It->first; 5909 Entries.push_back(It->second); 5910 Entries.push_back(TE); 5911 break; 5912 } 5913 } 5914 // No 2 source vectors with the same vector factor - give up and do regular 5915 // gather. 5916 if (Entries.empty()) 5917 return None; 5918 } 5919 5920 // Build a shuffle mask for better cost estimation and vector emission. 5921 for (int I = 0, E = TE->Scalars.size(); I < E; ++I) { 5922 Value *V = TE->Scalars[I]; 5923 if (isa<UndefValue>(V)) 5924 continue; 5925 unsigned Idx = UsedValuesEntry.lookup(V); 5926 const TreeEntry *VTE = Entries[Idx]; 5927 int FoundLane = VTE->findLaneForValue(V); 5928 Mask[I] = Idx * VF + FoundLane; 5929 // Extra check required by isSingleSourceMaskImpl function (called by 5930 // ShuffleVectorInst::isSingleSourceMask). 
5931 if (Mask[I] >= 2 * E) 5932 return None; 5933 } 5934 switch (Entries.size()) { 5935 case 1: 5936 return TargetTransformInfo::SK_PermuteSingleSrc; 5937 case 2: 5938 return TargetTransformInfo::SK_PermuteTwoSrc; 5939 default: 5940 break; 5941 } 5942 return None; 5943 } 5944 5945 InstructionCost 5946 BoUpSLP::getGatherCost(FixedVectorType *Ty, 5947 const DenseSet<unsigned> &ShuffledIndices, 5948 bool NeedToShuffle) const { 5949 unsigned NumElts = Ty->getNumElements(); 5950 APInt DemandedElts = APInt::getZero(NumElts); 5951 for (unsigned I = 0; I < NumElts; ++I) 5952 if (!ShuffledIndices.count(I)) 5953 DemandedElts.setBit(I); 5954 InstructionCost Cost = 5955 TTI->getScalarizationOverhead(Ty, DemandedElts, /*Insert*/ true, 5956 /*Extract*/ false); 5957 if (NeedToShuffle) 5958 Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty); 5959 return Cost; 5960 } 5961 5962 InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const { 5963 // Find the type of the operands in VL. 5964 Type *ScalarTy = VL[0]->getType(); 5965 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 5966 ScalarTy = SI->getValueOperand()->getType(); 5967 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 5968 bool DuplicateNonConst = false; 5969 // Find the cost of inserting/extracting values from the vector. 5970 // Check if the same elements are inserted several times and count them as 5971 // shuffle candidates. 5972 DenseSet<unsigned> ShuffledElements; 5973 DenseSet<Value *> UniqueElements; 5974 // Iterate in reverse order to consider insert elements with the high cost. 5975 for (unsigned I = VL.size(); I > 0; --I) { 5976 unsigned Idx = I - 1; 5977 // No need to shuffle duplicates for constants. 5978 if (isConstant(VL[Idx])) { 5979 ShuffledElements.insert(Idx); 5980 continue; 5981 } 5982 if (!UniqueElements.insert(VL[Idx]).second) { 5983 DuplicateNonConst = true; 5984 ShuffledElements.insert(Idx); 5985 } 5986 } 5987 return getGatherCost(VecTy, ShuffledElements, DuplicateNonConst); 5988 } 5989 5990 // Perform operand reordering on the instructions in VL and return the reordered 5991 // operands in Left and Right. 5992 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL, 5993 SmallVectorImpl<Value *> &Left, 5994 SmallVectorImpl<Value *> &Right, 5995 const DataLayout &DL, 5996 ScalarEvolution &SE, 5997 const BoUpSLP &R) { 5998 if (VL.empty()) 5999 return; 6000 VLOperands Ops(VL, DL, SE, R); 6001 // Reorder the operands in place. 6002 Ops.reorder(); 6003 Left = Ops.getVL(0); 6004 Right = Ops.getVL(1); 6005 } 6006 6007 void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) { 6008 // Get the basic block this bundle is in. All instructions in the bundle 6009 // should be in this block. 6010 auto *Front = E->getMainOp(); 6011 auto *BB = Front->getParent(); 6012 assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool { 6013 auto *I = cast<Instruction>(V); 6014 return !E->isOpcodeOrAlt(I) || I->getParent() == BB; 6015 })); 6016 6017 // The last instruction in the bundle in program order. 6018 Instruction *LastInst = nullptr; 6019 6020 // Find the last instruction. The common case should be that BB has been 6021 // scheduled, and the last instruction is VL.back(). So we start with 6022 // VL.back() and iterate over schedule data until we reach the end of the 6023 // bundle. The end of the bundle is marked by null ScheduleData. 
6024 if (BlocksSchedules.count(BB)) { 6025 auto *Bundle = 6026 BlocksSchedules[BB]->getScheduleData(E->isOneOf(E->Scalars.back())); 6027 if (Bundle && Bundle->isPartOfBundle()) 6028 for (; Bundle; Bundle = Bundle->NextInBundle) 6029 if (Bundle->OpValue == Bundle->Inst) 6030 LastInst = Bundle->Inst; 6031 } 6032 6033 // LastInst can still be null at this point if there's either not an entry 6034 // for BB in BlocksSchedules or there's no ScheduleData available for 6035 // VL.back(). This can be the case if buildTree_rec aborts for various 6036 // reasons (e.g., the maximum recursion depth is reached, the maximum region 6037 // size is reached, etc.). ScheduleData is initialized in the scheduling 6038 // "dry-run". 6039 // 6040 // If this happens, we can still find the last instruction by brute force. We 6041 // iterate forwards from Front (inclusive) until we either see all 6042 // instructions in the bundle or reach the end of the block. If Front is the 6043 // last instruction in program order, LastInst will be set to Front, and we 6044 // will visit all the remaining instructions in the block. 6045 // 6046 // One of the reasons we exit early from buildTree_rec is to place an upper 6047 // bound on compile-time. Thus, taking an additional compile-time hit here is 6048 // not ideal. However, this should be exceedingly rare since it requires that 6049 // we both exit early from buildTree_rec and that the bundle be out-of-order 6050 // (causing us to iterate all the way to the end of the block). 6051 if (!LastInst) { 6052 SmallPtrSet<Value *, 16> Bundle(E->Scalars.begin(), E->Scalars.end()); 6053 for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) { 6054 if (Bundle.erase(&I) && E->isOpcodeOrAlt(&I)) 6055 LastInst = &I; 6056 if (Bundle.empty()) 6057 break; 6058 } 6059 } 6060 assert(LastInst && "Failed to find last instruction in bundle"); 6061 6062 // Set the insertion point after the last instruction in the bundle. Set the 6063 // debug location to Front. 6064 Builder.SetInsertPoint(BB, ++LastInst->getIterator()); 6065 Builder.SetCurrentDebugLocation(Front->getDebugLoc()); 6066 } 6067 6068 Value *BoUpSLP::gather(ArrayRef<Value *> VL) { 6069 // List of instructions/lanes from current block and/or the blocks which are 6070 // part of the current loop. These instructions will be inserted at the end to 6071 // make it possible to optimize loops and hoist invariant instructions out of 6072 // the loops body with better chances for success. 
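  // E.g. when gathering {%inv0, %inv1, %loop_val} (illustrative names), the
  // loop-dependent %loop_val is inserted last, so the leading insertelements
  // of the chain stay loop-invariant and have a better chance of being
  // hoisted out of the loop.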
6073 SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts; 6074 SmallSet<int, 4> PostponedIndices; 6075 Loop *L = LI->getLoopFor(Builder.GetInsertBlock()); 6076 auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) { 6077 SmallPtrSet<BasicBlock *, 4> Visited; 6078 while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second) 6079 InsertBB = InsertBB->getSinglePredecessor(); 6080 return InsertBB && InsertBB == InstBB; 6081 }; 6082 for (int I = 0, E = VL.size(); I < E; ++I) { 6083 if (auto *Inst = dyn_cast<Instruction>(VL[I])) 6084 if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) || 6085 getTreeEntry(Inst) || (L && (L->contains(Inst)))) && 6086 PostponedIndices.insert(I).second) 6087 PostponedInsts.emplace_back(Inst, I); 6088 } 6089 6090 auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) { 6091 Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos)); 6092 auto *InsElt = dyn_cast<InsertElementInst>(Vec); 6093 if (!InsElt) 6094 return Vec; 6095 GatherShuffleSeq.insert(InsElt); 6096 CSEBlocks.insert(InsElt->getParent()); 6097 // Add to our 'need-to-extract' list. 6098 if (TreeEntry *Entry = getTreeEntry(V)) { 6099 // Find which lane we need to extract. 6100 unsigned FoundLane = Entry->findLaneForValue(V); 6101 ExternalUses.emplace_back(V, InsElt, FoundLane); 6102 } 6103 return Vec; 6104 }; 6105 Value *Val0 = 6106 isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0]; 6107 FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size()); 6108 Value *Vec = PoisonValue::get(VecTy); 6109 SmallVector<int> NonConsts; 6110 // Insert constant values at first. 6111 for (int I = 0, E = VL.size(); I < E; ++I) { 6112 if (PostponedIndices.contains(I)) 6113 continue; 6114 if (!isConstant(VL[I])) { 6115 NonConsts.push_back(I); 6116 continue; 6117 } 6118 Vec = CreateInsertElement(Vec, VL[I], I); 6119 } 6120 // Insert non-constant values. 6121 for (int I : NonConsts) 6122 Vec = CreateInsertElement(Vec, VL[I], I); 6123 // Append instructions, which are/may be part of the loop, in the end to make 6124 // it possible to hoist non-loop-based instructions. 6125 for (const std::pair<Value *, unsigned> &Pair : PostponedInsts) 6126 Vec = CreateInsertElement(Vec, Pair.first, Pair.second); 6127 6128 return Vec; 6129 } 6130 6131 namespace { 6132 /// Merges shuffle masks and emits final shuffle instruction, if required. 6133 class ShuffleInstructionBuilder { 6134 IRBuilderBase &Builder; 6135 const unsigned VF = 0; 6136 bool IsFinalized = false; 6137 SmallVector<int, 4> Mask; 6138 /// Holds all of the instructions that we gathered. 6139 SetVector<Instruction *> &GatherShuffleSeq; 6140 /// A list of blocks that we are going to CSE. 6141 SetVector<BasicBlock *> &CSEBlocks; 6142 6143 public: 6144 ShuffleInstructionBuilder(IRBuilderBase &Builder, unsigned VF, 6145 SetVector<Instruction *> &GatherShuffleSeq, 6146 SetVector<BasicBlock *> &CSEBlocks) 6147 : Builder(Builder), VF(VF), GatherShuffleSeq(GatherShuffleSeq), 6148 CSEBlocks(CSEBlocks) {} 6149 6150 /// Adds a mask, inverting it before applying. 6151 void addInversedMask(ArrayRef<unsigned> SubMask) { 6152 if (SubMask.empty()) 6153 return; 6154 SmallVector<int, 4> NewMask; 6155 inversePermutation(SubMask, NewMask); 6156 addMask(NewMask); 6157 } 6158 6159 /// Functions adds masks, merging them into single one. 
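  /// For example (illustrative values; the composition itself is done by the
  /// file-level ::addMask helper): merging an accumulated mask <1, 0, 3, 2>
  /// with a new sub-mask <2, 3, 0, 1> yields <3, 2, 1, 0>, i.e. the sub-mask
  /// indexes into the previously accumulated mask.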
6160 void addMask(ArrayRef<unsigned> SubMask) { 6161 SmallVector<int, 4> NewMask(SubMask.begin(), SubMask.end()); 6162 addMask(NewMask); 6163 } 6164 6165 void addMask(ArrayRef<int> SubMask) { ::addMask(Mask, SubMask); } 6166 6167 Value *finalize(Value *V) { 6168 IsFinalized = true; 6169 unsigned ValueVF = cast<FixedVectorType>(V->getType())->getNumElements(); 6170 if (VF == ValueVF && Mask.empty()) 6171 return V; 6172 SmallVector<int, 4> NormalizedMask(VF, UndefMaskElem); 6173 std::iota(NormalizedMask.begin(), NormalizedMask.end(), 0); 6174 addMask(NormalizedMask); 6175 6176 if (VF == ValueVF && ShuffleVectorInst::isIdentityMask(Mask)) 6177 return V; 6178 Value *Vec = Builder.CreateShuffleVector(V, Mask, "shuffle"); 6179 if (auto *I = dyn_cast<Instruction>(Vec)) { 6180 GatherShuffleSeq.insert(I); 6181 CSEBlocks.insert(I->getParent()); 6182 } 6183 return Vec; 6184 } 6185 6186 ~ShuffleInstructionBuilder() { 6187 assert((IsFinalized || Mask.empty()) && 6188 "Shuffle construction must be finalized."); 6189 } 6190 }; 6191 } // namespace 6192 6193 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) { 6194 unsigned VF = VL.size(); 6195 InstructionsState S = getSameOpcode(VL); 6196 if (S.getOpcode()) { 6197 if (TreeEntry *E = getTreeEntry(S.OpValue)) 6198 if (E->isSame(VL)) { 6199 Value *V = vectorizeTree(E); 6200 if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) { 6201 if (!E->ReuseShuffleIndices.empty()) { 6202 // Reshuffle to get only unique values. 6203 // If some of the scalars are duplicated in the vectorization tree 6204 // entry, we do not vectorize them but instead generate a mask for 6205 // the reuses. But if there are several users of the same entry, 6206 // they may have different vectorization factors. This is especially 6207 // important for PHI nodes. In this case, we need to adapt the 6208 // resulting instruction for the user vectorization factor and have 6209 // to reshuffle it again to take only unique elements of the vector. 6210 // Without this code the function incorrectly returns reduced vector 6211 // instruction with the same elements, not with the unique ones. 6212 6213 // block: 6214 // %phi = phi <2 x > { .., %entry} {%shuffle, %block} 6215 // %2 = shuffle <2 x > %phi, poison, <4 x > <1, 1, 0, 0> 6216 // ... (use %2) 6217 // %shuffle = shuffle <2 x> %2, poison, <2 x> {2, 0} 6218 // br %block 6219 SmallVector<int> UniqueIdxs(VF, UndefMaskElem); 6220 SmallSet<int, 4> UsedIdxs; 6221 int Pos = 0; 6222 int Sz = VL.size(); 6223 for (int Idx : E->ReuseShuffleIndices) { 6224 if (Idx != Sz && Idx != UndefMaskElem && 6225 UsedIdxs.insert(Idx).second) 6226 UniqueIdxs[Idx] = Pos; 6227 ++Pos; 6228 } 6229 assert(VF >= UsedIdxs.size() && "Expected vectorization factor " 6230 "less than original vector size."); 6231 UniqueIdxs.append(VF - UsedIdxs.size(), UndefMaskElem); 6232 V = Builder.CreateShuffleVector(V, UniqueIdxs, "shrink.shuffle"); 6233 } else { 6234 assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() && 6235 "Expected vectorization factor less " 6236 "than original vector size."); 6237 SmallVector<int> UniformMask(VF, 0); 6238 std::iota(UniformMask.begin(), UniformMask.end(), 0); 6239 V = Builder.CreateShuffleVector(V, UniformMask, "shrink.shuffle"); 6240 } 6241 if (auto *I = dyn_cast<Instruction>(V)) { 6242 GatherShuffleSeq.insert(I); 6243 CSEBlocks.insert(I->getParent()); 6244 } 6245 } 6246 return V; 6247 } 6248 } 6249 6250 // Check that every instruction appears once in this bundle. 
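  // Worked example (hypothetical values): for VL = {%a, %b, %a, undef} the
  // loop below yields UniqueValues = {%a, %b} (later padded with poison up to
  // the chosen VF) and ReuseShuffleIndicies = {0, 1, 0, undef}, so the gather
  // of the unique values is re-expanded with a single shuffle.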
6251 SmallVector<int> ReuseShuffleIndicies; 6252 SmallVector<Value *> UniqueValues; 6253 if (VL.size() > 2) { 6254 DenseMap<Value *, unsigned> UniquePositions; 6255 unsigned NumValues = 6256 std::distance(VL.begin(), find_if(reverse(VL), [](Value *V) { 6257 return !isa<UndefValue>(V); 6258 }).base()); 6259 VF = std::max<unsigned>(VF, PowerOf2Ceil(NumValues)); 6260 int UniqueVals = 0; 6261 for (Value *V : VL.drop_back(VL.size() - VF)) { 6262 if (isa<UndefValue>(V)) { 6263 ReuseShuffleIndicies.emplace_back(UndefMaskElem); 6264 continue; 6265 } 6266 if (isConstant(V)) { 6267 ReuseShuffleIndicies.emplace_back(UniqueValues.size()); 6268 UniqueValues.emplace_back(V); 6269 continue; 6270 } 6271 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 6272 ReuseShuffleIndicies.emplace_back(Res.first->second); 6273 if (Res.second) { 6274 UniqueValues.emplace_back(V); 6275 ++UniqueVals; 6276 } 6277 } 6278 if (UniqueVals == 1 && UniqueValues.size() == 1) { 6279 // Emit pure splat vector. 6280 ReuseShuffleIndicies.append(VF - ReuseShuffleIndicies.size(), 6281 UndefMaskElem); 6282 } else if (UniqueValues.size() >= VF - 1 || UniqueValues.size() <= 1) { 6283 ReuseShuffleIndicies.clear(); 6284 UniqueValues.clear(); 6285 UniqueValues.append(VL.begin(), std::next(VL.begin(), NumValues)); 6286 } 6287 UniqueValues.append(VF - UniqueValues.size(), 6288 PoisonValue::get(VL[0]->getType())); 6289 VL = UniqueValues; 6290 } 6291 6292 ShuffleInstructionBuilder ShuffleBuilder(Builder, VF, GatherShuffleSeq, 6293 CSEBlocks); 6294 Value *Vec = gather(VL); 6295 if (!ReuseShuffleIndicies.empty()) { 6296 ShuffleBuilder.addMask(ReuseShuffleIndicies); 6297 Vec = ShuffleBuilder.finalize(Vec); 6298 } 6299 return Vec; 6300 } 6301 6302 Value *BoUpSLP::vectorizeTree(TreeEntry *E) { 6303 IRBuilder<>::InsertPointGuard Guard(Builder); 6304 6305 if (E->VectorizedValue) { 6306 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); 6307 return E->VectorizedValue; 6308 } 6309 6310 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty(); 6311 unsigned VF = E->getVectorFactor(); 6312 ShuffleInstructionBuilder ShuffleBuilder(Builder, VF, GatherShuffleSeq, 6313 CSEBlocks); 6314 if (E->State == TreeEntry::NeedToGather) { 6315 if (E->getMainOp()) 6316 setInsertPointAfterBundle(E); 6317 Value *Vec; 6318 SmallVector<int> Mask; 6319 SmallVector<const TreeEntry *> Entries; 6320 Optional<TargetTransformInfo::ShuffleKind> Shuffle = 6321 isGatherShuffledEntry(E, Mask, Entries); 6322 if (Shuffle.hasValue()) { 6323 assert((Entries.size() == 1 || Entries.size() == 2) && 6324 "Expected shuffle of 1 or 2 entries."); 6325 Vec = Builder.CreateShuffleVector(Entries.front()->VectorizedValue, 6326 Entries.back()->VectorizedValue, Mask); 6327 if (auto *I = dyn_cast<Instruction>(Vec)) { 6328 GatherShuffleSeq.insert(I); 6329 CSEBlocks.insert(I->getParent()); 6330 } 6331 } else { 6332 Vec = gather(E->Scalars); 6333 } 6334 if (NeedToShuffleReuses) { 6335 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6336 Vec = ShuffleBuilder.finalize(Vec); 6337 } 6338 E->VectorizedValue = Vec; 6339 return Vec; 6340 } 6341 6342 assert((E->State == TreeEntry::Vectorize || 6343 E->State == TreeEntry::ScatterVectorize) && 6344 "Unhandled state"); 6345 unsigned ShuffleOrOp = 6346 E->isAltShuffle() ? 
(unsigned)Instruction::ShuffleVector : E->getOpcode(); 6347 Instruction *VL0 = E->getMainOp(); 6348 Type *ScalarTy = VL0->getType(); 6349 if (auto *Store = dyn_cast<StoreInst>(VL0)) 6350 ScalarTy = Store->getValueOperand()->getType(); 6351 else if (auto *IE = dyn_cast<InsertElementInst>(VL0)) 6352 ScalarTy = IE->getOperand(1)->getType(); 6353 auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size()); 6354 switch (ShuffleOrOp) { 6355 case Instruction::PHI: { 6356 assert( 6357 (E->ReorderIndices.empty() || E != VectorizableTree.front().get()) && 6358 "PHI reordering is free."); 6359 auto *PH = cast<PHINode>(VL0); 6360 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI()); 6361 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 6362 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 6363 Value *V = NewPhi; 6364 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6365 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6366 V = ShuffleBuilder.finalize(V); 6367 6368 E->VectorizedValue = V; 6369 6370 // PHINodes may have multiple entries from the same block. We want to 6371 // visit every block once. 6372 SmallPtrSet<BasicBlock*, 4> VisitedBBs; 6373 6374 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) { 6375 ValueList Operands; 6376 BasicBlock *IBB = PH->getIncomingBlock(i); 6377 6378 if (!VisitedBBs.insert(IBB).second) { 6379 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 6380 continue; 6381 } 6382 6383 Builder.SetInsertPoint(IBB->getTerminator()); 6384 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 6385 Value *Vec = vectorizeTree(E->getOperand(i)); 6386 NewPhi->addIncoming(Vec, IBB); 6387 } 6388 6389 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 6390 "Invalid number of incoming values"); 6391 return V; 6392 } 6393 6394 case Instruction::ExtractElement: { 6395 Value *V = E->getSingleOperand(0); 6396 Builder.SetInsertPoint(VL0); 6397 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6398 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6399 V = ShuffleBuilder.finalize(V); 6400 E->VectorizedValue = V; 6401 return V; 6402 } 6403 case Instruction::ExtractValue: { 6404 auto *LI = cast<LoadInst>(E->getSingleOperand(0)); 6405 Builder.SetInsertPoint(LI); 6406 auto *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace()); 6407 Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy); 6408 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign()); 6409 Value *NewV = propagateMetadata(V, E->Scalars); 6410 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6411 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6412 NewV = ShuffleBuilder.finalize(NewV); 6413 E->VectorizedValue = NewV; 6414 return NewV; 6415 } 6416 case Instruction::InsertElement: { 6417 assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique"); 6418 Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back())); 6419 Value *V = vectorizeTree(E->getOperand(1)); 6420 6421 // Create InsertVector shuffle if necessary 6422 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 6423 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0)); 6424 })); 6425 const unsigned NumElts = 6426 cast<FixedVectorType>(FirstInsert->getType())->getNumElements(); 6427 const unsigned NumScalars = E->Scalars.size(); 6428 6429 unsigned Offset = *getInsertIndex(VL0, 0); 6430 assert(Offset < NumElts && "Failed to find vector index offset"); 6431 6432 // Create shuffle to resize vector 6433 SmallVector<int> Mask; 
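      // For instance (hypothetical sizes): with NumScalars == 2 scalars being
      // inserted into a NumElts == 4 destination and no reordering, the mask
      // below starts out as <0, 1, undef, undef>; it is then swapped into
      // PrevMask and rebuilt from the scalars' actual insert indices.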
6434 if (!E->ReorderIndices.empty()) { 6435 inversePermutation(E->ReorderIndices, Mask); 6436 Mask.append(NumElts - NumScalars, UndefMaskElem); 6437 } else { 6438 Mask.assign(NumElts, UndefMaskElem); 6439 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0); 6440 } 6441 // Create InsertVector shuffle if necessary 6442 bool IsIdentity = true; 6443 SmallVector<int> PrevMask(NumElts, UndefMaskElem); 6444 Mask.swap(PrevMask); 6445 for (unsigned I = 0; I < NumScalars; ++I) { 6446 Value *Scalar = E->Scalars[PrevMask[I]]; 6447 Optional<int> InsertIdx = getInsertIndex(Scalar, 0); 6448 if (!InsertIdx || *InsertIdx == UndefMaskElem) 6449 continue; 6450 IsIdentity &= *InsertIdx - Offset == I; 6451 Mask[*InsertIdx - Offset] = I; 6452 } 6453 if (!IsIdentity || NumElts != NumScalars) { 6454 V = Builder.CreateShuffleVector(V, Mask); 6455 if (auto *I = dyn_cast<Instruction>(V)) { 6456 GatherShuffleSeq.insert(I); 6457 CSEBlocks.insert(I->getParent()); 6458 } 6459 } 6460 6461 if ((!IsIdentity || Offset != 0 || 6462 !isUndefVector(FirstInsert->getOperand(0))) && 6463 NumElts != NumScalars) { 6464 SmallVector<int> InsertMask(NumElts); 6465 std::iota(InsertMask.begin(), InsertMask.end(), 0); 6466 for (unsigned I = 0; I < NumElts; I++) { 6467 if (Mask[I] != UndefMaskElem) 6468 InsertMask[Offset + I] = NumElts + I; 6469 } 6470 6471 V = Builder.CreateShuffleVector( 6472 FirstInsert->getOperand(0), V, InsertMask, 6473 cast<Instruction>(E->Scalars.back())->getName()); 6474 if (auto *I = dyn_cast<Instruction>(V)) { 6475 GatherShuffleSeq.insert(I); 6476 CSEBlocks.insert(I->getParent()); 6477 } 6478 } 6479 6480 ++NumVectorInstructions; 6481 E->VectorizedValue = V; 6482 return V; 6483 } 6484 case Instruction::ZExt: 6485 case Instruction::SExt: 6486 case Instruction::FPToUI: 6487 case Instruction::FPToSI: 6488 case Instruction::FPExt: 6489 case Instruction::PtrToInt: 6490 case Instruction::IntToPtr: 6491 case Instruction::SIToFP: 6492 case Instruction::UIToFP: 6493 case Instruction::Trunc: 6494 case Instruction::FPTrunc: 6495 case Instruction::BitCast: { 6496 setInsertPointAfterBundle(E); 6497 6498 Value *InVec = vectorizeTree(E->getOperand(0)); 6499 6500 if (E->VectorizedValue) { 6501 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6502 return E->VectorizedValue; 6503 } 6504 6505 auto *CI = cast<CastInst>(VL0); 6506 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy); 6507 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6508 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6509 V = ShuffleBuilder.finalize(V); 6510 6511 E->VectorizedValue = V; 6512 ++NumVectorInstructions; 6513 return V; 6514 } 6515 case Instruction::FCmp: 6516 case Instruction::ICmp: { 6517 setInsertPointAfterBundle(E); 6518 6519 Value *L = vectorizeTree(E->getOperand(0)); 6520 Value *R = vectorizeTree(E->getOperand(1)); 6521 6522 if (E->VectorizedValue) { 6523 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6524 return E->VectorizedValue; 6525 } 6526 6527 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 6528 Value *V = Builder.CreateCmp(P0, L, R); 6529 propagateIRFlags(V, E->Scalars, VL0); 6530 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6531 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6532 V = ShuffleBuilder.finalize(V); 6533 6534 E->VectorizedValue = V; 6535 ++NumVectorInstructions; 6536 return V; 6537 } 6538 case Instruction::Select: { 6539 setInsertPointAfterBundle(E); 6540 6541 Value *Cond = vectorizeTree(E->getOperand(0)); 6542 Value *True = 
vectorizeTree(E->getOperand(1)); 6543 Value *False = vectorizeTree(E->getOperand(2)); 6544 6545 if (E->VectorizedValue) { 6546 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6547 return E->VectorizedValue; 6548 } 6549 6550 Value *V = Builder.CreateSelect(Cond, True, False); 6551 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6552 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6553 V = ShuffleBuilder.finalize(V); 6554 6555 E->VectorizedValue = V; 6556 ++NumVectorInstructions; 6557 return V; 6558 } 6559 case Instruction::FNeg: { 6560 setInsertPointAfterBundle(E); 6561 6562 Value *Op = vectorizeTree(E->getOperand(0)); 6563 6564 if (E->VectorizedValue) { 6565 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6566 return E->VectorizedValue; 6567 } 6568 6569 Value *V = Builder.CreateUnOp( 6570 static_cast<Instruction::UnaryOps>(E->getOpcode()), Op); 6571 propagateIRFlags(V, E->Scalars, VL0); 6572 if (auto *I = dyn_cast<Instruction>(V)) 6573 V = propagateMetadata(I, E->Scalars); 6574 6575 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6576 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6577 V = ShuffleBuilder.finalize(V); 6578 6579 E->VectorizedValue = V; 6580 ++NumVectorInstructions; 6581 6582 return V; 6583 } 6584 case Instruction::Add: 6585 case Instruction::FAdd: 6586 case Instruction::Sub: 6587 case Instruction::FSub: 6588 case Instruction::Mul: 6589 case Instruction::FMul: 6590 case Instruction::UDiv: 6591 case Instruction::SDiv: 6592 case Instruction::FDiv: 6593 case Instruction::URem: 6594 case Instruction::SRem: 6595 case Instruction::FRem: 6596 case Instruction::Shl: 6597 case Instruction::LShr: 6598 case Instruction::AShr: 6599 case Instruction::And: 6600 case Instruction::Or: 6601 case Instruction::Xor: { 6602 setInsertPointAfterBundle(E); 6603 6604 Value *LHS = vectorizeTree(E->getOperand(0)); 6605 Value *RHS = vectorizeTree(E->getOperand(1)); 6606 6607 if (E->VectorizedValue) { 6608 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6609 return E->VectorizedValue; 6610 } 6611 6612 Value *V = Builder.CreateBinOp( 6613 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, 6614 RHS); 6615 propagateIRFlags(V, E->Scalars, VL0); 6616 if (auto *I = dyn_cast<Instruction>(V)) 6617 V = propagateMetadata(I, E->Scalars); 6618 6619 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6620 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6621 V = ShuffleBuilder.finalize(V); 6622 6623 E->VectorizedValue = V; 6624 ++NumVectorInstructions; 6625 6626 return V; 6627 } 6628 case Instruction::Load: { 6629 // Loads are inserted at the head of the tree because we don't want to 6630 // sink them all the way down past store instructions. 6631 setInsertPointAfterBundle(E); 6632 6633 LoadInst *LI = cast<LoadInst>(VL0); 6634 Instruction *NewLI; 6635 unsigned AS = LI->getPointerAddressSpace(); 6636 Value *PO = LI->getPointerOperand(); 6637 if (E->State == TreeEntry::Vectorize) { 6638 6639 Value *VecPtr = Builder.CreateBitCast(PO, VecTy->getPointerTo(AS)); 6640 6641 // The pointer operand uses an in-tree scalar so we add the new BitCast 6642 // to ExternalUses list to make sure that an extract will be generated 6643 // in the future. 6644 if (TreeEntry *Entry = getTreeEntry(PO)) { 6645 // Find which lane we need to extract. 
6646 unsigned FoundLane = Entry->findLaneForValue(PO); 6647 ExternalUses.emplace_back(PO, cast<User>(VecPtr), FoundLane); 6648 } 6649 6650 NewLI = Builder.CreateAlignedLoad(VecTy, VecPtr, LI->getAlign()); 6651 } else { 6652 assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state"); 6653 Value *VecPtr = vectorizeTree(E->getOperand(0)); 6654 // Use the minimum alignment of the gathered loads. 6655 Align CommonAlignment = LI->getAlign(); 6656 for (Value *V : E->Scalars) 6657 CommonAlignment = 6658 commonAlignment(CommonAlignment, cast<LoadInst>(V)->getAlign()); 6659 NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment); 6660 } 6661 Value *V = propagateMetadata(NewLI, E->Scalars); 6662 6663 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6664 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6665 V = ShuffleBuilder.finalize(V); 6666 E->VectorizedValue = V; 6667 ++NumVectorInstructions; 6668 return V; 6669 } 6670 case Instruction::Store: { 6671 auto *SI = cast<StoreInst>(VL0); 6672 unsigned AS = SI->getPointerAddressSpace(); 6673 6674 setInsertPointAfterBundle(E); 6675 6676 Value *VecValue = vectorizeTree(E->getOperand(0)); 6677 ShuffleBuilder.addMask(E->ReorderIndices); 6678 VecValue = ShuffleBuilder.finalize(VecValue); 6679 6680 Value *ScalarPtr = SI->getPointerOperand(); 6681 Value *VecPtr = Builder.CreateBitCast( 6682 ScalarPtr, VecValue->getType()->getPointerTo(AS)); 6683 StoreInst *ST = Builder.CreateAlignedStore(VecValue, VecPtr, 6684 SI->getAlign()); 6685 6686 // The pointer operand uses an in-tree scalar, so add the new BitCast to 6687 // ExternalUses to make sure that an extract will be generated in the 6688 // future. 6689 if (TreeEntry *Entry = getTreeEntry(ScalarPtr)) { 6690 // Find which lane we need to extract. 6691 unsigned FoundLane = Entry->findLaneForValue(ScalarPtr); 6692 ExternalUses.push_back( 6693 ExternalUser(ScalarPtr, cast<User>(VecPtr), FoundLane)); 6694 } 6695 6696 Value *V = propagateMetadata(ST, E->Scalars); 6697 6698 E->VectorizedValue = V; 6699 ++NumVectorInstructions; 6700 return V; 6701 } 6702 case Instruction::GetElementPtr: { 6703 auto *GEP0 = cast<GetElementPtrInst>(VL0); 6704 setInsertPointAfterBundle(E); 6705 6706 Value *Op0 = vectorizeTree(E->getOperand(0)); 6707 6708 SmallVector<Value *> OpVecs; 6709 for (int J = 1, N = GEP0->getNumOperands(); J < N; ++J) { 6710 Value *OpVec = vectorizeTree(E->getOperand(J)); 6711 OpVecs.push_back(OpVec); 6712 } 6713 6714 Value *V = Builder.CreateGEP(GEP0->getSourceElementType(), Op0, OpVecs); 6715 if (Instruction *I = dyn_cast<Instruction>(V)) 6716 V = propagateMetadata(I, E->Scalars); 6717 6718 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6719 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6720 V = ShuffleBuilder.finalize(V); 6721 6722 E->VectorizedValue = V; 6723 ++NumVectorInstructions; 6724 6725 return V; 6726 } 6727 case Instruction::Call: { 6728 CallInst *CI = cast<CallInst>(VL0); 6729 setInsertPointAfterBundle(E); 6730 6731 Intrinsic::ID IID = Intrinsic::not_intrinsic; 6732 if (Function *FI = CI->getCalledFunction()) 6733 IID = FI->getIntrinsicID(); 6734 6735 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6736 6737 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 6738 bool UseIntrinsic = ID != Intrinsic::not_intrinsic && 6739 VecCallCosts.first <= VecCallCosts.second; 6740 6741 Value *ScalarArg = nullptr; 6742 std::vector<Value *> OpVecs; 6743 SmallVector<Type *, 2> TysForDecl = 6744 {FixedVectorType::get(CI->getType(), E->Scalars.size())}; 6745 for (int j = 
0, e = CI->arg_size(); j < e; ++j) { 6746 ValueList OpVL; 6747 // Some intrinsics have scalar arguments. This argument should not be 6748 // vectorized. 6749 if (UseIntrinsic && hasVectorInstrinsicScalarOpd(IID, j)) { 6750 CallInst *CEI = cast<CallInst>(VL0); 6751 ScalarArg = CEI->getArgOperand(j); 6752 OpVecs.push_back(CEI->getArgOperand(j)); 6753 if (hasVectorInstrinsicOverloadedScalarOpd(IID, j)) 6754 TysForDecl.push_back(ScalarArg->getType()); 6755 continue; 6756 } 6757 6758 Value *OpVec = vectorizeTree(E->getOperand(j)); 6759 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n"); 6760 OpVecs.push_back(OpVec); 6761 } 6762 6763 Function *CF; 6764 if (!UseIntrinsic) { 6765 VFShape Shape = 6766 VFShape::get(*CI, ElementCount::getFixed(static_cast<unsigned>( 6767 VecTy->getNumElements())), 6768 false /*HasGlobalPred*/); 6769 CF = VFDatabase(*CI).getVectorizedFunction(Shape); 6770 } else { 6771 CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl); 6772 } 6773 6774 SmallVector<OperandBundleDef, 1> OpBundles; 6775 CI->getOperandBundlesAsDefs(OpBundles); 6776 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); 6777 6778 // The scalar argument uses an in-tree scalar so we add the new vectorized 6779 // call to ExternalUses list to make sure that an extract will be 6780 // generated in the future. 6781 if (ScalarArg) { 6782 if (TreeEntry *Entry = getTreeEntry(ScalarArg)) { 6783 // Find which lane we need to extract. 6784 unsigned FoundLane = Entry->findLaneForValue(ScalarArg); 6785 ExternalUses.push_back( 6786 ExternalUser(ScalarArg, cast<User>(V), FoundLane)); 6787 } 6788 } 6789 6790 propagateIRFlags(V, E->Scalars, VL0); 6791 ShuffleBuilder.addInversedMask(E->ReorderIndices); 6792 ShuffleBuilder.addMask(E->ReuseShuffleIndices); 6793 V = ShuffleBuilder.finalize(V); 6794 6795 E->VectorizedValue = V; 6796 ++NumVectorInstructions; 6797 return V; 6798 } 6799 case Instruction::ShuffleVector: { 6800 assert(E->isAltShuffle() && 6801 ((Instruction::isBinaryOp(E->getOpcode()) && 6802 Instruction::isBinaryOp(E->getAltOpcode())) || 6803 (Instruction::isCast(E->getOpcode()) && 6804 Instruction::isCast(E->getAltOpcode()))) && 6805 "Invalid Shuffle Vector Operand"); 6806 6807 Value *LHS = nullptr, *RHS = nullptr; 6808 if (Instruction::isBinaryOp(E->getOpcode())) { 6809 setInsertPointAfterBundle(E); 6810 LHS = vectorizeTree(E->getOperand(0)); 6811 RHS = vectorizeTree(E->getOperand(1)); 6812 } else { 6813 setInsertPointAfterBundle(E); 6814 LHS = vectorizeTree(E->getOperand(0)); 6815 } 6816 6817 if (E->VectorizedValue) { 6818 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 6819 return E->VectorizedValue; 6820 } 6821 6822 Value *V0, *V1; 6823 if (Instruction::isBinaryOp(E->getOpcode())) { 6824 V0 = Builder.CreateBinOp( 6825 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS); 6826 V1 = Builder.CreateBinOp( 6827 static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS); 6828 } else { 6829 V0 = Builder.CreateCast( 6830 static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy); 6831 V1 = Builder.CreateCast( 6832 static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy); 6833 } 6834 // Add V0 and V1 to later analysis to try to find and remove matching 6835 // instruction, if any. 6836 for (Value *V : {V0, V1}) { 6837 if (auto *I = dyn_cast<Instruction>(V)) { 6838 GatherShuffleSeq.insert(I); 6839 CSEBlocks.insert(I->getParent()); 6840 } 6841 } 6842 6843 // Create shuffle to take alternate operations from the vector. 
6844 // Also, gather up main and alt scalar ops to propagate IR flags to 6845 // each vector operation. 6846 ValueList OpScalars, AltScalars; 6847 SmallVector<int> Mask; 6848 buildSuffleEntryMask( 6849 E->Scalars, E->ReorderIndices, E->ReuseShuffleIndices, 6850 [E](Instruction *I) { 6851 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 6852 return I->getOpcode() == E->getAltOpcode(); 6853 }, 6854 Mask, &OpScalars, &AltScalars); 6855 6856 propagateIRFlags(V0, OpScalars); 6857 propagateIRFlags(V1, AltScalars); 6858 6859 Value *V = Builder.CreateShuffleVector(V0, V1, Mask); 6860 if (auto *I = dyn_cast<Instruction>(V)) { 6861 V = propagateMetadata(I, E->Scalars); 6862 GatherShuffleSeq.insert(I); 6863 CSEBlocks.insert(I->getParent()); 6864 } 6865 V = ShuffleBuilder.finalize(V); 6866 6867 E->VectorizedValue = V; 6868 ++NumVectorInstructions; 6869 6870 return V; 6871 } 6872 default: 6873 llvm_unreachable("unknown inst"); 6874 } 6875 return nullptr; 6876 } 6877 6878 Value *BoUpSLP::vectorizeTree() { 6879 ExtraValueToDebugLocsMap ExternallyUsedValues; 6880 return vectorizeTree(ExternallyUsedValues); 6881 } 6882 6883 Value * 6884 BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) { 6885 // All blocks must be scheduled before any instructions are inserted. 6886 for (auto &BSIter : BlocksSchedules) { 6887 scheduleBlock(BSIter.second.get()); 6888 } 6889 6890 Builder.SetInsertPoint(&F->getEntryBlock().front()); 6891 auto *VectorRoot = vectorizeTree(VectorizableTree[0].get()); 6892 6893 // If the vectorized tree can be rewritten in a smaller type, we truncate the 6894 // vectorized root. InstCombine will then rewrite the entire expression. We 6895 // sign extend the extracted values below. 6896 auto *ScalarRoot = VectorizableTree[0]->Scalars[0]; 6897 if (MinBWs.count(ScalarRoot)) { 6898 if (auto *I = dyn_cast<Instruction>(VectorRoot)) { 6899 // If current instr is a phi and not the last phi, insert it after the 6900 // last phi node. 6901 if (isa<PHINode>(I)) 6902 Builder.SetInsertPoint(&*I->getParent()->getFirstInsertionPt()); 6903 else 6904 Builder.SetInsertPoint(&*++BasicBlock::iterator(I)); 6905 } 6906 auto BundleWidth = VectorizableTree[0]->Scalars.size(); 6907 auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first); 6908 auto *VecTy = FixedVectorType::get(MinTy, BundleWidth); 6909 auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy); 6910 VectorizableTree[0]->VectorizedValue = Trunc; 6911 } 6912 6913 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() 6914 << " values .\n"); 6915 6916 // Extract all of the elements with the external uses. 6917 for (const auto &ExternalUse : ExternalUses) { 6918 Value *Scalar = ExternalUse.Scalar; 6919 llvm::User *User = ExternalUse.User; 6920 6921 // Skip users that we already RAUW. This happens when one instruction 6922 // has multiple uses of the same value. 6923 if (User && !is_contained(Scalar->users(), User)) 6924 continue; 6925 TreeEntry *E = getTreeEntry(Scalar); 6926 assert(E && "Invalid scalar"); 6927 assert(E->State != TreeEntry::NeedToGather && 6928 "Extracting from a gather list"); 6929 6930 Value *Vec = E->VectorizedValue; 6931 assert(Vec && "Can't find vectorizable value"); 6932 6933 Value *Lane = Builder.getInt32(ExternalUse.Lane); 6934 auto ExtractAndExtendIfNeeded = [&](Value *Vec) { 6935 if (Scalar->getType() != Vec->getType()) { 6936 Value *Ex; 6937 // "Reuse" the existing extract to improve final codegen. 
6938 if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) { 6939 Ex = Builder.CreateExtractElement(ES->getOperand(0), 6940 ES->getOperand(1)); 6941 } else { 6942 Ex = Builder.CreateExtractElement(Vec, Lane); 6943 } 6944 // If necessary, sign-extend or zero-extend ScalarRoot 6945 // to the larger type. 6946 if (!MinBWs.count(ScalarRoot)) 6947 return Ex; 6948 if (MinBWs[ScalarRoot].second) 6949 return Builder.CreateSExt(Ex, Scalar->getType()); 6950 return Builder.CreateZExt(Ex, Scalar->getType()); 6951 } 6952 assert(isa<FixedVectorType>(Scalar->getType()) && 6953 isa<InsertElementInst>(Scalar) && 6954 "In-tree scalar of vector type is not insertelement?"); 6955 return Vec; 6956 }; 6957 // If User == nullptr, the Scalar is used as extra arg. Generate 6958 // ExtractElement instruction and update the record for this scalar in 6959 // ExternallyUsedValues. 6960 if (!User) { 6961 assert(ExternallyUsedValues.count(Scalar) && 6962 "Scalar with nullptr as an external user must be registered in " 6963 "ExternallyUsedValues map"); 6964 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 6965 Builder.SetInsertPoint(VecI->getParent(), 6966 std::next(VecI->getIterator())); 6967 } else { 6968 Builder.SetInsertPoint(&F->getEntryBlock().front()); 6969 } 6970 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 6971 CSEBlocks.insert(cast<Instruction>(Scalar)->getParent()); 6972 auto &NewInstLocs = ExternallyUsedValues[NewInst]; 6973 auto It = ExternallyUsedValues.find(Scalar); 6974 assert(It != ExternallyUsedValues.end() && 6975 "Externally used scalar is not found in ExternallyUsedValues"); 6976 NewInstLocs.append(It->second); 6977 ExternallyUsedValues.erase(Scalar); 6978 // Required to update internally referenced instructions. 6979 Scalar->replaceAllUsesWith(NewInst); 6980 continue; 6981 } 6982 6983 // Generate extracts for out-of-tree users. 6984 // Find the insertion point for the extractelement lane. 6985 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 6986 if (PHINode *PH = dyn_cast<PHINode>(User)) { 6987 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) { 6988 if (PH->getIncomingValue(i) == Scalar) { 6989 Instruction *IncomingTerminator = 6990 PH->getIncomingBlock(i)->getTerminator(); 6991 if (isa<CatchSwitchInst>(IncomingTerminator)) { 6992 Builder.SetInsertPoint(VecI->getParent(), 6993 std::next(VecI->getIterator())); 6994 } else { 6995 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator()); 6996 } 6997 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 6998 CSEBlocks.insert(PH->getIncomingBlock(i)); 6999 PH->setOperand(i, NewInst); 7000 } 7001 } 7002 } else { 7003 Builder.SetInsertPoint(cast<Instruction>(User)); 7004 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 7005 CSEBlocks.insert(cast<Instruction>(User)->getParent()); 7006 User->replaceUsesOfWith(Scalar, NewInst); 7007 } 7008 } else { 7009 Builder.SetInsertPoint(&F->getEntryBlock().front()); 7010 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 7011 CSEBlocks.insert(&F->getEntryBlock()); 7012 User->replaceUsesOfWith(Scalar, NewInst); 7013 } 7014 7015 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 7016 } 7017 7018 // For each vectorized value: 7019 for (auto &TEPtr : VectorizableTree) { 7020 TreeEntry *Entry = TEPtr.get(); 7021 7022 // No need to handle users of gathered values. 
7023 if (Entry->State == TreeEntry::NeedToGather) 7024 continue; 7025 7026 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 7027 7028 // For each lane: 7029 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 7030 Value *Scalar = Entry->Scalars[Lane]; 7031 7032 #ifndef NDEBUG 7033 Type *Ty = Scalar->getType(); 7034 if (!Ty->isVoidTy()) { 7035 for (User *U : Scalar->users()) { 7036 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 7037 7038 // It is legal to delete users in the ignorelist. 7039 assert((getTreeEntry(U) || is_contained(UserIgnoreList, U) || 7040 (isa_and_nonnull<Instruction>(U) && 7041 isDeleted(cast<Instruction>(U)))) && 7042 "Deleting out-of-tree value"); 7043 } 7044 } 7045 #endif 7046 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 7047 eraseInstruction(cast<Instruction>(Scalar)); 7048 } 7049 } 7050 7051 Builder.ClearInsertionPoint(); 7052 InstrElementSize.clear(); 7053 7054 return VectorizableTree[0]->VectorizedValue; 7055 } 7056 7057 void BoUpSLP::optimizeGatherSequence() { 7058 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherShuffleSeq.size() 7059 << " gather sequences instructions.\n"); 7060 // LICM InsertElementInst sequences. 7061 for (Instruction *I : GatherShuffleSeq) { 7062 if (isDeleted(I)) 7063 continue; 7064 7065 // Check if this block is inside a loop. 7066 Loop *L = LI->getLoopFor(I->getParent()); 7067 if (!L) 7068 continue; 7069 7070 // Check if it has a preheader. 7071 BasicBlock *PreHeader = L->getLoopPreheader(); 7072 if (!PreHeader) 7073 continue; 7074 7075 // If the vector or the element that we insert into it are 7076 // instructions that are defined in this basic block then we can't 7077 // hoist this instruction. 7078 if (any_of(I->operands(), [L](Value *V) { 7079 auto *OpI = dyn_cast<Instruction>(V); 7080 return OpI && L->contains(OpI); 7081 })) 7082 continue; 7083 7084 // We can hoist this instruction. Move it to the pre-header. 7085 I->moveBefore(PreHeader->getTerminator()); 7086 } 7087 7088 // Make a list of all reachable blocks in our CSE queue. 7089 SmallVector<const DomTreeNode *, 8> CSEWorkList; 7090 CSEWorkList.reserve(CSEBlocks.size()); 7091 for (BasicBlock *BB : CSEBlocks) 7092 if (DomTreeNode *N = DT->getNode(BB)) { 7093 assert(DT->isReachableFromEntry(N)); 7094 CSEWorkList.push_back(N); 7095 } 7096 7097 // Sort blocks by domination. This ensures we visit a block after all blocks 7098 // dominating it are visited. 7099 llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) { 7100 assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) && 7101 "Different nodes should have different DFS numbers"); 7102 return A->getDFSNumIn() < B->getDFSNumIn(); 7103 }); 7104 7105 // Less defined shuffles can be replaced by the more defined copies. 7106 // Between two shuffles one is less defined if it has the same vector operands 7107 // and its mask indeces are the same as in the first one or undefs. E.g. 7108 // shuffle %0, poison, <0, 0, 0, undef> is less defined than shuffle %0, 7109 // poison, <0, 0, 0, 0>. 
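  // A worked example of the merge performed below (illustrative masks): for
  // shuffles of the same operands with masks <0, 0, undef, 0> and
  // <0, 0, 0, undef>, no defined elements conflict, so NewMask becomes
  // <0, 0, 0, 0> and one shuffle can replace the other, still subject to the
  // trailing-undef/register-count check at the end of the lambda.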
7110 auto &&IsIdenticalOrLessDefined = [this](Instruction *I1, Instruction *I2, 7111 SmallVectorImpl<int> &NewMask) { 7112 if (I1->getType() != I2->getType()) 7113 return false; 7114 auto *SI1 = dyn_cast<ShuffleVectorInst>(I1); 7115 auto *SI2 = dyn_cast<ShuffleVectorInst>(I2); 7116 if (!SI1 || !SI2) 7117 return I1->isIdenticalTo(I2); 7118 if (SI1->isIdenticalTo(SI2)) 7119 return true; 7120 for (int I = 0, E = SI1->getNumOperands(); I < E; ++I) 7121 if (SI1->getOperand(I) != SI2->getOperand(I)) 7122 return false; 7123 // Check if the second instruction is more defined than the first one. 7124 NewMask.assign(SI2->getShuffleMask().begin(), SI2->getShuffleMask().end()); 7125 ArrayRef<int> SM1 = SI1->getShuffleMask(); 7126 // Count trailing undefs in the mask to check the final number of used 7127 // registers. 7128 unsigned LastUndefsCnt = 0; 7129 for (int I = 0, E = NewMask.size(); I < E; ++I) { 7130 if (SM1[I] == UndefMaskElem) 7131 ++LastUndefsCnt; 7132 else 7133 LastUndefsCnt = 0; 7134 if (NewMask[I] != UndefMaskElem && SM1[I] != UndefMaskElem && 7135 NewMask[I] != SM1[I]) 7136 return false; 7137 if (NewMask[I] == UndefMaskElem) 7138 NewMask[I] = SM1[I]; 7139 } 7140 // Check if the last undefs actually change the final number of used vector 7141 // registers. 7142 return SM1.size() - LastUndefsCnt > 1 && 7143 TTI->getNumberOfParts(SI1->getType()) == 7144 TTI->getNumberOfParts( 7145 FixedVectorType::get(SI1->getType()->getElementType(), 7146 SM1.size() - LastUndefsCnt)); 7147 }; 7148 // Perform O(N^2) search over the gather/shuffle sequences and merge identical 7149 // instructions. TODO: We can further optimize this scan if we split the 7150 // instructions into different buckets based on the insert lane. 7151 SmallVector<Instruction *, 16> Visited; 7152 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 7153 assert(*I && 7154 (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 7155 "Worklist not sorted properly!"); 7156 BasicBlock *BB = (*I)->getBlock(); 7157 // For all instructions in blocks containing gather sequences: 7158 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 7159 if (isDeleted(&In)) 7160 continue; 7161 if (!isa<InsertElementInst>(&In) && !isa<ExtractElementInst>(&In) && 7162 !isa<ShuffleVectorInst>(&In) && !GatherShuffleSeq.contains(&In)) 7163 continue; 7164 7165 // Check if we can replace this instruction with any of the 7166 // visited instructions. 
7167 bool Replaced = false; 7168 for (Instruction *&V : Visited) { 7169 SmallVector<int> NewMask; 7170 if (IsIdenticalOrLessDefined(&In, V, NewMask) && 7171 DT->dominates(V->getParent(), In.getParent())) { 7172 In.replaceAllUsesWith(V); 7173 eraseInstruction(&In); 7174 if (auto *SI = dyn_cast<ShuffleVectorInst>(V)) 7175 if (!NewMask.empty()) 7176 SI->setShuffleMask(NewMask); 7177 Replaced = true; 7178 break; 7179 } 7180 if (isa<ShuffleVectorInst>(In) && isa<ShuffleVectorInst>(V) && 7181 GatherShuffleSeq.contains(V) && 7182 IsIdenticalOrLessDefined(V, &In, NewMask) && 7183 DT->dominates(In.getParent(), V->getParent())) { 7184 In.moveAfter(V); 7185 V->replaceAllUsesWith(&In); 7186 eraseInstruction(V); 7187 if (auto *SI = dyn_cast<ShuffleVectorInst>(&In)) 7188 if (!NewMask.empty()) 7189 SI->setShuffleMask(NewMask); 7190 V = &In; 7191 Replaced = true; 7192 break; 7193 } 7194 } 7195 if (!Replaced) { 7196 assert(!is_contained(Visited, &In)); 7197 Visited.push_back(&In); 7198 } 7199 } 7200 } 7201 CSEBlocks.clear(); 7202 GatherShuffleSeq.clear(); 7203 } 7204 7205 // Groups the instructions to a bundle (which is then a single scheduling entity) 7206 // and schedules instructions until the bundle gets ready. 7207 Optional<BoUpSLP::ScheduleData *> 7208 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 7209 const InstructionsState &S) { 7210 // No need to schedule PHIs, insertelement, extractelement and extractvalue 7211 // instructions. 7212 if (isa<PHINode>(S.OpValue) || isVectorLikeInstWithConstOps(S.OpValue)) 7213 return nullptr; 7214 7215 // Initialize the instruction bundle. 7216 Instruction *OldScheduleEnd = ScheduleEnd; 7217 ScheduleData *PrevInBundle = nullptr; 7218 ScheduleData *Bundle = nullptr; 7219 bool ReSchedule = false; 7220 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n"); 7221 7222 auto &&TryScheduleBundle = [this, OldScheduleEnd, SLP](bool ReSchedule, 7223 ScheduleData *Bundle) { 7224 // The scheduling region got new instructions at the lower end (or it is a 7225 // new region for the first bundle). This makes it necessary to 7226 // recalculate all dependencies. 7227 // It is seldom that this needs to be done a second time after adding the 7228 // initial bundle to the region. 7229 if (ScheduleEnd != OldScheduleEnd) { 7230 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) 7231 doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); }); 7232 ReSchedule = true; 7233 } 7234 if (ReSchedule) { 7235 resetSchedule(); 7236 initialFillReadyList(ReadyInsts); 7237 } 7238 if (Bundle) { 7239 LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle 7240 << " in block " << BB->getName() << "\n"); 7241 calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP); 7242 } 7243 7244 // Now try to schedule the new bundle or (if no bundle) just calculate 7245 // dependencies. As soon as the bundle is "ready" it means that there are no 7246 // cyclic dependencies and we can schedule it. Note that's important that we 7247 // don't "schedule" the bundle yet (see cancelScheduling). 7248 while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) && 7249 !ReadyInsts.empty()) { 7250 ScheduleData *Picked = ReadyInsts.pop_back_val(); 7251 if (Picked->isSchedulingEntity() && Picked->isReady()) 7252 schedule(Picked, ReadyInsts); 7253 } 7254 }; 7255 7256 // Make sure that the scheduling region contains all 7257 // instructions of the bundle. 
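  // For example (hypothetical instructions): if the current region covers
  // [%i4, %i9) and the bundle also contains %i2, extendSchedulingRegion grows
  // the region upwards so it starts at %i2; if doing so would exceed the
  // region size limit, the call fails and the whole bundle is given up below
  // (None is returned).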
7258 for (Value *V : VL) { 7259 if (!extendSchedulingRegion(V, S)) { 7260 // If the scheduling region got new instructions at the lower end (or it 7261 // is a new region for the first bundle). This makes it necessary to 7262 // recalculate all dependencies. 7263 // Otherwise the compiler may crash trying to incorrectly calculate 7264 // dependencies and emit instruction in the wrong order at the actual 7265 // scheduling. 7266 TryScheduleBundle(/*ReSchedule=*/false, nullptr); 7267 return None; 7268 } 7269 } 7270 7271 for (Value *V : VL) { 7272 ScheduleData *BundleMember = getScheduleData(V); 7273 assert(BundleMember && 7274 "no ScheduleData for bundle member (maybe not in same basic block)"); 7275 if (BundleMember->IsScheduled) { 7276 // A bundle member was scheduled as single instruction before and now 7277 // needs to be scheduled as part of the bundle. We just get rid of the 7278 // existing schedule. 7279 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember 7280 << " was already scheduled\n"); 7281 ReSchedule = true; 7282 } 7283 assert(BundleMember->isSchedulingEntity() && 7284 "bundle member already part of other bundle"); 7285 if (PrevInBundle) { 7286 PrevInBundle->NextInBundle = BundleMember; 7287 } else { 7288 Bundle = BundleMember; 7289 } 7290 BundleMember->UnscheduledDepsInBundle = 0; 7291 Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps; 7292 7293 // Group the instructions to a bundle. 7294 BundleMember->FirstInBundle = Bundle; 7295 PrevInBundle = BundleMember; 7296 } 7297 assert(Bundle && "Failed to find schedule bundle"); 7298 TryScheduleBundle(ReSchedule, Bundle); 7299 if (!Bundle->isReady()) { 7300 cancelScheduling(VL, S.OpValue); 7301 return None; 7302 } 7303 return Bundle; 7304 } 7305 7306 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL, 7307 Value *OpValue) { 7308 if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue)) 7309 return; 7310 7311 ScheduleData *Bundle = getScheduleData(OpValue); 7312 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); 7313 assert(!Bundle->IsScheduled && 7314 "Can't cancel bundle which is already scheduled"); 7315 assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() && 7316 "tried to unbundle something which is not a bundle"); 7317 7318 // Un-bundle: make single instructions out of the bundle. 7319 ScheduleData *BundleMember = Bundle; 7320 while (BundleMember) { 7321 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); 7322 BundleMember->FirstInBundle = BundleMember; 7323 ScheduleData *Next = BundleMember->NextInBundle; 7324 BundleMember->NextInBundle = nullptr; 7325 BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps; 7326 if (BundleMember->UnscheduledDepsInBundle == 0) { 7327 ReadyInsts.insert(BundleMember); 7328 } 7329 BundleMember = Next; 7330 } 7331 } 7332 7333 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() { 7334 // Allocate a new ScheduleData for the instruction. 
7335 if (ChunkPos >= ChunkSize) { 7336 ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize)); 7337 ChunkPos = 0; 7338 } 7339 return &(ScheduleDataChunks.back()[ChunkPos++]); 7340 } 7341 7342 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, 7343 const InstructionsState &S) { 7344 if (getScheduleData(V, isOneOf(S, V))) 7345 return true; 7346 Instruction *I = dyn_cast<Instruction>(V); 7347 assert(I && "bundle member must be an instruction"); 7348 assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) && 7349 "phi nodes/insertelements/extractelements/extractvalues don't need to " 7350 "be scheduled"); 7351 auto &&CheckSheduleForI = [this, &S](Instruction *I) -> bool { 7352 ScheduleData *ISD = getScheduleData(I); 7353 if (!ISD) 7354 return false; 7355 assert(isInSchedulingRegion(ISD) && 7356 "ScheduleData not in scheduling region"); 7357 ScheduleData *SD = allocateScheduleDataChunks(); 7358 SD->Inst = I; 7359 SD->init(SchedulingRegionID, S.OpValue); 7360 ExtraScheduleDataMap[I][S.OpValue] = SD; 7361 return true; 7362 }; 7363 if (CheckSheduleForI(I)) 7364 return true; 7365 if (!ScheduleStart) { 7366 // It's the first instruction in the new region. 7367 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 7368 ScheduleStart = I; 7369 ScheduleEnd = I->getNextNode(); 7370 if (isOneOf(S, I) != I) 7371 CheckSheduleForI(I); 7372 assert(ScheduleEnd && "tried to vectorize a terminator?"); 7373 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 7374 return true; 7375 } 7376 // Search up and down at the same time, because we don't know if the new 7377 // instruction is above or below the existing scheduling region. 7378 BasicBlock::reverse_iterator UpIter = 7379 ++ScheduleStart->getIterator().getReverse(); 7380 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 7381 BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); 7382 BasicBlock::iterator LowerEnd = BB->end(); 7383 while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I && 7384 &*DownIter != I) { 7385 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 7386 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 7387 return false; 7388 } 7389 7390 ++UpIter; 7391 ++DownIter; 7392 } 7393 if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) { 7394 assert(I->getParent() == ScheduleStart->getParent() && 7395 "Instruction is in wrong basic block."); 7396 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 7397 ScheduleStart = I; 7398 if (isOneOf(S, I) != I) 7399 CheckSheduleForI(I); 7400 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I 7401 << "\n"); 7402 return true; 7403 } 7404 assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) && 7405 "Expected to reach top of the basic block or instruction down the " 7406 "lower end."); 7407 assert(I->getParent() == ScheduleEnd->getParent() && 7408 "Instruction is in wrong basic block."); 7409 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 7410 nullptr); 7411 ScheduleEnd = I->getNextNode(); 7412 if (isOneOf(S, I) != I) 7413 CheckSheduleForI(I); 7414 assert(ScheduleEnd && "tried to vectorize a terminator?"); 7415 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); 7416 return true; 7417 } 7418 7419 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 7420 Instruction *ToI, 7421 ScheduleData *PrevLoadStore, 7422 ScheduleData *NextLoadStore) { 7423 ScheduleData *CurrentLoadStore = 
PrevLoadStore; 7424 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 7425 ScheduleData *SD = ScheduleDataMap[I]; 7426 if (!SD) { 7427 SD = allocateScheduleDataChunks(); 7428 ScheduleDataMap[I] = SD; 7429 SD->Inst = I; 7430 } 7431 assert(!isInSchedulingRegion(SD) && 7432 "new ScheduleData already in scheduling region"); 7433 SD->init(SchedulingRegionID, I); 7434 7435 if (I->mayReadOrWriteMemory() && 7436 (!isa<IntrinsicInst>(I) || 7437 (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect && 7438 cast<IntrinsicInst>(I)->getIntrinsicID() != 7439 Intrinsic::pseudoprobe))) { 7440 // Update the linked list of memory accessing instructions. 7441 if (CurrentLoadStore) { 7442 CurrentLoadStore->NextLoadStore = SD; 7443 } else { 7444 FirstLoadStoreInRegion = SD; 7445 } 7446 CurrentLoadStore = SD; 7447 } 7448 } 7449 if (NextLoadStore) { 7450 if (CurrentLoadStore) 7451 CurrentLoadStore->NextLoadStore = NextLoadStore; 7452 } else { 7453 LastLoadStoreInRegion = CurrentLoadStore; 7454 } 7455 } 7456 7457 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, 7458 bool InsertInReadyList, 7459 BoUpSLP *SLP) { 7460 assert(SD->isSchedulingEntity()); 7461 7462 SmallVector<ScheduleData *, 10> WorkList; 7463 WorkList.push_back(SD); 7464 7465 while (!WorkList.empty()) { 7466 ScheduleData *SD = WorkList.pop_back_val(); 7467 7468 ScheduleData *BundleMember = SD; 7469 while (BundleMember) { 7470 assert(isInSchedulingRegion(BundleMember)); 7471 if (!BundleMember->hasValidDependencies()) { 7472 7473 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember 7474 << "\n"); 7475 BundleMember->Dependencies = 0; 7476 BundleMember->resetUnscheduledDeps(); 7477 7478 // Handle def-use chain dependencies. 7479 if (BundleMember->OpValue != BundleMember->Inst) { 7480 ScheduleData *UseSD = getScheduleData(BundleMember->Inst); 7481 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 7482 BundleMember->Dependencies++; 7483 ScheduleData *DestBundle = UseSD->FirstInBundle; 7484 if (!DestBundle->IsScheduled) 7485 BundleMember->incrementUnscheduledDeps(1); 7486 if (!DestBundle->hasValidDependencies()) 7487 WorkList.push_back(DestBundle); 7488 } 7489 } else { 7490 for (User *U : BundleMember->Inst->users()) { 7491 if (isa<Instruction>(U)) { 7492 ScheduleData *UseSD = getScheduleData(U); 7493 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) { 7494 BundleMember->Dependencies++; 7495 ScheduleData *DestBundle = UseSD->FirstInBundle; 7496 if (!DestBundle->IsScheduled) 7497 BundleMember->incrementUnscheduledDeps(1); 7498 if (!DestBundle->hasValidDependencies()) 7499 WorkList.push_back(DestBundle); 7500 } 7501 } else { 7502 // I'm not sure if this can ever happen. But we need to be safe. 7503 // This lets the instruction/bundle never be scheduled and 7504 // eventually disable vectorization. 7505 BundleMember->Dependencies++; 7506 BundleMember->incrementUnscheduledDeps(1); 7507 } 7508 } 7509 } 7510 7511 // Handle the memory dependencies. 
7512 ScheduleData *DepDest = BundleMember->NextLoadStore; 7513 if (DepDest) { 7514 Instruction *SrcInst = BundleMember->Inst; 7515 MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA); 7516 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory(); 7517 unsigned numAliased = 0; 7518 unsigned DistToSrc = 1; 7519 7520 while (DepDest) { 7521 assert(isInSchedulingRegion(DepDest)); 7522 7523 // We have two limits to reduce the complexity: 7524 // 1) AliasedCheckLimit: It's a small limit to reduce calls to 7525 // SLP->isAliased (which is the expensive part in this loop). 7526 // 2) MaxMemDepDistance: It's for very large blocks and it aborts 7527 // the whole loop (even if the loop is fast, it's quadratic). 7528 // It's important for the loop break condition (see below) to 7529 // check this limit even between two read-only instructions. 7530 if (DistToSrc >= MaxMemDepDistance || 7531 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) && 7532 (numAliased >= AliasedCheckLimit || 7533 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) { 7534 7535 // We increment the counter only if the locations are aliased 7536 // (instead of counting all alias checks). This gives a better 7537 // balance between reduced runtime and accurate dependencies. 7538 numAliased++; 7539 7540 DepDest->MemoryDependencies.push_back(BundleMember); 7541 BundleMember->Dependencies++; 7542 ScheduleData *DestBundle = DepDest->FirstInBundle; 7543 if (!DestBundle->IsScheduled) { 7544 BundleMember->incrementUnscheduledDeps(1); 7545 } 7546 if (!DestBundle->hasValidDependencies()) { 7547 WorkList.push_back(DestBundle); 7548 } 7549 } 7550 DepDest = DepDest->NextLoadStore; 7551 7552 // Example, explaining the loop break condition: Let's assume our 7553 // starting instruction is i0 and MaxMemDepDistance = 3. 7554 // 7555 // +--------v--v--v 7556 // i0,i1,i2,i3,i4,i5,i6,i7,i8 7557 // +--------^--^--^ 7558 // 7559 // MaxMemDepDistance let us stop alias-checking at i3 and we add 7560 // dependencies from i0 to i3,i4,.. (even if they are not aliased). 7561 // Previously we already added dependencies from i3 to i6,i7,i8 7562 // (because of MaxMemDepDistance). As we added a dependency from 7563 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 7564 // and we can abort this loop at i6. 7565 if (DistToSrc >= 2 * MaxMemDepDistance) 7566 break; 7567 DistToSrc++; 7568 } 7569 } 7570 } 7571 BundleMember = BundleMember->NextInBundle; 7572 } 7573 if (InsertInReadyList && SD->isReady()) { 7574 ReadyInsts.push_back(SD); 7575 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst 7576 << "\n"); 7577 } 7578 } 7579 } 7580 7581 void BoUpSLP::BlockScheduling::resetSchedule() { 7582 assert(ScheduleStart && 7583 "tried to reset schedule on block which has not been scheduled"); 7584 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 7585 doForAllOpcodes(I, [&](ScheduleData *SD) { 7586 assert(isInSchedulingRegion(SD) && 7587 "ScheduleData not in scheduling region"); 7588 SD->IsScheduled = false; 7589 SD->resetUnscheduledDeps(); 7590 }); 7591 } 7592 ReadyInsts.clear(); 7593 } 7594 7595 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 7596 if (!BS->ScheduleStart) 7597 return; 7598 7599 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 7600 7601 BS->resetSchedule(); 7602 7603 // For the real scheduling we use a more sophisticated ready-list: it is 7604 // sorted by the original instruction location. 
This lets the final schedule 7605 // be as close as possible to the original instruction order. 7606 struct ScheduleDataCompare { 7607 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const { 7608 return SD2->SchedulingPriority < SD1->SchedulingPriority; 7609 } 7610 }; 7611 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 7612 7613 // Ensure that all dependency data is updated and fill the ready-list with 7614 // initial instructions. 7615 int Idx = 0; 7616 int NumToSchedule = 0; 7617 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 7618 I = I->getNextNode()) { 7619 BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) { 7620 assert((isVectorLikeInstWithConstOps(SD->Inst) || 7621 SD->isPartOfBundle() == (getTreeEntry(SD->Inst) != nullptr)) && 7622 "scheduler and vectorizer bundle mismatch"); 7623 SD->FirstInBundle->SchedulingPriority = Idx++; 7624 if (SD->isSchedulingEntity()) { 7625 BS->calculateDependencies(SD, false, this); 7626 NumToSchedule++; 7627 } 7628 }); 7629 } 7630 BS->initialFillReadyList(ReadyInsts); 7631 7632 Instruction *LastScheduledInst = BS->ScheduleEnd; 7633 7634 // Do the "real" scheduling. 7635 while (!ReadyInsts.empty()) { 7636 ScheduleData *picked = *ReadyInsts.begin(); 7637 ReadyInsts.erase(ReadyInsts.begin()); 7638 7639 // Move the scheduled instruction(s) to their dedicated places, if not 7640 // there yet. 7641 ScheduleData *BundleMember = picked; 7642 while (BundleMember) { 7643 Instruction *pickedInst = BundleMember->Inst; 7644 if (pickedInst->getNextNode() != LastScheduledInst) { 7645 BS->BB->getInstList().remove(pickedInst); 7646 BS->BB->getInstList().insert(LastScheduledInst->getIterator(), 7647 pickedInst); 7648 } 7649 LastScheduledInst = pickedInst; 7650 BundleMember = BundleMember->NextInBundle; 7651 } 7652 7653 BS->schedule(picked, ReadyInsts); 7654 NumToSchedule--; 7655 } 7656 assert(NumToSchedule == 0 && "could not schedule all instructions"); 7657 7658 // Avoid duplicate scheduling of the block. 7659 BS->ScheduleStart = nullptr; 7660 } 7661 7662 unsigned BoUpSLP::getVectorElementSize(Value *V) { 7663 // If V is a store, just return the width of the stored value (or value 7664 // truncated just before storing) without traversing the expression tree. 7665 // This is the common case. 7666 if (auto *Store = dyn_cast<StoreInst>(V)) { 7667 if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand())) 7668 return DL->getTypeSizeInBits(Trunc->getSrcTy()); 7669 return DL->getTypeSizeInBits(Store->getValueOperand()->getType()); 7670 } 7671 7672 if (auto *IEI = dyn_cast<InsertElementInst>(V)) 7673 return getVectorElementSize(IEI->getOperand(1)); 7674 7675 auto E = InstrElementSize.find(V); 7676 if (E != InstrElementSize.end()) 7677 return E->second; 7678 7679 // If V is not a store, we can traverse the expression tree to find loads 7680 // that feed it. The type of the loaded value may indicate a more suitable 7681 // width than V's type. We want to base the vector element size on the width 7682 // of memory operations where possible. 7683 SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist; 7684 SmallPtrSet<Instruction *, 16> Visited; 7685 if (auto *I = dyn_cast<Instruction>(V)) { 7686 Worklist.emplace_back(I, I->getParent()); 7687 Visited.insert(I); 7688 } 7689 7690 // Traverse the expression tree in bottom-up order looking for loads. If we 7691 // encounter an instruction we don't yet handle, we give up. 
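  // Illustrative example: for the chain %l = load i8; %z = zext i8 %l to i32;
  // %a = add i32 %z, 1, the walk below reaches the i8 load and reports a
  // width of 8 bits rather than the 32 bits of %a's own type.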
7692 auto Width = 0u; 7693 while (!Worklist.empty()) { 7694 Instruction *I; 7695 BasicBlock *Parent; 7696 std::tie(I, Parent) = Worklist.pop_back_val(); 7697 7698 // We should only be looking at scalar instructions here. If the current 7699 // instruction has a vector type, skip. 7700 auto *Ty = I->getType(); 7701 if (isa<VectorType>(Ty)) 7702 continue; 7703 7704 // If the current instruction is a load, update MaxWidth to reflect the 7705 // width of the loaded value. 7706 if (isa<LoadInst>(I) || isa<ExtractElementInst>(I) || 7707 isa<ExtractValueInst>(I)) 7708 Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty)); 7709 7710 // Otherwise, we need to visit the operands of the instruction. We only 7711 // handle the interesting cases from buildTree here. If an operand is an 7712 // instruction we haven't yet visited and from the same basic block as the 7713 // user or the use is a PHI node, we add it to the worklist. 7714 else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) || 7715 isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I) || 7716 isa<UnaryOperator>(I)) { 7717 for (Use &U : I->operands()) 7718 if (auto *J = dyn_cast<Instruction>(U.get())) 7719 if (Visited.insert(J).second && 7720 (isa<PHINode>(I) || J->getParent() == Parent)) 7721 Worklist.emplace_back(J, J->getParent()); 7722 } else { 7723 break; 7724 } 7725 } 7726 7727 // If we didn't encounter a memory access in the expression tree, or if we 7728 // gave up for some reason, just return the width of V. Otherwise, return the 7729 // maximum width we found. 7730 if (!Width) { 7731 if (auto *CI = dyn_cast<CmpInst>(V)) 7732 V = CI->getOperand(0); 7733 Width = DL->getTypeSizeInBits(V->getType()); 7734 } 7735 7736 for (Instruction *I : Visited) 7737 InstrElementSize[I] = Width; 7738 7739 return Width; 7740 } 7741 7742 // Determine if a value V in a vectorizable expression Expr can be demoted to a 7743 // smaller type with a truncation. We collect the values that will be demoted 7744 // in ToDemote and additional roots that require investigating in Roots. 7745 static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr, 7746 SmallVectorImpl<Value *> &ToDemote, 7747 SmallVectorImpl<Value *> &Roots) { 7748 // We can always demote constants. 7749 if (isa<Constant>(V)) { 7750 ToDemote.push_back(V); 7751 return true; 7752 } 7753 7754 // If the value is not an instruction in the expression with only one use, it 7755 // cannot be demoted. 7756 auto *I = dyn_cast<Instruction>(V); 7757 if (!I || !I->hasOneUse() || !Expr.count(I)) 7758 return false; 7759 7760 switch (I->getOpcode()) { 7761 7762 // We can always demote truncations and extensions. Since truncations can 7763 // seed additional demotion, we save the truncated value. 7764 case Instruction::Trunc: 7765 Roots.push_back(I->getOperand(0)); 7766 break; 7767 case Instruction::ZExt: 7768 case Instruction::SExt: 7769 if (isa<ExtractElementInst>(I->getOperand(0)) || 7770 isa<InsertElementInst>(I->getOperand(0))) 7771 return false; 7772 break; 7773 7774 // We can demote certain binary operations if we can demote both of their 7775 // operands. 
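// Illustrative (hypothetical IR) example of such a demotion: in
//   %a = add i32 %x, %y
//   %t = trunc i32 %a to i8
// the add may be performed in i8, provided %x and %y can themselves be
// demoted without changing the truncated result.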
7776 case Instruction::Add: 7777 case Instruction::Sub: 7778 case Instruction::Mul: 7779 case Instruction::And: 7780 case Instruction::Or: 7781 case Instruction::Xor: 7782 if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) || 7783 !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots)) 7784 return false; 7785 break; 7786 7787 // We can demote selects if we can demote their true and false values. 7788 case Instruction::Select: { 7789 SelectInst *SI = cast<SelectInst>(I); 7790 if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) || 7791 !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots)) 7792 return false; 7793 break; 7794 } 7795 7796 // We can demote phis if we can demote all their incoming operands. Note that 7797 // we don't need to worry about cycles since we ensure single use above. 7798 case Instruction::PHI: { 7799 PHINode *PN = cast<PHINode>(I); 7800 for (Value *IncValue : PN->incoming_values()) 7801 if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots)) 7802 return false; 7803 break; 7804 } 7805 7806 // Otherwise, conservatively give up. 7807 default: 7808 return false; 7809 } 7810 7811 // Record the value that we can demote. 7812 ToDemote.push_back(V); 7813 return true; 7814 } 7815 7816 void BoUpSLP::computeMinimumValueSizes() { 7817 // If there are no external uses, the expression tree must be rooted by a 7818 // store. We can't demote in-memory values, so there is nothing to do here. 7819 if (ExternalUses.empty()) 7820 return; 7821 7822 // We only attempt to truncate integer expressions. 7823 auto &TreeRoot = VectorizableTree[0]->Scalars; 7824 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 7825 if (!TreeRootIT) 7826 return; 7827 7828 // If the expression is not rooted by a store, these roots should have 7829 // external uses. We will rely on InstCombine to rewrite the expression in 7830 // the narrower type. However, InstCombine only rewrites single-use values. 7831 // This means that if a tree entry other than a root is used externally, it 7832 // must have multiple uses and InstCombine will not rewrite it. The code 7833 // below ensures that only the roots are used externally. 7834 SmallPtrSet<Value *, 32> Expr(TreeRoot.begin(), TreeRoot.end()); 7835 for (auto &EU : ExternalUses) 7836 if (!Expr.erase(EU.Scalar)) 7837 return; 7838 if (!Expr.empty()) 7839 return; 7840 7841 // Collect the scalar values of the vectorizable expression. We will use this 7842 // context to determine which values can be demoted. If we see a truncation, 7843 // we mark it as seeding another demotion. 7844 for (auto &EntryPtr : VectorizableTree) 7845 Expr.insert(EntryPtr->Scalars.begin(), EntryPtr->Scalars.end()); 7846 7847 // Ensure the roots of the vectorizable tree don't form a cycle. They must 7848 // have a single external user that is not in the vectorizable tree. 7849 for (auto *Root : TreeRoot) 7850 if (!Root->hasOneUse() || Expr.count(*Root->user_begin())) 7851 return; 7852 7853 // Conservatively determine if we can actually truncate the roots of the 7854 // expression. Collect the values that can be demoted in ToDemote and 7855 // additional roots that require investigating in Roots. 7856 SmallVector<Value *, 32> ToDemote; 7857 SmallVector<Value *, 4> Roots; 7858 for (auto *Root : TreeRoot) 7859 if (!collectValuesToDemote(Root, Expr, ToDemote, Roots)) 7860 return; 7861 7862 // The maximum bit width required to represent all the values that can be 7863 // demoted without loss of precision. 
It would be safe to truncate the roots 7864 // of the expression to this width. 7865 auto MaxBitWidth = 8u; 7866 7867 // We first check if all the bits of the roots are demanded. If they're not, 7868 // we can truncate the roots to this narrower type. 7869 for (auto *Root : TreeRoot) { 7870 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 7871 MaxBitWidth = std::max<unsigned>( 7872 Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth); 7873 } 7874 7875 // True if the roots can be zero-extended back to their original type, rather 7876 // than sign-extended. We know that if the leading bits are not demanded, we 7877 // can safely zero-extend. So we initialize IsKnownPositive to True. 7878 bool IsKnownPositive = true; 7879 7880 // If all the bits of the roots are demanded, we can try a little harder to 7881 // compute a narrower type. This can happen, for example, if the roots are 7882 // getelementptr indices. InstCombine promotes these indices to the pointer 7883 // width. Thus, all their bits are technically demanded even though the 7884 // address computation might be vectorized in a smaller type. 7885 // 7886 // We start by looking at each entry that can be demoted. We compute the 7887 // maximum bit width required to store the scalar by using ValueTracking to 7888 // compute the number of high-order bits we can truncate. 7889 if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) && 7890 llvm::all_of(TreeRoot, [](Value *R) { 7891 assert(R->hasOneUse() && "Root should have only one use!"); 7892 return isa<GetElementPtrInst>(R->user_back()); 7893 })) { 7894 MaxBitWidth = 8u; 7895 7896 // Determine if the sign bit of all the roots is known to be zero. If not, 7897 // IsKnownPositive is set to False. 7898 IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) { 7899 KnownBits Known = computeKnownBits(R, *DL); 7900 return Known.isNonNegative(); 7901 }); 7902 7903 // Determine the maximum number of bits required to store the scalar 7904 // values. 7905 for (auto *Scalar : ToDemote) { 7906 auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT); 7907 auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType()); 7908 MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth); 7909 } 7910 7911 // If we can't prove that the sign bit is zero, we must add one to the 7912 // maximum bit width to account for the unknown sign bit. This preserves 7913 // the existing sign bit so we can safely sign-extend the root back to the 7914 // original type. Otherwise, if we know the sign bit is zero, we will 7915 // zero-extend the root instead. 7916 // 7917 // FIXME: This is somewhat suboptimal, as there will be cases where adding 7918 // one to the maximum bit width will yield a larger-than-necessary 7919 // type. In general, we need to add an extra bit only if we can't 7920 // prove that the upper bit of the original type is equal to the 7921 // upper bit of the proposed smaller type. If these two bits are the 7922 // same (either zero or one) we know that sign-extending from the 7923 // smaller type will result in the same value. Here, since we can't 7924 // yet prove this, we are just making the proposed smaller type 7925 // larger to ensure correctness. 7926 if (!IsKnownPositive) 7927 ++MaxBitWidth; 7928 } 7929 7930 // Round MaxBitWidth up to the next power-of-two. 
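// For example, if the demoted values only need 10 significant bits,
// MaxBitWidth is rounded up to 16, so the expression would be narrowed to
// i16 rather than an unusual 10-bit type.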
7931 if (!isPowerOf2_64(MaxBitWidth))
7932 MaxBitWidth = NextPowerOf2(MaxBitWidth);
7933 
7934 // If the maximum bit width we compute is less than the width of the roots'
7935 // type, we can proceed with the narrowing. Otherwise, do nothing.
7936 if (MaxBitWidth >= TreeRootIT->getBitWidth())
7937 return;
7938 
7939 // If we can truncate the root, we must collect additional values that might
7940 // be demoted as a result. That is, those seeded by truncations we will
7941 // modify.
7942 while (!Roots.empty())
7943 collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);
7944 
7945 // Finally, map the values we can demote to the maximum bit width we computed.
7946 for (auto *Scalar : ToDemote)
7947 MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
7948 }
7949 
7950 namespace {
7951 
7952 /// The SLPVectorizer Pass.
7953 struct SLPVectorizer : public FunctionPass {
7954 SLPVectorizerPass Impl;
7955 
7956 /// Pass identification, replacement for typeid
7957 static char ID;
7958 
7959 explicit SLPVectorizer() : FunctionPass(ID) {
7960 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
7961 }
7962 
7963 bool doInitialization(Module &M) override { return false; }
7964 
7965 bool runOnFunction(Function &F) override {
7966 if (skipFunction(F))
7967 return false;
7968 
7969 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
7970 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
7971 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
7972 auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
7973 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
7974 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
7975 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
7976 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
7977 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
7978 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
7979 
7980 return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
7981 }
7982 
7983 void getAnalysisUsage(AnalysisUsage &AU) const override {
7984 FunctionPass::getAnalysisUsage(AU);
7985 AU.addRequired<AssumptionCacheTracker>();
7986 AU.addRequired<ScalarEvolutionWrapperPass>();
7987 AU.addRequired<AAResultsWrapperPass>();
7988 AU.addRequired<TargetTransformInfoWrapperPass>();
7989 AU.addRequired<LoopInfoWrapperPass>();
7990 AU.addRequired<DominatorTreeWrapperPass>();
7991 AU.addRequired<DemandedBitsWrapperPass>();
7992 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
7993 AU.addRequired<InjectTLIMappingsLegacy>();
7994 AU.addPreserved<LoopInfoWrapperPass>();
7995 AU.addPreserved<DominatorTreeWrapperPass>();
7996 AU.addPreserved<AAResultsWrapperPass>();
7997 AU.addPreserved<GlobalsAAWrapperPass>();
7998 AU.setPreservesCFG();
7999 }
8000 };
8001 
8002 } // end anonymous namespace
8003 
8004 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
8005 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
8006 auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
8007 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
8008 auto *AA = &AM.getResult<AAManager>(F);
8009 auto *LI = &AM.getResult<LoopAnalysis>(F);
8010 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
8011 auto *AC = &AM.getResult<AssumptionAnalysis>(F);
8012 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
8013 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
8014 
8015 bool Changed = 
runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 8016 if (!Changed) 8017 return PreservedAnalyses::all(); 8018 8019 PreservedAnalyses PA; 8020 PA.preserveSet<CFGAnalyses>(); 8021 return PA; 8022 } 8023 8024 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, 8025 TargetTransformInfo *TTI_, 8026 TargetLibraryInfo *TLI_, AAResults *AA_, 8027 LoopInfo *LI_, DominatorTree *DT_, 8028 AssumptionCache *AC_, DemandedBits *DB_, 8029 OptimizationRemarkEmitter *ORE_) { 8030 if (!RunSLPVectorization) 8031 return false; 8032 SE = SE_; 8033 TTI = TTI_; 8034 TLI = TLI_; 8035 AA = AA_; 8036 LI = LI_; 8037 DT = DT_; 8038 AC = AC_; 8039 DB = DB_; 8040 DL = &F.getParent()->getDataLayout(); 8041 8042 Stores.clear(); 8043 GEPs.clear(); 8044 bool Changed = false; 8045 8046 // If the target claims to have no vector registers don't attempt 8047 // vectorization. 8048 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) 8049 return false; 8050 8051 // Don't vectorize when the attribute NoImplicitFloat is used. 8052 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 8053 return false; 8054 8055 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 8056 8057 // Use the bottom up slp vectorizer to construct chains that start with 8058 // store instructions. 8059 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_); 8060 8061 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 8062 // delete instructions. 8063 8064 // Update DFS numbers now so that we can use them for ordering. 8065 DT->updateDFSNumbers(); 8066 8067 // Scan the blocks in the function in post order. 8068 for (auto BB : post_order(&F.getEntryBlock())) { 8069 collectSeedInstructions(BB); 8070 8071 // Vectorize trees that end at stores. 8072 if (!Stores.empty()) { 8073 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() 8074 << " underlying objects.\n"); 8075 Changed |= vectorizeStoreChains(R); 8076 } 8077 8078 // Vectorize trees that end at reductions. 8079 Changed |= vectorizeChainsInBlock(BB, R); 8080 8081 // Vectorize the index computations of getelementptr instructions. This 8082 // is primarily intended to catch gather-like idioms ending at 8083 // non-consecutive loads. 
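// A sketch of such an idiom (hypothetical IR):
//   %g0 = getelementptr inbounds i32, i32* %base, i64 %i0
//   %g1 = getelementptr inbounds i32, i32* %base, i64 %i1
//   %v0 = load i32, i32* %g0
//   %v1 = load i32, i32* %g1
// The loads themselves are not consecutive, but the computations feeding
// the %i0 and %i1 indices may still form a profitable tree.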
8084 if (!GEPs.empty()) { 8085 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() 8086 << " underlying objects.\n"); 8087 Changed |= vectorizeGEPIndices(BB, R); 8088 } 8089 } 8090 8091 if (Changed) { 8092 R.optimizeGatherSequence(); 8093 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 8094 } 8095 return Changed; 8096 } 8097 8098 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, 8099 unsigned Idx) { 8100 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size() 8101 << "\n"); 8102 const unsigned Sz = R.getVectorElementSize(Chain[0]); 8103 const unsigned MinVF = R.getMinVecRegSize() / Sz; 8104 unsigned VF = Chain.size(); 8105 8106 if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF) 8107 return false; 8108 8109 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx 8110 << "\n"); 8111 8112 R.buildTree(Chain); 8113 if (R.isTreeTinyAndNotFullyVectorizable()) 8114 return false; 8115 if (R.isLoadCombineCandidate()) 8116 return false; 8117 R.reorderTopToBottom(); 8118 R.reorderBottomToTop(); 8119 R.buildExternalUses(); 8120 8121 R.computeMinimumValueSizes(); 8122 8123 InstructionCost Cost = R.getTreeCost(); 8124 8125 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF =" << VF << "\n"); 8126 if (Cost < -SLPCostThreshold) { 8127 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n"); 8128 8129 using namespace ore; 8130 8131 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized", 8132 cast<StoreInst>(Chain[0])) 8133 << "Stores SLP vectorized with cost " << NV("Cost", Cost) 8134 << " and with tree size " 8135 << NV("TreeSize", R.getTreeSize())); 8136 8137 R.vectorizeTree(); 8138 return true; 8139 } 8140 8141 return false; 8142 } 8143 8144 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores, 8145 BoUpSLP &R) { 8146 // We may run into multiple chains that merge into a single chain. We mark the 8147 // stores that we vectorized so that we don't visit the same store twice. 
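// For instance (an illustrative case), stores to a[0], a[1], a[2], a[3] may
// be reachable both from a chain starting at a[0] and from a shorter chain
// starting at a[2]; once the stores are vectorized as part of one chain,
// the marking makes the other starting point skip them.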
8148 BoUpSLP::ValueSet VectorizedStores;
8149 bool Changed = false;
8150 
8151 int E = Stores.size();
8152 SmallBitVector Tails(E, false);
8153 int MaxIter = MaxStoreLookup.getValue();
8154 SmallVector<std::pair<int, int>, 16> ConsecutiveChain(
8155 E, std::make_pair(E, INT_MAX));
8156 SmallVector<SmallBitVector, 4> CheckedPairs(E, SmallBitVector(E, false));
8157 int IterCnt;
8158 auto &&FindConsecutiveAccess = [this, &Stores, &Tails, &IterCnt, MaxIter,
8159 &CheckedPairs,
8160 &ConsecutiveChain](int K, int Idx) {
8161 if (IterCnt >= MaxIter)
8162 return true;
8163 if (CheckedPairs[Idx].test(K))
8164 return ConsecutiveChain[K].second == 1 &&
8165 ConsecutiveChain[K].first == Idx;
8166 ++IterCnt;
8167 CheckedPairs[Idx].set(K);
8168 CheckedPairs[K].set(Idx);
8169 Optional<int> Diff = getPointersDiff(
8170 Stores[K]->getValueOperand()->getType(), Stores[K]->getPointerOperand(),
8171 Stores[Idx]->getValueOperand()->getType(),
8172 Stores[Idx]->getPointerOperand(), *DL, *SE, /*StrictCheck=*/true);
8173 if (!Diff || *Diff == 0)
8174 return false;
8175 int Val = *Diff;
8176 if (Val < 0) {
8177 if (ConsecutiveChain[Idx].second > -Val) {
8178 Tails.set(K);
8179 ConsecutiveChain[Idx] = std::make_pair(K, -Val);
8180 }
8181 return false;
8182 }
8183 if (ConsecutiveChain[K].second <= Val)
8184 return false;
8185 
8186 Tails.set(Idx);
8187 ConsecutiveChain[K] = std::make_pair(Idx, Val);
8188 return Val == 1;
8189 };
8190 // Do a quadratic search on all of the given stores in reverse order and find
8191 // all of the pairs of stores that follow each other.
8192 for (int Idx = E - 1; Idx >= 0; --Idx) {
8193 // If a store has multiple consecutive store candidates, search according
8194 // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ...
8195 // This is because pairing with the immediately succeeding or preceding
8196 // candidate usually creates the best chance to find an SLP vectorization
8197 // opportunity.
8197 const int MaxLookDepth = std::max(E - Idx, Idx + 1);
8198 IterCnt = 0;
8199 for (int Offset = 1, F = MaxLookDepth; Offset < F; ++Offset)
8200 if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) ||
8201 (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx)))
8202 break;
8203 }
8204 
8205 // Tracks if we tried to vectorize stores starting from the given tail
8206 // already.
8207 SmallBitVector TriedTails(E, false);
8208 // For stores that start but don't end a link in the chain:
8209 for (int Cnt = E; Cnt > 0; --Cnt) {
8210 int I = Cnt - 1;
8211 if (ConsecutiveChain[I].first == E || Tails.test(I))
8212 continue;
8213 // We found a store instr that starts a chain. Now follow the chain and try
8214 // to vectorize it.
8215 BoUpSLP::ValueList Operands;
8216 // Collect the chain into a list.
8217 while (I != E && !VectorizedStores.count(Stores[I])) {
8218 Operands.push_back(Stores[I]);
8219 Tails.set(I);
8220 if (ConsecutiveChain[I].second != 1) {
8221 // Mark the new end in the chain and go back, if required. It might be
8222 // required if the original stores come in reversed order, for example.
8223 if (ConsecutiveChain[I].first != E &&
8224 Tails.test(ConsecutiveChain[I].first) && !TriedTails.test(I) &&
8225 !VectorizedStores.count(Stores[ConsecutiveChain[I].first])) {
8226 TriedTails.set(I);
8227 Tails.reset(ConsecutiveChain[I].first);
8228 if (Cnt < ConsecutiveChain[I].first + 2)
8229 Cnt = ConsecutiveChain[I].first + 2;
8230 }
8231 break;
8232 }
8233 // Move to the next value in the chain.
8234 I = ConsecutiveChain[I].first; 8235 } 8236 assert(!Operands.empty() && "Expected non-empty list of stores."); 8237 8238 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 8239 unsigned EltSize = R.getVectorElementSize(Operands[0]); 8240 unsigned MaxElts = llvm::PowerOf2Floor(MaxVecRegSize / EltSize); 8241 8242 unsigned MinVF = R.getMinVF(EltSize); 8243 unsigned MaxVF = std::min(R.getMaximumVF(EltSize, Instruction::Store), 8244 MaxElts); 8245 8246 // FIXME: Is division-by-2 the correct step? Should we assert that the 8247 // register size is a power-of-2? 8248 unsigned StartIdx = 0; 8249 for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) { 8250 for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) { 8251 ArrayRef<Value *> Slice = makeArrayRef(Operands).slice(Cnt, Size); 8252 if (!VectorizedStores.count(Slice.front()) && 8253 !VectorizedStores.count(Slice.back()) && 8254 vectorizeStoreChain(Slice, R, Cnt)) { 8255 // Mark the vectorized stores so that we don't vectorize them again. 8256 VectorizedStores.insert(Slice.begin(), Slice.end()); 8257 Changed = true; 8258 // If we vectorized initial block, no need to try to vectorize it 8259 // again. 8260 if (Cnt == StartIdx) 8261 StartIdx += Size; 8262 Cnt += Size; 8263 continue; 8264 } 8265 ++Cnt; 8266 } 8267 // Check if the whole array was vectorized already - exit. 8268 if (StartIdx >= Operands.size()) 8269 break; 8270 } 8271 } 8272 8273 return Changed; 8274 } 8275 8276 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) { 8277 // Initialize the collections. We will make a single pass over the block. 8278 Stores.clear(); 8279 GEPs.clear(); 8280 8281 // Visit the store and getelementptr instructions in BB and organize them in 8282 // Stores and GEPs according to the underlying objects of their pointer 8283 // operands. 8284 for (Instruction &I : *BB) { 8285 // Ignore store instructions that are volatile or have a pointer operand 8286 // that doesn't point to a scalar type. 8287 if (auto *SI = dyn_cast<StoreInst>(&I)) { 8288 if (!SI->isSimple()) 8289 continue; 8290 if (!isValidElementType(SI->getValueOperand()->getType())) 8291 continue; 8292 Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI); 8293 } 8294 8295 // Ignore getelementptr instructions that have more than one index, a 8296 // constant index, or a pointer operand that doesn't point to a scalar 8297 // type. 8298 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 8299 auto Idx = GEP->idx_begin()->get(); 8300 if (GEP->getNumIndices() > 1 || isa<Constant>(Idx)) 8301 continue; 8302 if (!isValidElementType(Idx->getType())) 8303 continue; 8304 if (GEP->getType()->isVectorTy()) 8305 continue; 8306 GEPs[GEP->getPointerOperand()].push_back(GEP); 8307 } 8308 } 8309 } 8310 8311 bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) { 8312 if (!A || !B) 8313 return false; 8314 Value *VL[] = {A, B}; 8315 return tryToVectorizeList(VL, R); 8316 } 8317 8318 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 8319 bool LimitForRegisterSize) { 8320 if (VL.size() < 2) 8321 return false; 8322 8323 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " 8324 << VL.size() << ".\n"); 8325 8326 // Check that all of the parts are instructions of the same type, 8327 // we permit an alternate opcode via InstructionsState. 
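// For example (illustrative), a list such as {add, sub, add, sub} is still
// accepted: getSameOpcode() records add as the main opcode and sub as the
// alternate one, and such a bundle is typically emitted as two vector
// instructions whose lanes are combined with a shuffle.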
8328 InstructionsState S = getSameOpcode(VL); 8329 if (!S.getOpcode()) 8330 return false; 8331 8332 Instruction *I0 = cast<Instruction>(S.OpValue); 8333 // Make sure invalid types (including vector type) are rejected before 8334 // determining vectorization factor for scalar instructions. 8335 for (Value *V : VL) { 8336 Type *Ty = V->getType(); 8337 if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) { 8338 // NOTE: the following will give user internal llvm type name, which may 8339 // not be useful. 8340 R.getORE()->emit([&]() { 8341 std::string type_str; 8342 llvm::raw_string_ostream rso(type_str); 8343 Ty->print(rso); 8344 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0) 8345 << "Cannot SLP vectorize list: type " 8346 << rso.str() + " is unsupported by vectorizer"; 8347 }); 8348 return false; 8349 } 8350 } 8351 8352 unsigned Sz = R.getVectorElementSize(I0); 8353 unsigned MinVF = R.getMinVF(Sz); 8354 unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF); 8355 MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF); 8356 if (MaxVF < 2) { 8357 R.getORE()->emit([&]() { 8358 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0) 8359 << "Cannot SLP vectorize list: vectorization factor " 8360 << "less than 2 is not supported"; 8361 }); 8362 return false; 8363 } 8364 8365 bool Changed = false; 8366 bool CandidateFound = false; 8367 InstructionCost MinCost = SLPCostThreshold.getValue(); 8368 Type *ScalarTy = VL[0]->getType(); 8369 if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 8370 ScalarTy = IE->getOperand(1)->getType(); 8371 8372 unsigned NextInst = 0, MaxInst = VL.size(); 8373 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) { 8374 // No actual vectorization should happen, if number of parts is the same as 8375 // provided vectorization factor (i.e. the scalar type is used for vector 8376 // code during codegen). 8377 auto *VecTy = FixedVectorType::get(ScalarTy, VF); 8378 if (TTI->getNumberOfParts(VecTy) == VF) 8379 continue; 8380 for (unsigned I = NextInst; I < MaxInst; ++I) { 8381 unsigned OpsWidth = 0; 8382 8383 if (I + VF > MaxInst) 8384 OpsWidth = MaxInst - I; 8385 else 8386 OpsWidth = VF; 8387 8388 if (!isPowerOf2_32(OpsWidth)) 8389 continue; 8390 8391 if ((LimitForRegisterSize && OpsWidth < MaxVF) || 8392 (VF > MinVF && OpsWidth <= VF / 2) || (VF == MinVF && OpsWidth < 2)) 8393 break; 8394 8395 ArrayRef<Value *> Ops = VL.slice(I, OpsWidth); 8396 // Check that a previous iteration of this loop did not delete the Value. 8397 if (llvm::any_of(Ops, [&R](Value *V) { 8398 auto *I = dyn_cast<Instruction>(V); 8399 return I && R.isDeleted(I); 8400 })) 8401 continue; 8402 8403 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations " 8404 << "\n"); 8405 8406 R.buildTree(Ops); 8407 if (R.isTreeTinyAndNotFullyVectorizable()) 8408 continue; 8409 R.reorderTopToBottom(); 8410 R.reorderBottomToTop(); 8411 R.buildExternalUses(); 8412 8413 R.computeMinimumValueSizes(); 8414 InstructionCost Cost = R.getTreeCost(); 8415 CandidateFound = true; 8416 MinCost = std::min(MinCost, Cost); 8417 8418 if (Cost < -SLPCostThreshold) { 8419 LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); 8420 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList", 8421 cast<Instruction>(Ops[0])) 8422 << "SLP vectorized with cost " << ore::NV("Cost", Cost) 8423 << " and with tree size " 8424 << ore::NV("TreeSize", R.getTreeSize())); 8425 8426 R.vectorizeTree(); 8427 // Move to the next bundle. 
8428 I += VF - 1; 8429 NextInst = I + 1; 8430 Changed = true; 8431 } 8432 } 8433 } 8434 8435 if (!Changed && CandidateFound) { 8436 R.getORE()->emit([&]() { 8437 return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0) 8438 << "List vectorization was possible but not beneficial with cost " 8439 << ore::NV("Cost", MinCost) << " >= " 8440 << ore::NV("Treshold", -SLPCostThreshold); 8441 }); 8442 } else if (!Changed) { 8443 R.getORE()->emit([&]() { 8444 return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0) 8445 << "Cannot SLP vectorize list: vectorization was impossible" 8446 << " with available vectorization factors"; 8447 }); 8448 } 8449 return Changed; 8450 } 8451 8452 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) { 8453 if (!I) 8454 return false; 8455 8456 if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I)) 8457 return false; 8458 8459 Value *P = I->getParent(); 8460 8461 // Vectorize in current basic block only. 8462 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); 8463 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); 8464 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P) 8465 return false; 8466 8467 // Try to vectorize V. 8468 if (tryToVectorizePair(Op0, Op1, R)) 8469 return true; 8470 8471 auto *A = dyn_cast<BinaryOperator>(Op0); 8472 auto *B = dyn_cast<BinaryOperator>(Op1); 8473 // Try to skip B. 8474 if (B && B->hasOneUse()) { 8475 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); 8476 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); 8477 if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R)) 8478 return true; 8479 if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R)) 8480 return true; 8481 } 8482 8483 // Try to skip A. 8484 if (A && A->hasOneUse()) { 8485 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 8486 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 8487 if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R)) 8488 return true; 8489 if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R)) 8490 return true; 8491 } 8492 return false; 8493 } 8494 8495 namespace { 8496 8497 /// Model horizontal reductions. 8498 /// 8499 /// A horizontal reduction is a tree of reduction instructions that has values 8500 /// that can be put into a vector as its leaves. For example: 8501 /// 8502 /// mul mul mul mul 8503 /// \ / \ / 8504 /// + + 8505 /// \ / 8506 /// + 8507 /// This tree has "mul" as its leaf values and "+" as its reduction 8508 /// instructions. A reduction can feed into a store or a binary operation 8509 /// feeding a phi. 8510 /// ... 8511 /// \ / 8512 /// + 8513 /// | 8514 /// phi += 8515 /// 8516 /// Or: 8517 /// ... 8518 /// \ / 8519 /// + 8520 /// | 8521 /// *p = 8522 /// 8523 class HorizontalReduction { 8524 using ReductionOpsType = SmallVector<Value *, 16>; 8525 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>; 8526 ReductionOpsListType ReductionOps; 8527 SmallVector<Value *, 32> ReducedVals; 8528 // Use map vector to make stable output. 8529 MapVector<Instruction *, Value *> ExtraArgs; 8530 WeakTrackingVH ReductionRoot; 8531 /// The type of reduction operation. 
8532 RecurKind RdxKind; 8533 8534 const unsigned INVALID_OPERAND_INDEX = std::numeric_limits<unsigned>::max(); 8535 8536 static bool isCmpSelMinMax(Instruction *I) { 8537 return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) && 8538 RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I)); 8539 } 8540 8541 // And/or are potentially poison-safe logical patterns like: 8542 // select x, y, false 8543 // select x, true, y 8544 static bool isBoolLogicOp(Instruction *I) { 8545 return match(I, m_LogicalAnd(m_Value(), m_Value())) || 8546 match(I, m_LogicalOr(m_Value(), m_Value())); 8547 } 8548 8549 /// Checks if instruction is associative and can be vectorized. 8550 static bool isVectorizable(RecurKind Kind, Instruction *I) { 8551 if (Kind == RecurKind::None) 8552 return false; 8553 8554 // Integer ops that map to select instructions or intrinsics are fine. 8555 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) || 8556 isBoolLogicOp(I)) 8557 return true; 8558 8559 if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) { 8560 // FP min/max are associative except for NaN and -0.0. We do not 8561 // have to rule out -0.0 here because the intrinsic semantics do not 8562 // specify a fixed result for it. 8563 return I->getFastMathFlags().noNaNs(); 8564 } 8565 8566 return I->isAssociative(); 8567 } 8568 8569 static Value *getRdxOperand(Instruction *I, unsigned Index) { 8570 // Poison-safe 'or' takes the form: select X, true, Y 8571 // To make that work with the normal operand processing, we skip the 8572 // true value operand. 8573 // TODO: Change the code and data structures to handle this without a hack. 8574 if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1) 8575 return I->getOperand(2); 8576 return I->getOperand(Index); 8577 } 8578 8579 /// Checks if the ParentStackElem.first should be marked as a reduction 8580 /// operation with an extra argument or as extra argument itself. 8581 void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem, 8582 Value *ExtraArg) { 8583 if (ExtraArgs.count(ParentStackElem.first)) { 8584 ExtraArgs[ParentStackElem.first] = nullptr; 8585 // We ran into something like: 8586 // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg. 8587 // The whole ParentStackElem.first should be considered as an extra value 8588 // in this case. 8589 // Do not perform analysis of remaining operands of ParentStackElem.first 8590 // instruction, this whole instruction is an extra argument. 8591 ParentStackElem.second = INVALID_OPERAND_INDEX; 8592 } else { 8593 // We ran into something like: 8594 // ParentStackElem.first += ... + ExtraArg + ... 8595 ExtraArgs[ParentStackElem.first] = ExtraArg; 8596 } 8597 } 8598 8599 /// Creates reduction operation with the current opcode. 
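/// For example, for RecurKind::SMax this emits either an icmp sgt followed
/// by a select (when \p UseSelect is set) or a call to the llvm.smax
/// intrinsic.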
8600 static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS, 8601 Value *RHS, const Twine &Name, bool UseSelect) { 8602 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind); 8603 switch (Kind) { 8604 case RecurKind::Or: 8605 if (UseSelect && 8606 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 8607 return Builder.CreateSelect(LHS, Builder.getTrue(), RHS, Name); 8608 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 8609 Name); 8610 case RecurKind::And: 8611 if (UseSelect && 8612 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 8613 return Builder.CreateSelect(LHS, RHS, Builder.getFalse(), Name); 8614 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 8615 Name); 8616 case RecurKind::Add: 8617 case RecurKind::Mul: 8618 case RecurKind::Xor: 8619 case RecurKind::FAdd: 8620 case RecurKind::FMul: 8621 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 8622 Name); 8623 case RecurKind::FMax: 8624 return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS); 8625 case RecurKind::FMin: 8626 return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS); 8627 case RecurKind::SMax: 8628 if (UseSelect) { 8629 Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name); 8630 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 8631 } 8632 return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS); 8633 case RecurKind::SMin: 8634 if (UseSelect) { 8635 Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name); 8636 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 8637 } 8638 return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS); 8639 case RecurKind::UMax: 8640 if (UseSelect) { 8641 Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name); 8642 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 8643 } 8644 return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS); 8645 case RecurKind::UMin: 8646 if (UseSelect) { 8647 Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name); 8648 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 8649 } 8650 return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS); 8651 default: 8652 llvm_unreachable("Unknown reduction operation."); 8653 } 8654 } 8655 8656 /// Creates reduction operation with the current opcode with the IR flags 8657 /// from \p ReductionOps. 8658 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 8659 Value *RHS, const Twine &Name, 8660 const ReductionOpsListType &ReductionOps) { 8661 bool UseSelect = ReductionOps.size() == 2 || 8662 // Logical or/and. 8663 (ReductionOps.size() == 1 && 8664 isa<SelectInst>(ReductionOps.front().front())); 8665 assert((!UseSelect || ReductionOps.size() != 2 || 8666 isa<SelectInst>(ReductionOps[1][0])) && 8667 "Expected cmp + select pairs for reduction"); 8668 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect); 8669 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 8670 if (auto *Sel = dyn_cast<SelectInst>(Op)) { 8671 propagateIRFlags(Sel->getCondition(), ReductionOps[0]); 8672 propagateIRFlags(Op, ReductionOps[1]); 8673 return Op; 8674 } 8675 } 8676 propagateIRFlags(Op, ReductionOps[0]); 8677 return Op; 8678 } 8679 8680 /// Creates reduction operation with the current opcode with the IR flags 8681 /// from \p I. 
8682 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 8683 Value *RHS, const Twine &Name, Instruction *I) { 8684 auto *SelI = dyn_cast<SelectInst>(I); 8685 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, SelI != nullptr); 8686 if (SelI && RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 8687 if (auto *Sel = dyn_cast<SelectInst>(Op)) 8688 propagateIRFlags(Sel->getCondition(), SelI->getCondition()); 8689 } 8690 propagateIRFlags(Op, I); 8691 return Op; 8692 } 8693 8694 static RecurKind getRdxKind(Instruction *I) { 8695 assert(I && "Expected instruction for reduction matching"); 8696 TargetTransformInfo::ReductionFlags RdxFlags; 8697 if (match(I, m_Add(m_Value(), m_Value()))) 8698 return RecurKind::Add; 8699 if (match(I, m_Mul(m_Value(), m_Value()))) 8700 return RecurKind::Mul; 8701 if (match(I, m_And(m_Value(), m_Value())) || 8702 match(I, m_LogicalAnd(m_Value(), m_Value()))) 8703 return RecurKind::And; 8704 if (match(I, m_Or(m_Value(), m_Value())) || 8705 match(I, m_LogicalOr(m_Value(), m_Value()))) 8706 return RecurKind::Or; 8707 if (match(I, m_Xor(m_Value(), m_Value()))) 8708 return RecurKind::Xor; 8709 if (match(I, m_FAdd(m_Value(), m_Value()))) 8710 return RecurKind::FAdd; 8711 if (match(I, m_FMul(m_Value(), m_Value()))) 8712 return RecurKind::FMul; 8713 8714 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value()))) 8715 return RecurKind::FMax; 8716 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value()))) 8717 return RecurKind::FMin; 8718 8719 // This matches either cmp+select or intrinsics. SLP is expected to handle 8720 // either form. 8721 // TODO: If we are canonicalizing to intrinsics, we can remove several 8722 // special-case paths that deal with selects. 8723 if (match(I, m_SMax(m_Value(), m_Value()))) 8724 return RecurKind::SMax; 8725 if (match(I, m_SMin(m_Value(), m_Value()))) 8726 return RecurKind::SMin; 8727 if (match(I, m_UMax(m_Value(), m_Value()))) 8728 return RecurKind::UMax; 8729 if (match(I, m_UMin(m_Value(), m_Value()))) 8730 return RecurKind::UMin; 8731 8732 if (auto *Select = dyn_cast<SelectInst>(I)) { 8733 // Try harder: look for min/max pattern based on instructions producing 8734 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2). 8735 // During the intermediate stages of SLP, it's very common to have 8736 // pattern like this (since optimizeGatherSequence is run only once 8737 // at the end): 8738 // %1 = extractelement <2 x i32> %a, i32 0 8739 // %2 = extractelement <2 x i32> %a, i32 1 8740 // %cond = icmp sgt i32 %1, %2 8741 // %3 = extractelement <2 x i32> %a, i32 0 8742 // %4 = extractelement <2 x i32> %a, i32 1 8743 // %select = select i1 %cond, i32 %3, i32 %4 8744 CmpInst::Predicate Pred; 8745 Instruction *L1; 8746 Instruction *L2; 8747 8748 Value *LHS = Select->getTrueValue(); 8749 Value *RHS = Select->getFalseValue(); 8750 Value *Cond = Select->getCondition(); 8751 8752 // TODO: Support inverse predicates. 
8753 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) { 8754 if (!isa<ExtractElementInst>(RHS) || 8755 !L2->isIdenticalTo(cast<Instruction>(RHS))) 8756 return RecurKind::None; 8757 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) { 8758 if (!isa<ExtractElementInst>(LHS) || 8759 !L1->isIdenticalTo(cast<Instruction>(LHS))) 8760 return RecurKind::None; 8761 } else { 8762 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS)) 8763 return RecurKind::None; 8764 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) || 8765 !L1->isIdenticalTo(cast<Instruction>(LHS)) || 8766 !L2->isIdenticalTo(cast<Instruction>(RHS))) 8767 return RecurKind::None; 8768 } 8769 8770 TargetTransformInfo::ReductionFlags RdxFlags; 8771 switch (Pred) { 8772 default: 8773 return RecurKind::None; 8774 case CmpInst::ICMP_SGT: 8775 case CmpInst::ICMP_SGE: 8776 return RecurKind::SMax; 8777 case CmpInst::ICMP_SLT: 8778 case CmpInst::ICMP_SLE: 8779 return RecurKind::SMin; 8780 case CmpInst::ICMP_UGT: 8781 case CmpInst::ICMP_UGE: 8782 return RecurKind::UMax; 8783 case CmpInst::ICMP_ULT: 8784 case CmpInst::ICMP_ULE: 8785 return RecurKind::UMin; 8786 } 8787 } 8788 return RecurKind::None; 8789 } 8790 8791 /// Get the index of the first operand. 8792 static unsigned getFirstOperandIndex(Instruction *I) { 8793 return isCmpSelMinMax(I) ? 1 : 0; 8794 } 8795 8796 /// Total number of operands in the reduction operation. 8797 static unsigned getNumberOfOperands(Instruction *I) { 8798 return isCmpSelMinMax(I) ? 3 : 2; 8799 } 8800 8801 /// Checks if the instruction is in basic block \p BB. 8802 /// For a cmp+sel min/max reduction check that both ops are in \p BB. 8803 static bool hasSameParent(Instruction *I, BasicBlock *BB) { 8804 if (isCmpSelMinMax(I) || (isBoolLogicOp(I) && isa<SelectInst>(I))) { 8805 auto *Sel = cast<SelectInst>(I); 8806 auto *Cmp = dyn_cast<Instruction>(Sel->getCondition()); 8807 return Sel->getParent() == BB && Cmp && Cmp->getParent() == BB; 8808 } 8809 return I->getParent() == BB; 8810 } 8811 8812 /// Expected number of uses for reduction operations/reduced values. 8813 static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) { 8814 if (IsCmpSelMinMax) { 8815 // SelectInst must be used twice while the condition op must have single 8816 // use only. 8817 if (auto *Sel = dyn_cast<SelectInst>(I)) 8818 return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse(); 8819 return I->hasNUses(2); 8820 } 8821 8822 // Arithmetic reduction operation must be used once only. 8823 return I->hasOneUse(); 8824 } 8825 8826 /// Initializes the list of reduction operations. 8827 void initReductionOps(Instruction *I) { 8828 if (isCmpSelMinMax(I)) 8829 ReductionOps.assign(2, ReductionOpsType()); 8830 else 8831 ReductionOps.assign(1, ReductionOpsType()); 8832 } 8833 8834 /// Add all reduction operations for the reduction instruction \p I. 
8835 void addReductionOps(Instruction *I) {
8836 if (isCmpSelMinMax(I)) {
8837 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition());
8838 ReductionOps[1].emplace_back(I);
8839 } else {
8840 ReductionOps[0].emplace_back(I);
8841 }
8842 }
8843 
8844 static Value *getLHS(RecurKind Kind, Instruction *I) {
8845 if (Kind == RecurKind::None)
8846 return nullptr;
8847 return I->getOperand(getFirstOperandIndex(I));
8848 }
8849 static Value *getRHS(RecurKind Kind, Instruction *I) {
8850 if (Kind == RecurKind::None)
8851 return nullptr;
8852 return I->getOperand(getFirstOperandIndex(I) + 1);
8853 }
8854 
8855 public:
8856 HorizontalReduction() = default;
8857 
8858 /// Try to find a reduction tree.
8859 bool matchAssociativeReduction(PHINode *Phi, Instruction *Inst) {
8860 assert((!Phi || is_contained(Phi->operands(), Inst)) &&
8861 "Phi needs to use the binary operator");
8862 assert((isa<BinaryOperator>(Inst) || isa<SelectInst>(Inst) ||
8863 isa<IntrinsicInst>(Inst)) &&
8864 "Expected binop, select, or intrinsic for reduction matching");
8865 RdxKind = getRdxKind(Inst);
8866 
8867 // We could have an initial reduction that is not an add.
8868 // r *= v1 + v2 + v3 + v4
8869 // In such a case start looking for a tree rooted in the first '+'.
8870 if (Phi) {
8871 if (getLHS(RdxKind, Inst) == Phi) {
8872 Phi = nullptr;
8873 Inst = dyn_cast<Instruction>(getRHS(RdxKind, Inst));
8874 if (!Inst)
8875 return false;
8876 RdxKind = getRdxKind(Inst);
8877 } else if (getRHS(RdxKind, Inst) == Phi) {
8878 Phi = nullptr;
8879 Inst = dyn_cast<Instruction>(getLHS(RdxKind, Inst));
8880 if (!Inst)
8881 return false;
8882 RdxKind = getRdxKind(Inst);
8883 }
8884 }
8885 
8886 if (!isVectorizable(RdxKind, Inst))
8887 return false;
8888 
8889 // Analyze "regular" integer/FP types for reductions - no target-specific
8890 // types or pointers.
8891 Type *Ty = Inst->getType();
8892 if (!isValidElementType(Ty) || Ty->isPointerTy())
8893 return false;
8894 
8895 // Though the ultimate reduction may have multiple uses, its condition must
8896 // have only a single use.
8897 if (auto *Sel = dyn_cast<SelectInst>(Inst))
8898 if (!Sel->getCondition()->hasOneUse())
8899 return false;
8900 
8901 ReductionRoot = Inst;
8902 
8903 // The opcode for leaf values that we perform a reduction on.
8904 // For example: load(x) + load(y) + load(z) + fptoui(w)
8905 // The leaf opcode for 'w' does not match, so we don't include it as a
8906 // potential candidate for the reduction.
8907 unsigned LeafOpcode = 0;
8908 
8909 // Post-order traverse the reduction tree starting at Inst. We only handle
8910 // true trees containing binary operators or selects.
8911 SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
8912 Stack.push_back(std::make_pair(Inst, getFirstOperandIndex(Inst)));
8913 initReductionOps(Inst);
8914 while (!Stack.empty()) {
8915 Instruction *TreeN = Stack.back().first;
8916 unsigned EdgeToVisit = Stack.back().second++;
8917 const RecurKind TreeRdxKind = getRdxKind(TreeN);
8918 bool IsReducedValue = TreeRdxKind != RdxKind;
8919 
8920 // Postorder visit.
8921 if (IsReducedValue || EdgeToVisit >= getNumberOfOperands(TreeN)) {
8922 if (IsReducedValue)
8923 ReducedVals.push_back(TreeN);
8924 else {
8925 auto ExtraArgsIter = ExtraArgs.find(TreeN);
8926 if (ExtraArgsIter != ExtraArgs.end() && !ExtraArgsIter->second) {
8927 // Check if TreeN is an extra argument of its parent operation.
8928 if (Stack.size() <= 1) {
8929 // TreeN can't be an extra argument as it is a root reduction
8930 // operation.
8931 return false; 8932 } 8933 // Yes, TreeN is an extra argument, do not add it to a list of 8934 // reduction operations. 8935 // Stack[Stack.size() - 2] always points to the parent operation. 8936 markExtraArg(Stack[Stack.size() - 2], TreeN); 8937 ExtraArgs.erase(TreeN); 8938 } else 8939 addReductionOps(TreeN); 8940 } 8941 // Retract. 8942 Stack.pop_back(); 8943 continue; 8944 } 8945 8946 // Visit operands. 8947 Value *EdgeVal = getRdxOperand(TreeN, EdgeToVisit); 8948 auto *EdgeInst = dyn_cast<Instruction>(EdgeVal); 8949 if (!EdgeInst) { 8950 // Edge value is not a reduction instruction or a leaf instruction. 8951 // (It may be a constant, function argument, or something else.) 8952 markExtraArg(Stack.back(), EdgeVal); 8953 continue; 8954 } 8955 RecurKind EdgeRdxKind = getRdxKind(EdgeInst); 8956 // Continue analysis if the next operand is a reduction operation or 8957 // (possibly) a leaf value. If the leaf value opcode is not set, 8958 // the first met operation != reduction operation is considered as the 8959 // leaf opcode. 8960 // Only handle trees in the current basic block. 8961 // Each tree node needs to have minimal number of users except for the 8962 // ultimate reduction. 8963 const bool IsRdxInst = EdgeRdxKind == RdxKind; 8964 if (EdgeInst != Phi && EdgeInst != Inst && 8965 hasSameParent(EdgeInst, Inst->getParent()) && 8966 hasRequiredNumberOfUses(isCmpSelMinMax(Inst), EdgeInst) && 8967 (!LeafOpcode || LeafOpcode == EdgeInst->getOpcode() || IsRdxInst)) { 8968 if (IsRdxInst) { 8969 // We need to be able to reassociate the reduction operations. 8970 if (!isVectorizable(EdgeRdxKind, EdgeInst)) { 8971 // I is an extra argument for TreeN (its parent operation). 8972 markExtraArg(Stack.back(), EdgeInst); 8973 continue; 8974 } 8975 } else if (!LeafOpcode) { 8976 LeafOpcode = EdgeInst->getOpcode(); 8977 } 8978 Stack.push_back( 8979 std::make_pair(EdgeInst, getFirstOperandIndex(EdgeInst))); 8980 continue; 8981 } 8982 // I is an extra argument for TreeN (its parent operation). 8983 markExtraArg(Stack.back(), EdgeInst); 8984 } 8985 return true; 8986 } 8987 8988 /// Attempt to vectorize the tree found by matchAssociativeReduction. 8989 Value *tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) { 8990 // If there are a sufficient number of reduction values, reduce 8991 // to a nearby power-of-2. We can safely generate oversized 8992 // vectors and rely on the backend to split them to legal sizes. 8993 unsigned NumReducedVals = ReducedVals.size(); 8994 if (NumReducedVals < 4) 8995 return nullptr; 8996 8997 // Intersect the fast-math-flags from all reduction operations. 8998 FastMathFlags RdxFMF; 8999 RdxFMF.set(); 9000 for (ReductionOpsType &RdxOp : ReductionOps) { 9001 for (Value *RdxVal : RdxOp) { 9002 if (auto *FPMO = dyn_cast<FPMathOperator>(RdxVal)) 9003 RdxFMF &= FPMO->getFastMathFlags(); 9004 } 9005 } 9006 9007 IRBuilder<> Builder(cast<Instruction>(ReductionRoot)); 9008 Builder.setFastMathFlags(RdxFMF); 9009 9010 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues; 9011 // The same extra argument may be used several times, so log each attempt 9012 // to use it. 9013 for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) { 9014 assert(Pair.first && "DebugLoc must be set."); 9015 ExternallyUsedValues[Pair.second].push_back(Pair.first); 9016 } 9017 9018 // The compare instruction of a min/max is the insertion point for new 9019 // instructions and may be replaced with a new compare instruction. 
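// For a (hypothetical) min/max reduction rooted at
//   %c = icmp sgt i32 %x, %y
//   %m = select i1 %c, i32 %x, i32 %y
// new instructions are inserted before %c rather than before %m.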
9020 auto getCmpForMinMaxReduction = [](Instruction *RdxRootInst) { 9021 assert(isa<SelectInst>(RdxRootInst) && 9022 "Expected min/max reduction to have select root instruction"); 9023 Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition(); 9024 assert(isa<Instruction>(ScalarCond) && 9025 "Expected min/max reduction to have compare condition"); 9026 return cast<Instruction>(ScalarCond); 9027 }; 9028 9029 // The reduction root is used as the insertion point for new instructions, 9030 // so set it as externally used to prevent it from being deleted. 9031 ExternallyUsedValues[ReductionRoot]; 9032 SmallVector<Value *, 16> IgnoreList; 9033 for (ReductionOpsType &RdxOp : ReductionOps) 9034 IgnoreList.append(RdxOp.begin(), RdxOp.end()); 9035 9036 unsigned ReduxWidth = PowerOf2Floor(NumReducedVals); 9037 if (NumReducedVals > ReduxWidth) { 9038 // In the loop below, we are building a tree based on a window of 9039 // 'ReduxWidth' values. 9040 // If the operands of those values have common traits (compare predicate, 9041 // constant operand, etc), then we want to group those together to 9042 // minimize the cost of the reduction. 9043 9044 // TODO: This should be extended to count common operands for 9045 // compares and binops. 9046 9047 // Step 1: Count the number of times each compare predicate occurs. 9048 SmallDenseMap<unsigned, unsigned> PredCountMap; 9049 for (Value *RdxVal : ReducedVals) { 9050 CmpInst::Predicate Pred; 9051 if (match(RdxVal, m_Cmp(Pred, m_Value(), m_Value()))) 9052 ++PredCountMap[Pred]; 9053 } 9054 // Step 2: Sort the values so the most common predicates come first. 9055 stable_sort(ReducedVals, [&PredCountMap](Value *A, Value *B) { 9056 CmpInst::Predicate PredA, PredB; 9057 if (match(A, m_Cmp(PredA, m_Value(), m_Value())) && 9058 match(B, m_Cmp(PredB, m_Value(), m_Value()))) { 9059 return PredCountMap[PredA] > PredCountMap[PredB]; 9060 } 9061 return false; 9062 }); 9063 } 9064 9065 Value *VectorizedTree = nullptr; 9066 unsigned i = 0; 9067 while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) { 9068 ArrayRef<Value *> VL(&ReducedVals[i], ReduxWidth); 9069 V.buildTree(VL, IgnoreList); 9070 if (V.isTreeTinyAndNotFullyVectorizable(/*ForReduction=*/true)) 9071 break; 9072 if (V.isLoadCombineReductionCandidate(RdxKind)) 9073 break; 9074 V.reorderTopToBottom(); 9075 V.reorderBottomToTop(/*IgnoreReorder=*/true); 9076 V.buildExternalUses(ExternallyUsedValues); 9077 9078 // For a poison-safe boolean logic reduction, do not replace select 9079 // instructions with logic ops. All reduced values will be frozen (see 9080 // below) to prevent leaking poison. 9081 if (isa<SelectInst>(ReductionRoot) && 9082 isBoolLogicOp(cast<Instruction>(ReductionRoot)) && 9083 NumReducedVals != ReduxWidth) 9084 break; 9085 9086 V.computeMinimumValueSizes(); 9087 9088 // Estimate cost. 
9089 InstructionCost TreeCost =
9090 V.getTreeCost(makeArrayRef(&ReducedVals[i], ReduxWidth));
9091 InstructionCost ReductionCost =
9092 getReductionCost(TTI, ReducedVals[i], ReduxWidth, RdxFMF);
9093 InstructionCost Cost = TreeCost + ReductionCost;
9094 if (!Cost.isValid()) {
9095 LLVM_DEBUG(dbgs() << "Encountered invalid baseline cost.\n");
9096 return nullptr;
9097 }
9098 if (Cost >= -SLPCostThreshold) {
9099 V.getORE()->emit([&]() {
9100 return OptimizationRemarkMissed(SV_NAME, "HorSLPNotBeneficial",
9101 cast<Instruction>(VL[0]))
9102 << "Vectorizing horizontal reduction is possible "
9103 << "but not beneficial with cost " << ore::NV("Cost", Cost)
9104 << " and threshold "
9105 << ore::NV("Threshold", -SLPCostThreshold);
9106 });
9107 break;
9108 }
9109 
9110 LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
9111 << Cost << ". (HorRdx)\n");
9112 V.getORE()->emit([&]() {
9113 return OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction",
9114 cast<Instruction>(VL[0]))
9115 << "Vectorized horizontal reduction with cost "
9116 << ore::NV("Cost", Cost) << " and with tree size "
9117 << ore::NV("TreeSize", V.getTreeSize());
9118 });
9119 
9120 // Vectorize a tree.
9121 DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
9122 Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);
9123 
9124 // Emit a reduction. If the root is a select (min/max idiom), the insert
9125 // point is the compare condition of that select.
9126 Instruction *RdxRootInst = cast<Instruction>(ReductionRoot);
9127 if (isCmpSelMinMax(RdxRootInst))
9128 Builder.SetInsertPoint(getCmpForMinMaxReduction(RdxRootInst));
9129 else
9130 Builder.SetInsertPoint(RdxRootInst);
9131 
9132 // To prevent poison from leaking across what used to be sequential, safe,
9133 // scalar boolean logic operations, the reduction operand must be frozen.
9134 if (isa<SelectInst>(RdxRootInst) && isBoolLogicOp(RdxRootInst))
9135 VectorizedRoot = Builder.CreateFreeze(VectorizedRoot);
9136 
9137 Value *ReducedSubTree =
9138 emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);
9139 
9140 if (!VectorizedTree) {
9141 // Initialize the final value in the reduction.
9142 VectorizedTree = ReducedSubTree;
9143 } else {
9144 // Update the final value in the reduction.
9145 Builder.SetCurrentDebugLocation(Loc);
9146 VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
9147 ReducedSubTree, "op.rdx", ReductionOps);
9148 }
9149 i += ReduxWidth;
9150 ReduxWidth = PowerOf2Floor(NumReducedVals - i);
9151 }
9152 
9153 if (VectorizedTree) {
9154 // Finish the reduction.
9155 for (; i < NumReducedVals; ++i) {
9156 auto *I = cast<Instruction>(ReducedVals[i]);
9157 Builder.SetCurrentDebugLocation(I->getDebugLoc());
9158 VectorizedTree =
9159 createOp(Builder, RdxKind, VectorizedTree, I, "", ReductionOps);
9160 }
9161 for (auto &Pair : ExternallyUsedValues) {
9162 // Add each externally used value to the final reduction.
9163 for (auto *I : Pair.second) {
9164 Builder.SetCurrentDebugLocation(I->getDebugLoc());
9165 VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
9166 Pair.first, "op.extra", I);
9167 }
9168 }
9169 
9170 ReductionRoot->replaceAllUsesWith(VectorizedTree);
9171 
9172 // Mark all scalar reduction ops for deletion; they are replaced by the
9173 // vector reductions.
9174 V.eraseInstructions(IgnoreList);
9175 }
9176 return VectorizedTree;
9177 }
9178 
9179 unsigned numReductionValues() const { return ReducedVals.size(); }
9180 
9181 private:
9182 /// Calculate the cost of a reduction.
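/// The result is the cost of the vector reduction minus the cost of the
/// (ReduxWidth - 1) scalar operations it replaces, so a negative value
/// means the vector form is expected to be cheaper.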
9183 InstructionCost getReductionCost(TargetTransformInfo *TTI, 9184 Value *FirstReducedVal, unsigned ReduxWidth, 9185 FastMathFlags FMF) { 9186 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 9187 Type *ScalarTy = FirstReducedVal->getType(); 9188 FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth); 9189 InstructionCost VectorCost, ScalarCost; 9190 switch (RdxKind) { 9191 case RecurKind::Add: 9192 case RecurKind::Mul: 9193 case RecurKind::Or: 9194 case RecurKind::And: 9195 case RecurKind::Xor: 9196 case RecurKind::FAdd: 9197 case RecurKind::FMul: { 9198 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind); 9199 VectorCost = 9200 TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF, CostKind); 9201 ScalarCost = TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy, CostKind); 9202 break; 9203 } 9204 case RecurKind::FMax: 9205 case RecurKind::FMin: { 9206 auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy); 9207 auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy)); 9208 VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy, 9209 /*unsigned=*/false, CostKind); 9210 CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind); 9211 ScalarCost = TTI->getCmpSelInstrCost(Instruction::FCmp, ScalarTy, 9212 SclCondTy, RdxPred, CostKind) + 9213 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 9214 SclCondTy, RdxPred, CostKind); 9215 break; 9216 } 9217 case RecurKind::SMax: 9218 case RecurKind::SMin: 9219 case RecurKind::UMax: 9220 case RecurKind::UMin: { 9221 auto *SclCondTy = CmpInst::makeCmpResultType(ScalarTy); 9222 auto *VecCondTy = cast<VectorType>(CmpInst::makeCmpResultType(VectorTy)); 9223 bool IsUnsigned = 9224 RdxKind == RecurKind::UMax || RdxKind == RecurKind::UMin; 9225 VectorCost = TTI->getMinMaxReductionCost(VectorTy, VecCondTy, IsUnsigned, 9226 CostKind); 9227 CmpInst::Predicate RdxPred = getMinMaxReductionPredicate(RdxKind); 9228 ScalarCost = TTI->getCmpSelInstrCost(Instruction::ICmp, ScalarTy, 9229 SclCondTy, RdxPred, CostKind) + 9230 TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy, 9231 SclCondTy, RdxPred, CostKind); 9232 break; 9233 } 9234 default: 9235 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 9236 } 9237 9238 // Scalar cost is repeated for N-1 elements. 9239 ScalarCost *= (ReduxWidth - 1); 9240 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost 9241 << " for reduction that starts with " << *FirstReducedVal 9242 << " (It is a splitting reduction)\n"); 9243 return VectorCost - ScalarCost; 9244 } 9245 9246 /// Emit a horizontal reduction of the vectorized value. 
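/// For example (illustratively), an add reduction of an <8 x i32> value is
/// lowered through createSimpleTargetReduction to a single
/// llvm.vector.reduce.add call.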
9247 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 9248 unsigned ReduxWidth, const TargetTransformInfo *TTI) { 9249 assert(VectorizedValue && "Need to have a vectorized tree node"); 9250 assert(isPowerOf2_32(ReduxWidth) && 9251 "We only handle power-of-two reductions for now"); 9252 assert(RdxKind != RecurKind::FMulAdd && 9253 "A call to the llvm.fmuladd intrinsic is not handled yet"); 9254 9255 ++NumVectorInstructions; 9256 return createSimpleTargetReduction(Builder, TTI, VectorizedValue, RdxKind); 9257 } 9258 }; 9259 9260 } // end anonymous namespace 9261 9262 static Optional<unsigned> getAggregateSize(Instruction *InsertInst) { 9263 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) 9264 return cast<FixedVectorType>(IE->getType())->getNumElements(); 9265 9266 unsigned AggregateSize = 1; 9267 auto *IV = cast<InsertValueInst>(InsertInst); 9268 Type *CurrentType = IV->getType(); 9269 do { 9270 if (auto *ST = dyn_cast<StructType>(CurrentType)) { 9271 for (auto *Elt : ST->elements()) 9272 if (Elt != ST->getElementType(0)) // check homogeneity 9273 return None; 9274 AggregateSize *= ST->getNumElements(); 9275 CurrentType = ST->getElementType(0); 9276 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) { 9277 AggregateSize *= AT->getNumElements(); 9278 CurrentType = AT->getElementType(); 9279 } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) { 9280 AggregateSize *= VT->getNumElements(); 9281 return AggregateSize; 9282 } else if (CurrentType->isSingleValueType()) { 9283 return AggregateSize; 9284 } else { 9285 return None; 9286 } 9287 } while (true); 9288 } 9289 9290 static bool findBuildAggregate_rec(Instruction *LastInsertInst, 9291 TargetTransformInfo *TTI, 9292 SmallVectorImpl<Value *> &BuildVectorOpds, 9293 SmallVectorImpl<Value *> &InsertElts, 9294 unsigned OperandOffset) { 9295 do { 9296 Value *InsertedOperand = LastInsertInst->getOperand(1); 9297 Optional<int> OperandIndex = getInsertIndex(LastInsertInst, OperandOffset); 9298 if (!OperandIndex) 9299 return false; 9300 if (isa<InsertElementInst>(InsertedOperand) || 9301 isa<InsertValueInst>(InsertedOperand)) { 9302 if (!findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI, 9303 BuildVectorOpds, InsertElts, *OperandIndex)) 9304 return false; 9305 } else { 9306 BuildVectorOpds[*OperandIndex] = InsertedOperand; 9307 InsertElts[*OperandIndex] = LastInsertInst; 9308 } 9309 LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0)); 9310 } while (LastInsertInst != nullptr && 9311 (isa<InsertValueInst>(LastInsertInst) || 9312 isa<InsertElementInst>(LastInsertInst)) && 9313 LastInsertInst->hasOneUse()); 9314 return true; 9315 } 9316 9317 /// Recognize construction of vectors like 9318 /// %ra = insertelement <4 x float> poison, float %s0, i32 0 9319 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 9320 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 9321 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 9322 /// starting from the last insertelement or insertvalue instruction. 9323 /// 9324 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>}, 9325 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on. 9326 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples. 9327 /// 9328 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type. 9329 /// 9330 /// \return true if it matches. 
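/// An illustrative (not taken from the tests) insertvalue chain for the
/// homogeneous-aggregate case:
/// %fa = insertvalue { float, float } poison, float %s0, 0
/// %fb = insertvalue { float, float } %fa, float %s1, 1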
9331 static bool findBuildAggregate(Instruction *LastInsertInst, 9332 TargetTransformInfo *TTI, 9333 SmallVectorImpl<Value *> &BuildVectorOpds, 9334 SmallVectorImpl<Value *> &InsertElts) { 9335 9336 assert((isa<InsertElementInst>(LastInsertInst) || 9337 isa<InsertValueInst>(LastInsertInst)) && 9338 "Expected insertelement or insertvalue instruction!"); 9339 9340 assert((BuildVectorOpds.empty() && InsertElts.empty()) && 9341 "Expected empty result vectors!"); 9342 9343 Optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst); 9344 if (!AggregateSize) 9345 return false; 9346 BuildVectorOpds.resize(*AggregateSize); 9347 InsertElts.resize(*AggregateSize); 9348 9349 if (findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 9350 0)) { 9351 llvm::erase_value(BuildVectorOpds, nullptr); 9352 llvm::erase_value(InsertElts, nullptr); 9353 if (BuildVectorOpds.size() >= 2) 9354 return true; 9355 } 9356 9357 return false; 9358 } 9359 9360 /// Try and get a reduction value from a phi node. 9361 /// 9362 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 9363 /// if they come from either \p ParentBB or a containing loop latch. 9364 /// 9365 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 9366 /// if not possible. 9367 static Value *getReductionValue(const DominatorTree *DT, PHINode *P, 9368 BasicBlock *ParentBB, LoopInfo *LI) { 9369 // There are situations where the reduction value is not dominated by the 9370 // reduction phi. Vectorizing such cases has been reported to cause 9371 // miscompiles. See PR25787. 9372 auto DominatedReduxValue = [&](Value *R) { 9373 return isa<Instruction>(R) && 9374 DT->dominates(P->getParent(), cast<Instruction>(R)->getParent()); 9375 }; 9376 9377 Value *Rdx = nullptr; 9378 9379 // Return the incoming value if it comes from the same BB as the phi node. 9380 if (P->getIncomingBlock(0) == ParentBB) { 9381 Rdx = P->getIncomingValue(0); 9382 } else if (P->getIncomingBlock(1) == ParentBB) { 9383 Rdx = P->getIncomingValue(1); 9384 } 9385 9386 if (Rdx && DominatedReduxValue(Rdx)) 9387 return Rdx; 9388 9389 // Otherwise, check whether we have a loop latch to look at. 9390 Loop *BBL = LI->getLoopFor(ParentBB); 9391 if (!BBL) 9392 return nullptr; 9393 BasicBlock *BBLatch = BBL->getLoopLatch(); 9394 if (!BBLatch) 9395 return nullptr; 9396 9397 // There is a loop latch, return the incoming value if it comes from 9398 // that. This reduction pattern occasionally turns up. 
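// An illustrative sketch of that shape (simplified IR, not from a test):
//   header:
//     %p = phi float [ 0.0, %entry ], [ %rdx, %latch ]
//   ...
//   latch:
//     %rdx = fadd fast float %p, %x
// Here the candidate reduction value %rdx is incoming from the loop latch
// rather than from ParentBB.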
9399 if (P->getIncomingBlock(0) == BBLatch) {
9400 Rdx = P->getIncomingValue(0);
9401 } else if (P->getIncomingBlock(1) == BBLatch) {
9402 Rdx = P->getIncomingValue(1);
9403 }
9404 
9405 if (Rdx && DominatedReduxValue(Rdx))
9406 return Rdx;
9407 
9408 return nullptr;
9409 }
9410 
9411 static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) {
9412 if (match(I, m_BinOp(m_Value(V0), m_Value(V1))))
9413 return true;
9414 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1))))
9415 return true;
9416 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1))))
9417 return true;
9418 if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1))))
9419 return true;
9420 if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1))))
9421 return true;
9422 if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1))))
9423 return true;
9424 if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1))))
9425 return true;
9426 return false;
9427 }
9428 
9429 /// Attempt to match and vectorize a horizontal reduction.
9430 /// If it is legal to match a horizontal reduction feeding the phi node \a P
9431 /// with reduction operators \a Root (or one of its operands) in a basic block
9432 /// \a BB, then check if it can be done. If a horizontal reduction is not found
9433 /// and the root instruction is a binary operation, vectorization of the
9434 /// operands is attempted.
9435 /// \returns true if a horizontal reduction was matched and reduced or operands
9436 /// of one of the binary instructions were vectorized.
9437 /// \returns false if a horizontal reduction was not matched (or not possible)
9438 /// or no vectorization of any binary operation feeding the \a Root instruction
9439 /// was performed.
9440 static bool tryToVectorizeHorReductionOrInstOperands(
9441 PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
9442 TargetTransformInfo *TTI,
9443 const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
9444 if (!ShouldVectorizeHor)
9445 return false;
9446 
9447 if (!Root)
9448 return false;
9449 
9450 if (Root->getParent() != BB || isa<PHINode>(Root))
9451 return false;
9452 // Start the analysis from the Root instruction. If a horizontal reduction is
9453 // found, try to vectorize it. If it is not a horizontal reduction or
9454 // vectorization is not possible or not effective, and the currently analyzed
9455 // instruction is a binary operation, try to vectorize the operands, using
9456 // a pre-order DFS traversal. If the operands were not vectorized, repeat
9457 // the same procedure considering each operand as a possible root of the
9458 // horizontal reduction.
9459 // Interrupt the process if the Root instruction itself was vectorized or all
9460 // sub-trees no deeper than RecursionMaxDepth were analyzed/vectorized.
9461 // Skip the analysis of CmpInsts. The compiler implements a postponed analysis
9462 // of the CmpInsts, so we can skip extra attempts in
9463 // tryToVectorizeHorReductionOrInstOperands and save compile time.
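// For example (illustrative), given a root
//   %add = fadd fast float %mul0, %mul1
// the fadd is first tried as a horizontal-reduction root; if no reduction is
// matched it is postponed for plain bundle vectorization after the loop, and
// its operands %mul0 and %mul1 are queued below as possible reduction roots
// themselves.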
9464 std::queue<std::pair<Instruction *, unsigned>> Stack;
9465 Stack.emplace(Root, 0);
9466 SmallPtrSet<Value *, 8> VisitedInstrs;
9467 SmallVector<WeakTrackingVH> PostponedInsts;
9468 bool Res = false;
9469 auto &&TryToReduce = [TTI, &P, &R](Instruction *Inst, Value *&B0,
9470 Value *&B1) -> Value * {
9471 bool IsBinop = matchRdxBop(Inst, B0, B1);
9472 bool IsSelect = match(Inst, m_Select(m_Value(), m_Value(), m_Value()));
9473 if (IsBinop || IsSelect) {
9474 HorizontalReduction HorRdx;
9475 if (HorRdx.matchAssociativeReduction(P, Inst))
9476 return HorRdx.tryToReduce(R, TTI);
9477 }
9478 return nullptr;
9479 };
9480 while (!Stack.empty()) {
9481 Instruction *Inst;
9482 unsigned Level;
9483 std::tie(Inst, Level) = Stack.front();
9484 Stack.pop();
9485 // Do not try to analyze an instruction that has already been vectorized.
9486 // This may happen when we vectorize instruction operands on a previous
9487 // iteration while the stack was populated before that happened.
9488 if (R.isDeleted(Inst))
9489 continue;
9490 Value *B0 = nullptr, *B1 = nullptr;
9491 if (Value *V = TryToReduce(Inst, B0, B1)) {
9492 Res = true;
9493 // Set P to nullptr to avoid re-analysis of phi node in
9494 // matchAssociativeReduction function unless this is the root node.
9495 P = nullptr;
9496 if (auto *I = dyn_cast<Instruction>(V)) {
9497 // Try to find another reduction.
9498 Stack.emplace(I, Level);
9499 continue;
9500 }
9501 } else {
9502 bool IsBinop = B0 && B1;
9503 if (P && IsBinop) {
9504 Inst = dyn_cast<Instruction>(B0);
9505 if (Inst == P)
9506 Inst = dyn_cast<Instruction>(B1);
9507 if (!Inst) {
9508 // Set P to nullptr to avoid re-analysis of phi node in
9509 // matchAssociativeReduction function unless this is the root node.
9510 P = nullptr;
9511 continue;
9512 }
9513 }
9514 // Set P to nullptr to avoid re-analysis of phi node in
9515 // matchAssociativeReduction function unless this is the root node.
9516 P = nullptr;
9517 // Do not try to vectorize CmpInst operands; this is done separately.
9518 // The final attempt at binop-argument vectorization should happen after
9519 // the loop, to try to find reductions first.
9520 if (!isa<CmpInst>(Inst))
9521 PostponedInsts.push_back(Inst);
9522 }
9523 
9524 // Try to vectorize operands.
9525 // Continue analysis for the instruction from the same basic block only to
9526 // save compile time.
9527 if (++Level < RecursionMaxDepth)
9528 for (auto *Op : Inst->operand_values())
9529 if (VisitedInstrs.insert(Op).second)
9530 if (auto *I = dyn_cast<Instruction>(Op))
9531 // Do not try to vectorize CmpInst operands; this is done
9532 // separately.
9533 if (!isa<PHINode>(I) && !isa<CmpInst>(I) && !R.isDeleted(I) &&
9534 I->getParent() == BB)
9535 Stack.emplace(I, Level);
9536 }
9537 // Try to vectorize binops for which no reductions were found.
9538 for (Value *V : PostponedInsts)
9539 if (auto *Inst = dyn_cast<Instruction>(V))
9540 if (!R.isDeleted(Inst))
9541 Res |= Vectorize(Inst, R);
9542 return Res;
9543 }
9544 
9545 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
9546 BasicBlock *BB, BoUpSLP &R,
9547 TargetTransformInfo *TTI) {
9548 auto *I = dyn_cast_or_null<Instruction>(V);
9549 if (!I)
9550 return false;
9551 
9552 if (!isa<BinaryOperator>(I))
9553 P = nullptr;
9554 // Try to match and vectorize a horizontal reduction.
9555 auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool {
9556 return tryToVectorize(I, R);
9557 };
9558 return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI,
9559 ExtraVectorization);
9560 }
9561 
9562 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
9563 BasicBlock *BB, BoUpSLP &R) {
9564 const DataLayout &DL = BB->getModule()->getDataLayout();
9565 if (!R.canMapToVector(IVI->getType(), DL))
9566 return false;
9567 
9568 SmallVector<Value *, 16> BuildVectorOpds;
9569 SmallVector<Value *, 16> BuildVectorInsts;
9570 if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts))
9571 return false;
9572 
9573 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
9574 // Aggregate value is unlikely to be processed in a vector register.
9575 return tryToVectorizeList(BuildVectorOpds, R);
9576 }
9577 
9578 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
9579 BasicBlock *BB, BoUpSLP &R) {
9580 SmallVector<Value *, 16> BuildVectorInsts;
9581 SmallVector<Value *, 16> BuildVectorOpds;
9582 SmallVector<int> Mask;
9583 if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) ||
9584 (llvm::all_of(
9585 BuildVectorOpds,
9586 [](Value *V) { return isa<ExtractElementInst, UndefValue>(V); }) &&
9587 isFixedVectorShuffle(BuildVectorOpds, Mask)))
9588 return false;
9589 
9590 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
9591 return tryToVectorizeList(BuildVectorInsts, R);
9592 }
9593 
9594 template <typename T>
9595 static bool
9596 tryToVectorizeSequence(SmallVectorImpl<T *> &Incoming,
9597 function_ref<unsigned(T *)> Limit,
9598 function_ref<bool(T *, T *)> Comparator,
9599 function_ref<bool(T *, T *)> AreCompatible,
9600 function_ref<bool(ArrayRef<T *>, bool)> TryToVectorize,
9601 bool LimitForRegisterSize) {
9602 bool Changed = false;
9603 // Sort by type, parent, operands.
9604 stable_sort(Incoming, Comparator);
9605 
9606 // Try to vectorize elements based on their type.
9607 SmallVector<T *> Candidates;
9608 for (auto *IncIt = Incoming.begin(), *E = Incoming.end(); IncIt != E;) {
9609 // Look for the next elements with the same type, parent and operand
9610 // kinds.
9611 auto *SameTypeIt = IncIt;
9612 while (SameTypeIt != E && AreCompatible(*SameTypeIt, *IncIt))
9613 ++SameTypeIt;
9614 
9615 // Try to vectorize them.
9616 unsigned NumElts = (SameTypeIt - IncIt);
9617 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at nodes ("
9618 << NumElts << ")\n");
9619 // The vectorization is a 3-stage attempt:
9620 // 1. Try to vectorize instructions with the same/alternate opcodes,
9621 // limited to the size of the maximal register at first.
9622 // 2. Try to vectorize the remaining instructions with the same type, if
9623 // possible. This may give better vectorization results than trying to
9624 // vectorize only instructions with the same/alternate opcodes.
9625 // 3. Make a final attempt to vectorize all instructions with the
9626 // same/alternate ops only; this may yield some extra final
9627 // vectorization.
9628 if (NumElts > 1 &&
9629 TryToVectorize(makeArrayRef(IncIt, NumElts), LimitForRegisterSize)) {
9630 // Success: start over because instructions might have been changed.
9631 Changed = true;
9632 } else if (NumElts < Limit(*IncIt) &&
9633 (Candidates.empty() ||
9634 Candidates.front()->getType() == (*IncIt)->getType())) {
9635 Candidates.append(IncIt, std::next(IncIt, NumElts));
9636 }
9637 // Final attempt to vectorize instructions with the same types.
9638 if (Candidates.size() > 1 &&
9639 (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType())) {
9640 if (TryToVectorize(Candidates, /*LimitForRegisterSize=*/false)) {
9641 // Success: start over because instructions might have been changed.
9642 Changed = true;
9643 } else if (LimitForRegisterSize) {
9644 // Try to vectorize using small vectors.
9645 for (auto *It = Candidates.begin(), *End = Candidates.end();
9646 It != End;) {
9647 auto *SameTypeIt = It;
9648 while (SameTypeIt != End && AreCompatible(*SameTypeIt, *It))
9649 ++SameTypeIt;
9650 unsigned NumElts = (SameTypeIt - It);
9651 if (NumElts > 1 && TryToVectorize(makeArrayRef(It, NumElts),
9652 /*LimitForRegisterSize=*/false))
9653 Changed = true;
9654 It = SameTypeIt;
9655 }
9656 }
9657 Candidates.clear();
9658 }
9659 
9660 // Start over at the next instruction of a different type (or the end).
9661 IncIt = SameTypeIt;
9662 }
9663 return Changed;
9664 }
9665 
9666 /// Compare two cmp instructions. If IsCompatibility is true, the function
9667 /// returns true if the two cmps have the same/swapped predicates and
9668 /// compatible corresponding operands. If IsCompatibility is false, the
9669 /// function implements a strict weak ordering relation between two cmp
9670 /// instructions, returning true if the first instruction is "less" than the
9671 /// second, i.e. its predicate is less than the predicate of the second or the
9672 /// operand IDs are less than the operand IDs of the second cmp instruction.
9673 template <bool IsCompatibility>
9674 static bool compareCmp(Value *V, Value *V2,
9675 function_ref<bool(Instruction *)> IsDeleted) {
9676 auto *CI1 = cast<CmpInst>(V);
9677 auto *CI2 = cast<CmpInst>(V2);
9678 if (IsDeleted(CI2) || !isValidElementType(CI2->getType()))
9679 return false;
9680 if (CI1->getOperand(0)->getType()->getTypeID() <
9681 CI2->getOperand(0)->getType()->getTypeID())
9682 return !IsCompatibility;
9683 if (CI1->getOperand(0)->getType()->getTypeID() >
9684 CI2->getOperand(0)->getType()->getTypeID())
9685 return false;
9686 CmpInst::Predicate Pred1 = CI1->getPredicate();
9687 CmpInst::Predicate Pred2 = CI2->getPredicate();
9688 CmpInst::Predicate SwapPred1 = CmpInst::getSwappedPredicate(Pred1);
9689 CmpInst::Predicate SwapPred2 = CmpInst::getSwappedPredicate(Pred2);
9690 CmpInst::Predicate BasePred1 = std::min(Pred1, SwapPred1);
9691 CmpInst::Predicate BasePred2 = std::min(Pred2, SwapPred2);
9692 if (BasePred1 < BasePred2)
9693 return !IsCompatibility;
9694 if (BasePred1 > BasePred2)
9695 return false;
9696 // Compare operands.
9697 bool LEPreds = Pred1 <= Pred2;
9698 bool GEPreds = Pred1 >= Pred2;
9699 for (int I = 0, E = CI1->getNumOperands(); I < E; ++I) {
9700 auto *Op1 = CI1->getOperand(LEPreds ? I : E - I - 1);
9701 auto *Op2 = CI2->getOperand(GEPreds ?
I : E - I - 1); 9702 if (Op1->getValueID() < Op2->getValueID()) 9703 return !IsCompatibility; 9704 if (Op1->getValueID() > Op2->getValueID()) 9705 return false; 9706 if (auto *I1 = dyn_cast<Instruction>(Op1)) 9707 if (auto *I2 = dyn_cast<Instruction>(Op2)) { 9708 if (I1->getParent() != I2->getParent()) 9709 return false; 9710 InstructionsState S = getSameOpcode({I1, I2}); 9711 if (S.getOpcode()) 9712 continue; 9713 return false; 9714 } 9715 } 9716 return IsCompatibility; 9717 } 9718 9719 bool SLPVectorizerPass::vectorizeSimpleInstructions( 9720 SmallVectorImpl<Instruction *> &Instructions, BasicBlock *BB, BoUpSLP &R, 9721 bool AtTerminator) { 9722 bool OpsChanged = false; 9723 SmallVector<Instruction *, 4> PostponedCmps; 9724 for (auto *I : reverse(Instructions)) { 9725 if (R.isDeleted(I)) 9726 continue; 9727 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) 9728 OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R); 9729 else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) 9730 OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R); 9731 else if (isa<CmpInst>(I)) 9732 PostponedCmps.push_back(I); 9733 } 9734 if (AtTerminator) { 9735 // Try to find reductions first. 9736 for (Instruction *I : PostponedCmps) { 9737 if (R.isDeleted(I)) 9738 continue; 9739 for (Value *Op : I->operands()) 9740 OpsChanged |= vectorizeRootInstruction(nullptr, Op, BB, R, TTI); 9741 } 9742 // Try to vectorize operands as vector bundles. 9743 for (Instruction *I : PostponedCmps) { 9744 if (R.isDeleted(I)) 9745 continue; 9746 OpsChanged |= tryToVectorize(I, R); 9747 } 9748 // Try to vectorize list of compares. 9749 // Sort by type, compare predicate, etc. 9750 auto &&CompareSorter = [&R](Value *V, Value *V2) { 9751 return compareCmp<false>(V, V2, 9752 [&R](Instruction *I) { return R.isDeleted(I); }); 9753 }; 9754 9755 auto &&AreCompatibleCompares = [&R](Value *V1, Value *V2) { 9756 if (V1 == V2) 9757 return true; 9758 return compareCmp<true>(V1, V2, 9759 [&R](Instruction *I) { return R.isDeleted(I); }); 9760 }; 9761 auto Limit = [&R](Value *V) { 9762 unsigned EltSize = R.getVectorElementSize(V); 9763 return std::max(2U, R.getMaxVecRegSize() / EltSize); 9764 }; 9765 9766 SmallVector<Value *> Vals(PostponedCmps.begin(), PostponedCmps.end()); 9767 OpsChanged |= tryToVectorizeSequence<Value>( 9768 Vals, Limit, CompareSorter, AreCompatibleCompares, 9769 [this, &R](ArrayRef<Value *> Candidates, bool LimitForRegisterSize) { 9770 // Exclude possible reductions from other blocks. 9771 bool ArePossiblyReducedInOtherBlock = 9772 any_of(Candidates, [](Value *V) { 9773 return any_of(V->users(), [V](User *U) { 9774 return isa<SelectInst>(U) && 9775 cast<SelectInst>(U)->getParent() != 9776 cast<Instruction>(V)->getParent(); 9777 }); 9778 }); 9779 if (ArePossiblyReducedInOtherBlock) 9780 return false; 9781 return tryToVectorizeList(Candidates, R, LimitForRegisterSize); 9782 }, 9783 /*LimitForRegisterSize=*/true); 9784 Instructions.clear(); 9785 } else { 9786 // Insert in reverse order since the PostponedCmps vector was filled in 9787 // reverse order. 9788 Instructions.assign(PostponedCmps.rbegin(), PostponedCmps.rend()); 9789 } 9790 return OpsChanged; 9791 } 9792 9793 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { 9794 bool Changed = false; 9795 SmallVector<Value *, 4> Incoming; 9796 SmallPtrSet<Value *, 16> VisitedInstrs; 9797 // Maps phi nodes to the non-phi nodes found in the use tree for each phi 9798 // node. 
This allows us to better identify the chains that can be
9799 // vectorized.
9800 DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes;
9801 auto PHICompare = [this, &PHIToOpcodes](Value *V1, Value *V2) {
9802 assert(isValidElementType(V1->getType()) &&
9803 isValidElementType(V2->getType()) &&
9804 "Expected vectorizable types only.");
9805 // It is fine to compare type IDs here, since we expect only vectorizable
9806 // types, like ints, floats and pointers; we don't care about other types.
9807 if (V1->getType()->getTypeID() < V2->getType()->getTypeID())
9808 return true;
9809 if (V1->getType()->getTypeID() > V2->getType()->getTypeID())
9810 return false;
9811 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
9812 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
9813 if (Opcodes1.size() < Opcodes2.size())
9814 return true;
9815 if (Opcodes1.size() > Opcodes2.size())
9816 return false;
9817 Optional<bool> ConstOrder;
9818 for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
9819 // Undefs are compatible with any other value.
9820 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) {
9821 if (!ConstOrder)
9822 ConstOrder =
9823 !isa<UndefValue>(Opcodes1[I]) && isa<UndefValue>(Opcodes2[I]);
9824 continue;
9825 }
9826 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
9827 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
9828 DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent());
9829 DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent());
9830 if (!NodeI1)
9831 return NodeI2 != nullptr;
9832 if (!NodeI2)
9833 return false;
9834 assert((NodeI1 == NodeI2) ==
9835 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
9836 "Different nodes should have different DFS numbers");
9837 if (NodeI1 != NodeI2)
9838 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
9839 InstructionsState S = getSameOpcode({I1, I2});
9840 if (S.getOpcode())
9841 continue;
9842 return I1->getOpcode() < I2->getOpcode();
9843 }
9844 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) {
9845 if (!ConstOrder)
9846 ConstOrder = Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID();
9847 continue;
9848 }
9849 if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID())
9850 return true;
9851 if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID())
9852 return false;
9853 }
9854 return ConstOrder && *ConstOrder;
9855 };
9856 auto AreCompatiblePHIs = [&PHIToOpcodes](Value *V1, Value *V2) {
9857 if (V1 == V2)
9858 return true;
9859 if (V1->getType() != V2->getType())
9860 return false;
9861 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
9862 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
9863 if (Opcodes1.size() != Opcodes2.size())
9864 return false;
9865 for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
9866 // Undefs are compatible with any other value.
9867 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I]))
9868 continue;
9869 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
9870 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
9871 if (I1->getParent() != I2->getParent())
9872 return false;
9873 InstructionsState S = getSameOpcode({I1, I2});
9874 if (S.getOpcode())
9875 continue;
9876 return false;
9877 }
9878 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I]))
9879 continue;
9880 if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID())
9881 return false;
9882 }
9883 return true;
9884 };
9885 auto Limit = [&R](Value *V) {
9886 unsigned EltSize = R.getVectorElementSize(V);
9887 return std::max(2U, R.getMaxVecRegSize() / EltSize);
9888 };
9889 
9890 bool HaveVectorizedPhiNodes = false;
9891 do {
9892 // Collect the incoming values from the PHIs.
9893 Incoming.clear();
9894 for (Instruction &I : *BB) {
9895 PHINode *P = dyn_cast<PHINode>(&I);
9896 if (!P)
9897 break;
9898 
9899 // No need to analyze deleted, vectorized and non-vectorizable
9900 // instructions.
9901 if (!VisitedInstrs.count(P) && !R.isDeleted(P) &&
9902 isValidElementType(P->getType()))
9903 Incoming.push_back(P);
9904 }
9905 
9906 // Find the corresponding non-phi nodes for better matching when trying to
9907 // build the tree.
9908 for (Value *V : Incoming) {
9909 SmallVectorImpl<Value *> &Opcodes =
9910 PHIToOpcodes.try_emplace(V).first->getSecond();
9911 if (!Opcodes.empty())
9912 continue;
9913 SmallVector<Value *, 4> Nodes(1, V);
9914 SmallPtrSet<Value *, 4> Visited;
9915 while (!Nodes.empty()) {
9916 auto *PHI = cast<PHINode>(Nodes.pop_back_val());
9917 if (!Visited.insert(PHI).second)
9918 continue;
9919 for (Value *V : PHI->incoming_values()) {
9920 if (auto *PHI1 = dyn_cast<PHINode>((V))) {
9921 Nodes.push_back(PHI1);
9922 continue;
9923 }
9924 Opcodes.emplace_back(V);
9925 }
9926 }
9927 }
9928 
9929 HaveVectorizedPhiNodes = tryToVectorizeSequence<Value>(
9930 Incoming, Limit, PHICompare, AreCompatiblePHIs,
9931 [this, &R](ArrayRef<Value *> Candidates, bool LimitForRegisterSize) {
9932 return tryToVectorizeList(Candidates, R, LimitForRegisterSize);
9933 },
9934 /*LimitForRegisterSize=*/true);
9935 Changed |= HaveVectorizedPhiNodes;
9936 VisitedInstrs.insert(Incoming.begin(), Incoming.end());
9937 } while (HaveVectorizedPhiNodes);
9938 
9939 VisitedInstrs.clear();
9940 
9941 SmallVector<Instruction *, 8> PostProcessInstructions;
9942 SmallDenseSet<Instruction *, 4> KeyNodes;
9943 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
9944 // Skip instructions with scalable type. The number of elements is unknown
9945 // at compile time for scalable types.
9946 if (isa<ScalableVectorType>(it->getType()))
9947 continue;
9948 
9949 // Skip instructions marked for deletion.
9950 if (R.isDeleted(&*it))
9951 continue;
9952 // We may go through BB multiple times so skip the ones already checked.
9953 if (!VisitedInstrs.insert(&*it).second) {
9954 if (it->use_empty() && KeyNodes.contains(&*it) &&
9955 vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
9956 it->isTerminator())) {
9957 // We would like to start over since some instructions are deleted
9958 // and the iterator may become invalid.
9959 Changed = true;
9960 it = BB->begin();
9961 e = BB->end();
9962 }
9963 continue;
9964 }
9965 
9966 if (isa<DbgInfoIntrinsic>(it))
9967 continue;
9968 
9969 // Try to vectorize reductions that use PHINodes.
9970 if (PHINode *P = dyn_cast<PHINode>(it)) {
9971 // Check that the PHI is a reduction PHI.
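// E.g. (illustrative) a two-way PHI such as
//   %sum = phi i32 [ 0, %entry ], [ %sum.next, %loop ]
// where %sum.next continues the reduction inside the loop.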
9972 if (P->getNumIncomingValues() == 2) {
9973 // Try to match and vectorize a horizontal reduction.
9974 if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
9975 TTI)) {
9976 Changed = true;
9977 it = BB->begin();
9978 e = BB->end();
9979 continue;
9980 }
9981 }
9982 // Try to vectorize the incoming values of the PHI, to catch reductions
9983 // that feed into PHIs.
9984 for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) {
9985 // Skip if the incoming block is the current BB for now. Also, bypass
9986 // unreachable IR for efficiency and to avoid crashing.
9987 // TODO: Collect the skipped incoming values and try to vectorize them
9988 // after processing BB.
9989 if (BB == P->getIncomingBlock(I) ||
9990 !DT->isReachableFromEntry(P->getIncomingBlock(I)))
9991 continue;
9992 
9993 Changed |= vectorizeRootInstruction(nullptr, P->getIncomingValue(I),
9994 P->getIncomingBlock(I), R, TTI);
9995 }
9996 continue;
9997 }
9998 
9999 // We ran into an instruction without users, such as a terminator, a store,
10000 // or a function call with an ignored return value. Handle unused
10001 // instructions based on their type (void type, plus CallInst and InvokeInst).
10002 if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
10003 isa<InvokeInst>(it))) {
10004 KeyNodes.insert(&*it);
10005 bool OpsChanged = false;
10006 if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
10007 for (auto *V : it->operand_values()) {
10008 // Try to match and vectorize a horizontal reduction.
10009 OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
10010 }
10011 }
10012 // Start vectorization of the post-process list of instructions from the
10013 // top-tree instructions, to try to vectorize as many instructions as
10014 // possible.
10015 OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R,
10016 it->isTerminator());
10017 if (OpsChanged) {
10018 // We would like to start over since some instructions are deleted
10019 // and the iterator may become invalid.
10020 Changed = true;
10021 it = BB->begin();
10022 e = BB->end();
10023 continue;
10024 }
10025 }
10026 
10027 if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
10028 isa<InsertValueInst>(it))
10029 PostProcessInstructions.push_back(&*it);
10030 }
10031 
10032 return Changed;
10033 }
10034 
10035 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
10036 auto Changed = false;
10037 for (auto &Entry : GEPs) {
10038 // If the getelementptr list has fewer than two elements, there's nothing
10039 // to do.
10040 if (Entry.second.size() < 2)
10041 continue;
10042 
10043 LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
10044 << Entry.second.size() << ".\n");
10045 
10046 // Process the GEP list in chunks suitable for the target's supported
10047 // vector size. If a vector register can't hold 1 element, we are done. We
10048 // are trying to vectorize the index computations, so the maximum number of
10049 // elements is based on the size of the index expression, rather than the
10050 // size of the GEP itself (the target's pointer size).
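// For example (illustrative numbers only): with a 128-bit maximum vector
// register and i64 index expressions, EltSize is 64 and MaxElts below is
// 128 / 64 = 2, so the GEP list is processed in chunks of two.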
10051 unsigned MaxVecRegSize = R.getMaxVecRegSize();
10052 unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin());
10053 if (MaxVecRegSize < EltSize)
10054 continue;
10055 
10056 unsigned MaxElts = MaxVecRegSize / EltSize;
10057 for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
10058 auto Len = std::min<unsigned>(BE - BI, MaxElts);
10059 ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);
10060 
10061 // Initialize a set of candidate getelementptrs. Note that we use a
10062 // SetVector here to preserve program order. If the index computations
10063 // are vectorizable and begin with loads, we want to minimize the chance
10064 // of having to reorder them later.
10065 SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());
10066 
10067 // Some of the candidates may have already been vectorized after we
10068 // initially collected them. If so, they are marked as deleted, so remove
10069 // them from the set of candidates.
10070 Candidates.remove_if(
10071 [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); });
10072 
10073 // Remove from the set of candidates all pairs of getelementptrs with
10074 // constant differences. Such getelementptrs are likely not good
10075 // candidates for vectorization in a bottom-up phase since one can be
10076 // computed from the other. We also ensure all candidate getelementptr
10077 // indices are unique.
10078 for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
10079 auto *GEPI = GEPList[I];
10080 if (!Candidates.count(GEPI))
10081 continue;
10082 auto *SCEVI = SE->getSCEV(GEPList[I]);
10083 for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
10084 auto *GEPJ = GEPList[J];
10085 auto *SCEVJ = SE->getSCEV(GEPList[J]);
10086 if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
10087 Candidates.remove(GEPI);
10088 Candidates.remove(GEPJ);
10089 } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
10090 Candidates.remove(GEPJ);
10091 }
10092 }
10093 }
10094 
10095 // We break out of the above computation as soon as we know there are
10096 // fewer than two candidates remaining.
10097 if (Candidates.size() < 2)
10098 continue;
10099 
10100 // Add the single, non-constant index of each candidate to the bundle. We
10101 // ensured the indices met these constraints when we originally collected
10102 // the getelementptrs.
10103 SmallVector<Value *, 16> Bundle(Candidates.size());
10104 auto BundleIndex = 0u;
10105 for (auto *V : Candidates) {
10106 auto *GEP = cast<GetElementPtrInst>(V);
10107 auto *GEPIdx = GEP->idx_begin()->get();
10108 assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
10109 Bundle[BundleIndex++] = GEPIdx;
10110 }
10111 
10112 // Try and vectorize the indices. We are currently only interested in
10113 // gather-like cases of the form:
10114 //
10115 // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
10116 //
10117 // where the loads of "a", the loads of "b", and the subtractions can be
10118 // performed in parallel. It's likely that detecting this pattern in a
10119 // bottom-up phase will be simpler and less costly than building a
10120 // full-blown top-down phase beginning at the consecutive loads.
10121 Changed |= tryToVectorizeList(Bundle, R);
10122 }
10123 }
10124 return Changed;
10125 }
10126 
10127 bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
10128 bool Changed = false;
10129 // Sort by type, base pointer and value operands.
Value operands must be
10130 // compatible (have the same opcode, same parent), otherwise it is
10131 // definitely not profitable to try to vectorize them.
10132 auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) {
10133 if (V->getPointerOperandType()->getTypeID() <
10134 V2->getPointerOperandType()->getTypeID())
10135 return true;
10136 if (V->getPointerOperandType()->getTypeID() >
10137 V2->getPointerOperandType()->getTypeID())
10138 return false;
10139 // UndefValues are compatible with all other values.
10140 if (isa<UndefValue>(V->getValueOperand()) ||
10141 isa<UndefValue>(V2->getValueOperand()))
10142 return false;
10143 if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand()))
10144 if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
10145 DomTreeNodeBase<llvm::BasicBlock> *NodeI1 =
10146 DT->getNode(I1->getParent());
10147 DomTreeNodeBase<llvm::BasicBlock> *NodeI2 =
10148 DT->getNode(I2->getParent());
10149 assert(NodeI1 && "Should only process reachable instructions");
10150 assert(NodeI2 && "Should only process reachable instructions");
10151 assert((NodeI1 == NodeI2) ==
10152 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
10153 "Different nodes should have different DFS numbers");
10154 if (NodeI1 != NodeI2)
10155 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
10156 InstructionsState S = getSameOpcode({I1, I2});
10157 if (S.getOpcode())
10158 return false;
10159 return I1->getOpcode() < I2->getOpcode();
10160 }
10161 if (isa<Constant>(V->getValueOperand()) &&
10162 isa<Constant>(V2->getValueOperand()))
10163 return false;
10164 return V->getValueOperand()->getValueID() <
10165 V2->getValueOperand()->getValueID();
10166 };
10167 
10168 auto &&AreCompatibleStores = [](StoreInst *V1, StoreInst *V2) {
10169 if (V1 == V2)
10170 return true;
10171 if (V1->getPointerOperandType() != V2->getPointerOperandType())
10172 return false;
10173 // Undefs are compatible with any other value.
10174 if (isa<UndefValue>(V1->getValueOperand()) ||
10175 isa<UndefValue>(V2->getValueOperand()))
10176 return true;
10177 if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
10178 if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
10179 if (I1->getParent() != I2->getParent())
10180 return false;
10181 InstructionsState S = getSameOpcode({I1, I2});
10182 return S.getOpcode() > 0;
10183 }
10184 if (isa<Constant>(V1->getValueOperand()) &&
10185 isa<Constant>(V2->getValueOperand()))
10186 return true;
10187 return V1->getValueOperand()->getValueID() ==
10188 V2->getValueOperand()->getValueID();
10189 };
10190 auto Limit = [&R, this](StoreInst *SI) {
10191 unsigned EltSize = DL->getTypeSizeInBits(SI->getValueOperand()->getType());
10192 return R.getMinVF(EltSize);
10193 };
10194 
10195 // Attempt to sort and vectorize each of the store-groups.
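// E.g. (illustrative) two stores such as
//   store i32 %a, i32* %p
//   store i32 %b, i32* %q
// are only tried together as a vectorization candidate if their value
// operands are compatible per AreCompatibleStores above.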
10196 for (auto &Pair : Stores) { 10197 if (Pair.second.size() < 2) 10198 continue; 10199 10200 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " 10201 << Pair.second.size() << ".\n"); 10202 10203 if (!isValidElementType(Pair.second.front()->getValueOperand()->getType())) 10204 continue; 10205 10206 Changed |= tryToVectorizeSequence<StoreInst>( 10207 Pair.second, Limit, StoreSorter, AreCompatibleStores, 10208 [this, &R](ArrayRef<StoreInst *> Candidates, bool) { 10209 return vectorizeStores(Candidates, R); 10210 }, 10211 /*LimitForRegisterSize=*/false); 10212 } 10213 return Changed; 10214 } 10215 10216 char SLPVectorizer::ID = 0; 10217 10218 static const char lv_name[] = "SLP Vectorizer"; 10219 10220 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false) 10221 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 10222 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 10223 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 10224 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 10225 INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 10226 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 10227 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 10228 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 10229 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false) 10230 10231 Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); } 10232