1 //===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass implements the Bottom Up SLP vectorizer. It detects consecutive 10 // stores that can be put together into vector-stores. Next, it attempts to 11 // construct vectorizable tree using the use-def chains. If a profitable tree 12 // was found, the SLP vectorizer performs vectorization on the tree. 13 // 14 // The pass is inspired by the work described in the paper: 15 // "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks. 16 // 17 //===----------------------------------------------------------------------===// 18 19 #include "llvm/Transforms/Vectorize/SLPVectorizer.h" 20 #include "llvm/ADT/DenseMap.h" 21 #include "llvm/ADT/DenseSet.h" 22 #include "llvm/ADT/PriorityQueue.h" 23 #include "llvm/ADT/STLExtras.h" 24 #include "llvm/ADT/SetOperations.h" 25 #include "llvm/ADT/SetVector.h" 26 #include "llvm/ADT/SmallBitVector.h" 27 #include "llvm/ADT/SmallPtrSet.h" 28 #include "llvm/ADT/SmallSet.h" 29 #include "llvm/ADT/SmallString.h" 30 #include "llvm/ADT/Statistic.h" 31 #include "llvm/ADT/iterator.h" 32 #include "llvm/ADT/iterator_range.h" 33 #include "llvm/Analysis/AliasAnalysis.h" 34 #include "llvm/Analysis/AssumptionCache.h" 35 #include "llvm/Analysis/CodeMetrics.h" 36 #include "llvm/Analysis/ConstantFolding.h" 37 #include "llvm/Analysis/DemandedBits.h" 38 #include "llvm/Analysis/GlobalsModRef.h" 39 #include "llvm/Analysis/IVDescriptors.h" 40 #include "llvm/Analysis/LoopAccessAnalysis.h" 41 #include "llvm/Analysis/LoopInfo.h" 42 #include "llvm/Analysis/MemoryLocation.h" 43 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 44 #include "llvm/Analysis/ScalarEvolution.h" 45 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 46 #include "llvm/Analysis/TargetLibraryInfo.h" 47 #include "llvm/Analysis/TargetTransformInfo.h" 48 #include "llvm/Analysis/ValueTracking.h" 49 #include "llvm/Analysis/VectorUtils.h" 50 #include "llvm/IR/Attributes.h" 51 #include "llvm/IR/BasicBlock.h" 52 #include "llvm/IR/Constant.h" 53 #include "llvm/IR/Constants.h" 54 #include "llvm/IR/DataLayout.h" 55 #include "llvm/IR/DerivedTypes.h" 56 #include "llvm/IR/Dominators.h" 57 #include "llvm/IR/Function.h" 58 #include "llvm/IR/IRBuilder.h" 59 #include "llvm/IR/InstrTypes.h" 60 #include "llvm/IR/Instruction.h" 61 #include "llvm/IR/Instructions.h" 62 #include "llvm/IR/IntrinsicInst.h" 63 #include "llvm/IR/Intrinsics.h" 64 #include "llvm/IR/Module.h" 65 #include "llvm/IR/Operator.h" 66 #include "llvm/IR/PatternMatch.h" 67 #include "llvm/IR/Type.h" 68 #include "llvm/IR/Use.h" 69 #include "llvm/IR/User.h" 70 #include "llvm/IR/Value.h" 71 #include "llvm/IR/ValueHandle.h" 72 #ifdef EXPENSIVE_CHECKS 73 #include "llvm/IR/Verifier.h" 74 #endif 75 #include "llvm/Pass.h" 76 #include "llvm/Support/Casting.h" 77 #include "llvm/Support/CommandLine.h" 78 #include "llvm/Support/Compiler.h" 79 #include "llvm/Support/DOTGraphTraits.h" 80 #include "llvm/Support/Debug.h" 81 #include "llvm/Support/ErrorHandling.h" 82 #include "llvm/Support/GraphWriter.h" 83 #include "llvm/Support/InstructionCost.h" 84 #include "llvm/Support/KnownBits.h" 85 #include "llvm/Support/MathExtras.h" 86 #include "llvm/Support/raw_ostream.h" 87 #include 
"llvm/Transforms/Utils/InjectTLIMappings.h" 88 #include "llvm/Transforms/Utils/Local.h" 89 #include "llvm/Transforms/Utils/LoopUtils.h" 90 #include <algorithm> 91 #include <cassert> 92 #include <cstdint> 93 #include <iterator> 94 #include <memory> 95 #include <optional> 96 #include <set> 97 #include <string> 98 #include <tuple> 99 #include <utility> 100 101 using namespace llvm; 102 using namespace llvm::PatternMatch; 103 using namespace slpvectorizer; 104 105 #define SV_NAME "slp-vectorizer" 106 #define DEBUG_TYPE "SLP" 107 108 STATISTIC(NumVectorInstructions, "Number of vector instructions generated"); 109 110 static cl::opt<bool> 111 RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden, 112 cl::desc("Run the SLP vectorization passes")); 113 114 static cl::opt<int> 115 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden, 116 cl::desc("Only vectorize if you gain more than this " 117 "number ")); 118 119 static cl::opt<bool> 120 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden, 121 cl::desc("Attempt to vectorize horizontal reductions")); 122 123 static cl::opt<bool> ShouldStartVectorizeHorAtStore( 124 "slp-vectorize-hor-store", cl::init(false), cl::Hidden, 125 cl::desc( 126 "Attempt to vectorize horizontal reductions feeding into a store")); 127 128 // NOTE: If AllowHorRdxIdenityOptimization is true, the optimization will run 129 // even if we match a reduction but do not vectorize in the end. 130 static cl::opt<bool> AllowHorRdxIdenityOptimization( 131 "slp-optimize-identity-hor-reduction-ops", cl::init(true), cl::Hidden, 132 cl::desc("Allow optimization of original scalar identity operations on " 133 "matched horizontal reductions.")); 134 135 static cl::opt<int> 136 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden, 137 cl::desc("Attempt to vectorize for this register size in bits")); 138 139 static cl::opt<unsigned> 140 MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden, 141 cl::desc("Maximum SLP vectorization factor (0=unlimited)")); 142 143 /// Limits the size of scheduling regions in a block. 144 /// It avoid long compile times for _very_ large blocks where vector 145 /// instructions are spread over a wide range. 146 /// This limit is way higher than needed by real-world functions. 147 static cl::opt<int> 148 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden, 149 cl::desc("Limit the size of the SLP scheduling region per block")); 150 151 static cl::opt<int> MinVectorRegSizeOption( 152 "slp-min-reg-size", cl::init(128), cl::Hidden, 153 cl::desc("Attempt to vectorize for this register size in bits")); 154 155 static cl::opt<unsigned> RecursionMaxDepth( 156 "slp-recursion-max-depth", cl::init(12), cl::Hidden, 157 cl::desc("Limit the recursion depth when building a vectorizable tree")); 158 159 static cl::opt<unsigned> MinTreeSize( 160 "slp-min-tree-size", cl::init(3), cl::Hidden, 161 cl::desc("Only vectorize small trees if they are fully vectorizable")); 162 163 // The maximum depth that the look-ahead score heuristic will explore. 164 // The higher this value, the higher the compilation time overhead. 165 static cl::opt<int> LookAheadMaxDepth( 166 "slp-max-look-ahead-depth", cl::init(2), cl::Hidden, 167 cl::desc("The maximum look-ahead depth for operand reordering scores")); 168 169 // The maximum depth that the look-ahead score heuristic will explore 170 // when it probing among candidates for vectorization tree roots. 
171 // The higher this value, the higher the compilation time overhead but unlike 172 // similar limit for operands ordering this is less frequently used, hence 173 // impact of higher value is less noticeable. 174 static cl::opt<int> RootLookAheadMaxDepth( 175 "slp-max-root-look-ahead-depth", cl::init(2), cl::Hidden, 176 cl::desc("The maximum look-ahead depth for searching best rooting option")); 177 178 static cl::opt<bool> 179 ViewSLPTree("view-slp-tree", cl::Hidden, 180 cl::desc("Display the SLP trees with Graphviz")); 181 182 // Limit the number of alias checks. The limit is chosen so that 183 // it has no negative effect on the llvm benchmarks. 184 static const unsigned AliasedCheckLimit = 10; 185 186 // Another limit for the alias checks: The maximum distance between load/store 187 // instructions where alias checks are done. 188 // This limit is useful for very large basic blocks. 189 static const unsigned MaxMemDepDistance = 160; 190 191 /// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling 192 /// regions to be handled. 193 static const int MinScheduleRegionSize = 16; 194 195 /// Predicate for the element types that the SLP vectorizer supports. 196 /// 197 /// The most important thing to filter here are types which are invalid in LLVM 198 /// vectors. We also filter target specific types which have absolutely no 199 /// meaningful vectorization path such as x86_fp80 and ppc_f128. This just 200 /// avoids spending time checking the cost model and realizing that they will 201 /// be inevitably scalarized. 202 static bool isValidElementType(Type *Ty) { 203 return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() && 204 !Ty->isPPC_FP128Ty(); 205 } 206 207 /// \returns True if the value is a constant (but not globals/constant 208 /// expressions). 209 static bool isConstant(Value *V) { 210 return isa<Constant>(V) && !isa<ConstantExpr, GlobalValue>(V); 211 } 212 213 /// Checks if \p V is one of vector-like instructions, i.e. undef, 214 /// insertelement/extractelement with constant indices for fixed vector type or 215 /// extractvalue instruction. 216 static bool isVectorLikeInstWithConstOps(Value *V) { 217 if (!isa<InsertElementInst, ExtractElementInst>(V) && 218 !isa<ExtractValueInst, UndefValue>(V)) 219 return false; 220 auto *I = dyn_cast<Instruction>(V); 221 if (!I || isa<ExtractValueInst>(I)) 222 return true; 223 if (!isa<FixedVectorType>(I->getOperand(0)->getType())) 224 return false; 225 if (isa<ExtractElementInst>(I)) 226 return isConstant(I->getOperand(1)); 227 assert(isa<InsertElementInst>(V) && "Expected only insertelement."); 228 return isConstant(I->getOperand(2)); 229 } 230 231 #if !defined(NDEBUG) 232 /// Print a short descriptor of the instruction bundle suitable for debug output. 233 static std::string shortBundleName(ArrayRef<Value *> VL) { 234 std::string Result; 235 raw_string_ostream OS(Result); 236 OS << "n=" << VL.size() << " [" << *VL.front() << ", ..]"; 237 OS.flush(); 238 return Result; 239 } 240 #endif 241 242 /// \returns true if all of the instructions in \p VL are in the same block or 243 /// false otherwise. 
244 static bool allSameBlock(ArrayRef<Value *> VL) { 245 Instruction *I0 = dyn_cast<Instruction>(VL[0]); 246 if (!I0) 247 return false; 248 if (all_of(VL, isVectorLikeInstWithConstOps)) 249 return true; 250 251 BasicBlock *BB = I0->getParent(); 252 for (int I = 1, E = VL.size(); I < E; I++) { 253 auto *II = dyn_cast<Instruction>(VL[I]); 254 if (!II) 255 return false; 256 257 if (BB != II->getParent()) 258 return false; 259 } 260 return true; 261 } 262 263 /// \returns True if all of the values in \p VL are constants (but not 264 /// globals/constant expressions). 265 static bool allConstant(ArrayRef<Value *> VL) { 266 // Constant expressions and globals can't be vectorized like normal integer/FP 267 // constants. 268 return all_of(VL, isConstant); 269 } 270 271 /// \returns True if all of the values in \p VL are identical or some of them 272 /// are UndefValue. 273 static bool isSplat(ArrayRef<Value *> VL) { 274 Value *FirstNonUndef = nullptr; 275 for (Value *V : VL) { 276 if (isa<UndefValue>(V)) 277 continue; 278 if (!FirstNonUndef) { 279 FirstNonUndef = V; 280 continue; 281 } 282 if (V != FirstNonUndef) 283 return false; 284 } 285 return FirstNonUndef != nullptr; 286 } 287 288 /// \returns True if \p I is commutative, handles CmpInst and BinaryOperator. 289 static bool isCommutative(Instruction *I) { 290 if (auto *Cmp = dyn_cast<CmpInst>(I)) 291 return Cmp->isCommutative(); 292 if (auto *BO = dyn_cast<BinaryOperator>(I)) 293 return BO->isCommutative(); 294 // TODO: This should check for generic Instruction::isCommutative(), but 295 // we need to confirm that the caller code correctly handles Intrinsics 296 // for example (does not have 2 operands). 297 return false; 298 } 299 300 /// \returns inserting index of InsertElement or InsertValue instruction, 301 /// using Offset as base offset for index. 302 static std::optional<unsigned> getInsertIndex(const Value *InsertInst, 303 unsigned Offset = 0) { 304 int Index = Offset; 305 if (const auto *IE = dyn_cast<InsertElementInst>(InsertInst)) { 306 const auto *VT = dyn_cast<FixedVectorType>(IE->getType()); 307 if (!VT) 308 return std::nullopt; 309 const auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2)); 310 if (!CI) 311 return std::nullopt; 312 if (CI->getValue().uge(VT->getNumElements())) 313 return std::nullopt; 314 Index *= VT->getNumElements(); 315 Index += CI->getZExtValue(); 316 return Index; 317 } 318 319 const auto *IV = cast<InsertValueInst>(InsertInst); 320 Type *CurrentType = IV->getType(); 321 for (unsigned I : IV->indices()) { 322 if (const auto *ST = dyn_cast<StructType>(CurrentType)) { 323 Index *= ST->getNumElements(); 324 CurrentType = ST->getElementType(I); 325 } else if (const auto *AT = dyn_cast<ArrayType>(CurrentType)) { 326 Index *= AT->getNumElements(); 327 CurrentType = AT->getElementType(); 328 } else { 329 return std::nullopt; 330 } 331 Index += I; 332 } 333 return Index; 334 } 335 336 namespace { 337 /// Specifies the way the mask should be analyzed for undefs/poisonous elements 338 /// in the shuffle mask. 339 enum class UseMask { 340 FirstArg, ///< The mask is expected to be for permutation of 1-2 vectors, 341 ///< check for the mask elements for the first argument (mask 342 ///< indices are in range [0:VF)). 
343 SecondArg, ///< The mask is expected to be for permutation of 2 vectors, check 344 ///< for the mask elements for the second argument (mask indices 345 ///< are in range [VF:2*VF)) 346 UndefsAsMask ///< Consider undef mask elements (-1) as placeholders for 347 ///< future shuffle elements and mark them as ones as being used 348 ///< in future. Non-undef elements are considered as unused since 349 ///< they're already marked as used in the mask. 350 }; 351 } // namespace 352 353 /// Prepares a use bitset for the given mask either for the first argument or 354 /// for the second. 355 static SmallBitVector buildUseMask(int VF, ArrayRef<int> Mask, 356 UseMask MaskArg) { 357 SmallBitVector UseMask(VF, true); 358 for (auto [Idx, Value] : enumerate(Mask)) { 359 if (Value == PoisonMaskElem) { 360 if (MaskArg == UseMask::UndefsAsMask) 361 UseMask.reset(Idx); 362 continue; 363 } 364 if (MaskArg == UseMask::FirstArg && Value < VF) 365 UseMask.reset(Value); 366 else if (MaskArg == UseMask::SecondArg && Value >= VF) 367 UseMask.reset(Value - VF); 368 } 369 return UseMask; 370 } 371 372 /// Checks if the given value is actually an undefined constant vector. 373 /// Also, if the \p UseMask is not empty, tries to check if the non-masked 374 /// elements actually mask the insertelement buildvector, if any. 375 template <bool IsPoisonOnly = false> 376 static SmallBitVector isUndefVector(const Value *V, 377 const SmallBitVector &UseMask = {}) { 378 SmallBitVector Res(UseMask.empty() ? 1 : UseMask.size(), true); 379 using T = std::conditional_t<IsPoisonOnly, PoisonValue, UndefValue>; 380 if (isa<T>(V)) 381 return Res; 382 auto *VecTy = dyn_cast<FixedVectorType>(V->getType()); 383 if (!VecTy) 384 return Res.reset(); 385 auto *C = dyn_cast<Constant>(V); 386 if (!C) { 387 if (!UseMask.empty()) { 388 const Value *Base = V; 389 while (auto *II = dyn_cast<InsertElementInst>(Base)) { 390 Base = II->getOperand(0); 391 if (isa<T>(II->getOperand(1))) 392 continue; 393 std::optional<unsigned> Idx = getInsertIndex(II); 394 if (!Idx) { 395 Res.reset(); 396 return Res; 397 } 398 if (*Idx < UseMask.size() && !UseMask.test(*Idx)) 399 Res.reset(*Idx); 400 } 401 // TODO: Add analysis for shuffles here too. 
402 if (V == Base) { 403 Res.reset(); 404 } else { 405 SmallBitVector SubMask(UseMask.size(), false); 406 Res &= isUndefVector<IsPoisonOnly>(Base, SubMask); 407 } 408 } else { 409 Res.reset(); 410 } 411 return Res; 412 } 413 for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I) { 414 if (Constant *Elem = C->getAggregateElement(I)) 415 if (!isa<T>(Elem) && 416 (UseMask.empty() || (I < UseMask.size() && !UseMask.test(I)))) 417 Res.reset(I); 418 } 419 return Res; 420 } 421 422 /// Checks if the vector of instructions can be represented as a shuffle, like: 423 /// %x0 = extractelement <4 x i8> %x, i32 0 424 /// %x3 = extractelement <4 x i8> %x, i32 3 425 /// %y1 = extractelement <4 x i8> %y, i32 1 426 /// %y2 = extractelement <4 x i8> %y, i32 2 427 /// %x0x0 = mul i8 %x0, %x0 428 /// %x3x3 = mul i8 %x3, %x3 429 /// %y1y1 = mul i8 %y1, %y1 430 /// %y2y2 = mul i8 %y2, %y2 431 /// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0 432 /// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1 433 /// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2 434 /// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3 435 /// ret <4 x i8> %ins4 436 /// can be transformed into: 437 /// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5, 438 /// i32 6> 439 /// %2 = mul <4 x i8> %1, %1 440 /// ret <4 x i8> %2 441 /// Mask will return the Shuffle Mask equivalent to the extracted elements. 442 /// TODO: Can we split off and reuse the shuffle mask detection from 443 /// ShuffleVectorInst/getShuffleCost? 444 static std::optional<TargetTransformInfo::ShuffleKind> 445 isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) { 446 const auto *It = 447 find_if(VL, [](Value *V) { return isa<ExtractElementInst>(V); }); 448 if (It == VL.end()) 449 return std::nullopt; 450 auto *EI0 = cast<ExtractElementInst>(*It); 451 if (isa<ScalableVectorType>(EI0->getVectorOperandType())) 452 return std::nullopt; 453 unsigned Size = 454 cast<FixedVectorType>(EI0->getVectorOperandType())->getNumElements(); 455 Value *Vec1 = nullptr; 456 Value *Vec2 = nullptr; 457 enum ShuffleMode { Unknown, Select, Permute }; 458 ShuffleMode CommonShuffleMode = Unknown; 459 Mask.assign(VL.size(), PoisonMaskElem); 460 for (unsigned I = 0, E = VL.size(); I < E; ++I) { 461 // Undef can be represented as an undef element in a vector. 462 if (isa<UndefValue>(VL[I])) 463 continue; 464 auto *EI = cast<ExtractElementInst>(VL[I]); 465 if (isa<ScalableVectorType>(EI->getVectorOperandType())) 466 return std::nullopt; 467 auto *Vec = EI->getVectorOperand(); 468 // We can extractelement from undef or poison vector. 469 if (isUndefVector(Vec).all()) 470 continue; 471 // All vector operands must have the same number of vector elements. 472 if (cast<FixedVectorType>(Vec->getType())->getNumElements() != Size) 473 return std::nullopt; 474 if (isa<UndefValue>(EI->getIndexOperand())) 475 continue; 476 auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand()); 477 if (!Idx) 478 return std::nullopt; 479 // Undefined behavior if Idx is negative or >= Size. 480 if (Idx->getValue().uge(Size)) 481 continue; 482 unsigned IntIdx = Idx->getValue().getZExtValue(); 483 Mask[I] = IntIdx; 484 // For correct shuffling we have to have at most 2 different vector operands 485 // in all extractelement instructions. 
486 if (!Vec1 || Vec1 == Vec) { 487 Vec1 = Vec; 488 } else if (!Vec2 || Vec2 == Vec) { 489 Vec2 = Vec; 490 Mask[I] += Size; 491 } else { 492 return std::nullopt; 493 } 494 if (CommonShuffleMode == Permute) 495 continue; 496 // If the extract index is not the same as the operation number, it is a 497 // permutation. 498 if (IntIdx != I) { 499 CommonShuffleMode = Permute; 500 continue; 501 } 502 CommonShuffleMode = Select; 503 } 504 // If we're not crossing lanes in different vectors, consider it as blending. 505 if (CommonShuffleMode == Select && Vec2) 506 return TargetTransformInfo::SK_Select; 507 // If Vec2 was never used, we have a permutation of a single vector, otherwise 508 // we have permutation of 2 vectors. 509 return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc 510 : TargetTransformInfo::SK_PermuteSingleSrc; 511 } 512 513 /// \returns True if Extract{Value,Element} instruction extracts element Idx. 514 static std::optional<unsigned> getExtractIndex(Instruction *E) { 515 unsigned Opcode = E->getOpcode(); 516 assert((Opcode == Instruction::ExtractElement || 517 Opcode == Instruction::ExtractValue) && 518 "Expected extractelement or extractvalue instruction."); 519 if (Opcode == Instruction::ExtractElement) { 520 auto *CI = dyn_cast<ConstantInt>(E->getOperand(1)); 521 if (!CI) 522 return std::nullopt; 523 return CI->getZExtValue(); 524 } 525 auto *EI = cast<ExtractValueInst>(E); 526 if (EI->getNumIndices() != 1) 527 return std::nullopt; 528 return *EI->idx_begin(); 529 } 530 531 namespace { 532 533 /// Main data required for vectorization of instructions. 534 struct InstructionsState { 535 /// The very first instruction in the list with the main opcode. 536 Value *OpValue = nullptr; 537 538 /// The main/alternate instruction. 539 Instruction *MainOp = nullptr; 540 Instruction *AltOp = nullptr; 541 542 /// The main/alternate opcodes for the list of instructions. 543 unsigned getOpcode() const { 544 return MainOp ? MainOp->getOpcode() : 0; 545 } 546 547 unsigned getAltOpcode() const { 548 return AltOp ? AltOp->getOpcode() : 0; 549 } 550 551 /// Some of the instructions in the list have alternate opcodes. 552 bool isAltShuffle() const { return AltOp != MainOp; } 553 554 bool isOpcodeOrAlt(Instruction *I) const { 555 unsigned CheckedOpcode = I->getOpcode(); 556 return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode; 557 } 558 559 InstructionsState() = delete; 560 InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp) 561 : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {} 562 }; 563 564 } // end anonymous namespace 565 566 /// Chooses the correct key for scheduling data. If \p Op has the same (or 567 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p 568 /// OpValue. 569 static Value *isOneOf(const InstructionsState &S, Value *Op) { 570 auto *I = dyn_cast<Instruction>(Op); 571 if (I && S.isOpcodeOrAlt(I)) 572 return Op; 573 return S.OpValue; 574 } 575 576 /// \returns true if \p Opcode is allowed as part of the main/alternate 577 /// instruction for SLP vectorization. 578 /// 579 /// Example of unsupported opcode is SDIV that can potentially cause UB if the 580 /// "shuffled out" lane would result in division by zero. 
581 static bool isValidForAlternation(unsigned Opcode) { 582 if (Instruction::isIntDivRem(Opcode)) 583 return false; 584 585 return true; 586 } 587 588 static InstructionsState getSameOpcode(ArrayRef<Value *> VL, 589 const TargetLibraryInfo &TLI, 590 unsigned BaseIndex = 0); 591 592 /// Checks if the provided operands of 2 cmp instructions are compatible, i.e. 593 /// compatible instructions or constants, or just some other regular values. 594 static bool areCompatibleCmpOps(Value *BaseOp0, Value *BaseOp1, Value *Op0, 595 Value *Op1, const TargetLibraryInfo &TLI) { 596 return (isConstant(BaseOp0) && isConstant(Op0)) || 597 (isConstant(BaseOp1) && isConstant(Op1)) || 598 (!isa<Instruction>(BaseOp0) && !isa<Instruction>(Op0) && 599 !isa<Instruction>(BaseOp1) && !isa<Instruction>(Op1)) || 600 BaseOp0 == Op0 || BaseOp1 == Op1 || 601 getSameOpcode({BaseOp0, Op0}, TLI).getOpcode() || 602 getSameOpcode({BaseOp1, Op1}, TLI).getOpcode(); 603 } 604 605 /// \returns true if a compare instruction \p CI has similar "look" and 606 /// same predicate as \p BaseCI, "as is" or with its operands and predicate 607 /// swapped, false otherwise. 608 static bool isCmpSameOrSwapped(const CmpInst *BaseCI, const CmpInst *CI, 609 const TargetLibraryInfo &TLI) { 610 assert(BaseCI->getOperand(0)->getType() == CI->getOperand(0)->getType() && 611 "Assessing comparisons of different types?"); 612 CmpInst::Predicate BasePred = BaseCI->getPredicate(); 613 CmpInst::Predicate Pred = CI->getPredicate(); 614 CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(Pred); 615 616 Value *BaseOp0 = BaseCI->getOperand(0); 617 Value *BaseOp1 = BaseCI->getOperand(1); 618 Value *Op0 = CI->getOperand(0); 619 Value *Op1 = CI->getOperand(1); 620 621 return (BasePred == Pred && 622 areCompatibleCmpOps(BaseOp0, BaseOp1, Op0, Op1, TLI)) || 623 (BasePred == SwappedPred && 624 areCompatibleCmpOps(BaseOp0, BaseOp1, Op1, Op0, TLI)); 625 } 626 627 /// \returns analysis of the Instructions in \p VL described in 628 /// InstructionsState, the Opcode that we suppose the whole list 629 /// could be vectorized even if its structure is diverse. 630 static InstructionsState getSameOpcode(ArrayRef<Value *> VL, 631 const TargetLibraryInfo &TLI, 632 unsigned BaseIndex) { 633 // Make sure these are all Instructions. 634 if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); })) 635 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 636 637 bool IsCastOp = isa<CastInst>(VL[BaseIndex]); 638 bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]); 639 bool IsCmpOp = isa<CmpInst>(VL[BaseIndex]); 640 CmpInst::Predicate BasePred = 641 IsCmpOp ? cast<CmpInst>(VL[BaseIndex])->getPredicate() 642 : CmpInst::BAD_ICMP_PREDICATE; 643 unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode(); 644 unsigned AltOpcode = Opcode; 645 unsigned AltIndex = BaseIndex; 646 647 // Check for one alternate opcode from another BinaryOperator. 648 // TODO - generalize to support all operators (types, calls etc.). 
649 auto *IBase = cast<Instruction>(VL[BaseIndex]); 650 Intrinsic::ID BaseID = 0; 651 SmallVector<VFInfo> BaseMappings; 652 if (auto *CallBase = dyn_cast<CallInst>(IBase)) { 653 BaseID = getVectorIntrinsicIDForCall(CallBase, &TLI); 654 BaseMappings = VFDatabase(*CallBase).getMappings(*CallBase); 655 if (!isTriviallyVectorizable(BaseID) && BaseMappings.empty()) 656 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 657 } 658 for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) { 659 auto *I = cast<Instruction>(VL[Cnt]); 660 unsigned InstOpcode = I->getOpcode(); 661 if (IsBinOp && isa<BinaryOperator>(I)) { 662 if (InstOpcode == Opcode || InstOpcode == AltOpcode) 663 continue; 664 if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) && 665 isValidForAlternation(Opcode)) { 666 AltOpcode = InstOpcode; 667 AltIndex = Cnt; 668 continue; 669 } 670 } else if (IsCastOp && isa<CastInst>(I)) { 671 Value *Op0 = IBase->getOperand(0); 672 Type *Ty0 = Op0->getType(); 673 Value *Op1 = I->getOperand(0); 674 Type *Ty1 = Op1->getType(); 675 if (Ty0 == Ty1) { 676 if (InstOpcode == Opcode || InstOpcode == AltOpcode) 677 continue; 678 if (Opcode == AltOpcode) { 679 assert(isValidForAlternation(Opcode) && 680 isValidForAlternation(InstOpcode) && 681 "Cast isn't safe for alternation, logic needs to be updated!"); 682 AltOpcode = InstOpcode; 683 AltIndex = Cnt; 684 continue; 685 } 686 } 687 } else if (auto *Inst = dyn_cast<CmpInst>(VL[Cnt]); Inst && IsCmpOp) { 688 auto *BaseInst = cast<CmpInst>(VL[BaseIndex]); 689 Type *Ty0 = BaseInst->getOperand(0)->getType(); 690 Type *Ty1 = Inst->getOperand(0)->getType(); 691 if (Ty0 == Ty1) { 692 assert(InstOpcode == Opcode && "Expected same CmpInst opcode."); 693 // Check for compatible operands. If the corresponding operands are not 694 // compatible - need to perform alternate vectorization. 
695 CmpInst::Predicate CurrentPred = Inst->getPredicate(); 696 CmpInst::Predicate SwappedCurrentPred = 697 CmpInst::getSwappedPredicate(CurrentPred); 698 699 if (E == 2 && 700 (BasePred == CurrentPred || BasePred == SwappedCurrentPred)) 701 continue; 702 703 if (isCmpSameOrSwapped(BaseInst, Inst, TLI)) 704 continue; 705 auto *AltInst = cast<CmpInst>(VL[AltIndex]); 706 if (AltIndex != BaseIndex) { 707 if (isCmpSameOrSwapped(AltInst, Inst, TLI)) 708 continue; 709 } else if (BasePred != CurrentPred) { 710 assert( 711 isValidForAlternation(InstOpcode) && 712 "CmpInst isn't safe for alternation, logic needs to be updated!"); 713 AltIndex = Cnt; 714 continue; 715 } 716 CmpInst::Predicate AltPred = AltInst->getPredicate(); 717 if (BasePred == CurrentPred || BasePred == SwappedCurrentPred || 718 AltPred == CurrentPred || AltPred == SwappedCurrentPred) 719 continue; 720 } 721 } else if (InstOpcode == Opcode || InstOpcode == AltOpcode) { 722 if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) { 723 if (Gep->getNumOperands() != 2 || 724 Gep->getOperand(0)->getType() != IBase->getOperand(0)->getType()) 725 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 726 } else if (auto *EI = dyn_cast<ExtractElementInst>(I)) { 727 if (!isVectorLikeInstWithConstOps(EI)) 728 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 729 } else if (auto *LI = dyn_cast<LoadInst>(I)) { 730 auto *BaseLI = cast<LoadInst>(IBase); 731 if (!LI->isSimple() || !BaseLI->isSimple()) 732 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 733 } else if (auto *Call = dyn_cast<CallInst>(I)) { 734 auto *CallBase = cast<CallInst>(IBase); 735 if (Call->getCalledFunction() != CallBase->getCalledFunction()) 736 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 737 if (Call->hasOperandBundles() && 738 !std::equal(Call->op_begin() + Call->getBundleOperandsStartIndex(), 739 Call->op_begin() + Call->getBundleOperandsEndIndex(), 740 CallBase->op_begin() + 741 CallBase->getBundleOperandsStartIndex())) 742 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 743 Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, &TLI); 744 if (ID != BaseID) 745 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 746 if (!ID) { 747 SmallVector<VFInfo> Mappings = VFDatabase(*Call).getMappings(*Call); 748 if (Mappings.size() != BaseMappings.size() || 749 Mappings.front().ISA != BaseMappings.front().ISA || 750 Mappings.front().ScalarName != BaseMappings.front().ScalarName || 751 Mappings.front().VectorName != BaseMappings.front().VectorName || 752 Mappings.front().Shape.VF != BaseMappings.front().Shape.VF || 753 Mappings.front().Shape.Parameters != 754 BaseMappings.front().Shape.Parameters) 755 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 756 } 757 } 758 continue; 759 } 760 return InstructionsState(VL[BaseIndex], nullptr, nullptr); 761 } 762 763 return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]), 764 cast<Instruction>(VL[AltIndex])); 765 } 766 767 /// \returns true if all of the values in \p VL have the same type or false 768 /// otherwise. 769 static bool allSameType(ArrayRef<Value *> VL) { 770 Type *Ty = VL.front()->getType(); 771 return all_of(VL.drop_front(), [&](Value *V) { return V->getType() == Ty; }); 772 } 773 774 /// \returns True if in-tree use also needs extract. This refers to 775 /// possible scalar operand in vectorized instruction. 
776 static bool doesInTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst, 777 TargetLibraryInfo *TLI) { 778 unsigned Opcode = UserInst->getOpcode(); 779 switch (Opcode) { 780 case Instruction::Load: { 781 LoadInst *LI = cast<LoadInst>(UserInst); 782 return (LI->getPointerOperand() == Scalar); 783 } 784 case Instruction::Store: { 785 StoreInst *SI = cast<StoreInst>(UserInst); 786 return (SI->getPointerOperand() == Scalar); 787 } 788 case Instruction::Call: { 789 CallInst *CI = cast<CallInst>(UserInst); 790 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 791 return any_of(enumerate(CI->args()), [&](auto &&Arg) { 792 return isVectorIntrinsicWithScalarOpAtArg(ID, Arg.index()) && 793 Arg.value().get() == Scalar; 794 }); 795 } 796 default: 797 return false; 798 } 799 } 800 801 /// \returns the AA location that is being access by the instruction. 802 static MemoryLocation getLocation(Instruction *I) { 803 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 804 return MemoryLocation::get(SI); 805 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 806 return MemoryLocation::get(LI); 807 return MemoryLocation(); 808 } 809 810 /// \returns True if the instruction is not a volatile or atomic load/store. 811 static bool isSimple(Instruction *I) { 812 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 813 return LI->isSimple(); 814 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 815 return SI->isSimple(); 816 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) 817 return !MI->isVolatile(); 818 return true; 819 } 820 821 /// Shuffles \p Mask in accordance with the given \p SubMask. 822 /// \param ExtendingManyInputs Supports reshuffling of the mask with not only 823 /// one but two input vectors. 824 static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask, 825 bool ExtendingManyInputs = false) { 826 if (SubMask.empty()) 827 return; 828 assert( 829 (!ExtendingManyInputs || SubMask.size() > Mask.size() || 830 // Check if input scalars were extended to match the size of other node. 831 (SubMask.size() == Mask.size() && 832 std::all_of(std::next(Mask.begin(), Mask.size() / 2), Mask.end(), 833 [](int Idx) { return Idx == PoisonMaskElem; }))) && 834 "SubMask with many inputs support must be larger than the mask."); 835 if (Mask.empty()) { 836 Mask.append(SubMask.begin(), SubMask.end()); 837 return; 838 } 839 SmallVector<int> NewMask(SubMask.size(), PoisonMaskElem); 840 int TermValue = std::min(Mask.size(), SubMask.size()); 841 for (int I = 0, E = SubMask.size(); I < E; ++I) { 842 if (SubMask[I] == PoisonMaskElem || 843 (!ExtendingManyInputs && 844 (SubMask[I] >= TermValue || Mask[SubMask[I]] >= TermValue))) 845 continue; 846 NewMask[I] = Mask[SubMask[I]]; 847 } 848 Mask.swap(NewMask); 849 } 850 851 /// Order may have elements assigned special value (size) which is out of 852 /// bounds. Such indices only appear on places which correspond to undef values 853 /// (see canReuseExtract for details) and used in order to avoid undef values 854 /// have effect on operands ordering. 855 /// The first loop below simply finds all unused indices and then the next loop 856 /// nest assigns these indices for undef values positions. 
857 /// As an example below Order has two undef positions and they have assigned 858 /// values 3 and 7 respectively: 859 /// before: 6 9 5 4 9 2 1 0 860 /// after: 6 3 5 4 7 2 1 0 861 static void fixupOrderingIndices(SmallVectorImpl<unsigned> &Order) { 862 const unsigned Sz = Order.size(); 863 SmallBitVector UnusedIndices(Sz, /*t=*/true); 864 SmallBitVector MaskedIndices(Sz); 865 for (unsigned I = 0; I < Sz; ++I) { 866 if (Order[I] < Sz) 867 UnusedIndices.reset(Order[I]); 868 else 869 MaskedIndices.set(I); 870 } 871 if (MaskedIndices.none()) 872 return; 873 assert(UnusedIndices.count() == MaskedIndices.count() && 874 "Non-synced masked/available indices."); 875 int Idx = UnusedIndices.find_first(); 876 int MIdx = MaskedIndices.find_first(); 877 while (MIdx >= 0) { 878 assert(Idx >= 0 && "Indices must be synced."); 879 Order[MIdx] = Idx; 880 Idx = UnusedIndices.find_next(Idx); 881 MIdx = MaskedIndices.find_next(MIdx); 882 } 883 } 884 885 namespace llvm { 886 887 static void inversePermutation(ArrayRef<unsigned> Indices, 888 SmallVectorImpl<int> &Mask) { 889 Mask.clear(); 890 const unsigned E = Indices.size(); 891 Mask.resize(E, PoisonMaskElem); 892 for (unsigned I = 0; I < E; ++I) 893 Mask[Indices[I]] = I; 894 } 895 896 /// Reorders the list of scalars in accordance with the given \p Mask. 897 static void reorderScalars(SmallVectorImpl<Value *> &Scalars, 898 ArrayRef<int> Mask) { 899 assert(!Mask.empty() && "Expected non-empty mask."); 900 SmallVector<Value *> Prev(Scalars.size(), 901 UndefValue::get(Scalars.front()->getType())); 902 Prev.swap(Scalars); 903 for (unsigned I = 0, E = Prev.size(); I < E; ++I) 904 if (Mask[I] != PoisonMaskElem) 905 Scalars[Mask[I]] = Prev[I]; 906 } 907 908 /// Checks if the provided value does not require scheduling. It does not 909 /// require scheduling if this is not an instruction or it is an instruction 910 /// that does not read/write memory and all operands are either not instructions 911 /// or phi nodes or instructions from different blocks. 912 static bool areAllOperandsNonInsts(Value *V) { 913 auto *I = dyn_cast<Instruction>(V); 914 if (!I) 915 return true; 916 return !mayHaveNonDefUseDependency(*I) && 917 all_of(I->operands(), [I](Value *V) { 918 auto *IO = dyn_cast<Instruction>(V); 919 if (!IO) 920 return true; 921 return isa<PHINode>(IO) || IO->getParent() != I->getParent(); 922 }); 923 } 924 925 /// Checks if the provided value does not require scheduling. It does not 926 /// require scheduling if this is not an instruction or it is an instruction 927 /// that does not read/write memory and all users are phi nodes or instructions 928 /// from the different blocks. 929 static bool isUsedOutsideBlock(Value *V) { 930 auto *I = dyn_cast<Instruction>(V); 931 if (!I) 932 return true; 933 // Limits the number of uses to save compile time. 934 constexpr int UsesLimit = 8; 935 return !I->mayReadOrWriteMemory() && !I->hasNUsesOrMore(UsesLimit) && 936 all_of(I->users(), [I](User *U) { 937 auto *IU = dyn_cast<Instruction>(U); 938 if (!IU) 939 return true; 940 return IU->getParent() != I->getParent() || isa<PHINode>(IU); 941 }); 942 } 943 944 /// Checks if the specified value does not require scheduling. It does not 945 /// require scheduling if all operands and all users do not need to be scheduled 946 /// in the current basic block. 947 static bool doesNotNeedToBeScheduled(Value *V) { 948 return areAllOperandsNonInsts(V) && isUsedOutsideBlock(V); 949 } 950 951 /// Checks if the specified array of instructions does not require scheduling. 
952 /// It is so if all either instructions have operands that do not require 953 /// scheduling or their users do not require scheduling since they are phis or 954 /// in other basic blocks. 955 static bool doesNotNeedToSchedule(ArrayRef<Value *> VL) { 956 return !VL.empty() && 957 (all_of(VL, isUsedOutsideBlock) || all_of(VL, areAllOperandsNonInsts)); 958 } 959 960 namespace slpvectorizer { 961 962 /// Bottom Up SLP Vectorizer. 963 class BoUpSLP { 964 struct TreeEntry; 965 struct ScheduleData; 966 class ShuffleCostEstimator; 967 class ShuffleInstructionBuilder; 968 969 public: 970 using ValueList = SmallVector<Value *, 8>; 971 using InstrList = SmallVector<Instruction *, 16>; 972 using ValueSet = SmallPtrSet<Value *, 16>; 973 using StoreList = SmallVector<StoreInst *, 8>; 974 using ExtraValueToDebugLocsMap = 975 MapVector<Value *, SmallVector<Instruction *, 2>>; 976 using OrdersType = SmallVector<unsigned, 4>; 977 978 BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti, 979 TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li, 980 DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB, 981 const DataLayout *DL, OptimizationRemarkEmitter *ORE) 982 : BatchAA(*Aa), F(Func), SE(Se), TTI(Tti), TLI(TLi), LI(Li), 983 DT(Dt), AC(AC), DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) { 984 CodeMetrics::collectEphemeralValues(F, AC, EphValues); 985 // Use the vector register size specified by the target unless overridden 986 // by a command-line option. 987 // TODO: It would be better to limit the vectorization factor based on 988 // data type rather than just register size. For example, x86 AVX has 989 // 256-bit registers, but it does not support integer operations 990 // at that width (that requires AVX2). 991 if (MaxVectorRegSizeOption.getNumOccurrences()) 992 MaxVecRegSize = MaxVectorRegSizeOption; 993 else 994 MaxVecRegSize = 995 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 996 .getFixedValue(); 997 998 if (MinVectorRegSizeOption.getNumOccurrences()) 999 MinVecRegSize = MinVectorRegSizeOption; 1000 else 1001 MinVecRegSize = TTI->getMinVectorRegisterBitWidth(); 1002 } 1003 1004 /// Vectorize the tree that starts with the elements in \p VL. 1005 /// Returns the vectorized root. 1006 Value *vectorizeTree(); 1007 1008 /// Vectorize the tree but with the list of externally used values \p 1009 /// ExternallyUsedValues. Values in this MapVector can be replaced but the 1010 /// generated extractvalue instructions. 1011 /// \param ReplacedExternals containd list of replaced external values 1012 /// {scalar, replace} after emitting extractelement for external uses. 1013 Value * 1014 vectorizeTree(const ExtraValueToDebugLocsMap &ExternallyUsedValues, 1015 SmallVectorImpl<std::pair<Value *, Value *>> &ReplacedExternals, 1016 Instruction *ReductionRoot = nullptr); 1017 1018 /// \returns the cost incurred by unwanted spills and fills, caused by 1019 /// holding live values over call sites. 1020 InstructionCost getSpillCost() const; 1021 1022 /// \returns the vectorization cost of the subtree that starts at \p VL. 1023 /// A negative number means that this is profitable. 1024 InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = std::nullopt); 1025 1026 /// Construct a vectorizable tree that starts at \p Roots, ignoring users for 1027 /// the purpose of scheduling and extraction in the \p UserIgnoreLst. 
1028 void buildTree(ArrayRef<Value *> Roots, 1029 const SmallDenseSet<Value *> &UserIgnoreLst); 1030 1031 /// Construct a vectorizable tree that starts at \p Roots. 1032 void buildTree(ArrayRef<Value *> Roots); 1033 1034 /// Returns whether the root node has in-tree uses. 1035 bool doesRootHaveInTreeUses() const { 1036 return !VectorizableTree.empty() && 1037 !VectorizableTree.front()->UserTreeIndices.empty(); 1038 } 1039 1040 /// Return the scalars of the root node. 1041 ArrayRef<Value *> getRootNodeScalars() const { 1042 assert(!VectorizableTree.empty() && "No graph to get the first node from"); 1043 return VectorizableTree.front()->Scalars; 1044 } 1045 1046 /// Builds external uses of the vectorized scalars, i.e. the list of 1047 /// vectorized scalars to be extracted, their lanes and their scalar users. \p 1048 /// ExternallyUsedValues contains additional list of external uses to handle 1049 /// vectorization of reductions. 1050 void 1051 buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {}); 1052 1053 /// Clear the internal data structures that are created by 'buildTree'. 1054 void deleteTree() { 1055 VectorizableTree.clear(); 1056 ScalarToTreeEntry.clear(); 1057 MultiNodeScalars.clear(); 1058 MustGather.clear(); 1059 EntryToLastInstruction.clear(); 1060 ExternalUses.clear(); 1061 for (auto &Iter : BlocksSchedules) { 1062 BlockScheduling *BS = Iter.second.get(); 1063 BS->clear(); 1064 } 1065 MinBWs.clear(); 1066 InstrElementSize.clear(); 1067 UserIgnoreList = nullptr; 1068 PostponedGathers.clear(); 1069 ValueToGatherNodes.clear(); 1070 } 1071 1072 unsigned getTreeSize() const { return VectorizableTree.size(); } 1073 1074 /// Perform LICM and CSE on the newly generated gather sequences. 1075 void optimizeGatherSequence(); 1076 1077 /// Checks if the specified gather tree entry \p TE can be represented as a 1078 /// shuffled vector entry + (possibly) permutation with other gathers. It 1079 /// implements the checks only for possibly ordered scalars (Loads, 1080 /// ExtractElement, ExtractValue), which can be part of the graph. 1081 std::optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE); 1082 1083 /// Sort loads into increasing pointers offsets to allow greater clustering. 1084 std::optional<OrdersType> findPartiallyOrderedLoads(const TreeEntry &TE); 1085 1086 /// Gets reordering data for the given tree entry. If the entry is vectorized 1087 /// - just return ReorderIndices, otherwise check if the scalars can be 1088 /// reordered and return the most optimal order. 1089 /// \return std::nullopt if ordering is not important, empty order, if 1090 /// identity order is important, or the actual order. 1091 /// \param TopToBottom If true, include the order of vectorized stores and 1092 /// insertelement nodes, otherwise skip them. 1093 std::optional<OrdersType> getReorderingData(const TreeEntry &TE, 1094 bool TopToBottom); 1095 1096 /// Reorders the current graph to the most profitable order starting from the 1097 /// root node to the leaf nodes. The best order is chosen only from the nodes 1098 /// of the same size (vectorization factor). Smaller nodes are considered 1099 /// parts of subgraph with smaller VF and they are reordered independently. We 1100 /// can make it because we still need to extend smaller nodes to the wider VF 1101 /// and we can merge reordering shuffles with the widening shuffles. 1102 void reorderTopToBottom(); 1103 1104 /// Reorders the current graph to the most profitable order starting from 1105 /// leaves to the root. 
It allows to rotate small subgraphs and reduce the 1106 /// number of reshuffles if the leaf nodes use the same order. In this case we 1107 /// can merge the orders and just shuffle user node instead of shuffling its 1108 /// operands. Plus, even the leaf nodes have different orders, it allows to 1109 /// sink reordering in the graph closer to the root node and merge it later 1110 /// during analysis. 1111 void reorderBottomToTop(bool IgnoreReorder = false); 1112 1113 /// \return The vector element size in bits to use when vectorizing the 1114 /// expression tree ending at \p V. If V is a store, the size is the width of 1115 /// the stored value. Otherwise, the size is the width of the largest loaded 1116 /// value reaching V. This method is used by the vectorizer to calculate 1117 /// vectorization factors. 1118 unsigned getVectorElementSize(Value *V); 1119 1120 /// Compute the minimum type sizes required to represent the entries in a 1121 /// vectorizable tree. 1122 void computeMinimumValueSizes(); 1123 1124 // \returns maximum vector register size as set by TTI or overridden by cl::opt. 1125 unsigned getMaxVecRegSize() const { 1126 return MaxVecRegSize; 1127 } 1128 1129 // \returns minimum vector register size as set by cl::opt. 1130 unsigned getMinVecRegSize() const { 1131 return MinVecRegSize; 1132 } 1133 1134 unsigned getMinVF(unsigned Sz) const { 1135 return std::max(2U, getMinVecRegSize() / Sz); 1136 } 1137 1138 unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const { 1139 unsigned MaxVF = MaxVFOption.getNumOccurrences() ? 1140 MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode); 1141 return MaxVF ? MaxVF : UINT_MAX; 1142 } 1143 1144 /// Check if homogeneous aggregate is isomorphic to some VectorType. 1145 /// Accepts homogeneous multidimensional aggregate of scalars/vectors like 1146 /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> }, 1147 /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on. 1148 /// 1149 /// \returns number of elements in vector if isomorphism exists, 0 otherwise. 1150 unsigned canMapToVector(Type *T) const; 1151 1152 /// \returns True if the VectorizableTree is both tiny and not fully 1153 /// vectorizable. We do not vectorize such trees. 1154 bool isTreeTinyAndNotFullyVectorizable(bool ForReduction = false) const; 1155 1156 /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values 1157 /// can be load combined in the backend. Load combining may not be allowed in 1158 /// the IR optimizer, so we do not want to alter the pattern. For example, 1159 /// partially transforming a scalar bswap() pattern into vector code is 1160 /// effectively impossible for the backend to undo. 1161 /// TODO: If load combining is allowed in the IR optimizer, this analysis 1162 /// may not be necessary. 1163 bool isLoadCombineReductionCandidate(RecurKind RdxKind) const; 1164 1165 /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values 1166 /// can be load combined in the backend. Load combining may not be allowed in 1167 /// the IR optimizer, so we do not want to alter the pattern. For example, 1168 /// partially transforming a scalar bswap() pattern into vector code is 1169 /// effectively impossible for the backend to undo. 1170 /// TODO: If load combining is allowed in the IR optimizer, this analysis 1171 /// may not be necessary. 
1172 bool isLoadCombineCandidate() const; 1173 1174 OptimizationRemarkEmitter *getORE() { return ORE; } 1175 1176 /// This structure holds any data we need about the edges being traversed 1177 /// during buildTree_rec(). We keep track of: 1178 /// (i) the user TreeEntry index, and 1179 /// (ii) the index of the edge. 1180 struct EdgeInfo { 1181 EdgeInfo() = default; 1182 EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx) 1183 : UserTE(UserTE), EdgeIdx(EdgeIdx) {} 1184 /// The user TreeEntry. 1185 TreeEntry *UserTE = nullptr; 1186 /// The operand index of the use. 1187 unsigned EdgeIdx = UINT_MAX; 1188 #ifndef NDEBUG 1189 friend inline raw_ostream &operator<<(raw_ostream &OS, 1190 const BoUpSLP::EdgeInfo &EI) { 1191 EI.dump(OS); 1192 return OS; 1193 } 1194 /// Debug print. 1195 void dump(raw_ostream &OS) const { 1196 OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null") 1197 << " EdgeIdx:" << EdgeIdx << "}"; 1198 } 1199 LLVM_DUMP_METHOD void dump() const { dump(dbgs()); } 1200 #endif 1201 bool operator == (const EdgeInfo &Other) const { 1202 return UserTE == Other.UserTE && EdgeIdx == Other.EdgeIdx; 1203 } 1204 }; 1205 1206 /// A helper class used for scoring candidates for two consecutive lanes. 1207 class LookAheadHeuristics { 1208 const TargetLibraryInfo &TLI; 1209 const DataLayout &DL; 1210 ScalarEvolution &SE; 1211 const BoUpSLP &R; 1212 int NumLanes; // Total number of lanes (aka vectorization factor). 1213 int MaxLevel; // The maximum recursion depth for accumulating score. 1214 1215 public: 1216 LookAheadHeuristics(const TargetLibraryInfo &TLI, const DataLayout &DL, 1217 ScalarEvolution &SE, const BoUpSLP &R, int NumLanes, 1218 int MaxLevel) 1219 : TLI(TLI), DL(DL), SE(SE), R(R), NumLanes(NumLanes), 1220 MaxLevel(MaxLevel) {} 1221 1222 // The hard-coded scores listed here are not very important, though it shall 1223 // be higher for better matches to improve the resulting cost. When 1224 // computing the scores of matching one sub-tree with another, we are 1225 // basically counting the number of values that are matching. So even if all 1226 // scores are set to 1, we would still get a decent matching result. 1227 // However, sometimes we have to break ties. For example we may have to 1228 // choose between matching loads vs matching opcodes. This is what these 1229 // scores are helping us with: they provide the order of preference. Also, 1230 // this is important if the scalar is externally used or used in another 1231 // tree entry node in the different lane. 1232 1233 /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]). 1234 static const int ScoreConsecutiveLoads = 4; 1235 /// The same load multiple times. This should have a better score than 1236 /// `ScoreSplat` because it in x86 for a 2-lane vector we can represent it 1237 /// with `movddup (%reg), xmm0` which has a throughput of 0.5 versus 0.5 for 1238 /// a vector load and 1.0 for a broadcast. 1239 static const int ScoreSplatLoads = 3; 1240 /// Loads from reversed memory addresses, e.g. load(A[i+1]), load(A[i]). 1241 static const int ScoreReversedLoads = 3; 1242 /// A load candidate for masked gather. 1243 static const int ScoreMaskedGatherCandidate = 1; 1244 /// ExtractElementInst from same vector and consecutive indexes. 1245 static const int ScoreConsecutiveExtracts = 4; 1246 /// ExtractElementInst from same vector and reversed indices. 1247 static const int ScoreReversedExtracts = 3; 1248 /// Constants. 1249 static const int ScoreConstants = 2; 1250 /// Instructions with the same opcode. 
1251 static const int ScoreSameOpcode = 2; 1252 /// Instructions with alt opcodes (e.g, add + sub). 1253 static const int ScoreAltOpcodes = 1; 1254 /// Identical instructions (a.k.a. splat or broadcast). 1255 static const int ScoreSplat = 1; 1256 /// Matching with an undef is preferable to failing. 1257 static const int ScoreUndef = 1; 1258 /// Score for failing to find a decent match. 1259 static const int ScoreFail = 0; 1260 /// Score if all users are vectorized. 1261 static const int ScoreAllUserVectorized = 1; 1262 1263 /// \returns the score of placing \p V1 and \p V2 in consecutive lanes. 1264 /// \p U1 and \p U2 are the users of \p V1 and \p V2. 1265 /// Also, checks if \p V1 and \p V2 are compatible with instructions in \p 1266 /// MainAltOps. 1267 int getShallowScore(Value *V1, Value *V2, Instruction *U1, Instruction *U2, 1268 ArrayRef<Value *> MainAltOps) const { 1269 if (!isValidElementType(V1->getType()) || 1270 !isValidElementType(V2->getType())) 1271 return LookAheadHeuristics::ScoreFail; 1272 1273 if (V1 == V2) { 1274 if (isa<LoadInst>(V1)) { 1275 // Retruns true if the users of V1 and V2 won't need to be extracted. 1276 auto AllUsersAreInternal = [U1, U2, this](Value *V1, Value *V2) { 1277 // Bail out if we have too many uses to save compilation time. 1278 static constexpr unsigned Limit = 8; 1279 if (V1->hasNUsesOrMore(Limit) || V2->hasNUsesOrMore(Limit)) 1280 return false; 1281 1282 auto AllUsersVectorized = [U1, U2, this](Value *V) { 1283 return llvm::all_of(V->users(), [U1, U2, this](Value *U) { 1284 return U == U1 || U == U2 || R.getTreeEntry(U) != nullptr; 1285 }); 1286 }; 1287 return AllUsersVectorized(V1) && AllUsersVectorized(V2); 1288 }; 1289 // A broadcast of a load can be cheaper on some targets. 1290 if (R.TTI->isLegalBroadcastLoad(V1->getType(), 1291 ElementCount::getFixed(NumLanes)) && 1292 ((int)V1->getNumUses() == NumLanes || 1293 AllUsersAreInternal(V1, V2))) 1294 return LookAheadHeuristics::ScoreSplatLoads; 1295 } 1296 return LookAheadHeuristics::ScoreSplat; 1297 } 1298 1299 auto *LI1 = dyn_cast<LoadInst>(V1); 1300 auto *LI2 = dyn_cast<LoadInst>(V2); 1301 if (LI1 && LI2) { 1302 if (LI1->getParent() != LI2->getParent() || !LI1->isSimple() || 1303 !LI2->isSimple()) 1304 return LookAheadHeuristics::ScoreFail; 1305 1306 std::optional<int> Dist = getPointersDiff( 1307 LI1->getType(), LI1->getPointerOperand(), LI2->getType(), 1308 LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true); 1309 if (!Dist || *Dist == 0) { 1310 if (getUnderlyingObject(LI1->getPointerOperand()) == 1311 getUnderlyingObject(LI2->getPointerOperand()) && 1312 R.TTI->isLegalMaskedGather( 1313 FixedVectorType::get(LI1->getType(), NumLanes), 1314 LI1->getAlign())) 1315 return LookAheadHeuristics::ScoreMaskedGatherCandidate; 1316 return LookAheadHeuristics::ScoreFail; 1317 } 1318 // The distance is too large - still may be profitable to use masked 1319 // loads/gathers. 1320 if (std::abs(*Dist) > NumLanes / 2) 1321 return LookAheadHeuristics::ScoreMaskedGatherCandidate; 1322 // This still will detect consecutive loads, but we might have "holes" 1323 // in some cases. It is ok for non-power-2 vectorization and may produce 1324 // better results. It should not affect current vectorization. 1325 return (*Dist > 0) ? 
LookAheadHeuristics::ScoreConsecutiveLoads 1326 : LookAheadHeuristics::ScoreReversedLoads; 1327 } 1328 1329 auto *C1 = dyn_cast<Constant>(V1); 1330 auto *C2 = dyn_cast<Constant>(V2); 1331 if (C1 && C2) 1332 return LookAheadHeuristics::ScoreConstants; 1333 1334 // Extracts from consecutive indexes of the same vector better score as 1335 // the extracts could be optimized away. 1336 Value *EV1; 1337 ConstantInt *Ex1Idx; 1338 if (match(V1, m_ExtractElt(m_Value(EV1), m_ConstantInt(Ex1Idx)))) { 1339 // Undefs are always profitable for extractelements. 1340 // Compiler can easily combine poison and extractelement <non-poison> or 1341 // undef and extractelement <poison>. But combining undef + 1342 // extractelement <non-poison-but-may-produce-poison> requires some 1343 // extra operations. 1344 if (isa<UndefValue>(V2)) 1345 return (isa<PoisonValue>(V2) || isUndefVector(EV1).all()) 1346 ? LookAheadHeuristics::ScoreConsecutiveExtracts 1347 : LookAheadHeuristics::ScoreSameOpcode; 1348 Value *EV2 = nullptr; 1349 ConstantInt *Ex2Idx = nullptr; 1350 if (match(V2, 1351 m_ExtractElt(m_Value(EV2), m_CombineOr(m_ConstantInt(Ex2Idx), 1352 m_Undef())))) { 1353 // Undefs are always profitable for extractelements. 1354 if (!Ex2Idx) 1355 return LookAheadHeuristics::ScoreConsecutiveExtracts; 1356 if (isUndefVector(EV2).all() && EV2->getType() == EV1->getType()) 1357 return LookAheadHeuristics::ScoreConsecutiveExtracts; 1358 if (EV2 == EV1) { 1359 int Idx1 = Ex1Idx->getZExtValue(); 1360 int Idx2 = Ex2Idx->getZExtValue(); 1361 int Dist = Idx2 - Idx1; 1362 // The distance is too large - still may be profitable to use 1363 // shuffles. 1364 if (std::abs(Dist) == 0) 1365 return LookAheadHeuristics::ScoreSplat; 1366 if (std::abs(Dist) > NumLanes / 2) 1367 return LookAheadHeuristics::ScoreSameOpcode; 1368 return (Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveExtracts 1369 : LookAheadHeuristics::ScoreReversedExtracts; 1370 } 1371 return LookAheadHeuristics::ScoreAltOpcodes; 1372 } 1373 return LookAheadHeuristics::ScoreFail; 1374 } 1375 1376 auto *I1 = dyn_cast<Instruction>(V1); 1377 auto *I2 = dyn_cast<Instruction>(V2); 1378 if (I1 && I2) { 1379 if (I1->getParent() != I2->getParent()) 1380 return LookAheadHeuristics::ScoreFail; 1381 SmallVector<Value *, 4> Ops(MainAltOps.begin(), MainAltOps.end()); 1382 Ops.push_back(I1); 1383 Ops.push_back(I2); 1384 InstructionsState S = getSameOpcode(Ops, TLI); 1385 // Note: Only consider instructions with <= 2 operands to avoid 1386 // complexity explosion. 1387 if (S.getOpcode() && 1388 (S.MainOp->getNumOperands() <= 2 || !MainAltOps.empty() || 1389 !S.isAltShuffle()) && 1390 all_of(Ops, [&S](Value *V) { 1391 return cast<Instruction>(V)->getNumOperands() == 1392 S.MainOp->getNumOperands(); 1393 })) 1394 return S.isAltShuffle() ? LookAheadHeuristics::ScoreAltOpcodes 1395 : LookAheadHeuristics::ScoreSameOpcode; 1396 } 1397 1398 if (isa<UndefValue>(V2)) 1399 return LookAheadHeuristics::ScoreUndef; 1400 1401 return LookAheadHeuristics::ScoreFail; 1402 } 1403 1404 /// Go through the operands of \p LHS and \p RHS recursively until 1405 /// MaxLevel, and return the cummulative score. \p U1 and \p U2 are 1406 /// the users of \p LHS and \p RHS (that is \p LHS and \p RHS are operands 1407 /// of \p U1 and \p U2), except at the beginning of the recursion where 1408 /// these are set to nullptr. 
1409 /// 1410 /// For example: 1411 /// \verbatim 1412 /// A[0] B[0] A[1] B[1] C[0] D[0] B[1] A[1] 1413 /// \ / \ / \ / \ / 1414 /// + + + + 1415 /// G1 G2 G3 G4 1416 /// \endverbatim 1417 /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at 1418 /// each level recursively, accumulating the score. It starts from matching 1419 /// the additions at level 0, then moves on to the loads (level 1). The 1420 /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and 1421 /// {B[0],B[1]} match with LookAheadHeuristics::ScoreConsecutiveLoads, while 1422 /// {A[0],C[0]} has a score of LookAheadHeuristics::ScoreFail. 1423 /// Please note that the order of the operands does not matter, as we 1424 /// evaluate the score of all profitable combinations of operands. In 1425 /// other words the score of G1 and G4 is the same as G1 and G2. This 1426 /// heuristic is based on ideas described in: 1427 /// Look-ahead SLP: Auto-vectorization in the presence of commutative 1428 /// operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha, 1429 /// Luís F. W. Góes 1430 int getScoreAtLevelRec(Value *LHS, Value *RHS, Instruction *U1, 1431 Instruction *U2, int CurrLevel, 1432 ArrayRef<Value *> MainAltOps) const { 1433 1434 // Get the shallow score of V1 and V2. 1435 int ShallowScoreAtThisLevel = 1436 getShallowScore(LHS, RHS, U1, U2, MainAltOps); 1437 1438 // If reached MaxLevel, 1439 // or if V1 and V2 are not instructions, 1440 // or if they are SPLAT, 1441 // or if they are not consecutive, 1442 // or if profitable to vectorize loads or extractelements, early return 1443 // the current cost. 1444 auto *I1 = dyn_cast<Instruction>(LHS); 1445 auto *I2 = dyn_cast<Instruction>(RHS); 1446 if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 || 1447 ShallowScoreAtThisLevel == LookAheadHeuristics::ScoreFail || 1448 (((isa<LoadInst>(I1) && isa<LoadInst>(I2)) || 1449 (I1->getNumOperands() > 2 && I2->getNumOperands() > 2) || 1450 (isa<ExtractElementInst>(I1) && isa<ExtractElementInst>(I2))) && 1451 ShallowScoreAtThisLevel)) 1452 return ShallowScoreAtThisLevel; 1453 assert(I1 && I2 && "Should have early exited."); 1454 1455 // Contains the I2 operand indexes that got matched with I1 operands. 1456 SmallSet<unsigned, 4> Op2Used; 1457 1458 // Recursion towards the operands of I1 and I2. We are trying all possible 1459 // operand pairs, and keeping track of the best score. 1460 for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands(); 1461 OpIdx1 != NumOperands1; ++OpIdx1) { 1462 // Try to pair op1I with the best operand of I2. 1463 int MaxTmpScore = 0; 1464 unsigned MaxOpIdx2 = 0; 1465 bool FoundBest = false; 1466 // If I2 is commutative try all combinations. 1467 unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1; 1468 unsigned ToIdx = isCommutative(I2) 1469 ? I2->getNumOperands() 1470 : std::min(I2->getNumOperands(), OpIdx1 + 1); 1471 assert(FromIdx <= ToIdx && "Bad index"); 1472 for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) { 1473 // Skip operands already paired with OpIdx1. 1474 if (Op2Used.count(OpIdx2)) 1475 continue; 1476 // Recursively calculate the cost at each level 1477 int TmpScore = 1478 getScoreAtLevelRec(I1->getOperand(OpIdx1), I2->getOperand(OpIdx2), 1479 I1, I2, CurrLevel + 1, std::nullopt); 1480 // Look for the best score. 
1481 if (TmpScore > LookAheadHeuristics::ScoreFail && 1482 TmpScore > MaxTmpScore) { 1483 MaxTmpScore = TmpScore; 1484 MaxOpIdx2 = OpIdx2; 1485 FoundBest = true; 1486 } 1487 } 1488 if (FoundBest) { 1489 // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it. 1490 Op2Used.insert(MaxOpIdx2); 1491 ShallowScoreAtThisLevel += MaxTmpScore; 1492 } 1493 } 1494 return ShallowScoreAtThisLevel; 1495 } 1496 }; 1497 /// A helper data structure to hold the operands of a vector of instructions. 1498 /// This supports a fixed vector length for all operand vectors. 1499 class VLOperands { 1500 /// For each operand we need (i) the value, and (ii) the opcode that it 1501 /// would be attached to if the expression was in a left-linearized form. 1502 /// This is required to avoid illegal operand reordering. 1503 /// For example: 1504 /// \verbatim 1505 /// 0 Op1 1506 /// |/ 1507 /// Op1 Op2 Linearized + Op2 1508 /// \ / ----------> |/ 1509 /// - - 1510 /// 1511 /// Op1 - Op2 (0 + Op1) - Op2 1512 /// \endverbatim 1513 /// 1514 /// Value Op1 is attached to a '+' operation, and Op2 to a '-'. 1515 /// 1516 /// Another way to think of this is to track all the operations across the 1517 /// path from the operand all the way to the root of the tree and to 1518 /// calculate the operation that corresponds to this path. For example, the 1519 /// path from Op2 to the root crosses the RHS of the '-', therefore the 1520 /// corresponding operation is a '-' (which matches the one in the 1521 /// linearized tree, as shown above). 1522 /// 1523 /// For lack of a better term, we refer to this operation as Accumulated 1524 /// Path Operation (APO). 1525 struct OperandData { 1526 OperandData() = default; 1527 OperandData(Value *V, bool APO, bool IsUsed) 1528 : V(V), APO(APO), IsUsed(IsUsed) {} 1529 /// The operand value. 1530 Value *V = nullptr; 1531 /// TreeEntries only allow a single opcode, or an alternate sequence of 1532 /// them (e.g, +, -). Therefore, we can safely use a boolean value for the 1533 /// APO. It is set to 'true' if 'V' is attached to an inverse operation 1534 /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise 1535 /// (e.g., Add/Mul) 1536 bool APO = false; 1537 /// Helper data for the reordering function. 1538 bool IsUsed = false; 1539 }; 1540 1541 /// During operand reordering, we are trying to select the operand at lane 1542 /// that matches best with the operand at the neighboring lane. Our 1543 /// selection is based on the type of value we are looking for. For example, 1544 /// if the neighboring lane has a load, we need to look for a load that is 1545 /// accessing a consecutive address. These strategies are summarized in the 1546 /// 'ReorderingMode' enumerator. 1547 enum class ReorderingMode { 1548 Load, ///< Matching loads to consecutive memory addresses 1549 Opcode, ///< Matching instructions based on opcode (same or alternate) 1550 Constant, ///< Matching constants 1551 Splat, ///< Matching the same instruction multiple times (broadcast) 1552 Failed, ///< We failed to create a vectorizable group 1553 }; 1554 1555 using OperandDataVec = SmallVector<OperandData, 2>; 1556 1557 /// A vector of operand vectors. 1558 SmallVector<OperandDataVec, 4> OpsVec; 1559 1560 const TargetLibraryInfo &TLI; 1561 const DataLayout &DL; 1562 ScalarEvolution &SE; 1563 const BoUpSLP &R; 1564 1565 /// \returns the operand data at \p OpIdx and \p Lane. 
  OperandData &getData(unsigned OpIdx, unsigned Lane) {
    return OpsVec[OpIdx][Lane];
  }

  /// \returns the operand data at \p OpIdx and \p Lane. Const version.
  const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
    return OpsVec[OpIdx][Lane];
  }

  /// Clears the used flag for all entries.
  void clearUsed() {
    for (unsigned OpIdx = 0, NumOperands = getNumOperands();
         OpIdx != NumOperands; ++OpIdx)
      for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
           ++Lane)
        OpsVec[OpIdx][Lane].IsUsed = false;
  }

  /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
  void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
    std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
  }

  /// \param Lane lane of the operands under analysis.
  /// \param OpIdx operand index in lane \p Lane for which we're looking for
  /// the best candidate.
  /// \param Idx operand index of the current candidate value.
  /// \returns The additional score due to possible broadcasting of the
  /// elements in the lane. It is more profitable to have a power-of-2 number
  /// of unique elements in the lane, since such a lane is more likely to be
  /// vectorized after removing duplicates. Currently the SLP vectorizer only
  /// supports vectorization of a power-of-2 number of unique scalars.
  int getSplatScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
    Value *IdxLaneV = getData(Idx, Lane).V;
    if (!isa<Instruction>(IdxLaneV) || IdxLaneV == getData(OpIdx, Lane).V)
      return 0;
    SmallPtrSet<Value *, 4> Uniques;
    for (unsigned Ln = 0, E = getNumLanes(); Ln < E; ++Ln) {
      if (Ln == Lane)
        continue;
      Value *OpIdxLnV = getData(OpIdx, Ln).V;
      if (!isa<Instruction>(OpIdxLnV))
        return 0;
      Uniques.insert(OpIdxLnV);
    }
    int UniquesCount = Uniques.size();
    int UniquesCntWithIdxLaneV =
        Uniques.contains(IdxLaneV) ? UniquesCount : UniquesCount + 1;
    Value *OpIdxLaneV = getData(OpIdx, Lane).V;
    int UniquesCntWithOpIdxLaneV =
        Uniques.contains(OpIdxLaneV) ? UniquesCount : UniquesCount + 1;
    if (UniquesCntWithIdxLaneV == UniquesCntWithOpIdxLaneV)
      return 0;
    return (PowerOf2Ceil(UniquesCntWithOpIdxLaneV) -
            UniquesCntWithOpIdxLaneV) -
           (PowerOf2Ceil(UniquesCntWithIdxLaneV) - UniquesCntWithIdxLaneV);
  }

  /// \param Lane lane of the operands under analysis.
  /// \param OpIdx operand index in lane \p Lane for which we're looking for
  /// the best candidate.
  /// \param Idx operand index of the current candidate value.
  /// \returns The additional score for the scalar whose users are all
  /// vectorized.
  int getExternalUseScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
    Value *IdxLaneV = getData(Idx, Lane).V;
    Value *OpIdxLaneV = getData(OpIdx, Lane).V;
    // Do not care about number of uses for vector-like instructions
    // (extractelement/extractvalue with constant indices), they are extracts
    // themselves and already externally used. Vectorization of such
    // instructions does not add extra extractelement instruction, just may
    // remove it.
    if (isVectorLikeInstWithConstOps(IdxLaneV) &&
        isVectorLikeInstWithConstOps(OpIdxLaneV))
      return LookAheadHeuristics::ScoreAllUserVectorized;
    auto *IdxLaneI = dyn_cast<Instruction>(IdxLaneV);
    if (!IdxLaneI || !isa<Instruction>(OpIdxLaneV))
      return 0;
    return R.areAllUsersVectorized(IdxLaneI)
               ? LookAheadHeuristics::ScoreAllUserVectorized
               : 0;
  }

  /// Score scaling factor for fully compatible instructions but with a
  /// different number of external uses. Allows better selection of the
  /// instructions with fewer external uses.
  static const int ScoreScaleFactor = 10;

  /// \Returns the look-ahead score, which tells us how much the sub-trees
  /// rooted at \p LHS and \p RHS match; the more they match, the higher the
  /// score. This helps break ties in an informed way when we cannot decide on
  /// the order of the operands by just considering the immediate
  /// predecessors.
  int getLookAheadScore(Value *LHS, Value *RHS, ArrayRef<Value *> MainAltOps,
                        int Lane, unsigned OpIdx, unsigned Idx,
                        bool &IsUsed) {
    LookAheadHeuristics LookAhead(TLI, DL, SE, R, getNumLanes(),
                                  LookAheadMaxDepth);
    // Keep track of the instruction stack as we recurse into the operands
    // during the look-ahead score exploration.
    int Score =
        LookAhead.getScoreAtLevelRec(LHS, RHS, /*U1=*/nullptr, /*U2=*/nullptr,
                                     /*CurrLevel=*/1, MainAltOps);
    if (Score) {
      int SplatScore = getSplatScore(Lane, OpIdx, Idx);
      if (Score <= -SplatScore) {
        // Set the minimum score for splat-like sequence to avoid setting
        // failed state.
        Score = 1;
      } else {
        Score += SplatScore;
        // Scale score to see the difference between different operands
        // and similar operands but all vectorized/not all vectorized
        // uses. It does not affect actual selection of the best
        // compatible operand in general, just allows to select the
        // operand with all vectorized uses.
        Score *= ScoreScaleFactor;
        Score += getExternalUseScore(Lane, OpIdx, Idx);
        IsUsed = true;
      }
    }
    return Score;
  }

  /// Best defined scores per lane between the passes. Used to choose the
  /// best operand (with the highest score) between the passes.
  /// The key - {Operand Index, Lane}.
  /// The value - the best score between the passes for the lane and the
  /// operand.
  SmallDenseMap<std::pair<unsigned, unsigned>, unsigned, 8>
      BestScoresPerLanes;

  // Search all operands in Ops[*][Lane] for the one that best matches
  // Ops[OpIdx][LastLane] and return its operand index.
  // If no good match can be found, return std::nullopt.
  std::optional<unsigned>
  getBestOperand(unsigned OpIdx, int Lane, int LastLane,
                 ArrayRef<ReorderingMode> ReorderingModes,
                 ArrayRef<Value *> MainAltOps) {
    unsigned NumOperands = getNumOperands();

    // The operand of the previous lane at OpIdx.
    Value *OpLastLane = getData(OpIdx, LastLane).V;

    // Our strategy mode for OpIdx.
    ReorderingMode RMode = ReorderingModes[OpIdx];
    if (RMode == ReorderingMode::Failed)
      return std::nullopt;

    // The linearized opcode of the operand at OpIdx, Lane.
    bool OpIdxAPO = getData(OpIdx, Lane).APO;

    // The best operand index and its score.
    // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
    // are using the score to differentiate between the two.
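    // Illustrative example only (hypothetical values, assuming getSplatScore
    // returns 0): two candidates that both match by opcode may get the same
    // raw look-ahead score of 2; after scaling by ScoreScaleFactor both
    // become 20, and the candidate whose users are all vectorized gets one
    // extra point from getExternalUseScore (21), which breaks the tie.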
1721 struct BestOpData { 1722 std::optional<unsigned> Idx; 1723 unsigned Score = 0; 1724 } BestOp; 1725 BestOp.Score = 1726 BestScoresPerLanes.try_emplace(std::make_pair(OpIdx, Lane), 0) 1727 .first->second; 1728 1729 // Track if the operand must be marked as used. If the operand is set to 1730 // Score 1 explicitly (because of non power-of-2 unique scalars, we may 1731 // want to reestimate the operands again on the following iterations). 1732 bool IsUsed = 1733 RMode == ReorderingMode::Splat || RMode == ReorderingMode::Constant; 1734 // Iterate through all unused operands and look for the best. 1735 for (unsigned Idx = 0; Idx != NumOperands; ++Idx) { 1736 // Get the operand at Idx and Lane. 1737 OperandData &OpData = getData(Idx, Lane); 1738 Value *Op = OpData.V; 1739 bool OpAPO = OpData.APO; 1740 1741 // Skip already selected operands. 1742 if (OpData.IsUsed) 1743 continue; 1744 1745 // Skip if we are trying to move the operand to a position with a 1746 // different opcode in the linearized tree form. This would break the 1747 // semantics. 1748 if (OpAPO != OpIdxAPO) 1749 continue; 1750 1751 // Look for an operand that matches the current mode. 1752 switch (RMode) { 1753 case ReorderingMode::Load: 1754 case ReorderingMode::Constant: 1755 case ReorderingMode::Opcode: { 1756 bool LeftToRight = Lane > LastLane; 1757 Value *OpLeft = (LeftToRight) ? OpLastLane : Op; 1758 Value *OpRight = (LeftToRight) ? Op : OpLastLane; 1759 int Score = getLookAheadScore(OpLeft, OpRight, MainAltOps, Lane, 1760 OpIdx, Idx, IsUsed); 1761 if (Score > static_cast<int>(BestOp.Score)) { 1762 BestOp.Idx = Idx; 1763 BestOp.Score = Score; 1764 BestScoresPerLanes[std::make_pair(OpIdx, Lane)] = Score; 1765 } 1766 break; 1767 } 1768 case ReorderingMode::Splat: 1769 if (Op == OpLastLane) 1770 BestOp.Idx = Idx; 1771 break; 1772 case ReorderingMode::Failed: 1773 llvm_unreachable("Not expected Failed reordering mode."); 1774 } 1775 } 1776 1777 if (BestOp.Idx) { 1778 getData(*BestOp.Idx, Lane).IsUsed = IsUsed; 1779 return BestOp.Idx; 1780 } 1781 // If we could not find a good match return std::nullopt. 1782 return std::nullopt; 1783 } 1784 1785 /// Helper for reorderOperandVecs. 1786 /// \returns the lane that we should start reordering from. This is the one 1787 /// which has the least number of operands that can freely move about or 1788 /// less profitable because it already has the most optimal set of operands. 1789 unsigned getBestLaneToStartReordering() const { 1790 unsigned Min = UINT_MAX; 1791 unsigned SameOpNumber = 0; 1792 // std::pair<unsigned, unsigned> is used to implement a simple voting 1793 // algorithm and choose the lane with the least number of operands that 1794 // can freely move about or less profitable because it already has the 1795 // most optimal set of operands. The first unsigned is a counter for 1796 // voting, the second unsigned is the counter of lanes with instructions 1797 // with same/alternate opcodes and same parent basic block. 1798 MapVector<unsigned, std::pair<unsigned, unsigned>> HashMap; 1799 // Try to be closer to the original results, if we have multiple lanes 1800 // with same cost. If 2 lanes have the same cost, use the one with the 1801 // lowest index. 1802 for (int I = getNumLanes(); I > 0; --I) { 1803 unsigned Lane = I - 1; 1804 OperandsOrderData NumFreeOpsHash = 1805 getMaxNumOperandsThatCanBeReordered(Lane); 1806 // Compare the number of operands that can move and choose the one with 1807 // the least number. 
1808 if (NumFreeOpsHash.NumOfAPOs < Min) { 1809 Min = NumFreeOpsHash.NumOfAPOs; 1810 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent; 1811 HashMap.clear(); 1812 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1813 } else if (NumFreeOpsHash.NumOfAPOs == Min && 1814 NumFreeOpsHash.NumOpsWithSameOpcodeParent < SameOpNumber) { 1815 // Select the most optimal lane in terms of number of operands that 1816 // should be moved around. 1817 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent; 1818 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1819 } else if (NumFreeOpsHash.NumOfAPOs == Min && 1820 NumFreeOpsHash.NumOpsWithSameOpcodeParent == SameOpNumber) { 1821 auto *It = HashMap.find(NumFreeOpsHash.Hash); 1822 if (It == HashMap.end()) 1823 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane); 1824 else 1825 ++It->second.first; 1826 } 1827 } 1828 // Select the lane with the minimum counter. 1829 unsigned BestLane = 0; 1830 unsigned CntMin = UINT_MAX; 1831 for (const auto &Data : reverse(HashMap)) { 1832 if (Data.second.first < CntMin) { 1833 CntMin = Data.second.first; 1834 BestLane = Data.second.second; 1835 } 1836 } 1837 return BestLane; 1838 } 1839 1840 /// Data structure that helps to reorder operands. 1841 struct OperandsOrderData { 1842 /// The best number of operands with the same APOs, which can be 1843 /// reordered. 1844 unsigned NumOfAPOs = UINT_MAX; 1845 /// Number of operands with the same/alternate instruction opcode and 1846 /// parent. 1847 unsigned NumOpsWithSameOpcodeParent = 0; 1848 /// Hash for the actual operands ordering. 1849 /// Used to count operands, actually their position id and opcode 1850 /// value. It is used in the voting mechanism to find the lane with the 1851 /// least number of operands that can freely move about or less profitable 1852 /// because it already has the most optimal set of operands. Can be 1853 /// replaced with SmallVector<unsigned> instead but hash code is faster 1854 /// and requires less memory. 1855 unsigned Hash = 0; 1856 }; 1857 /// \returns the maximum number of operands that are allowed to be reordered 1858 /// for \p Lane and the number of compatible instructions(with the same 1859 /// parent/opcode). This is used as a heuristic for selecting the first lane 1860 /// to start operand reordering. 1861 OperandsOrderData getMaxNumOperandsThatCanBeReordered(unsigned Lane) const { 1862 unsigned CntTrue = 0; 1863 unsigned NumOperands = getNumOperands(); 1864 // Operands with the same APO can be reordered. We therefore need to count 1865 // how many of them we have for each APO, like this: Cnt[APO] = x. 1866 // Since we only have two APOs, namely true and false, we can avoid using 1867 // a map. Instead we can simply count the number of operands that 1868 // correspond to one of them (in this case the 'true' APO), and calculate 1869 // the other by subtracting it from the total number of operands. 1870 // Operands with the same instruction opcode and parent are more 1871 // profitable since we don't need to move them in many cases, with a high 1872 // probability such lane already can be vectorized effectively. 
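    // Illustrative example only (hypothetical lane): with NumOperands == 4
    // and operand APOs {false, true, false, false}, CntTrue ends up as 1, so
    // the number of operands sharing the same APO that can be reordered
    // together is max(1, 4 - 1) == 3.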
    bool AllUndefs = true;
    unsigned NumOpsWithSameOpcodeParent = 0;
    Instruction *OpcodeI = nullptr;
    BasicBlock *Parent = nullptr;
    unsigned Hash = 0;
    for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
      const OperandData &OpData = getData(OpIdx, Lane);
      if (OpData.APO)
        ++CntTrue;
      // Use Boyer-Moore majority voting for finding the majority opcode and
      // the number of times it occurs.
      if (auto *I = dyn_cast<Instruction>(OpData.V)) {
        if (!OpcodeI || !getSameOpcode({OpcodeI, I}, TLI).getOpcode() ||
            I->getParent() != Parent) {
          if (NumOpsWithSameOpcodeParent == 0) {
            NumOpsWithSameOpcodeParent = 1;
            OpcodeI = I;
            Parent = I->getParent();
          } else {
            --NumOpsWithSameOpcodeParent;
          }
        } else {
          ++NumOpsWithSameOpcodeParent;
        }
      }
      Hash = hash_combine(
          Hash, hash_value((OpIdx + 1) * (OpData.V->getValueID() + 1)));
      AllUndefs = AllUndefs && isa<UndefValue>(OpData.V);
    }
    if (AllUndefs)
      return {};
    OperandsOrderData Data;
    Data.NumOfAPOs = std::max(CntTrue, NumOperands - CntTrue);
    Data.NumOpsWithSameOpcodeParent = NumOpsWithSameOpcodeParent;
    Data.Hash = Hash;
    return Data;
  }

  /// Go through the instructions in VL and append their operands.
  void appendOperandsOfVL(ArrayRef<Value *> VL) {
    assert(!VL.empty() && "Bad VL");
    assert((empty() || VL.size() == getNumLanes()) &&
           "Expected same number of lanes");
    assert(isa<Instruction>(VL[0]) && "Expected instruction");
    unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
    OpsVec.resize(NumOperands);
    unsigned NumLanes = VL.size();
    for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
      OpsVec[OpIdx].resize(NumLanes);
      for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
        assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
        // Our tree has just 3 nodes: the root and two operands.
        // It is therefore trivial to get the APO. We only need to check the
        // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
        // RHS operand. The LHS operand of both add and sub is never attached
        // to an inverse operation in the linearized form, therefore its APO
        // is false. The APO of the RHS is true only if VL[Lane] is an inverse
        // operation.

        // Since operand reordering is performed on groups of commutative
        // operations or alternating sequences (e.g., +, -), we can safely
        // tell the inverse operations by checking commutativity.
        bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
        bool APO = (OpIdx == 0) ? false : IsInverseOperation;
        OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
                               APO, false};
      }
    }
  }

  /// \returns the number of operands.
  unsigned getNumOperands() const { return OpsVec.size(); }

  /// \returns the number of lanes.
  unsigned getNumLanes() const { return OpsVec[0].size(); }

  /// \returns the operand value at \p OpIdx and \p Lane.
  Value *getValue(unsigned OpIdx, unsigned Lane) const {
    return getData(OpIdx, Lane).V;
  }

  /// \returns true if the data structure is empty.
  bool empty() const { return OpsVec.empty(); }

  /// Clears the data.
  void clear() { OpsVec.clear(); }

  /// \Returns true if there are enough operands identical to \p Op to fill
  /// the whole vector.
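  /// For example (an illustrative 2-operand, 4-lane case, assuming all APOs
  /// are compatible):
  /// \verbatim
  ///   Operand 0: A A A A
  ///   Operand 1: B A C A
  /// \endverbatim
  /// Op == A taken from lane 0 can be matched with an unused A in every
  /// other lane, so A can fill the whole vector by broadcasting.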
  /// Note: This modifies the 'IsUsed' flag, so a clearUsed() must follow.
  bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
    bool OpAPO = getData(OpIdx, Lane).APO;
    for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
      if (Ln == Lane)
        continue;
      // This is set to true if we found a candidate for broadcast at Lane.
      bool FoundCandidate = false;
      for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
        OperandData &Data = getData(OpI, Ln);
        if (Data.APO != OpAPO || Data.IsUsed)
          continue;
        if (Data.V == Op) {
          FoundCandidate = true;
          Data.IsUsed = true;
          break;
        }
      }
      if (!FoundCandidate)
        return false;
    }
    return true;
  }

public:
  /// Initialize with all the operands of the instruction vector \p RootVL.
  VLOperands(ArrayRef<Value *> RootVL, const TargetLibraryInfo &TLI,
             const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R)
      : TLI(TLI), DL(DL), SE(SE), R(R) {
    // Append all the operands of RootVL.
    appendOperandsOfVL(RootVL);
  }

  /// \Returns a value vector with the operands across all lanes for the
  /// operand at \p OpIdx.
  ValueList getVL(unsigned OpIdx) const {
    ValueList OpVL(OpsVec[OpIdx].size());
    assert(OpsVec[OpIdx].size() == getNumLanes() &&
           "Expected same num of lanes across all operands");
    for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
      OpVL[Lane] = OpsVec[OpIdx][Lane].V;
    return OpVL;
  }

  // Performs operand reordering for 2 or more operands.
  // The original operands are in OrigOps[OpIdx][Lane].
  // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'.
  void reorder() {
    unsigned NumOperands = getNumOperands();
    unsigned NumLanes = getNumLanes();
    // Each operand has its own mode. We are using this mode to help us select
    // the instructions for each lane, so that they match best with the ones
    // we have selected so far.
    SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);

    // This is a greedy single-pass algorithm. We are going over each lane
    // once and deciding on the best order right away with no back-tracking.
    // However, in order to increase its effectiveness, we start with the lane
    // that has operands that can move the least. For example, given the
    // following lanes:
    // Lane 0 : A[0] = B[0] + C[0]   // Visited 3rd
    // Lane 1 : A[1] = C[1] - B[1]   // Visited 1st
    // Lane 2 : A[2] = B[2] + C[2]   // Visited 2nd
    // Lane 3 : A[3] = C[3] - B[3]   // Visited 4th
    // we will start at Lane 1, since the operands of the subtraction cannot
    // be reordered. Then we will visit the rest of the lanes in a circular
    // fashion. That is, Lane 2, then Lane 0, and finally Lane 3.

    // Find the first lane that we will start our search from.
    unsigned FirstLane = getBestLaneToStartReordering();

    // Initialize the modes.
    for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
      Value *OpLane0 = getValue(OpIdx, FirstLane);
      // Keep track of whether we have instructions with all the same opcode
      // on one side.
      if (isa<LoadInst>(OpLane0))
        ReorderingModes[OpIdx] = ReorderingMode::Load;
      else if (isa<Instruction>(OpLane0)) {
        // Check if OpLane0 should be broadcast.
        if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
          ReorderingModes[OpIdx] = ReorderingMode::Splat;
        else
          ReorderingModes[OpIdx] = ReorderingMode::Opcode;
      } else if (isa<Constant>(OpLane0))
        ReorderingModes[OpIdx] = ReorderingMode::Constant;
      else if (isa<Argument>(OpLane0))
        // Our best hope is a Splat. It may save some cost in some cases.
        ReorderingModes[OpIdx] = ReorderingMode::Splat;
      else
        // NOTE: This should be unreachable.
        ReorderingModes[OpIdx] = ReorderingMode::Failed;
    }

    // Check that we don't have the same operands. There is no need to reorder
    // if the operands are just a perfect diamond or shuffled diamond match.
    // Do not skip reordering for possible broadcasts or for a non-power-of-2
    // number of scalars (just for now).
    auto &&SkipReordering = [this]() {
      SmallPtrSet<Value *, 4> UniqueValues;
      ArrayRef<OperandData> Op0 = OpsVec.front();
      for (const OperandData &Data : Op0)
        UniqueValues.insert(Data.V);
      for (ArrayRef<OperandData> Op : drop_begin(OpsVec, 1)) {
        if (any_of(Op, [&UniqueValues](const OperandData &Data) {
              return !UniqueValues.contains(Data.V);
            }))
          return false;
      }
      // TODO: Check if we can remove a check for non-power-2 number of
      // scalars after full support of non-power-2 vectorization.
      return UniqueValues.size() != 2 && isPowerOf2_32(UniqueValues.size());
    };

    // If the initial strategy fails for any of the operand indexes, then we
    // perform reordering again in a second pass. This helps avoid assigning
    // high priority to the failed strategy, and should improve reordering for
    // the non-failed operand indexes.
    for (int Pass = 0; Pass != 2; ++Pass) {
      // Check if there is no need to reorder the operands since they are a
      // perfect or shuffled diamond match.
      // This is needed to avoid extra external use cost counting for
      // shuffled matches, which may cause regressions.
      if (SkipReordering())
        break;
      // Skip the second pass if the first pass did not fail.
      bool StrategyFailed = false;
      // Mark all operand data as free to use.
      clearUsed();
      // We keep the original operand order for the FirstLane, so reorder the
      // rest of the lanes. We are visiting the nodes in a circular fashion,
      // using FirstLane as the center point and increasing the radius
      // distance.
      SmallVector<SmallVector<Value *, 2>> MainAltOps(NumOperands);
      for (unsigned I = 0; I < NumOperands; ++I)
        MainAltOps[I].push_back(getData(I, FirstLane).V);

      for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
        // Visit the lane on the right and then the lane on the left.
        for (int Direction : {+1, -1}) {
          int Lane = FirstLane + Direction * Distance;
          if (Lane < 0 || Lane >= (int)NumLanes)
            continue;
          int LastLane = Lane - Direction;
          assert(LastLane >= 0 && LastLane < (int)NumLanes &&
                 "Out of bounds");
          // Look for a good match for each operand.
          for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
            // Search for the operand that matches SortedOps[OpIdx][Lane-1].
            std::optional<unsigned> BestIdx = getBestOperand(
                OpIdx, Lane, LastLane, ReorderingModes, MainAltOps[OpIdx]);
            // By not selecting a value, we allow the operands that follow to
            // select a better matching value. We will get a non-null value in
            // the next run of getBestOperand().
2116 if (BestIdx) { 2117 // Swap the current operand with the one returned by 2118 // getBestOperand(). 2119 swap(OpIdx, *BestIdx, Lane); 2120 } else { 2121 // We failed to find a best operand, set mode to 'Failed'. 2122 ReorderingModes[OpIdx] = ReorderingMode::Failed; 2123 // Enable the second pass. 2124 StrategyFailed = true; 2125 } 2126 // Try to get the alternate opcode and follow it during analysis. 2127 if (MainAltOps[OpIdx].size() != 2) { 2128 OperandData &AltOp = getData(OpIdx, Lane); 2129 InstructionsState OpS = 2130 getSameOpcode({MainAltOps[OpIdx].front(), AltOp.V}, TLI); 2131 if (OpS.getOpcode() && OpS.isAltShuffle()) 2132 MainAltOps[OpIdx].push_back(AltOp.V); 2133 } 2134 } 2135 } 2136 } 2137 // Skip second pass if the strategy did not fail. 2138 if (!StrategyFailed) 2139 break; 2140 } 2141 } 2142 2143 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2144 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) { 2145 switch (RMode) { 2146 case ReorderingMode::Load: 2147 return "Load"; 2148 case ReorderingMode::Opcode: 2149 return "Opcode"; 2150 case ReorderingMode::Constant: 2151 return "Constant"; 2152 case ReorderingMode::Splat: 2153 return "Splat"; 2154 case ReorderingMode::Failed: 2155 return "Failed"; 2156 } 2157 llvm_unreachable("Unimplemented Reordering Type"); 2158 } 2159 2160 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode, 2161 raw_ostream &OS) { 2162 return OS << getModeStr(RMode); 2163 } 2164 2165 /// Debug print. 2166 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) { 2167 printMode(RMode, dbgs()); 2168 } 2169 2170 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) { 2171 return printMode(RMode, OS); 2172 } 2173 2174 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const { 2175 const unsigned Indent = 2; 2176 unsigned Cnt = 0; 2177 for (const OperandDataVec &OpDataVec : OpsVec) { 2178 OS << "Operand " << Cnt++ << "\n"; 2179 for (const OperandData &OpData : OpDataVec) { 2180 OS.indent(Indent) << "{"; 2181 if (Value *V = OpData.V) 2182 OS << *V; 2183 else 2184 OS << "null"; 2185 OS << ", APO:" << OpData.APO << "}\n"; 2186 } 2187 OS << "\n"; 2188 } 2189 return OS; 2190 } 2191 2192 /// Debug print. 2193 LLVM_DUMP_METHOD void dump() const { print(dbgs()); } 2194 #endif 2195 }; 2196 2197 /// Evaluate each pair in \p Candidates and return index into \p Candidates 2198 /// for a pair which have highest score deemed to have best chance to form 2199 /// root of profitable tree to vectorize. Return std::nullopt if no candidate 2200 /// scored above the LookAheadHeuristics::ScoreFail. \param Limit Lower limit 2201 /// of the cost, considered to be good enough score. 2202 std::optional<int> 2203 findBestRootPair(ArrayRef<std::pair<Value *, Value *>> Candidates, 2204 int Limit = LookAheadHeuristics::ScoreFail) { 2205 LookAheadHeuristics LookAhead(*TLI, *DL, *SE, *this, /*NumLanes=*/2, 2206 RootLookAheadMaxDepth); 2207 int BestScore = Limit; 2208 std::optional<int> Index; 2209 for (int I : seq<int>(0, Candidates.size())) { 2210 int Score = LookAhead.getScoreAtLevelRec(Candidates[I].first, 2211 Candidates[I].second, 2212 /*U1=*/nullptr, /*U2=*/nullptr, 2213 /*Level=*/1, std::nullopt); 2214 if (Score > BestScore) { 2215 BestScore = Score; 2216 Index = I; 2217 } 2218 } 2219 return Index; 2220 } 2221 2222 /// Checks if the instruction is marked for deletion. 2223 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); } 2224 2225 /// Removes an instruction from its block and eventually deletes it. 
  /// It's like Instruction::eraseFromParent() except that the actual deletion
  /// is delayed until BoUpSLP is destructed.
  void eraseInstruction(Instruction *I) {
    DeletedInstructions.insert(I);
  }

  /// Checks if the instruction was already analyzed for being possible
  /// reduction root.
  bool isAnalyzedReductionRoot(Instruction *I) const {
    return AnalyzedReductionsRoots.count(I);
  }
  /// Register given instruction as already analyzed for being possible
  /// reduction root.
  void analyzedReductionRoot(Instruction *I) {
    AnalyzedReductionsRoots.insert(I);
  }
  /// Checks if the provided list of reduced values was checked already for
  /// vectorization.
  bool areAnalyzedReductionVals(ArrayRef<Value *> VL) const {
    return AnalyzedReductionVals.contains(hash_value(VL));
  }
  /// Adds the list of reduced values to the list of already checked values
  /// for the vectorization.
  void analyzedReductionVals(ArrayRef<Value *> VL) {
    AnalyzedReductionVals.insert(hash_value(VL));
  }
  /// Clear the list of the analyzed reduction root instructions.
  void clearReductionData() {
    AnalyzedReductionsRoots.clear();
    AnalyzedReductionVals.clear();
  }
  /// Checks if the given value is gathered in one of the nodes.
  bool isAnyGathered(const SmallDenseSet<Value *> &Vals) const {
    return any_of(MustGather, [&](Value *V) { return Vals.contains(V); });
  }

  /// Check if the value is vectorized in the tree.
  bool isVectorized(Value *V) const { return getTreeEntry(V); }

  ~BoUpSLP();

private:
  /// Determine if a vectorized value \p V can be demoted to
  /// a smaller type with a truncation. We collect the values that will be
  /// demoted in ToDemote and additional roots that require investigating in
  /// Roots.
  /// \param DemotedConsts list of Instruction/OperandIndex pairs that are
  /// constant and to be demoted. Required to correctly identify constant nodes
  /// to be demoted.
  bool collectValuesToDemote(
      Value *V, SmallVectorImpl<Value *> &ToDemote,
      DenseMap<Instruction *, SmallVector<unsigned>> &DemotedConsts,
      SmallVectorImpl<Value *> &Roots, DenseSet<Value *> &Visited) const;

  /// Check if the operands on the edges \p Edges of the \p UserTE allow
  /// reordering (i.e. the operands can be reordered because they have only one
  /// user and are reorderable).
  /// \param ReorderableGathers List of all gather nodes that require reordering
  /// (e.g., gather of extractelements or partially vectorizable loads).
  /// \param GatherOps List of gather operand nodes for \p UserTE that require
  /// reordering, subset of \p NonVectorized.
  bool
  canReorderOperands(TreeEntry *UserTE,
                     SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
                     ArrayRef<TreeEntry *> ReorderableGathers,
                     SmallVectorImpl<TreeEntry *> &GatherOps);

  /// Checks if the given \p TE is a gather node with clustered reused scalars
  /// and reorders it per given \p Mask.
  void reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const;

  /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
  /// if any. If it is not vectorized (gather node), returns nullptr.
  TreeEntry *getVectorizedOperand(TreeEntry *UserTE, unsigned OpIdx) {
    ArrayRef<Value *> VL = UserTE->getOperand(OpIdx);
    TreeEntry *TE = nullptr;
    const auto *It = find_if(VL, [&](Value *V) {
      TE = getTreeEntry(V);
      if (TE && is_contained(TE->UserTreeIndices, EdgeInfo(UserTE, OpIdx)))
        return true;
      auto It = MultiNodeScalars.find(V);
      if (It != MultiNodeScalars.end()) {
        for (TreeEntry *E : It->second) {
          if (is_contained(E->UserTreeIndices, EdgeInfo(UserTE, OpIdx))) {
            TE = E;
            return true;
          }
        }
      }
      return false;
    });
    if (It != VL.end()) {
      assert(TE->isSame(VL) && "Expected same scalars.");
      return TE;
    }
    return nullptr;
  }

  /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
  /// if any. If it is not vectorized (gather node), returns nullptr.
  const TreeEntry *getVectorizedOperand(const TreeEntry *UserTE,
                                        unsigned OpIdx) const {
    return const_cast<BoUpSLP *>(this)->getVectorizedOperand(
        const_cast<TreeEntry *>(UserTE), OpIdx);
  }

  /// Checks if all users of \p I are part of the vectorization tree.
  bool areAllUsersVectorized(
      Instruction *I,
      const SmallDenseSet<Value *> *VectorizedVals = nullptr) const;

  /// Return information about the vector formed for the specified index
  /// of a vector of (the same) instruction.
  TargetTransformInfo::OperandValueInfo getOperandInfo(ArrayRef<Value *> Ops);

  /// \returns the graph entry for the \p Idx operand of the \p E entry.
  const TreeEntry *getOperandEntry(const TreeEntry *E, unsigned Idx) const;

  /// \returns the cost of the vectorizable entry.
  InstructionCost getEntryCost(const TreeEntry *E,
                               ArrayRef<Value *> VectorizedVals,
                               SmallPtrSetImpl<Value *> &CheckedExtracts);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
                     const EdgeInfo &EI);

  /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
  /// be vectorized to use the original vector (or aggregate "bitcast" to a
  /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
  /// returns false, setting \p CurrentOrder to either an empty vector or a
  /// non-identity permutation that allows reusing extract instructions.
  /// \param ResizeAllowed indicates whether it is allowed to handle subvector
  /// extract order.
  bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
                       SmallVectorImpl<unsigned> &CurrentOrder,
                       bool ResizeAllowed = false) const;

  /// Vectorize a single entry in the tree.
  /// \param PostponedPHIs true if the emission of phi nodes needs to be
  /// postponed to avoid issues with def-use order.
  Value *vectorizeTree(TreeEntry *E, bool PostponedPHIs);

  /// Vectorize a single entry in the tree, the \p Idx-th operand of the entry
  /// \p E.
  /// \param PostponedPHIs true if the emission of phi nodes needs to be
  /// postponed to avoid issues with def-use order.
  Value *vectorizeOperand(TreeEntry *E, unsigned NodeIdx, bool PostponedPHIs);

  /// Create a new vector from a list of scalar values. Produces a sequence
  /// which exploits values reused across lanes, and arranges the inserts
  /// for ease of later optimization.
  template <typename BVTy, typename ResTy, typename... Args>
  ResTy processBuildVector(const TreeEntry *E, Args &...Params);

  /// Create a new vector from a list of scalar values. Produces a sequence
  /// which exploits values reused across lanes, and arranges the inserts
  /// for ease of later optimization.
  Value *createBuildVector(const TreeEntry *E);

  /// Returns the instruction in the bundle, which can be used as a base point
  /// for scheduling. Usually it is the last instruction in the bundle, except
  /// for the case when all operands are external (in this case, it is the first
  /// instruction in the list).
  Instruction &getLastInstructionInBundle(const TreeEntry *E);

  /// Tries to find extractelement instructions with constant indices from a
  /// fixed vector type and gather such instructions into a group, which is
  /// highly likely to be detected as a shuffle of 1 or 2 input vectors. If
  /// this attempt was successful, the matched scalars are replaced by poison
  /// values in \p VL for future analysis.
  std::optional<TargetTransformInfo::ShuffleKind>
  tryToGatherSingleRegisterExtractElements(MutableArrayRef<Value *> VL,
                                           SmallVectorImpl<int> &Mask) const;

  /// Tries to find extractelement instructions with constant indices from a
  /// fixed vector type and gather such instructions into a group, which is
  /// highly likely to be detected as a shuffle of 1 or 2 input vectors. If
  /// this attempt was successful, the matched scalars are replaced by poison
  /// values in \p VL for future analysis.
  SmallVector<std::optional<TargetTransformInfo::ShuffleKind>>
  tryToGatherExtractElements(SmallVectorImpl<Value *> &VL,
                             SmallVectorImpl<int> &Mask,
                             unsigned NumParts) const;

  /// Checks if the gathered \p VL can be represented as a single register
  /// shuffle(s) of previous tree entries.
  /// \param TE Tree entry checked for permutation.
  /// \param VL List of scalars (a subset of the TE scalars), checked for
  /// permutations. Must form a single-register vector.
  /// \returns ShuffleKind, if gathered values can be represented as shuffles of
  /// previous tree entries. \p Part of \p Mask is filled with the shuffle mask.
  std::optional<TargetTransformInfo::ShuffleKind>
  isGatherShuffledSingleRegisterEntry(
      const TreeEntry *TE, ArrayRef<Value *> VL, MutableArrayRef<int> Mask,
      SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part);

  /// Checks if the gathered \p VL can be represented as multi-register
  /// shuffle(s) of previous tree entries.
  /// \param TE Tree entry checked for permutation.
  /// \param VL List of scalars (a subset of the TE scalars), checked for
  /// permutations.
  /// \returns per-register series of ShuffleKind, if gathered values can be
  /// represented as shuffles of previous tree entries. \p Mask is filled with
  /// the shuffle mask (also on a per-register basis).
  SmallVector<std::optional<TargetTransformInfo::ShuffleKind>>
  isGatherShuffledEntry(
      const TreeEntry *TE, ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask,
      SmallVectorImpl<SmallVector<const TreeEntry *>> &Entries,
      unsigned NumParts);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  /// \param ForPoisonSrc true if the initial vector is poison, false otherwise.
  InstructionCost getGatherCost(ArrayRef<Value *> VL, bool ForPoisonSrc) const;

  /// Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(const TreeEntry *E);

  /// \returns a vector from a collection of scalars in \p VL. If \p Root is not
  /// specified, the starting vector value is poison.
  Value *gather(ArrayRef<Value *> VL, Value *Root);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even if the tree height is tiny.
  bool isFullyVectorizableTinyTree(bool ForReduction) const;

  /// Reorder commutative or alt operands to get a better probability of
  /// generating vectorized code.
  static void reorderInputsAccordingToOpcode(
      ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left,
      SmallVectorImpl<Value *> &Right, const TargetLibraryInfo &TLI,
      const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R);

  /// Helper for `findExternalStoreUsersReorderIndices()`. It iterates over the
  /// users of \p TE and collects the stores. It returns the map from the store
  /// pointers to the collected stores.
  DenseMap<Value *, SmallVector<StoreInst *>>
  collectUserStores(const BoUpSLP::TreeEntry *TE) const;

  /// Helper for `findExternalStoreUsersReorderIndices()`. It checks if the
  /// stores in \p StoresVec can form a vector instruction. If so it returns
  /// true and populates \p ReorderIndices with the shuffle indices of the
  /// stores when compared to the sorted vector.
  bool canFormVector(ArrayRef<StoreInst *> StoresVec,
                     OrdersType &ReorderIndices) const;

  /// Iterates through the users of \p TE, looking for scalar stores that can
  /// potentially be vectorized in a future SLP-tree. If found, it keeps track
  /// of their order and builds an order index vector for each store bundle.
  /// It returns all these order vectors found.
  /// We run this after the tree has been formed, otherwise we may come across
  /// user instructions that are not yet in the tree.
  SmallVector<OrdersType, 1>
  findExternalStoreUsersReorderIndices(TreeEntry *TE) const;

  struct TreeEntry {
    using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
    TreeEntry(VecTreeTy &Container) : Container(Container) {}

    /// \returns Common mask for reorder indices and reused scalars.
    SmallVector<int> getCommonMask() const {
      SmallVector<int> Mask;
      inversePermutation(ReorderIndices, Mask);
      ::addMask(Mask, ReuseShuffleIndices);
      return Mask;
    }

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) {
        if (Mask.size() != VL.size() && VL.size() == Scalars.size())
          return std::equal(VL.begin(), VL.end(), Scalars.begin());
        return VL.size() == Mask.size() &&
               std::equal(VL.begin(), VL.end(), Mask.begin(),
                          [Scalars](Value *V, int Idx) {
                            return (isa<UndefValue>(V) &&
                                    Idx == PoisonMaskElem) ||
                                   (Idx != PoisonMaskElem && V == Scalars[Idx]);
                          });
      };
      if (!ReorderIndices.empty()) {
        // TODO: implement matching if the nodes are just reordered, still can
        // treat the vector as the same if the list of scalars matches VL
        // directly, without reordering.
2513 SmallVector<int> Mask; 2514 inversePermutation(ReorderIndices, Mask); 2515 if (VL.size() == Scalars.size()) 2516 return IsSame(Scalars, Mask); 2517 if (VL.size() == ReuseShuffleIndices.size()) { 2518 ::addMask(Mask, ReuseShuffleIndices); 2519 return IsSame(Scalars, Mask); 2520 } 2521 return false; 2522 } 2523 return IsSame(Scalars, ReuseShuffleIndices); 2524 } 2525 2526 bool isOperandGatherNode(const EdgeInfo &UserEI) const { 2527 return State == TreeEntry::NeedToGather && 2528 UserTreeIndices.front().EdgeIdx == UserEI.EdgeIdx && 2529 UserTreeIndices.front().UserTE == UserEI.UserTE; 2530 } 2531 2532 /// \returns true if current entry has same operands as \p TE. 2533 bool hasEqualOperands(const TreeEntry &TE) const { 2534 if (TE.getNumOperands() != getNumOperands()) 2535 return false; 2536 SmallBitVector Used(getNumOperands()); 2537 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) { 2538 unsigned PrevCount = Used.count(); 2539 for (unsigned K = 0; K < E; ++K) { 2540 if (Used.test(K)) 2541 continue; 2542 if (getOperand(K) == TE.getOperand(I)) { 2543 Used.set(K); 2544 break; 2545 } 2546 } 2547 // Check if we actually found the matching operand. 2548 if (PrevCount == Used.count()) 2549 return false; 2550 } 2551 return true; 2552 } 2553 2554 /// \return Final vectorization factor for the node. Defined by the total 2555 /// number of vectorized scalars, including those, used several times in the 2556 /// entry and counted in the \a ReuseShuffleIndices, if any. 2557 unsigned getVectorFactor() const { 2558 if (!ReuseShuffleIndices.empty()) 2559 return ReuseShuffleIndices.size(); 2560 return Scalars.size(); 2561 }; 2562 2563 /// A vector of scalars. 2564 ValueList Scalars; 2565 2566 /// The Scalars are vectorized into this value. It is initialized to Null. 2567 WeakTrackingVH VectorizedValue = nullptr; 2568 2569 /// New vector phi instructions emitted for the vectorized phi nodes. 2570 PHINode *PHI = nullptr; 2571 2572 /// Do we need to gather this sequence or vectorize it 2573 /// (either with vector instruction or with scatter/gather 2574 /// intrinsics for store/load)? 2575 enum EntryState { 2576 Vectorize, 2577 ScatterVectorize, 2578 PossibleStridedVectorize, 2579 NeedToGather 2580 }; 2581 EntryState State; 2582 2583 /// Does this sequence require some shuffling? 2584 SmallVector<int, 4> ReuseShuffleIndices; 2585 2586 /// Does this entry require reordering? 2587 SmallVector<unsigned, 4> ReorderIndices; 2588 2589 /// Points back to the VectorizableTree. 2590 /// 2591 /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has 2592 /// to be a pointer and needs to be able to initialize the child iterator. 2593 /// Thus we need a reference back to the container to translate the indices 2594 /// to entries. 2595 VecTreeTy &Container; 2596 2597 /// The TreeEntry index containing the user of this entry. We can actually 2598 /// have multiple users so the data structure is not truly a tree. 2599 SmallVector<EdgeInfo, 1> UserTreeIndices; 2600 2601 /// The index of this treeEntry in VectorizableTree. 2602 int Idx = -1; 2603 2604 private: 2605 /// The operands of each instruction in each lane Operands[op_index][lane]. 2606 /// Note: This helps avoid the replication of the code that performs the 2607 /// reordering of operands during buildTree_rec() and vectorizeTree(). 2608 SmallVector<ValueList, 2> Operands; 2609 2610 /// The main/alternate instruction. 
2611 Instruction *MainOp = nullptr; 2612 Instruction *AltOp = nullptr; 2613 2614 public: 2615 /// Set this bundle's \p OpIdx'th operand to \p OpVL. 2616 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) { 2617 if (Operands.size() < OpIdx + 1) 2618 Operands.resize(OpIdx + 1); 2619 assert(Operands[OpIdx].empty() && "Already resized?"); 2620 assert(OpVL.size() <= Scalars.size() && 2621 "Number of operands is greater than the number of scalars."); 2622 Operands[OpIdx].resize(OpVL.size()); 2623 copy(OpVL, Operands[OpIdx].begin()); 2624 } 2625 2626 /// Set the operands of this bundle in their original order. 2627 void setOperandsInOrder() { 2628 assert(Operands.empty() && "Already initialized?"); 2629 auto *I0 = cast<Instruction>(Scalars[0]); 2630 Operands.resize(I0->getNumOperands()); 2631 unsigned NumLanes = Scalars.size(); 2632 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands(); 2633 OpIdx != NumOperands; ++OpIdx) { 2634 Operands[OpIdx].resize(NumLanes); 2635 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { 2636 auto *I = cast<Instruction>(Scalars[Lane]); 2637 assert(I->getNumOperands() == NumOperands && 2638 "Expected same number of operands"); 2639 Operands[OpIdx][Lane] = I->getOperand(OpIdx); 2640 } 2641 } 2642 } 2643 2644 /// Reorders operands of the node to the given mask \p Mask. 2645 void reorderOperands(ArrayRef<int> Mask) { 2646 for (ValueList &Operand : Operands) 2647 reorderScalars(Operand, Mask); 2648 } 2649 2650 /// \returns the \p OpIdx operand of this TreeEntry. 2651 ValueList &getOperand(unsigned OpIdx) { 2652 assert(OpIdx < Operands.size() && "Off bounds"); 2653 return Operands[OpIdx]; 2654 } 2655 2656 /// \returns the \p OpIdx operand of this TreeEntry. 2657 ArrayRef<Value *> getOperand(unsigned OpIdx) const { 2658 assert(OpIdx < Operands.size() && "Off bounds"); 2659 return Operands[OpIdx]; 2660 } 2661 2662 /// \returns the number of operands. 2663 unsigned getNumOperands() const { return Operands.size(); } 2664 2665 /// \return the single \p OpIdx operand. 2666 Value *getSingleOperand(unsigned OpIdx) const { 2667 assert(OpIdx < Operands.size() && "Off bounds"); 2668 assert(!Operands[OpIdx].empty() && "No operand available"); 2669 return Operands[OpIdx][0]; 2670 } 2671 2672 /// Some of the instructions in the list have alternate opcodes. 2673 bool isAltShuffle() const { return MainOp != AltOp; } 2674 2675 bool isOpcodeOrAlt(Instruction *I) const { 2676 unsigned CheckedOpcode = I->getOpcode(); 2677 return (getOpcode() == CheckedOpcode || 2678 getAltOpcode() == CheckedOpcode); 2679 } 2680 2681 /// Chooses the correct key for scheduling data. If \p Op has the same (or 2682 /// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is 2683 /// \p OpValue. 2684 Value *isOneOf(Value *Op) const { 2685 auto *I = dyn_cast<Instruction>(Op); 2686 if (I && isOpcodeOrAlt(I)) 2687 return Op; 2688 return MainOp; 2689 } 2690 2691 void setOperations(const InstructionsState &S) { 2692 MainOp = S.MainOp; 2693 AltOp = S.AltOp; 2694 } 2695 2696 Instruction *getMainOp() const { 2697 return MainOp; 2698 } 2699 2700 Instruction *getAltOp() const { 2701 return AltOp; 2702 } 2703 2704 /// The main/alternate opcodes for the list of instructions. 2705 unsigned getOpcode() const { 2706 return MainOp ? MainOp->getOpcode() : 0; 2707 } 2708 2709 unsigned getAltOpcode() const { 2710 return AltOp ? AltOp->getOpcode() : 0; 2711 } 2712 2713 /// When ReuseReorderShuffleIndices is empty it just returns position of \p 2714 /// V within vector of Scalars. 
Otherwise, try to remap on its reuse index. 2715 int findLaneForValue(Value *V) const { 2716 unsigned FoundLane = std::distance(Scalars.begin(), find(Scalars, V)); 2717 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 2718 if (!ReorderIndices.empty()) 2719 FoundLane = ReorderIndices[FoundLane]; 2720 assert(FoundLane < Scalars.size() && "Couldn't find extract lane"); 2721 if (!ReuseShuffleIndices.empty()) { 2722 FoundLane = std::distance(ReuseShuffleIndices.begin(), 2723 find(ReuseShuffleIndices, FoundLane)); 2724 } 2725 return FoundLane; 2726 } 2727 2728 /// Build a shuffle mask for graph entry which represents a merge of main 2729 /// and alternate operations. 2730 void 2731 buildAltOpShuffleMask(const function_ref<bool(Instruction *)> IsAltOp, 2732 SmallVectorImpl<int> &Mask, 2733 SmallVectorImpl<Value *> *OpScalars = nullptr, 2734 SmallVectorImpl<Value *> *AltScalars = nullptr) const; 2735 2736 #ifndef NDEBUG 2737 /// Debug printer. 2738 LLVM_DUMP_METHOD void dump() const { 2739 dbgs() << Idx << ".\n"; 2740 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) { 2741 dbgs() << "Operand " << OpI << ":\n"; 2742 for (const Value *V : Operands[OpI]) 2743 dbgs().indent(2) << *V << "\n"; 2744 } 2745 dbgs() << "Scalars: \n"; 2746 for (Value *V : Scalars) 2747 dbgs().indent(2) << *V << "\n"; 2748 dbgs() << "State: "; 2749 switch (State) { 2750 case Vectorize: 2751 dbgs() << "Vectorize\n"; 2752 break; 2753 case ScatterVectorize: 2754 dbgs() << "ScatterVectorize\n"; 2755 break; 2756 case PossibleStridedVectorize: 2757 dbgs() << "PossibleStridedVectorize\n"; 2758 break; 2759 case NeedToGather: 2760 dbgs() << "NeedToGather\n"; 2761 break; 2762 } 2763 dbgs() << "MainOp: "; 2764 if (MainOp) 2765 dbgs() << *MainOp << "\n"; 2766 else 2767 dbgs() << "NULL\n"; 2768 dbgs() << "AltOp: "; 2769 if (AltOp) 2770 dbgs() << *AltOp << "\n"; 2771 else 2772 dbgs() << "NULL\n"; 2773 dbgs() << "VectorizedValue: "; 2774 if (VectorizedValue) 2775 dbgs() << *VectorizedValue << "\n"; 2776 else 2777 dbgs() << "NULL\n"; 2778 dbgs() << "ReuseShuffleIndices: "; 2779 if (ReuseShuffleIndices.empty()) 2780 dbgs() << "Empty"; 2781 else 2782 for (int ReuseIdx : ReuseShuffleIndices) 2783 dbgs() << ReuseIdx << ", "; 2784 dbgs() << "\n"; 2785 dbgs() << "ReorderIndices: "; 2786 for (unsigned ReorderIdx : ReorderIndices) 2787 dbgs() << ReorderIdx << ", "; 2788 dbgs() << "\n"; 2789 dbgs() << "UserTreeIndices: "; 2790 for (const auto &EInfo : UserTreeIndices) 2791 dbgs() << EInfo << ", "; 2792 dbgs() << "\n"; 2793 } 2794 #endif 2795 }; 2796 2797 #ifndef NDEBUG 2798 void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost, 2799 InstructionCost VecCost, InstructionCost ScalarCost, 2800 StringRef Banner) const { 2801 dbgs() << "SLP: " << Banner << ":\n"; 2802 E->dump(); 2803 dbgs() << "SLP: Costs:\n"; 2804 dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n"; 2805 dbgs() << "SLP: VectorCost = " << VecCost << "\n"; 2806 dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n"; 2807 dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = " 2808 << ReuseShuffleCost + VecCost - ScalarCost << "\n"; 2809 } 2810 #endif 2811 2812 /// Create a new VectorizableTree entry. 2813 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, 2814 std::optional<ScheduleData *> Bundle, 2815 const InstructionsState &S, 2816 const EdgeInfo &UserTreeIdx, 2817 ArrayRef<int> ReuseShuffleIndices = std::nullopt, 2818 ArrayRef<unsigned> ReorderIndices = std::nullopt) { 2819 TreeEntry::EntryState EntryState = 2820 Bundle ? 
TreeEntry::Vectorize : TreeEntry::NeedToGather; 2821 return newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx, 2822 ReuseShuffleIndices, ReorderIndices); 2823 } 2824 2825 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, 2826 TreeEntry::EntryState EntryState, 2827 std::optional<ScheduleData *> Bundle, 2828 const InstructionsState &S, 2829 const EdgeInfo &UserTreeIdx, 2830 ArrayRef<int> ReuseShuffleIndices = std::nullopt, 2831 ArrayRef<unsigned> ReorderIndices = std::nullopt) { 2832 assert(((!Bundle && EntryState == TreeEntry::NeedToGather) || 2833 (Bundle && EntryState != TreeEntry::NeedToGather)) && 2834 "Need to vectorize gather entry?"); 2835 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree)); 2836 TreeEntry *Last = VectorizableTree.back().get(); 2837 Last->Idx = VectorizableTree.size() - 1; 2838 Last->State = EntryState; 2839 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(), 2840 ReuseShuffleIndices.end()); 2841 if (ReorderIndices.empty()) { 2842 Last->Scalars.assign(VL.begin(), VL.end()); 2843 Last->setOperations(S); 2844 } else { 2845 // Reorder scalars and build final mask. 2846 Last->Scalars.assign(VL.size(), nullptr); 2847 transform(ReorderIndices, Last->Scalars.begin(), 2848 [VL](unsigned Idx) -> Value * { 2849 if (Idx >= VL.size()) 2850 return UndefValue::get(VL.front()->getType()); 2851 return VL[Idx]; 2852 }); 2853 InstructionsState S = getSameOpcode(Last->Scalars, *TLI); 2854 Last->setOperations(S); 2855 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end()); 2856 } 2857 if (Last->State != TreeEntry::NeedToGather) { 2858 for (Value *V : VL) { 2859 const TreeEntry *TE = getTreeEntry(V); 2860 assert((!TE || TE == Last || doesNotNeedToBeScheduled(V)) && 2861 "Scalar already in tree!"); 2862 if (TE) { 2863 if (TE != Last) 2864 MultiNodeScalars.try_emplace(V).first->getSecond().push_back(Last); 2865 continue; 2866 } 2867 ScalarToTreeEntry[V] = Last; 2868 } 2869 // Update the scheduler bundle to point to this TreeEntry. 2870 ScheduleData *BundleMember = *Bundle; 2871 assert((BundleMember || isa<PHINode>(S.MainOp) || 2872 isVectorLikeInstWithConstOps(S.MainOp) || 2873 doesNotNeedToSchedule(VL)) && 2874 "Bundle and VL out of sync"); 2875 if (BundleMember) { 2876 for (Value *V : VL) { 2877 if (doesNotNeedToBeScheduled(V)) 2878 continue; 2879 if (!BundleMember) 2880 continue; 2881 BundleMember->TE = Last; 2882 BundleMember = BundleMember->NextInBundle; 2883 } 2884 } 2885 assert(!BundleMember && "Bundle and VL out of sync"); 2886 } else { 2887 MustGather.insert(VL.begin(), VL.end()); 2888 // Build a map for gathered scalars to the nodes where they are used. 2889 for (Value *V : VL) 2890 if (!isConstant(V)) 2891 ValueToGatherNodes.try_emplace(V).first->getSecond().insert(Last); 2892 } 2893 2894 if (UserTreeIdx.UserTE) 2895 Last->UserTreeIndices.push_back(UserTreeIdx); 2896 2897 return Last; 2898 } 2899 2900 /// -- Vectorization State -- 2901 /// Holds all of the tree entries. 2902 TreeEntry::VecTreeTy VectorizableTree; 2903 2904 #ifndef NDEBUG 2905 /// Debug printer. 
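/// Prints every TreeEntry in VectorizableTree in creation (Idx) order.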
2906 LLVM_DUMP_METHOD void dumpVectorizableTree() const { 2907 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) { 2908 VectorizableTree[Id]->dump(); 2909 dbgs() << "\n"; 2910 } 2911 } 2912 #endif 2913 2914 TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); } 2915 2916 const TreeEntry *getTreeEntry(Value *V) const { 2917 return ScalarToTreeEntry.lookup(V); 2918 } 2919 2920 /// Checks if the specified list of the instructions/values can be vectorized 2921 /// and fills required data before actual scheduling of the instructions. 2922 TreeEntry::EntryState getScalarsVectorizationState( 2923 InstructionsState &S, ArrayRef<Value *> VL, bool IsScatterVectorizeUserTE, 2924 OrdersType &CurrentOrder, SmallVectorImpl<Value *> &PointerOps) const; 2925 2926 /// Maps a specific scalar to its tree entry. 2927 SmallDenseMap<Value *, TreeEntry *> ScalarToTreeEntry; 2928 2929 /// List of scalars, used in several vectorize nodes, and the list of the 2930 /// nodes. 2931 SmallDenseMap<Value *, SmallVector<TreeEntry *>> MultiNodeScalars; 2932 2933 /// Maps a value to the proposed vectorizable size. 2934 SmallDenseMap<Value *, unsigned> InstrElementSize; 2935 2936 /// A list of scalars that we found that we need to keep as scalars. 2937 ValueSet MustGather; 2938 2939 /// A map between the vectorized entries and the last instructions in the 2940 /// bundles. The bundles are built in use order, not in the def order of the 2941 /// instructions. So, we cannot rely directly on the last instruction in the 2942 /// bundle being the last instruction in the program order during 2943 /// vectorization process since the basic blocks are affected, need to 2944 /// pre-gather them before. 2945 DenseMap<const TreeEntry *, Instruction *> EntryToLastInstruction; 2946 2947 /// List of gather nodes, depending on other gather/vector nodes, which should 2948 /// be emitted after the vector instruction emission process to correctly 2949 /// handle order of the vector instructions and shuffles. 2950 SetVector<const TreeEntry *> PostponedGathers; 2951 2952 using ValueToGatherNodesMap = 2953 DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>>; 2954 ValueToGatherNodesMap ValueToGatherNodes; 2955 2956 /// This POD struct describes one external user in the vectorized tree. 2957 struct ExternalUser { 2958 ExternalUser(Value *S, llvm::User *U, int L) 2959 : Scalar(S), User(U), Lane(L) {} 2960 2961 // Which scalar in our function. 2962 Value *Scalar; 2963 2964 // Which user that uses the scalar. 2965 llvm::User *User; 2966 2967 // Which lane does the scalar belong to. 2968 int Lane; 2969 }; 2970 using UserList = SmallVector<ExternalUser, 16>; 2971 2972 /// Checks if two instructions may access the same memory. 2973 /// 2974 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it 2975 /// is invariant in the calling loop. 2976 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1, 2977 Instruction *Inst2) { 2978 if (!Loc1.Ptr || !isSimple(Inst1) || !isSimple(Inst2)) 2979 return true; 2980 // First check if the result is already in the cache. 2981 AliasCacheKey Key = std::make_pair(Inst1, Inst2); 2982 auto It = AliasCache.find(Key); 2983 if (It != AliasCache.end()) 2984 return It->second; 2985 bool Aliased = isModOrRefSet(BatchAA.getModRefInfo(Inst2, Loc1)); 2986 // Store the result in the cache. 
2987 AliasCache.try_emplace(Key, Aliased); 2988 AliasCache.try_emplace(std::make_pair(Inst2, Inst1), Aliased); 2989 return Aliased; 2990 } 2991 2992 using AliasCacheKey = std::pair<Instruction *, Instruction *>; 2993 2994 /// Cache for alias results. 2995 /// TODO: consider moving this to the AliasAnalysis itself. 2996 DenseMap<AliasCacheKey, bool> AliasCache; 2997 2998 // Cache for pointerMayBeCaptured calls inside AA. This is preserved 2999 // globally through SLP because we don't perform any action which 3000 // invalidates capture results. 3001 BatchAAResults BatchAA; 3002 3003 /// Temporary store for deleted instructions. Instructions will be deleted 3004 /// eventually when the BoUpSLP is destructed. The deferral is required to 3005 /// ensure that there are no incorrect collisions in the AliasCache, which 3006 /// can happen if a new instruction is allocated at the same address as a 3007 /// previously deleted instruction. 3008 DenseSet<Instruction *> DeletedInstructions; 3009 3010 /// Set of the instructions already analyzed for reductions. 3011 SmallPtrSet<Instruction *, 16> AnalyzedReductionsRoots; 3012 3013 /// Set of hashes for the list of reduction values already being analyzed. 3014 DenseSet<size_t> AnalyzedReductionVals; 3015 3016 /// A list of values that need to be extracted out of the tree. 3017 /// This list holds pairs of (Internal Scalar : External User). External User 3018 /// can be nullptr; it means that this Internal Scalar will be used later, 3019 /// after vectorization. 3020 UserList ExternalUses; 3021 3022 /// Values used only by @llvm.assume calls. 3023 SmallPtrSet<const Value *, 32> EphValues; 3024 3025 /// Holds all of the instructions that we gathered, shuffle instructions and 3026 /// extractelements. 3027 SetVector<Instruction *> GatherShuffleExtractSeq; 3028 3029 /// A list of blocks that we are going to CSE. 3030 DenseSet<BasicBlock *> CSEBlocks; 3031 3032 /// Contains all scheduling-relevant data for an instruction. 3033 /// A ScheduleData either represents a single instruction or a member of an 3034 /// instruction bundle (= a group of instructions which is combined into a 3035 /// vector instruction). 3036 struct ScheduleData { 3037 // The initial value for the dependency counters. It means that the 3038 // dependencies are not calculated yet.
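// A negative sentinel keeps the "not calculated yet" state distinct from any real (non-negative) dependency count.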
3039 enum { InvalidDeps = -1 }; 3040 3041 ScheduleData() = default; 3042 3043 void init(int BlockSchedulingRegionID, Value *OpVal) { 3044 FirstInBundle = this; 3045 NextInBundle = nullptr; 3046 NextLoadStore = nullptr; 3047 IsScheduled = false; 3048 SchedulingRegionID = BlockSchedulingRegionID; 3049 clearDependencies(); 3050 OpValue = OpVal; 3051 TE = nullptr; 3052 } 3053 3054 /// Verify basic self-consistency properties. 3055 void verify() { 3056 if (hasValidDependencies()) { 3057 assert(UnscheduledDeps <= Dependencies && "invariant"); 3058 } else { 3059 assert(UnscheduledDeps == Dependencies && "invariant"); 3060 } 3061 3062 if (IsScheduled) { 3063 assert(isSchedulingEntity() && 3064 "unexpected scheduled state"); 3065 for (const ScheduleData *BundleMember = this; BundleMember; 3066 BundleMember = BundleMember->NextInBundle) { 3067 assert(BundleMember->hasValidDependencies() && 3068 BundleMember->UnscheduledDeps == 0 && 3069 "unexpected scheduled state"); 3070 assert((BundleMember == this || !BundleMember->IsScheduled) && 3071 "only bundle is marked scheduled"); 3072 } 3073 } 3074 3075 assert(Inst->getParent() == FirstInBundle->Inst->getParent() && 3076 "all bundle members must be in same basic block"); 3077 } 3078 3079 /// Returns true if the dependency information has been calculated. 3080 /// Note that dependency validity can vary between instructions within 3081 /// a single bundle. 3082 bool hasValidDependencies() const { return Dependencies != InvalidDeps; } 3083 3084 /// Returns true for single instructions and for bundle representatives 3085 /// (= the head of a bundle). 3086 bool isSchedulingEntity() const { return FirstInBundle == this; } 3087 3088 /// Returns true if it represents an instruction bundle and not only a 3089 /// single instruction. 3090 bool isPartOfBundle() const { 3091 return NextInBundle != nullptr || FirstInBundle != this || TE; 3092 } 3093 3094 /// Returns true if it is ready for scheduling, i.e. it has no more 3095 /// unscheduled dependent instructions/bundles. 3096 bool isReady() const { 3097 assert(isSchedulingEntity() && 3098 "can't consider non-scheduling entity for ready list"); 3099 return unscheduledDepsInBundle() == 0 && !IsScheduled; 3100 } 3101 3102 /// Modifies the number of unscheduled dependencies for this instruction, 3103 /// and returns the number of remaining dependencies for the containing 3104 /// bundle. 3105 int incrementUnscheduledDeps(int Incr) { 3106 assert(hasValidDependencies() && 3107 "increment of unscheduled deps would be meaningless"); 3108 UnscheduledDeps += Incr; 3109 return FirstInBundle->unscheduledDepsInBundle(); 3110 } 3111 3112 /// Sets the number of unscheduled dependencies to the number of 3113 /// dependencies. 3114 void resetUnscheduledDeps() { 3115 UnscheduledDeps = Dependencies; 3116 } 3117 3118 /// Clears all dependency information.
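/// Resets the dependency counters back to InvalidDeps and drops the recorded memory and control dependencies.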
3119 void clearDependencies() { 3120 Dependencies = InvalidDeps; 3121 resetUnscheduledDeps(); 3122 MemoryDependencies.clear(); 3123 ControlDependencies.clear(); 3124 } 3125 3126 int unscheduledDepsInBundle() const { 3127 assert(isSchedulingEntity() && "only meaningful on the bundle"); 3128 int Sum = 0; 3129 for (const ScheduleData *BundleMember = this; BundleMember; 3130 BundleMember = BundleMember->NextInBundle) { 3131 if (BundleMember->UnscheduledDeps == InvalidDeps) 3132 return InvalidDeps; 3133 Sum += BundleMember->UnscheduledDeps; 3134 } 3135 return Sum; 3136 } 3137 3138 void dump(raw_ostream &os) const { 3139 if (!isSchedulingEntity()) { 3140 os << "/ " << *Inst; 3141 } else if (NextInBundle) { 3142 os << '[' << *Inst; 3143 ScheduleData *SD = NextInBundle; 3144 while (SD) { 3145 os << ';' << *SD->Inst; 3146 SD = SD->NextInBundle; 3147 } 3148 os << ']'; 3149 } else { 3150 os << *Inst; 3151 } 3152 } 3153 3154 Instruction *Inst = nullptr; 3155 3156 /// Opcode of the current instruction in the schedule data. 3157 Value *OpValue = nullptr; 3158 3159 /// The TreeEntry that this instruction corresponds to. 3160 TreeEntry *TE = nullptr; 3161 3162 /// Points to the head in an instruction bundle (and always to this for 3163 /// single instructions). 3164 ScheduleData *FirstInBundle = nullptr; 3165 3166 /// Single linked list of all instructions in a bundle. Null if it is a 3167 /// single instruction. 3168 ScheduleData *NextInBundle = nullptr; 3169 3170 /// Single linked list of all memory instructions (e.g. load, store, call) 3171 /// in the block - until the end of the scheduling region. 3172 ScheduleData *NextLoadStore = nullptr; 3173 3174 /// The dependent memory instructions. 3175 /// This list is derived on demand in calculateDependencies(). 3176 SmallVector<ScheduleData *, 4> MemoryDependencies; 3177 3178 /// List of instructions which this instruction could be control dependent 3179 /// on. Allowing such nodes to be scheduled below this one could introduce 3180 /// a runtime fault which didn't exist in the original program. 3181 /// ex: this is a load or udiv following a readonly call which inf loops 3182 SmallVector<ScheduleData *, 4> ControlDependencies; 3183 3184 /// This ScheduleData is in the current scheduling region if this matches 3185 /// the current SchedulingRegionID of BlockScheduling. 3186 int SchedulingRegionID = 0; 3187 3188 /// Used for getting a "good" final ordering of instructions. 3189 int SchedulingPriority = 0; 3190 3191 /// The number of dependencies. Constitutes of the number of users of the 3192 /// instruction plus the number of dependent memory instructions (if any). 3193 /// This value is calculated on demand. 3194 /// If InvalidDeps, the number of dependencies is not calculated yet. 3195 int Dependencies = InvalidDeps; 3196 3197 /// The number of dependencies minus the number of dependencies of scheduled 3198 /// instructions. As soon as this is zero, the instruction/bundle gets ready 3199 /// for scheduling. 3200 /// Note that this is negative as long as Dependencies is not calculated. 3201 int UnscheduledDeps = InvalidDeps; 3202 3203 /// True if this instruction is scheduled (or considered as scheduled in the 3204 /// dry-run). 
3205 bool IsScheduled = false; 3206 }; 3207 3208 #ifndef NDEBUG 3209 friend inline raw_ostream &operator<<(raw_ostream &os, 3210 const BoUpSLP::ScheduleData &SD) { 3211 SD.dump(os); 3212 return os; 3213 } 3214 #endif 3215 3216 friend struct GraphTraits<BoUpSLP *>; 3217 friend struct DOTGraphTraits<BoUpSLP *>; 3218 3219 /// Contains all scheduling data for a basic block. 3220 /// It does not schedule instructions that are not memory read/write 3221 /// instructions and whose operands are either constants, or arguments, or 3222 /// phis, or instructions from other blocks, or whose users are phis or in 3223 /// other blocks. The resulting vector instructions can be placed at the 3224 /// beginning of the basic block without scheduling (if the operands do not 3225 /// need to be scheduled) or at the end of the block (if the users are 3226 /// outside of the block). This saves some compile time and memory used by 3227 /// the compiler. 3228 /// ScheduleData is assigned to each instruction between the boundaries of 3229 /// the tree entry, even to those that are not part of the graph. It is 3230 /// required to correctly follow the dependencies between the instructions 3231 /// and to schedule them correctly. ScheduleData is not allocated for 3232 /// instructions that do not require scheduling, such as phis, nodes with 3233 /// extractelements/insertelements only, or nodes whose instructions have 3234 /// uses/operands outside of the block. 3235 struct BlockScheduling { 3236 BlockScheduling(BasicBlock *BB) 3237 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {} 3238 3239 void clear() { 3240 ReadyInsts.clear(); 3241 ScheduleStart = nullptr; 3242 ScheduleEnd = nullptr; 3243 FirstLoadStoreInRegion = nullptr; 3244 LastLoadStoreInRegion = nullptr; 3245 RegionHasStackSave = false; 3246 3247 // Reduce the maximum schedule region size by the size of the 3248 // previous scheduling run. 3249 ScheduleRegionSizeLimit -= ScheduleRegionSize; 3250 if (ScheduleRegionSizeLimit < MinScheduleRegionSize) 3251 ScheduleRegionSizeLimit = MinScheduleRegionSize; 3252 ScheduleRegionSize = 0; 3253 3254 // Make a new scheduling region, i.e. all existing ScheduleData is not 3255 // in the new region yet. 3256 ++SchedulingRegionID; 3257 } 3258 3259 ScheduleData *getScheduleData(Instruction *I) { 3260 if (BB != I->getParent()) 3261 // Avoid lookup if can't possibly be in map. 3262 return nullptr; 3263 ScheduleData *SD = ScheduleDataMap.lookup(I); 3264 if (SD && isInSchedulingRegion(SD)) 3265 return SD; 3266 return nullptr; 3267 } 3268 3269 ScheduleData *getScheduleData(Value *V) { 3270 if (auto *I = dyn_cast<Instruction>(V)) 3271 return getScheduleData(I); 3272 return nullptr; 3273 } 3274 3275 ScheduleData *getScheduleData(Value *V, Value *Key) { 3276 if (V == Key) 3277 return getScheduleData(V); 3278 auto I = ExtraScheduleDataMap.find(V); 3279 if (I != ExtraScheduleDataMap.end()) { 3280 ScheduleData *SD = I->second.lookup(Key); 3281 if (SD && isInSchedulingRegion(SD)) 3282 return SD; 3283 } 3284 return nullptr; 3285 } 3286 3287 bool isInSchedulingRegion(ScheduleData *SD) const { 3288 return SD->SchedulingRegionID == SchedulingRegionID; 3289 } 3290 3291 /// Marks an instruction as scheduled and puts all dependent ready 3292 /// instructions into the ready-list.
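/// Walks all members of the bundle and decrements the unscheduled-dependency counters of their def-use, memory and control dependences; every dependent bundle whose counter drops to zero is inserted into \p ReadyList.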
3293 template <typename ReadyListType> 3294 void schedule(ScheduleData *SD, ReadyListType &ReadyList) { 3295 SD->IsScheduled = true; 3296 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n"); 3297 3298 for (ScheduleData *BundleMember = SD; BundleMember; 3299 BundleMember = BundleMember->NextInBundle) { 3300 if (BundleMember->Inst != BundleMember->OpValue) 3301 continue; 3302 3303 // Handle the def-use chain dependencies. 3304 3305 // Decrement the unscheduled counter and insert to ready list if ready. 3306 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) { 3307 doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) { 3308 if (OpDef && OpDef->hasValidDependencies() && 3309 OpDef->incrementUnscheduledDeps(-1) == 0) { 3310 // There are no more unscheduled dependencies after 3311 // decrementing, so we can put the dependent instruction 3312 // into the ready list. 3313 ScheduleData *DepBundle = OpDef->FirstInBundle; 3314 assert(!DepBundle->IsScheduled && 3315 "already scheduled bundle gets ready"); 3316 ReadyList.insert(DepBundle); 3317 LLVM_DEBUG(dbgs() 3318 << "SLP: gets ready (def): " << *DepBundle << "\n"); 3319 } 3320 }); 3321 }; 3322 3323 // If BundleMember is a vector bundle, its operands may have been 3324 // reordered during buildTree(). We therefore need to get its operands 3325 // through the TreeEntry. 3326 if (TreeEntry *TE = BundleMember->TE) { 3327 // Need to search for the lane since the tree entry can be reordered. 3328 int Lane = std::distance(TE->Scalars.begin(), 3329 find(TE->Scalars, BundleMember->Inst)); 3330 assert(Lane >= 0 && "Lane not set"); 3331 3332 // Since vectorization tree is being built recursively this assertion 3333 // ensures that the tree entry has all operands set before reaching 3334 // this code. Couple of exceptions known at the moment are extracts 3335 // where their second (immediate) operand is not added. Since 3336 // immediates do not affect scheduler behavior this is considered 3337 // okay. 3338 auto *In = BundleMember->Inst; 3339 assert(In && 3340 (isa<ExtractValueInst, ExtractElementInst>(In) || 3341 In->getNumOperands() == TE->getNumOperands()) && 3342 "Missed TreeEntry operands?"); 3343 (void)In; // fake use to avoid build failure when assertions disabled 3344 3345 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands(); 3346 OpIdx != NumOperands; ++OpIdx) 3347 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane])) 3348 DecrUnsched(I); 3349 } else { 3350 // If BundleMember is a stand-alone instruction, no operand reordering 3351 // has taken place, so we directly access its operands. 3352 for (Use &U : BundleMember->Inst->operands()) 3353 if (auto *I = dyn_cast<Instruction>(U.get())) 3354 DecrUnsched(I); 3355 } 3356 // Handle the memory dependencies. 3357 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) { 3358 if (MemoryDepSD->hasValidDependencies() && 3359 MemoryDepSD->incrementUnscheduledDeps(-1) == 0) { 3360 // There are no more unscheduled dependencies after decrementing, 3361 // so we can put the dependent instruction into the ready list. 3362 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle; 3363 assert(!DepBundle->IsScheduled && 3364 "already scheduled bundle gets ready"); 3365 ReadyList.insert(DepBundle); 3366 LLVM_DEBUG(dbgs() 3367 << "SLP: gets ready (mem): " << *DepBundle << "\n"); 3368 } 3369 } 3370 // Handle the control dependencies. 
3371 for (ScheduleData *DepSD : BundleMember->ControlDependencies) { 3372 if (DepSD->incrementUnscheduledDeps(-1) == 0) { 3373 // There are no more unscheduled dependencies after decrementing, 3374 // so we can put the dependent instruction into the ready list. 3375 ScheduleData *DepBundle = DepSD->FirstInBundle; 3376 assert(!DepBundle->IsScheduled && 3377 "already scheduled bundle gets ready"); 3378 ReadyList.insert(DepBundle); 3379 LLVM_DEBUG(dbgs() 3380 << "SLP: gets ready (ctl): " << *DepBundle << "\n"); 3381 } 3382 } 3383 } 3384 } 3385 3386 /// Verify basic self consistency properties of the data structure. 3387 void verify() { 3388 if (!ScheduleStart) 3389 return; 3390 3391 assert(ScheduleStart->getParent() == ScheduleEnd->getParent() && 3392 ScheduleStart->comesBefore(ScheduleEnd) && 3393 "Not a valid scheduling region?"); 3394 3395 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3396 auto *SD = getScheduleData(I); 3397 if (!SD) 3398 continue; 3399 assert(isInSchedulingRegion(SD) && 3400 "primary schedule data not in window?"); 3401 assert(isInSchedulingRegion(SD->FirstInBundle) && 3402 "entire bundle in window!"); 3403 (void)SD; 3404 doForAllOpcodes(I, [](ScheduleData *SD) { SD->verify(); }); 3405 } 3406 3407 for (auto *SD : ReadyInsts) { 3408 assert(SD->isSchedulingEntity() && SD->isReady() && 3409 "item in ready list not ready?"); 3410 (void)SD; 3411 } 3412 } 3413 3414 void doForAllOpcodes(Value *V, 3415 function_ref<void(ScheduleData *SD)> Action) { 3416 if (ScheduleData *SD = getScheduleData(V)) 3417 Action(SD); 3418 auto I = ExtraScheduleDataMap.find(V); 3419 if (I != ExtraScheduleDataMap.end()) 3420 for (auto &P : I->second) 3421 if (isInSchedulingRegion(P.second)) 3422 Action(P.second); 3423 } 3424 3425 /// Put all instructions into the ReadyList which are ready for scheduling. 3426 template <typename ReadyListType> 3427 void initialFillReadyList(ReadyListType &ReadyList) { 3428 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 3429 doForAllOpcodes(I, [&](ScheduleData *SD) { 3430 if (SD->isSchedulingEntity() && SD->hasValidDependencies() && 3431 SD->isReady()) { 3432 ReadyList.insert(SD); 3433 LLVM_DEBUG(dbgs() 3434 << "SLP: initially in ready list: " << *SD << "\n"); 3435 } 3436 }); 3437 } 3438 } 3439 3440 /// Build a bundle from the ScheduleData nodes corresponding to the 3441 /// scalar instruction for each lane. 3442 ScheduleData *buildBundle(ArrayRef<Value *> VL); 3443 3444 /// Checks if a bundle of instructions can be scheduled, i.e. has no 3445 /// cyclic dependencies. This is only a dry-run, no instructions are 3446 /// actually moved at this stage. 3447 /// \returns the scheduling bundle. The returned Optional value is not 3448 /// std::nullopt if \p VL is allowed to be scheduled. 3449 std::optional<ScheduleData *> 3450 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 3451 const InstructionsState &S); 3452 3453 /// Un-bundles a group of instructions. 3454 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue); 3455 3456 /// Allocates schedule data chunk. 3457 ScheduleData *allocateScheduleDataChunks(); 3458 3459 /// Extends the scheduling region so that V is inside the region. 3460 /// \returns true if the region size is within the limit. 3461 bool extendSchedulingRegion(Value *V, const InstructionsState &S); 3462 3463 /// Initialize the ScheduleData structures for new instructions in the 3464 /// scheduling region. 
3465 void initScheduleData(Instruction *FromI, Instruction *ToI, 3466 ScheduleData *PrevLoadStore, 3467 ScheduleData *NextLoadStore); 3468 3469 /// Updates the dependency information of a bundle and of all instructions/ 3470 /// bundles which depend on the original bundle. 3471 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList, 3472 BoUpSLP *SLP); 3473 3474 /// Sets all instruction in the scheduling region to un-scheduled. 3475 void resetSchedule(); 3476 3477 BasicBlock *BB; 3478 3479 /// Simple memory allocation for ScheduleData. 3480 SmallVector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks; 3481 3482 /// The size of a ScheduleData array in ScheduleDataChunks. 3483 int ChunkSize; 3484 3485 /// The allocator position in the current chunk, which is the last entry 3486 /// of ScheduleDataChunks. 3487 int ChunkPos; 3488 3489 /// Attaches ScheduleData to Instruction. 3490 /// Note that the mapping survives during all vectorization iterations, i.e. 3491 /// ScheduleData structures are recycled. 3492 DenseMap<Instruction *, ScheduleData *> ScheduleDataMap; 3493 3494 /// Attaches ScheduleData to Instruction with the leading key. 3495 DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>> 3496 ExtraScheduleDataMap; 3497 3498 /// The ready-list for scheduling (only used for the dry-run). 3499 SetVector<ScheduleData *> ReadyInsts; 3500 3501 /// The first instruction of the scheduling region. 3502 Instruction *ScheduleStart = nullptr; 3503 3504 /// The first instruction _after_ the scheduling region. 3505 Instruction *ScheduleEnd = nullptr; 3506 3507 /// The first memory accessing instruction in the scheduling region 3508 /// (can be null). 3509 ScheduleData *FirstLoadStoreInRegion = nullptr; 3510 3511 /// The last memory accessing instruction in the scheduling region 3512 /// (can be null). 3513 ScheduleData *LastLoadStoreInRegion = nullptr; 3514 3515 /// Is there an llvm.stacksave or llvm.stackrestore in the scheduling 3516 /// region? Used to optimize the dependence calculation for the 3517 /// common case where there isn't. 3518 bool RegionHasStackSave = false; 3519 3520 /// The current size of the scheduling region. 3521 int ScheduleRegionSize = 0; 3522 3523 /// The maximum size allowed for the scheduling region. 3524 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget; 3525 3526 /// The ID of the scheduling region. For a new vectorization iteration this 3527 /// is incremented which "removes" all ScheduleData from the region. 3528 /// Make sure that the initial SchedulingRegionID is greater than the 3529 /// initial SchedulingRegionID in ScheduleData (which is 0). 3530 int SchedulingRegionID = 1; 3531 }; 3532 3533 /// Attaches the BlockScheduling structures to basic blocks. 3534 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules; 3535 3536 /// Performs the "real" scheduling. Done before vectorization is actually 3537 /// performed in a basic block. 3538 void scheduleBlock(BlockScheduling *BS); 3539 3540 /// List of users to ignore during scheduling and that don't need extracting. 3541 const SmallDenseSet<Value *> *UserIgnoreList = nullptr; 3542 3543 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of 3544 /// sorted SmallVectors of unsigned. 
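/// The empty and tombstone keys are single-element vectors holding ~1U and ~2U, values that never occur as real lane indices.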
3545 struct OrdersTypeDenseMapInfo { 3546 static OrdersType getEmptyKey() { 3547 OrdersType V; 3548 V.push_back(~1U); 3549 return V; 3550 } 3551 3552 static OrdersType getTombstoneKey() { 3553 OrdersType V; 3554 V.push_back(~2U); 3555 return V; 3556 } 3557 3558 static unsigned getHashValue(const OrdersType &V) { 3559 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end())); 3560 } 3561 3562 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) { 3563 return LHS == RHS; 3564 } 3565 }; 3566 3567 // Analysis and block reference. 3568 Function *F; 3569 ScalarEvolution *SE; 3570 TargetTransformInfo *TTI; 3571 TargetLibraryInfo *TLI; 3572 LoopInfo *LI; 3573 DominatorTree *DT; 3574 AssumptionCache *AC; 3575 DemandedBits *DB; 3576 const DataLayout *DL; 3577 OptimizationRemarkEmitter *ORE; 3578 3579 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt. 3580 unsigned MinVecRegSize; // Set by cl::opt (default: 128). 3581 3582 /// Instruction builder to construct the vectorized tree. 3583 IRBuilder<> Builder; 3584 3585 /// A map of scalar integer values to the smallest bit width with which they 3586 /// can legally be represented. The values map to (width, signed) pairs, 3587 /// where "width" indicates the minimum bit width and "signed" is True if the 3588 /// value must be signed-extended, rather than zero-extended, back to its 3589 /// original width. 3590 DenseMap<const TreeEntry *, std::pair<uint64_t, bool>> MinBWs; 3591 }; 3592 3593 } // end namespace slpvectorizer 3594 3595 template <> struct GraphTraits<BoUpSLP *> { 3596 using TreeEntry = BoUpSLP::TreeEntry; 3597 3598 /// NodeRef has to be a pointer per the GraphWriter. 3599 using NodeRef = TreeEntry *; 3600 3601 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy; 3602 3603 /// Add the VectorizableTree to the index iterator to be able to return 3604 /// TreeEntry pointers. 3605 struct ChildIteratorType 3606 : public iterator_adaptor_base< 3607 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> { 3608 ContainerTy &VectorizableTree; 3609 3610 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W, 3611 ContainerTy &VT) 3612 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {} 3613 3614 NodeRef operator*() { return I->UserTE; } 3615 }; 3616 3617 static NodeRef getEntryNode(BoUpSLP &R) { 3618 return R.VectorizableTree[0].get(); 3619 } 3620 3621 static ChildIteratorType child_begin(NodeRef N) { 3622 return {N->UserTreeIndices.begin(), N->Container}; 3623 } 3624 3625 static ChildIteratorType child_end(NodeRef N) { 3626 return {N->UserTreeIndices.end(), N->Container}; 3627 } 3628 3629 /// For the node iterator we just need to turn the TreeEntry iterator into a 3630 /// TreeEntry* iterator so that it dereferences to NodeRef. 
3631 class nodes_iterator { 3632 using ItTy = ContainerTy::iterator; 3633 ItTy It; 3634 3635 public: 3636 nodes_iterator(const ItTy &It2) : It(It2) {} 3637 NodeRef operator*() { return It->get(); } 3638 nodes_iterator operator++() { 3639 ++It; 3640 return *this; 3641 } 3642 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; } 3643 }; 3644 3645 static nodes_iterator nodes_begin(BoUpSLP *R) { 3646 return nodes_iterator(R->VectorizableTree.begin()); 3647 } 3648 3649 static nodes_iterator nodes_end(BoUpSLP *R) { 3650 return nodes_iterator(R->VectorizableTree.end()); 3651 } 3652 3653 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); } 3654 }; 3655 3656 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits { 3657 using TreeEntry = BoUpSLP::TreeEntry; 3658 3659 DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {} 3660 3661 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) { 3662 std::string Str; 3663 raw_string_ostream OS(Str); 3664 OS << Entry->Idx << ".\n"; 3665 if (isSplat(Entry->Scalars)) 3666 OS << "<splat> "; 3667 for (auto *V : Entry->Scalars) { 3668 OS << *V; 3669 if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) { 3670 return EU.Scalar == V; 3671 })) 3672 OS << " <extract>"; 3673 OS << "\n"; 3674 } 3675 return Str; 3676 } 3677 3678 static std::string getNodeAttributes(const TreeEntry *Entry, 3679 const BoUpSLP *) { 3680 if (Entry->State == TreeEntry::NeedToGather) 3681 return "color=red"; 3682 if (Entry->State == TreeEntry::ScatterVectorize || 3683 Entry->State == TreeEntry::PossibleStridedVectorize) 3684 return "color=blue"; 3685 return ""; 3686 } 3687 }; 3688 3689 } // end namespace llvm 3690 3691 BoUpSLP::~BoUpSLP() { 3692 SmallVector<WeakTrackingVH> DeadInsts; 3693 for (auto *I : DeletedInstructions) { 3694 for (Use &U : I->operands()) { 3695 auto *Op = dyn_cast<Instruction>(U.get()); 3696 if (Op && !DeletedInstructions.count(Op) && Op->hasOneUser() && 3697 wouldInstructionBeTriviallyDead(Op, TLI)) 3698 DeadInsts.emplace_back(Op); 3699 } 3700 I->dropAllReferences(); 3701 } 3702 for (auto *I : DeletedInstructions) { 3703 assert(I->use_empty() && 3704 "trying to erase instruction with users."); 3705 I->eraseFromParent(); 3706 } 3707 3708 // Cleanup any dead scalar code feeding the vectorized instructions 3709 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI); 3710 3711 #ifdef EXPENSIVE_CHECKS 3712 // If we could guarantee that this call is not extremely slow, we could 3713 // remove the ifdef limitation (see PR47712). 3714 assert(!verifyFunction(*F, &dbgs())); 3715 #endif 3716 } 3717 3718 /// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses 3719 /// contains original mask for the scalars reused in the node. Procedure 3720 /// transform this mask in accordance with the given \p Mask. 3721 static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) { 3722 assert(!Mask.empty() && Reuses.size() == Mask.size() && 3723 "Expected non-empty mask."); 3724 SmallVector<int> Prev(Reuses.begin(), Reuses.end()); 3725 Prev.swap(Reuses); 3726 for (unsigned I = 0, E = Prev.size(); I < E; ++I) 3727 if (Mask[I] != PoisonMaskElem) 3728 Reuses[Mask[I]] = Prev[I]; 3729 } 3730 3731 /// Reorders the given \p Order according to the given \p Mask. \p Order - is 3732 /// the original order of the scalars. Procedure transforms the provided order 3733 /// in accordance with the given \p Mask. 
If the resulting \p Order is just an 3734 /// identity order, \p Order is cleared. 3735 static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask) { 3736 assert(!Mask.empty() && "Expected non-empty mask."); 3737 SmallVector<int> MaskOrder; 3738 if (Order.empty()) { 3739 MaskOrder.resize(Mask.size()); 3740 std::iota(MaskOrder.begin(), MaskOrder.end(), 0); 3741 } else { 3742 inversePermutation(Order, MaskOrder); 3743 } 3744 reorderReuses(MaskOrder, Mask); 3745 if (ShuffleVectorInst::isIdentityMask(MaskOrder, MaskOrder.size())) { 3746 Order.clear(); 3747 return; 3748 } 3749 Order.assign(Mask.size(), Mask.size()); 3750 for (unsigned I = 0, E = Mask.size(); I < E; ++I) 3751 if (MaskOrder[I] != PoisonMaskElem) 3752 Order[MaskOrder[I]] = I; 3753 fixupOrderingIndices(Order); 3754 } 3755 3756 std::optional<BoUpSLP::OrdersType> 3757 BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) { 3758 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only."); 3759 unsigned NumScalars = TE.Scalars.size(); 3760 OrdersType CurrentOrder(NumScalars, NumScalars); 3761 SmallVector<int> Positions; 3762 SmallBitVector UsedPositions(NumScalars); 3763 const TreeEntry *STE = nullptr; 3764 // Try to find all gathered scalars that are vectorized in other 3765 // vectorize nodes. Only a single tree vector node is allowed here, so 3766 // that the order of the gathered scalars can be identified correctly. 3767 for (unsigned I = 0; I < NumScalars; ++I) { 3768 Value *V = TE.Scalars[I]; 3769 if (!isa<LoadInst, ExtractElementInst, ExtractValueInst>(V)) 3770 continue; 3771 if (const auto *LocalSTE = getTreeEntry(V)) { 3772 if (!STE) 3773 STE = LocalSTE; 3774 else if (STE != LocalSTE) 3775 // Take the order only from the single vector node. 3776 return std::nullopt; 3777 unsigned Lane = 3778 std::distance(STE->Scalars.begin(), find(STE->Scalars, V)); 3779 if (Lane >= NumScalars) 3780 return std::nullopt; 3781 if (CurrentOrder[Lane] != NumScalars) { 3782 if (Lane != I) 3783 continue; 3784 UsedPositions.reset(CurrentOrder[Lane]); 3785 } 3786 // The partial identity (where only some elements of the gather node are 3787 // in the identity order) is good. 3788 CurrentOrder[Lane] = I; 3789 UsedPositions.set(I); 3790 } 3791 } 3792 // Need to keep the order if we have a vector entry and at least 2 scalars, 3793 // or the vectorized entry has just 2 scalars. 3794 if (STE && (UsedPositions.count() > 1 || STE->Scalars.size() == 2)) { 3795 auto &&IsIdentityOrder = [NumScalars](ArrayRef<unsigned> CurrentOrder) { 3796 for (unsigned I = 0; I < NumScalars; ++I) 3797 if (CurrentOrder[I] != I && CurrentOrder[I] != NumScalars) 3798 return false; 3799 return true; 3800 }; 3801 if (IsIdentityOrder(CurrentOrder)) 3802 return OrdersType(); 3803 auto *It = CurrentOrder.begin(); 3804 for (unsigned I = 0; I < NumScalars;) { 3805 if (UsedPositions.test(I)) { 3806 ++I; 3807 continue; 3808 } 3809 if (*It == NumScalars) { 3810 *It = I; 3811 ++I; 3812 } 3813 ++It; 3814 } 3815 return std::move(CurrentOrder); 3816 } 3817 return std::nullopt; 3818 } 3819 3820 namespace { 3821 /// Tracks the state in which we can represent the loads in the given sequence.
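/// Gather - the loads cannot be combined and have to be gathered one by one; Vectorize - the loads are consecutive and can form a single wide load; ScatterVectorize - the loads can be lowered as a masked gather; PossibleStridedVectorize - like ScatterVectorize, but the pointer differences also suggest a strided access pattern.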
3822 enum class LoadsState { 3823 Gather, 3824 Vectorize, 3825 ScatterVectorize, 3826 PossibleStridedVectorize 3827 }; 3828 } // anonymous namespace 3829 3830 static bool arePointersCompatible(Value *Ptr1, Value *Ptr2, 3831 const TargetLibraryInfo &TLI, 3832 bool CompareOpcodes = true) { 3833 if (getUnderlyingObject(Ptr1) != getUnderlyingObject(Ptr2)) 3834 return false; 3835 auto *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1); 3836 if (!GEP1) 3837 return false; 3838 auto *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2); 3839 if (!GEP2) 3840 return false; 3841 return GEP1->getNumOperands() == 2 && GEP2->getNumOperands() == 2 && 3842 ((isConstant(GEP1->getOperand(1)) && 3843 isConstant(GEP2->getOperand(1))) || 3844 !CompareOpcodes || 3845 getSameOpcode({GEP1->getOperand(1), GEP2->getOperand(1)}, TLI) 3846 .getOpcode()); 3847 } 3848 3849 /// Checks if the given array of loads can be represented as a vectorized, 3850 /// scatter or just simple gather. 3851 static LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0, 3852 const TargetTransformInfo &TTI, 3853 const DataLayout &DL, ScalarEvolution &SE, 3854 LoopInfo &LI, const TargetLibraryInfo &TLI, 3855 SmallVectorImpl<unsigned> &Order, 3856 SmallVectorImpl<Value *> &PointerOps) { 3857 // Check that a vectorized load would load the same memory as a scalar 3858 // load. For example, we don't want to vectorize loads that are smaller 3859 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 3860 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 3861 // from such a struct, we read/write packed bits disagreeing with the 3862 // unvectorized version. 3863 Type *ScalarTy = VL0->getType(); 3864 3865 if (DL.getTypeSizeInBits(ScalarTy) != DL.getTypeAllocSizeInBits(ScalarTy)) 3866 return LoadsState::Gather; 3867 3868 // Make sure all loads in the bundle are simple - we can't vectorize 3869 // atomic or volatile loads. 3870 PointerOps.clear(); 3871 PointerOps.resize(VL.size()); 3872 auto *POIter = PointerOps.begin(); 3873 for (Value *V : VL) { 3874 auto *L = cast<LoadInst>(V); 3875 if (!L->isSimple()) 3876 return LoadsState::Gather; 3877 *POIter = L->getPointerOperand(); 3878 ++POIter; 3879 } 3880 3881 Order.clear(); 3882 // Check the order of pointer operands or that all pointers are the same. 3883 bool IsSorted = sortPtrAccesses(PointerOps, ScalarTy, DL, SE, Order); 3884 if (IsSorted || all_of(PointerOps, [&](Value *P) { 3885 return arePointersCompatible(P, PointerOps.front(), TLI); 3886 })) { 3887 bool IsPossibleStrided = false; 3888 if (IsSorted) { 3889 Value *Ptr0; 3890 Value *PtrN; 3891 if (Order.empty()) { 3892 Ptr0 = PointerOps.front(); 3893 PtrN = PointerOps.back(); 3894 } else { 3895 Ptr0 = PointerOps[Order.front()]; 3896 PtrN = PointerOps[Order.back()]; 3897 } 3898 std::optional<int> Diff = 3899 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, DL, SE); 3900 // Check that the sorted loads are consecutive. 3901 if (static_cast<unsigned>(*Diff) == VL.size() - 1) 3902 return LoadsState::Vectorize; 3903 // Simple check if not a strided access - clear order. 3904 IsPossibleStrided = *Diff % (VL.size() - 1) == 0; 3905 } 3906 // TODO: need to improve analysis of the pointers, if not all of them are 3907 // GEPs or have > 2 operands, we end up with a gather node, which just 3908 // increases the cost. 
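// Gathering the pointers is only treated as profitable when more than two loads are involved and at most half of the pointers are loop-invariant (see ProfitableGatherPointers below).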
3909 Loop *L = LI.getLoopFor(cast<LoadInst>(VL0)->getParent()); 3910 bool ProfitableGatherPointers = 3911 static_cast<unsigned>(count_if(PointerOps, [L](Value *V) { 3912 return L && L->isLoopInvariant(V); 3913 })) <= VL.size() / 2 && VL.size() > 2; 3914 if (ProfitableGatherPointers || all_of(PointerOps, [IsSorted](Value *P) { 3915 auto *GEP = dyn_cast<GetElementPtrInst>(P); 3916 return (IsSorted && !GEP && doesNotNeedToBeScheduled(P)) || 3917 (GEP && GEP->getNumOperands() == 2); 3918 })) { 3919 Align CommonAlignment = cast<LoadInst>(VL0)->getAlign(); 3920 for (Value *V : VL) 3921 CommonAlignment = 3922 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 3923 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 3924 if (TTI.isLegalMaskedGather(VecTy, CommonAlignment) && 3925 !TTI.forceScalarizeMaskedGather(VecTy, CommonAlignment)) 3926 return IsPossibleStrided ? LoadsState::PossibleStridedVectorize 3927 : LoadsState::ScatterVectorize; 3928 } 3929 } 3930 3931 return LoadsState::Gather; 3932 } 3933 3934 static bool clusterSortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy, 3935 const DataLayout &DL, ScalarEvolution &SE, 3936 SmallVectorImpl<unsigned> &SortedIndices) { 3937 assert(llvm::all_of( 3938 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) && 3939 "Expected list of pointer operands."); 3940 // Map from bases to a vector of (Ptr, Offset, OrigIdx), which we insert each 3941 // Ptr into, sort and return the sorted indices with values next to one 3942 // another. 3943 MapVector<Value *, SmallVector<std::tuple<Value *, int, unsigned>>> Bases; 3944 Bases[VL[0]].push_back(std::make_tuple(VL[0], 0U, 0U)); 3945 3946 unsigned Cnt = 1; 3947 for (Value *Ptr : VL.drop_front()) { 3948 bool Found = any_of(Bases, [&](auto &Base) { 3949 std::optional<int> Diff = 3950 getPointersDiff(ElemTy, Base.first, ElemTy, Ptr, DL, SE, 3951 /*StrictCheck=*/true); 3952 if (!Diff) 3953 return false; 3954 3955 Base.second.emplace_back(Ptr, *Diff, Cnt++); 3956 return true; 3957 }); 3958 3959 if (!Found) { 3960 // If we haven't found enough to usefully cluster, return early. 3961 if (Bases.size() > VL.size() / 2 - 1) 3962 return false; 3963 3964 // Not found already - add a new Base 3965 Bases[Ptr].emplace_back(Ptr, 0, Cnt++); 3966 } 3967 } 3968 3969 // For each of the bases sort the pointers by Offset and check if any of the 3970 // base become consecutively allocated. 3971 bool AnyConsecutive = false; 3972 for (auto &Base : Bases) { 3973 auto &Vec = Base.second; 3974 if (Vec.size() > 1) { 3975 llvm::stable_sort(Vec, [](const std::tuple<Value *, int, unsigned> &X, 3976 const std::tuple<Value *, int, unsigned> &Y) { 3977 return std::get<1>(X) < std::get<1>(Y); 3978 }); 3979 int InitialOffset = std::get<1>(Vec[0]); 3980 AnyConsecutive |= all_of(enumerate(Vec), [InitialOffset](const auto &P) { 3981 return std::get<1>(P.value()) == int(P.index()) + InitialOffset; 3982 }); 3983 } 3984 } 3985 3986 // Fill SortedIndices array only if it looks worth-while to sort the ptrs. 
3987 SortedIndices.clear(); 3988 if (!AnyConsecutive) 3989 return false; 3990 3991 for (auto &Base : Bases) { 3992 for (auto &T : Base.second) 3993 SortedIndices.push_back(std::get<2>(T)); 3994 } 3995 3996 assert(SortedIndices.size() == VL.size() && 3997 "Expected SortedIndices to be the size of VL"); 3998 return true; 3999 } 4000 4001 std::optional<BoUpSLP::OrdersType> 4002 BoUpSLP::findPartiallyOrderedLoads(const BoUpSLP::TreeEntry &TE) { 4003 assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only."); 4004 Type *ScalarTy = TE.Scalars[0]->getType(); 4005 4006 SmallVector<Value *> Ptrs; 4007 Ptrs.reserve(TE.Scalars.size()); 4008 for (Value *V : TE.Scalars) { 4009 auto *L = dyn_cast<LoadInst>(V); 4010 if (!L || !L->isSimple()) 4011 return std::nullopt; 4012 Ptrs.push_back(L->getPointerOperand()); 4013 } 4014 4015 BoUpSLP::OrdersType Order; 4016 if (clusterSortPtrAccesses(Ptrs, ScalarTy, *DL, *SE, Order)) 4017 return std::move(Order); 4018 return std::nullopt; 4019 } 4020 4021 /// Check if two insertelement instructions are from the same buildvector. 4022 static bool areTwoInsertFromSameBuildVector( 4023 InsertElementInst *VU, InsertElementInst *V, 4024 function_ref<Value *(InsertElementInst *)> GetBaseOperand) { 4025 // Instructions must be from the same basic blocks. 4026 if (VU->getParent() != V->getParent()) 4027 return false; 4028 // Checks if 2 insertelements are from the same buildvector. 4029 if (VU->getType() != V->getType()) 4030 return false; 4031 // Multiple used inserts are separate nodes. 4032 if (!VU->hasOneUse() && !V->hasOneUse()) 4033 return false; 4034 auto *IE1 = VU; 4035 auto *IE2 = V; 4036 std::optional<unsigned> Idx1 = getInsertIndex(IE1); 4037 std::optional<unsigned> Idx2 = getInsertIndex(IE2); 4038 if (Idx1 == std::nullopt || Idx2 == std::nullopt) 4039 return false; 4040 // Go through the vector operand of insertelement instructions trying to find 4041 // either VU as the original vector for IE2 or V as the original vector for 4042 // IE1. 4043 SmallBitVector ReusedIdx( 4044 cast<VectorType>(VU->getType())->getElementCount().getKnownMinValue()); 4045 bool IsReusedIdx = false; 4046 do { 4047 if (IE2 == VU && !IE1) 4048 return VU->hasOneUse(); 4049 if (IE1 == V && !IE2) 4050 return V->hasOneUse(); 4051 if (IE1 && IE1 != V) { 4052 unsigned Idx1 = getInsertIndex(IE1).value_or(*Idx2); 4053 IsReusedIdx |= ReusedIdx.test(Idx1); 4054 ReusedIdx.set(Idx1); 4055 if ((IE1 != VU && !IE1->hasOneUse()) || IsReusedIdx) 4056 IE1 = nullptr; 4057 else 4058 IE1 = dyn_cast_or_null<InsertElementInst>(GetBaseOperand(IE1)); 4059 } 4060 if (IE2 && IE2 != VU) { 4061 unsigned Idx2 = getInsertIndex(IE2).value_or(*Idx1); 4062 IsReusedIdx |= ReusedIdx.test(Idx2); 4063 ReusedIdx.set(Idx2); 4064 if ((IE2 != V && !IE2->hasOneUse()) || IsReusedIdx) 4065 IE2 = nullptr; 4066 else 4067 IE2 = dyn_cast_or_null<InsertElementInst>(GetBaseOperand(IE2)); 4068 } 4069 } while (!IsReusedIdx && (IE1 || IE2)); 4070 return false; 4071 } 4072 4073 std::optional<BoUpSLP::OrdersType> 4074 BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) { 4075 // No need to reorder if need to shuffle reuses, still need to shuffle the 4076 // node. 4077 if (!TE.ReuseShuffleIndices.empty()) { 4078 // Check if reuse shuffle indices can be improved by reordering. 4079 // For this, check that reuse mask is "clustered", i.e. each scalar values 4080 // is used once in each submask of size <number_of_scalars>. 4081 // Example: 4 scalar values. 
4082 // ReuseShuffleIndices mask: 0, 1, 2, 3, 3, 2, 0, 1 - clustered. 4083 // 0, 1, 2, 3, 3, 3, 1, 0 - not clustered, because 4084 // element 3 is used twice in the second submask. 4085 unsigned Sz = TE.Scalars.size(); 4086 if (!ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices, 4087 Sz)) 4088 return std::nullopt; 4089 unsigned VF = TE.getVectorFactor(); 4090 // Try build correct order for extractelement instructions. 4091 SmallVector<int> ReusedMask(TE.ReuseShuffleIndices.begin(), 4092 TE.ReuseShuffleIndices.end()); 4093 if (TE.getOpcode() == Instruction::ExtractElement && !TE.isAltShuffle() && 4094 all_of(TE.Scalars, [Sz](Value *V) { 4095 std::optional<unsigned> Idx = getExtractIndex(cast<Instruction>(V)); 4096 return Idx && *Idx < Sz; 4097 })) { 4098 SmallVector<int> ReorderMask(Sz, PoisonMaskElem); 4099 if (TE.ReorderIndices.empty()) 4100 std::iota(ReorderMask.begin(), ReorderMask.end(), 0); 4101 else 4102 inversePermutation(TE.ReorderIndices, ReorderMask); 4103 for (unsigned I = 0; I < VF; ++I) { 4104 int &Idx = ReusedMask[I]; 4105 if (Idx == PoisonMaskElem) 4106 continue; 4107 Value *V = TE.Scalars[ReorderMask[Idx]]; 4108 std::optional<unsigned> EI = getExtractIndex(cast<Instruction>(V)); 4109 Idx = std::distance(ReorderMask.begin(), find(ReorderMask, *EI)); 4110 } 4111 } 4112 // Build the order of the VF size, need to reorder reuses shuffles, they are 4113 // always of VF size. 4114 OrdersType ResOrder(VF); 4115 std::iota(ResOrder.begin(), ResOrder.end(), 0); 4116 auto *It = ResOrder.begin(); 4117 for (unsigned K = 0; K < VF; K += Sz) { 4118 OrdersType CurrentOrder(TE.ReorderIndices); 4119 SmallVector<int> SubMask{ArrayRef(ReusedMask).slice(K, Sz)}; 4120 if (SubMask.front() == PoisonMaskElem) 4121 std::iota(SubMask.begin(), SubMask.end(), 0); 4122 reorderOrder(CurrentOrder, SubMask); 4123 transform(CurrentOrder, It, [K](unsigned Pos) { return Pos + K; }); 4124 std::advance(It, Sz); 4125 } 4126 if (all_of(enumerate(ResOrder), 4127 [](const auto &Data) { return Data.index() == Data.value(); })) 4128 return std::nullopt; // No need to reorder. 
4129 return std::move(ResOrder); 4130 } 4131 if ((TE.State == TreeEntry::Vectorize || 4132 TE.State == TreeEntry::PossibleStridedVectorize) && 4133 (isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE.getMainOp()) || 4134 (TopToBottom && isa<StoreInst, InsertElementInst>(TE.getMainOp()))) && 4135 !TE.isAltShuffle()) 4136 return TE.ReorderIndices; 4137 if (TE.State == TreeEntry::Vectorize && TE.getOpcode() == Instruction::PHI) { 4138 auto PHICompare = [&](unsigned I1, unsigned I2) { 4139 Value *V1 = TE.Scalars[I1]; 4140 Value *V2 = TE.Scalars[I2]; 4141 if (V1 == V2) 4142 return false; 4143 if (!V1->hasOneUse() || !V2->hasOneUse()) 4144 return false; 4145 auto *FirstUserOfPhi1 = cast<Instruction>(*V1->user_begin()); 4146 auto *FirstUserOfPhi2 = cast<Instruction>(*V2->user_begin()); 4147 if (auto *IE1 = dyn_cast<InsertElementInst>(FirstUserOfPhi1)) 4148 if (auto *IE2 = dyn_cast<InsertElementInst>(FirstUserOfPhi2)) { 4149 if (!areTwoInsertFromSameBuildVector( 4150 IE1, IE2, 4151 [](InsertElementInst *II) { return II->getOperand(0); })) 4152 return false; 4153 std::optional<unsigned> Idx1 = getInsertIndex(IE1); 4154 std::optional<unsigned> Idx2 = getInsertIndex(IE2); 4155 if (Idx1 == std::nullopt || Idx2 == std::nullopt) 4156 return false; 4157 return *Idx1 < *Idx2; 4158 } 4159 if (auto *EE1 = dyn_cast<ExtractElementInst>(FirstUserOfPhi1)) 4160 if (auto *EE2 = dyn_cast<ExtractElementInst>(FirstUserOfPhi2)) { 4161 if (EE1->getOperand(0) != EE2->getOperand(0)) 4162 return false; 4163 std::optional<unsigned> Idx1 = getExtractIndex(EE1); 4164 std::optional<unsigned> Idx2 = getExtractIndex(EE2); 4165 if (Idx1 == std::nullopt || Idx2 == std::nullopt) 4166 return false; 4167 return *Idx1 < *Idx2; 4168 } 4169 return false; 4170 }; 4171 auto IsIdentityOrder = [](const OrdersType &Order) { 4172 for (unsigned Idx : seq<unsigned>(0, Order.size())) 4173 if (Idx != Order[Idx]) 4174 return false; 4175 return true; 4176 }; 4177 if (!TE.ReorderIndices.empty()) 4178 return TE.ReorderIndices; 4179 DenseMap<unsigned, unsigned> PhiToId; 4180 SmallVector<unsigned> Phis(TE.Scalars.size()); 4181 std::iota(Phis.begin(), Phis.end(), 0); 4182 OrdersType ResOrder(TE.Scalars.size()); 4183 for (unsigned Id = 0, Sz = TE.Scalars.size(); Id < Sz; ++Id) 4184 PhiToId[Id] = Id; 4185 stable_sort(Phis, PHICompare); 4186 for (unsigned Id = 0, Sz = Phis.size(); Id < Sz; ++Id) 4187 ResOrder[Id] = PhiToId[Phis[Id]]; 4188 if (IsIdentityOrder(ResOrder)) 4189 return std::nullopt; // No need to reorder. 4190 return std::move(ResOrder); 4191 } 4192 if (TE.State == TreeEntry::NeedToGather) { 4193 // TODO: add analysis of other gather nodes with extractelement 4194 // instructions and other values/instructions, not only undefs. 4195 if (((TE.getOpcode() == Instruction::ExtractElement && 4196 !TE.isAltShuffle()) || 4197 (all_of(TE.Scalars, 4198 [](Value *V) { 4199 return isa<UndefValue, ExtractElementInst>(V); 4200 }) && 4201 any_of(TE.Scalars, 4202 [](Value *V) { return isa<ExtractElementInst>(V); }))) && 4203 all_of(TE.Scalars, 4204 [](Value *V) { 4205 auto *EE = dyn_cast<ExtractElementInst>(V); 4206 return !EE || isa<FixedVectorType>(EE->getVectorOperandType()); 4207 }) && 4208 allSameType(TE.Scalars)) { 4209 // Check that gather of extractelements can be represented as 4210 // just a shuffle of a single vector. 
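// For example, gathering extractelement %x, 1 and extractelement %x, 0 is just shufflevector %x, poison, <1, 0>.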
4211 OrdersType CurrentOrder; 4212 bool Reuse = canReuseExtract(TE.Scalars, TE.getMainOp(), CurrentOrder, 4213 /*ResizeAllowed=*/true); 4214 if (Reuse || !CurrentOrder.empty()) { 4215 if (!CurrentOrder.empty()) 4216 fixupOrderingIndices(CurrentOrder); 4217 return std::move(CurrentOrder); 4218 } 4219 } 4220 // If the gather node is <undef, v, .., poison> and 4221 // insertelement poison, v, 0 [+ permute] 4222 // is cheaper than 4223 // insertelement poison, v, n - try to reorder. 4224 // If rotating the whole graph, exclude the permute cost, the whole graph 4225 // might be transformed. 4226 int Sz = TE.Scalars.size(); 4227 if (isSplat(TE.Scalars) && !allConstant(TE.Scalars) && 4228 count_if(TE.Scalars, UndefValue::classof) == Sz - 1) { 4229 const auto *It = 4230 find_if(TE.Scalars, [](Value *V) { return !isConstant(V); }); 4231 if (It == TE.Scalars.begin()) 4232 return OrdersType(); 4233 auto *Ty = FixedVectorType::get(TE.Scalars.front()->getType(), Sz); 4234 if (It != TE.Scalars.end()) { 4235 OrdersType Order(Sz, Sz); 4236 unsigned Idx = std::distance(TE.Scalars.begin(), It); 4237 Order[Idx] = 0; 4238 fixupOrderingIndices(Order); 4239 SmallVector<int> Mask; 4240 inversePermutation(Order, Mask); 4241 InstructionCost PermuteCost = 4242 TopToBottom 4243 ? 0 4244 : TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty, Mask); 4245 InstructionCost InsertFirstCost = TTI->getVectorInstrCost( 4246 Instruction::InsertElement, Ty, TTI::TCK_RecipThroughput, 0, 4247 PoisonValue::get(Ty), *It); 4248 InstructionCost InsertIdxCost = TTI->getVectorInstrCost( 4249 Instruction::InsertElement, Ty, TTI::TCK_RecipThroughput, Idx, 4250 PoisonValue::get(Ty), *It); 4251 if (InsertFirstCost + PermuteCost < InsertIdxCost) 4252 return std::move(Order); 4253 } 4254 } 4255 if (std::optional<OrdersType> CurrentOrder = findReusedOrderedScalars(TE)) 4256 return CurrentOrder; 4257 if (TE.Scalars.size() >= 4) 4258 if (std::optional<OrdersType> Order = findPartiallyOrderedLoads(TE)) 4259 return Order; 4260 } 4261 return std::nullopt; 4262 } 4263 4264 /// Checks if the given mask is a "clustered" mask with the same clusters of 4265 /// size \p Sz, which are not identity submasks. 4266 static bool isRepeatedNonIdentityClusteredMask(ArrayRef<int> Mask, 4267 unsigned Sz) { 4268 ArrayRef<int> FirstCluster = Mask.slice(0, Sz); 4269 if (ShuffleVectorInst::isIdentityMask(FirstCluster, Sz)) 4270 return false; 4271 for (unsigned I = Sz, E = Mask.size(); I < E; I += Sz) { 4272 ArrayRef<int> Cluster = Mask.slice(I, Sz); 4273 if (Cluster != FirstCluster) 4274 return false; 4275 } 4276 return true; 4277 } 4278 4279 void BoUpSLP::reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const { 4280 // Reorder reuses mask. 4281 reorderReuses(TE.ReuseShuffleIndices, Mask); 4282 const unsigned Sz = TE.Scalars.size(); 4283 // For vectorized and non-clustered reused no need to do anything else. 4284 if (TE.State != TreeEntry::NeedToGather || 4285 !ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices, 4286 Sz) || 4287 !isRepeatedNonIdentityClusteredMask(TE.ReuseShuffleIndices, Sz)) 4288 return; 4289 SmallVector<int> NewMask; 4290 inversePermutation(TE.ReorderIndices, NewMask); 4291 addMask(NewMask, TE.ReuseShuffleIndices); 4292 // Clear reorder since it is going to be applied to the new mask. 4293 TE.ReorderIndices.clear(); 4294 // Try to improve gathered nodes with clustered reuses, if possible. 
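// Reorder the scalars by the first cluster of the combined mask so that each reuse submask below becomes a plain identity.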
4295 ArrayRef<int> Slice = ArrayRef(NewMask).slice(0, Sz); 4296 SmallVector<unsigned> NewOrder(Slice.begin(), Slice.end()); 4297 inversePermutation(NewOrder, NewMask); 4298 reorderScalars(TE.Scalars, NewMask); 4299 // Fill the reuses mask with the identity submasks. 4300 for (auto *It = TE.ReuseShuffleIndices.begin(), 4301 *End = TE.ReuseShuffleIndices.end(); 4302 It != End; std::advance(It, Sz)) 4303 std::iota(It, std::next(It, Sz), 0); 4304 } 4305 4306 void BoUpSLP::reorderTopToBottom() { 4307 // Maps VF to the graph nodes. 4308 DenseMap<unsigned, SetVector<TreeEntry *>> VFToOrderedEntries; 4309 // ExtractElement gather nodes which can be vectorized and need to handle 4310 // their ordering. 4311 DenseMap<const TreeEntry *, OrdersType> GathersToOrders; 4312 4313 // Phi nodes can have preferred ordering based on their result users. 4314 DenseMap<const TreeEntry *, OrdersType> PhisToOrders; 4315 4316 // AltShuffles can also have a preferred ordering that leads to fewer 4317 // instructions, e.g., the addsub instruction in x86. 4318 DenseMap<const TreeEntry *, OrdersType> AltShufflesToOrders; 4319 4320 // Maps a TreeEntry to the reorder indices of external users. 4321 DenseMap<const TreeEntry *, SmallVector<OrdersType, 1>> 4322 ExternalUserReorderMap; 4323 // FIXME: Workaround for syntax error reported by MSVC buildbots. 4324 TargetTransformInfo &TTIRef = *TTI; 4325 // Find all reorderable nodes with the given VF. 4326 // Currently these are vectorized stores, loads, extracts + some gathering of 4327 // extracts. 4328 for_each(VectorizableTree, [this, &TTIRef, &VFToOrderedEntries, 4329 &GathersToOrders, &ExternalUserReorderMap, 4330 &AltShufflesToOrders, &PhisToOrders]( 4331 const std::unique_ptr<TreeEntry> &TE) { 4332 // Look for external users that will probably be vectorized. 4333 SmallVector<OrdersType, 1> ExternalUserReorderIndices = 4334 findExternalStoreUsersReorderIndices(TE.get()); 4335 if (!ExternalUserReorderIndices.empty()) { 4336 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get()); 4337 ExternalUserReorderMap.try_emplace(TE.get(), 4338 std::move(ExternalUserReorderIndices)); 4339 } 4340 4341 // Patterns like [fadd,fsub] can be combined into a single instruction in 4342 // x86. Reordering them into [fsub,fadd] blocks this pattern. So we need 4343 // to take into account their order when looking for the most used order. 4344 if (TE->isAltShuffle()) { 4345 VectorType *VecTy = 4346 FixedVectorType::get(TE->Scalars[0]->getType(), TE->Scalars.size()); 4347 unsigned Opcode0 = TE->getOpcode(); 4348 unsigned Opcode1 = TE->getAltOpcode(); 4349 // The opcode mask selects between the two opcodes. 4350 SmallBitVector OpcodeMask(TE->Scalars.size(), false); 4351 for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size())) 4352 if (cast<Instruction>(TE->Scalars[Lane])->getOpcode() == Opcode1) 4353 OpcodeMask.set(Lane); 4354 // If this pattern is supported by the target then we consider the order. 4355 if (TTIRef.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask)) { 4356 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get()); 4357 AltShufflesToOrders.try_emplace(TE.get(), OrdersType()); 4358 } 4359 // TODO: Check the reverse order too. 4360 } 4361 4362 if (std::optional<OrdersType> CurrentOrder = 4363 getReorderingData(*TE, /*TopToBottom=*/true)) { 4364 // Do not include ordering for nodes used in the alt opcode vectorization; 4365 // it is better to reorder them during the bottom-to-top stage.
If we follow the order 4366 // here, it causes reordering of the whole graph though actually it is 4367 // profitable just to reorder the subgraph that starts from the alternate 4368 // opcode vectorization node. Such nodes already end up with the shuffle 4369 // instruction and it is just enough to change this shuffle rather than 4370 // rotate the scalars for the whole graph. 4371 unsigned Cnt = 0; 4372 const TreeEntry *UserTE = TE.get(); 4373 while (UserTE && Cnt < RecursionMaxDepth) { 4374 if (UserTE->UserTreeIndices.size() != 1) 4375 break; 4376 if (all_of(UserTE->UserTreeIndices, [](const EdgeInfo &EI) { 4377 return EI.UserTE->State == TreeEntry::Vectorize && 4378 EI.UserTE->isAltShuffle() && EI.UserTE->Idx != 0; 4379 })) 4380 return; 4381 UserTE = UserTE->UserTreeIndices.back().UserTE; 4382 ++Cnt; 4383 } 4384 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get()); 4385 if (!(TE->State == TreeEntry::Vectorize || 4386 TE->State == TreeEntry::PossibleStridedVectorize) || 4387 !TE->ReuseShuffleIndices.empty()) 4388 GathersToOrders.try_emplace(TE.get(), *CurrentOrder); 4389 if (TE->State == TreeEntry::Vectorize && 4390 TE->getOpcode() == Instruction::PHI) 4391 PhisToOrders.try_emplace(TE.get(), *CurrentOrder); 4392 } 4393 }); 4394 4395 // Reorder the graph nodes according to their vectorization factor. 4396 for (unsigned VF = VectorizableTree.front()->getVectorFactor(); VF > 1; 4397 VF /= 2) { 4398 auto It = VFToOrderedEntries.find(VF); 4399 if (It == VFToOrderedEntries.end()) 4400 continue; 4401 // Try to find the most profitable order. We are just looking for the most 4402 // used order and reorder scalar elements in the nodes according to this 4403 // most used order. 4404 ArrayRef<TreeEntry *> OrderedEntries = It->second.getArrayRef(); 4405 // All operands are reordered and used only in this node - propagate the 4406 // most used order to the user node. 4407 MapVector<OrdersType, unsigned, 4408 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>> 4409 OrdersUses; 4410 // Last chance orders - scatter vectorize. Try to use their orders if there 4411 // are no other orders or if the order is already counted. 4412 SmallVector<OrdersType> StridedVectorizeOrders; 4413 SmallPtrSet<const TreeEntry *, 4> VisitedOps; 4414 for (const TreeEntry *OpTE : OrderedEntries) { 4415 // No need to reorder these nodes; still need to extend and to use a 4416 // shuffle, just need to merge the reordering shuffle and the reuse shuffle. 4417 if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE)) 4418 continue; 4419 // Count the number of order uses. 4420 const auto &Order = [OpTE, &GathersToOrders, &AltShufflesToOrders, 4421 &PhisToOrders]() -> const OrdersType & { 4422 if (OpTE->State == TreeEntry::NeedToGather || 4423 !OpTE->ReuseShuffleIndices.empty()) { 4424 auto It = GathersToOrders.find(OpTE); 4425 if (It != GathersToOrders.end()) 4426 return It->second; 4427 } 4428 if (OpTE->isAltShuffle()) { 4429 auto It = AltShufflesToOrders.find(OpTE); 4430 if (It != AltShufflesToOrders.end()) 4431 return It->second; 4432 } 4433 if (OpTE->State == TreeEntry::Vectorize && 4434 OpTE->getOpcode() == Instruction::PHI) { 4435 auto It = PhisToOrders.find(OpTE); 4436 if (It != PhisToOrders.end()) 4437 return It->second; 4438 } 4439 return OpTE->ReorderIndices; 4440 }(); 4441 // First consider the order of the external scalar users.
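// These orders come from external store users of this entry's scalars: if
// the scalars also feed a group of potentially vectorizable consecutive
// stores whose pointer order differs from the lane order, that store order
// is counted here as a reordering candidate.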
4442 auto It = ExternalUserReorderMap.find(OpTE); 4443 if (It != ExternalUserReorderMap.end()) { 4444 const auto &ExternalUserReorderIndices = It->second; 4445 // If the OpTE vector factor != number of scalars - use natural order, 4446 // it is an attempt to reorder node with reused scalars but with 4447 // external uses. 4448 if (OpTE->getVectorFactor() != OpTE->Scalars.size()) { 4449 OrdersUses.insert(std::make_pair(OrdersType(), 0)).first->second += 4450 ExternalUserReorderIndices.size(); 4451 } else { 4452 for (const OrdersType &ExtOrder : ExternalUserReorderIndices) 4453 ++OrdersUses.insert(std::make_pair(ExtOrder, 0)).first->second; 4454 } 4455 // No other useful reorder data in this entry. 4456 if (Order.empty()) 4457 continue; 4458 } 4459 // Postpone scatter orders. 4460 if (OpTE->State == TreeEntry::PossibleStridedVectorize) { 4461 StridedVectorizeOrders.push_back(Order); 4462 continue; 4463 } 4464 // Stores actually store the mask, not the order, need to invert. 4465 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && 4466 OpTE->getOpcode() == Instruction::Store && !Order.empty()) { 4467 SmallVector<int> Mask; 4468 inversePermutation(Order, Mask); 4469 unsigned E = Order.size(); 4470 OrdersType CurrentOrder(E, E); 4471 transform(Mask, CurrentOrder.begin(), [E](int Idx) { 4472 return Idx == PoisonMaskElem ? E : static_cast<unsigned>(Idx); 4473 }); 4474 fixupOrderingIndices(CurrentOrder); 4475 ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second; 4476 } else { 4477 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second; 4478 } 4479 } 4480 // Set order of the user node. 4481 if (OrdersUses.empty()) { 4482 if (StridedVectorizeOrders.empty()) 4483 continue; 4484 // Add (potentially!) strided vectorize orders. 4485 for (OrdersType &Order : StridedVectorizeOrders) 4486 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second; 4487 } else { 4488 // Account (potentially!) strided vectorize orders only if it was used 4489 // already. 4490 for (OrdersType &Order : StridedVectorizeOrders) { 4491 auto *It = OrdersUses.find(Order); 4492 if (It != OrdersUses.end()) 4493 ++It->second; 4494 } 4495 } 4496 // Choose the most used order. 4497 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 4498 unsigned Cnt = OrdersUses.front().second; 4499 for (const auto &Pair : drop_begin(OrdersUses)) { 4500 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 4501 BestOrder = Pair.first; 4502 Cnt = Pair.second; 4503 } 4504 } 4505 // Set order of the user node. 4506 if (BestOrder.empty()) 4507 continue; 4508 SmallVector<int> Mask; 4509 inversePermutation(BestOrder, Mask); 4510 SmallVector<int> MaskOrder(BestOrder.size(), PoisonMaskElem); 4511 unsigned E = BestOrder.size(); 4512 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 4513 return I < E ? static_cast<int>(I) : PoisonMaskElem; 4514 }); 4515 // Do an actual reordering, if profitable. 4516 for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 4517 // Just do the reordering for the nodes with the given VF. 4518 if (TE->Scalars.size() != VF) { 4519 if (TE->ReuseShuffleIndices.size() == VF) { 4520 // Need to reorder the reuses masks of the operands with smaller VF to 4521 // be able to find the match between the graph nodes and scalar 4522 // operands of the given node during vectorization/cost estimation. 
4523 assert(all_of(TE->UserTreeIndices, 4524 [VF, &TE](const EdgeInfo &EI) { 4525 return EI.UserTE->Scalars.size() == VF || 4526 EI.UserTE->Scalars.size() == 4527 TE->Scalars.size(); 4528 }) && 4529 "All users must be of VF size."); 4530 // Update ordering of the operands with the smaller VF than the given 4531 // one. 4532 reorderNodeWithReuses(*TE, Mask); 4533 } 4534 continue; 4535 } 4536 if ((TE->State == TreeEntry::Vectorize || 4537 TE->State == TreeEntry::PossibleStridedVectorize) && 4538 isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst, 4539 InsertElementInst>(TE->getMainOp()) && 4540 !TE->isAltShuffle()) { 4541 // Build correct orders for extract{element,value}, loads and 4542 // stores. 4543 reorderOrder(TE->ReorderIndices, Mask); 4544 if (isa<InsertElementInst, StoreInst>(TE->getMainOp())) 4545 TE->reorderOperands(Mask); 4546 } else { 4547 // Reorder the node and its operands. 4548 TE->reorderOperands(Mask); 4549 assert(TE->ReorderIndices.empty() && 4550 "Expected empty reorder sequence."); 4551 reorderScalars(TE->Scalars, Mask); 4552 } 4553 if (!TE->ReuseShuffleIndices.empty()) { 4554 // Apply reversed order to keep the original ordering of the reused 4555 // elements to avoid extra reorder indices shuffling. 4556 OrdersType CurrentOrder; 4557 reorderOrder(CurrentOrder, MaskOrder); 4558 SmallVector<int> NewReuses; 4559 inversePermutation(CurrentOrder, NewReuses); 4560 addMask(NewReuses, TE->ReuseShuffleIndices); 4561 TE->ReuseShuffleIndices.swap(NewReuses); 4562 } 4563 } 4564 } 4565 } 4566 4567 bool BoUpSLP::canReorderOperands( 4568 TreeEntry *UserTE, SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges, 4569 ArrayRef<TreeEntry *> ReorderableGathers, 4570 SmallVectorImpl<TreeEntry *> &GatherOps) { 4571 for (unsigned I = 0, E = UserTE->getNumOperands(); I < E; ++I) { 4572 if (any_of(Edges, [I](const std::pair<unsigned, TreeEntry *> &OpData) { 4573 return OpData.first == I && 4574 OpData.second->State == TreeEntry::Vectorize; 4575 })) 4576 continue; 4577 if (TreeEntry *TE = getVectorizedOperand(UserTE, I)) { 4578 // FIXME: Do not reorder (possible!) strided vectorized nodes, they 4579 // require reordering of the operands, which is not implemented yet. 4580 if (TE->State == TreeEntry::PossibleStridedVectorize) 4581 return false; 4582 // Do not reorder if operand node is used by many user nodes. 4583 if (any_of(TE->UserTreeIndices, 4584 [UserTE](const EdgeInfo &EI) { return EI.UserTE != UserTE; })) 4585 return false; 4586 // Add the node to the list of the ordered nodes with the identity 4587 // order. 4588 Edges.emplace_back(I, TE); 4589 // Add ScatterVectorize nodes to the list of operands, where just 4590 // reordering of the scalars is required. Similar to the gathers, so 4591 // simply add to the list of gathered ops. 4592 // If there are reused scalars, process this node as a regular vectorize 4593 // node, just reorder reuses mask. 
4594 if (TE->State != TreeEntry::Vectorize && 4595 TE->ReuseShuffleIndices.empty() && TE->ReorderIndices.empty()) 4596 GatherOps.push_back(TE); 4597 continue; 4598 } 4599 TreeEntry *Gather = nullptr; 4600 if (count_if(ReorderableGathers, 4601 [&Gather, UserTE, I](TreeEntry *TE) { 4602 assert(TE->State != TreeEntry::Vectorize && 4603 "Only non-vectorized nodes are expected."); 4604 if (any_of(TE->UserTreeIndices, 4605 [UserTE, I](const EdgeInfo &EI) { 4606 return EI.UserTE == UserTE && EI.EdgeIdx == I; 4607 })) { 4608 assert(TE->isSame(UserTE->getOperand(I)) && 4609 "Operand entry does not match operands."); 4610 Gather = TE; 4611 return true; 4612 } 4613 return false; 4614 }) > 1 && 4615 !allConstant(UserTE->getOperand(I))) 4616 return false; 4617 if (Gather) 4618 GatherOps.push_back(Gather); 4619 } 4620 return true; 4621 } 4622 4623 void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) { 4624 SetVector<TreeEntry *> OrderedEntries; 4625 DenseMap<const TreeEntry *, OrdersType> GathersToOrders; 4626 // Find all reorderable leaf nodes with the given VF. 4627 // Currently the are vectorized loads,extracts without alternate operands + 4628 // some gathering of extracts. 4629 SmallVector<TreeEntry *> NonVectorized; 4630 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) { 4631 if (TE->State != TreeEntry::Vectorize && 4632 TE->State != TreeEntry::PossibleStridedVectorize) 4633 NonVectorized.push_back(TE.get()); 4634 if (std::optional<OrdersType> CurrentOrder = 4635 getReorderingData(*TE, /*TopToBottom=*/false)) { 4636 OrderedEntries.insert(TE.get()); 4637 if (!(TE->State == TreeEntry::Vectorize || 4638 TE->State == TreeEntry::PossibleStridedVectorize) || 4639 !TE->ReuseShuffleIndices.empty()) 4640 GathersToOrders.try_emplace(TE.get(), *CurrentOrder); 4641 } 4642 } 4643 4644 // 1. Propagate order to the graph nodes, which use only reordered nodes. 4645 // I.e., if the node has operands, that are reordered, try to make at least 4646 // one operand order in the natural order and reorder others + reorder the 4647 // user node itself. 4648 SmallPtrSet<const TreeEntry *, 4> Visited; 4649 while (!OrderedEntries.empty()) { 4650 // 1. Filter out only reordered nodes. 4651 // 2. If the entry has multiple uses - skip it and jump to the next node. 4652 DenseMap<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users; 4653 SmallVector<TreeEntry *> Filtered; 4654 for (TreeEntry *TE : OrderedEntries) { 4655 if (!(TE->State == TreeEntry::Vectorize || 4656 TE->State == TreeEntry::PossibleStridedVectorize || 4657 (TE->State == TreeEntry::NeedToGather && 4658 GathersToOrders.count(TE))) || 4659 TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() || 4660 !all_of(drop_begin(TE->UserTreeIndices), 4661 [TE](const EdgeInfo &EI) { 4662 return EI.UserTE == TE->UserTreeIndices.front().UserTE; 4663 }) || 4664 !Visited.insert(TE).second) { 4665 Filtered.push_back(TE); 4666 continue; 4667 } 4668 // Build a map between user nodes and their operands order to speedup 4669 // search. The graph currently does not provide this dependency directly. 4670 for (EdgeInfo &EI : TE->UserTreeIndices) { 4671 TreeEntry *UserTE = EI.UserTE; 4672 auto It = Users.find(UserTE); 4673 if (It == Users.end()) 4674 It = Users.insert({UserTE, {}}).first; 4675 It->second.emplace_back(EI.EdgeIdx, TE); 4676 } 4677 } 4678 // Erase filtered entries. 
4679 for (TreeEntry *TE : Filtered) 4680 OrderedEntries.remove(TE); 4681 SmallVector< 4682 std::pair<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>>> 4683 UsersVec(Users.begin(), Users.end()); 4684 sort(UsersVec, [](const auto &Data1, const auto &Data2) { 4685 return Data1.first->Idx > Data2.first->Idx; 4686 }); 4687 for (auto &Data : UsersVec) { 4688 // Check that operands are used only in the User node. 4689 SmallVector<TreeEntry *> GatherOps; 4690 if (!canReorderOperands(Data.first, Data.second, NonVectorized, 4691 GatherOps)) { 4692 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) 4693 OrderedEntries.remove(Op.second); 4694 continue; 4695 } 4696 // All operands are reordered and used only in this node - propagate the 4697 // most used order to the user node. 4698 MapVector<OrdersType, unsigned, 4699 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>> 4700 OrdersUses; 4701 // Last chance orders - scatter vectorize. Try to use their orders if there 4702 // are no other orders or if the order is already counted. 4703 SmallVector<std::pair<OrdersType, unsigned>> StridedVectorizeOrders; 4704 // Do the analysis for each tree entry only once, otherwise the order of 4705 // the same node may be considered several times, though it might not be 4706 // profitable. 4707 SmallPtrSet<const TreeEntry *, 4> VisitedOps; 4708 SmallPtrSet<const TreeEntry *, 4> VisitedUsers; 4709 for (const auto &Op : Data.second) { 4710 TreeEntry *OpTE = Op.second; 4711 if (!VisitedOps.insert(OpTE).second) 4712 continue; 4713 if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE)) 4714 continue; 4715 const auto &Order = [OpTE, &GathersToOrders]() -> const OrdersType & { 4716 if (OpTE->State == TreeEntry::NeedToGather || 4717 !OpTE->ReuseShuffleIndices.empty()) 4718 return GathersToOrders.find(OpTE)->second; 4719 return OpTE->ReorderIndices; 4720 }(); 4721 unsigned NumOps = count_if( 4722 Data.second, [OpTE](const std::pair<unsigned, TreeEntry *> &P) { 4723 return P.second == OpTE; 4724 }); 4725 // Postpone scatter orders. 4726 if (OpTE->State == TreeEntry::PossibleStridedVectorize) { 4727 StridedVectorizeOrders.emplace_back(Order, NumOps); 4728 continue; 4729 } 4730 // Stores actually store the mask, not the order, need to invert. 4731 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() && 4732 OpTE->getOpcode() == Instruction::Store && !Order.empty()) { 4733 SmallVector<int> Mask; 4734 inversePermutation(Order, Mask); 4735 unsigned E = Order.size(); 4736 OrdersType CurrentOrder(E, E); 4737 transform(Mask, CurrentOrder.begin(), [E](int Idx) { 4738 return Idx == PoisonMaskElem ?
E : static_cast<unsigned>(Idx); 4739 }); 4740 fixupOrderingIndices(CurrentOrder); 4741 OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second += 4742 NumOps; 4743 } else { 4744 OrdersUses.insert(std::make_pair(Order, 0)).first->second += NumOps; 4745 } 4746 auto Res = OrdersUses.insert(std::make_pair(OrdersType(), 0)); 4747 const auto &&AllowsReordering = [IgnoreReorder, &GathersToOrders]( 4748 const TreeEntry *TE) { 4749 if (!TE->ReorderIndices.empty() || !TE->ReuseShuffleIndices.empty() || 4750 (TE->State == TreeEntry::Vectorize && TE->isAltShuffle()) || 4751 (IgnoreReorder && TE->Idx == 0)) 4752 return true; 4753 if (TE->State == TreeEntry::NeedToGather) { 4754 auto It = GathersToOrders.find(TE); 4755 if (It != GathersToOrders.end()) 4756 return !It->second.empty(); 4757 return true; 4758 } 4759 return false; 4760 }; 4761 for (const EdgeInfo &EI : OpTE->UserTreeIndices) { 4762 TreeEntry *UserTE = EI.UserTE; 4763 if (!VisitedUsers.insert(UserTE).second) 4764 continue; 4765 // May reorder user node if it requires reordering, has reused 4766 // scalars, is an alternate op vectorize node or its op nodes require 4767 // reordering. 4768 if (AllowsReordering(UserTE)) 4769 continue; 4770 // Check if users allow reordering. 4771 // Currently look up just 1 level of operands to avoid increase of 4772 // the compile time. 4773 // Profitable to reorder if definitely more operands allow 4774 // reordering rather than those with natural order. 4775 ArrayRef<std::pair<unsigned, TreeEntry *>> Ops = Users[UserTE]; 4776 if (static_cast<unsigned>(count_if( 4777 Ops, [UserTE, &AllowsReordering]( 4778 const std::pair<unsigned, TreeEntry *> &Op) { 4779 return AllowsReordering(Op.second) && 4780 all_of(Op.second->UserTreeIndices, 4781 [UserTE](const EdgeInfo &EI) { 4782 return EI.UserTE == UserTE; 4783 }); 4784 })) <= Ops.size() / 2) 4785 ++Res.first->second; 4786 } 4787 } 4788 // If no orders - skip current nodes and jump to the next one, if any. 4789 if (OrdersUses.empty()) { 4790 if (StridedVectorizeOrders.empty() || 4791 (Data.first->ReorderIndices.empty() && 4792 Data.first->ReuseShuffleIndices.empty() && 4793 !(IgnoreReorder && 4794 Data.first == VectorizableTree.front().get()))) { 4795 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) 4796 OrderedEntries.remove(Op.second); 4797 continue; 4798 } 4799 // Add (potentially!) strided vectorize orders. 4800 for (std::pair<OrdersType, unsigned> &Pair : StridedVectorizeOrders) 4801 OrdersUses.insert(std::make_pair(Pair.first, 0)).first->second += 4802 Pair.second; 4803 } else { 4804 // Account (potentially!) strided vectorize orders only if it was used 4805 // already. 4806 for (std::pair<OrdersType, unsigned> &Pair : StridedVectorizeOrders) { 4807 auto *It = OrdersUses.find(Pair.first); 4808 if (It != OrdersUses.end()) 4809 It->second += Pair.second; 4810 } 4811 } 4812 // Choose the best order. 4813 ArrayRef<unsigned> BestOrder = OrdersUses.front().first; 4814 unsigned Cnt = OrdersUses.front().second; 4815 for (const auto &Pair : drop_begin(OrdersUses)) { 4816 if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) { 4817 BestOrder = Pair.first; 4818 Cnt = Pair.second; 4819 } 4820 } 4821 // Set order of the user node (reordering of operands and user nodes). 4822 if (BestOrder.empty()) { 4823 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) 4824 OrderedEntries.remove(Op.second); 4825 continue; 4826 } 4827 // Erase operands from OrderedEntries list and adjust their orders. 
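// Note: Mask below is the inverse permutation of BestOrder, while MaskOrder
// keeps the order itself in mask form; e.g., for BestOrder <2, 0, 1> this
// yields Mask <1, 2, 0> and MaskOrder <2, 0, 1>.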
4828 VisitedOps.clear(); 4829 SmallVector<int> Mask; 4830 inversePermutation(BestOrder, Mask); 4831 SmallVector<int> MaskOrder(BestOrder.size(), PoisonMaskElem); 4832 unsigned E = BestOrder.size(); 4833 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) { 4834 return I < E ? static_cast<int>(I) : PoisonMaskElem; 4835 }); 4836 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) { 4837 TreeEntry *TE = Op.second; 4838 OrderedEntries.remove(TE); 4839 if (!VisitedOps.insert(TE).second) 4840 continue; 4841 if (TE->ReuseShuffleIndices.size() == BestOrder.size()) { 4842 reorderNodeWithReuses(*TE, Mask); 4843 continue; 4844 } 4845 // Gathers are processed separately. 4846 if (TE->State != TreeEntry::Vectorize && 4847 TE->State != TreeEntry::PossibleStridedVectorize && 4848 (TE->State != TreeEntry::ScatterVectorize || 4849 TE->ReorderIndices.empty())) 4850 continue; 4851 assert((BestOrder.size() == TE->ReorderIndices.size() || 4852 TE->ReorderIndices.empty()) && 4853 "Non-matching sizes of user/operand entries."); 4854 reorderOrder(TE->ReorderIndices, Mask); 4855 if (IgnoreReorder && TE == VectorizableTree.front().get()) 4856 IgnoreReorder = false; 4857 } 4858 // For gathers just need to reorder its scalars. 4859 for (TreeEntry *Gather : GatherOps) { 4860 assert(Gather->ReorderIndices.empty() && 4861 "Unexpected reordering of gathers."); 4862 if (!Gather->ReuseShuffleIndices.empty()) { 4863 // Just reorder reuses indices. 4864 reorderReuses(Gather->ReuseShuffleIndices, Mask); 4865 continue; 4866 } 4867 reorderScalars(Gather->Scalars, Mask); 4868 OrderedEntries.remove(Gather); 4869 } 4870 // Reorder operands of the user node and set the ordering for the user 4871 // node itself. 4872 if (Data.first->State != TreeEntry::Vectorize || 4873 !isa<ExtractElementInst, ExtractValueInst, LoadInst>( 4874 Data.first->getMainOp()) || 4875 Data.first->isAltShuffle()) 4876 Data.first->reorderOperands(Mask); 4877 if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) || 4878 Data.first->isAltShuffle() || 4879 Data.first->State == TreeEntry::PossibleStridedVectorize) { 4880 reorderScalars(Data.first->Scalars, Mask); 4881 reorderOrder(Data.first->ReorderIndices, MaskOrder); 4882 if (Data.first->ReuseShuffleIndices.empty() && 4883 !Data.first->ReorderIndices.empty() && 4884 !Data.first->isAltShuffle()) { 4885 // Insert user node to the list to try to sink reordering deeper in 4886 // the graph. 4887 OrderedEntries.insert(Data.first); 4888 } 4889 } else { 4890 reorderOrder(Data.first->ReorderIndices, Mask); 4891 } 4892 } 4893 } 4894 // If the reordering is unnecessary, just remove the reorder. 4895 if (IgnoreReorder && !VectorizableTree.front()->ReorderIndices.empty() && 4896 VectorizableTree.front()->ReuseShuffleIndices.empty()) 4897 VectorizableTree.front()->ReorderIndices.clear(); 4898 } 4899 4900 void BoUpSLP::buildExternalUses( 4901 const ExtraValueToDebugLocsMap &ExternallyUsedValues) { 4902 // Collect the values that we need to extract from the tree. 4903 for (auto &TEPtr : VectorizableTree) { 4904 TreeEntry *Entry = TEPtr.get(); 4905 4906 // No need to handle users of gathered values. 4907 if (Entry->State == TreeEntry::NeedToGather) 4908 continue; 4909 4910 // For each lane: 4911 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 4912 Value *Scalar = Entry->Scalars[Lane]; 4913 if (!isa<Instruction>(Scalar)) 4914 continue; 4915 int FoundLane = Entry->findLaneForValue(Scalar); 4916 4917 // Check if the scalar is externally used as an extra arg. 
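// "Extra args" are scalars supplied by the caller in ExternallyUsedValues
// (for example, values a matched horizontal reduction still needs outside
// the vectorized tree); they are recorded below with a null user so the
// lane is extracted even without a concrete user instruction.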
4918 const auto *ExtI = ExternallyUsedValues.find(Scalar); 4919 if (ExtI != ExternallyUsedValues.end()) { 4920 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " 4921 << Lane << " from " << *Scalar << ".\n"); 4922 ExternalUses.emplace_back(Scalar, nullptr, FoundLane); 4923 } 4924 for (User *U : Scalar->users()) { 4925 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n"); 4926 4927 Instruction *UserInst = dyn_cast<Instruction>(U); 4928 if (!UserInst || isDeleted(UserInst)) 4929 continue; 4930 4931 // Ignore users in the user ignore list. 4932 if (UserIgnoreList && UserIgnoreList->contains(UserInst)) 4933 continue; 4934 4935 // Skip in-tree scalars that become vectors 4936 if (TreeEntry *UseEntry = getTreeEntry(U)) { 4937 // Some in-tree scalars will remain as scalar in vectorized 4938 // instructions. If that is the case, the one in FoundLane will 4939 // be used. 4940 if (UseEntry->State == TreeEntry::ScatterVectorize || 4941 UseEntry->State == TreeEntry::PossibleStridedVectorize || 4942 !doesInTreeUserNeedToExtract( 4943 Scalar, cast<Instruction>(UseEntry->Scalars.front()), TLI)) { 4944 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U 4945 << ".\n"); 4946 assert(UseEntry->State != TreeEntry::NeedToGather && "Bad state"); 4947 continue; 4948 } 4949 U = nullptr; 4950 } 4951 4952 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *UserInst 4953 << " from lane " << Lane << " from " << *Scalar 4954 << ".\n"); 4955 ExternalUses.emplace_back(Scalar, U, FoundLane); 4956 } 4957 } 4958 } 4959 } 4960 4961 DenseMap<Value *, SmallVector<StoreInst *>> 4962 BoUpSLP::collectUserStores(const BoUpSLP::TreeEntry *TE) const { 4963 DenseMap<Value *, SmallVector<StoreInst *>> PtrToStoresMap; 4964 for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size())) { 4965 Value *V = TE->Scalars[Lane]; 4966 // To save compilation time we don't visit if we have too many users. 4967 static constexpr unsigned UsersLimit = 4; 4968 if (V->hasNUsesOrMore(UsersLimit)) 4969 break; 4970 4971 // Collect stores per pointer object. 4972 for (User *U : V->users()) { 4973 auto *SI = dyn_cast<StoreInst>(U); 4974 if (SI == nullptr || !SI->isSimple() || 4975 !isValidElementType(SI->getValueOperand()->getType())) 4976 continue; 4977 // Skip entry if already 4978 if (getTreeEntry(U)) 4979 continue; 4980 4981 Value *Ptr = getUnderlyingObject(SI->getPointerOperand()); 4982 auto &StoresVec = PtrToStoresMap[Ptr]; 4983 // For now just keep one store per pointer object per lane. 4984 // TODO: Extend this to support multiple stores per pointer per lane 4985 if (StoresVec.size() > Lane) 4986 continue; 4987 // Skip if in different BBs. 4988 if (!StoresVec.empty() && 4989 SI->getParent() != StoresVec.back()->getParent()) 4990 continue; 4991 // Make sure that the stores are of the same type. 4992 if (!StoresVec.empty() && 4993 SI->getValueOperand()->getType() != 4994 StoresVec.back()->getValueOperand()->getType()) 4995 continue; 4996 StoresVec.push_back(SI); 4997 } 4998 } 4999 return PtrToStoresMap; 5000 } 5001 5002 bool BoUpSLP::canFormVector(ArrayRef<StoreInst *> StoresVec, 5003 OrdersType &ReorderIndices) const { 5004 // We check whether the stores in StoreVec can form a vector by sorting them 5005 // and checking whether they are consecutive. 5006 5007 // To avoid calling getPointersDiff() while sorting we create a vector of 5008 // pairs {store, offset from first} and sort this instead. 
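// For example, if the stores in StoresVec access p+2, p and p+1 (in element
// units), the offsets relative to the first store are {0, -2, -1}; sorting
// by offset shows the accesses are consecutive and ReorderIndices becomes
// {2, 0, 1}.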
5009 SmallVector<std::pair<StoreInst *, int>> StoreOffsetVec(StoresVec.size()); 5010 StoreInst *S0 = StoresVec[0]; 5011 StoreOffsetVec[0] = {S0, 0}; 5012 Type *S0Ty = S0->getValueOperand()->getType(); 5013 Value *S0Ptr = S0->getPointerOperand(); 5014 for (unsigned Idx : seq<unsigned>(1, StoresVec.size())) { 5015 StoreInst *SI = StoresVec[Idx]; 5016 std::optional<int> Diff = 5017 getPointersDiff(S0Ty, S0Ptr, SI->getValueOperand()->getType(), 5018 SI->getPointerOperand(), *DL, *SE, 5019 /*StrictCheck=*/true); 5020 // We failed to compare the pointers so just abandon this StoresVec. 5021 if (!Diff) 5022 return false; 5023 StoreOffsetVec[Idx] = {StoresVec[Idx], *Diff}; 5024 } 5025 5026 // Sort the vector based on the pointers. We create a copy because we may 5027 // need the original later for calculating the reorder (shuffle) indices. 5028 stable_sort(StoreOffsetVec, [](const std::pair<StoreInst *, int> &Pair1, 5029 const std::pair<StoreInst *, int> &Pair2) { 5030 int Offset1 = Pair1.second; 5031 int Offset2 = Pair2.second; 5032 return Offset1 < Offset2; 5033 }); 5034 5035 // Check if the stores are consecutive by checking if their difference is 1. 5036 for (unsigned Idx : seq<unsigned>(1, StoreOffsetVec.size())) 5037 if (StoreOffsetVec[Idx].second != StoreOffsetVec[Idx - 1].second + 1) 5038 return false; 5039 5040 // Calculate the shuffle indices according to their offset against the sorted 5041 // StoreOffsetVec. 5042 ReorderIndices.reserve(StoresVec.size()); 5043 for (StoreInst *SI : StoresVec) { 5044 unsigned Idx = find_if(StoreOffsetVec, 5045 [SI](const std::pair<StoreInst *, int> &Pair) { 5046 return Pair.first == SI; 5047 }) - 5048 StoreOffsetVec.begin(); 5049 ReorderIndices.push_back(Idx); 5050 } 5051 // Identity order (e.g., {0,1,2,3}) is modeled as an empty OrdersType in 5052 // reorderTopToBottom() and reorderBottomToTop(), so we are following the 5053 // same convention here. 5054 auto IsIdentityOrder = [](const OrdersType &Order) { 5055 for (unsigned Idx : seq<unsigned>(0, Order.size())) 5056 if (Idx != Order[Idx]) 5057 return false; 5058 return true; 5059 }; 5060 if (IsIdentityOrder(ReorderIndices)) 5061 ReorderIndices.clear(); 5062 5063 return true; 5064 } 5065 5066 #ifndef NDEBUG 5067 LLVM_DUMP_METHOD static void dumpOrder(const BoUpSLP::OrdersType &Order) { 5068 for (unsigned Idx : Order) 5069 dbgs() << Idx << ", "; 5070 dbgs() << "\n"; 5071 } 5072 #endif 5073 5074 SmallVector<BoUpSLP::OrdersType, 1> 5075 BoUpSLP::findExternalStoreUsersReorderIndices(TreeEntry *TE) const { 5076 unsigned NumLanes = TE->Scalars.size(); 5077 5078 DenseMap<Value *, SmallVector<StoreInst *>> PtrToStoresMap = 5079 collectUserStores(TE); 5080 5081 // Holds the reorder indices for each candidate store vector that is a user of 5082 // the current TreeEntry. 5083 SmallVector<OrdersType, 1> ExternalReorderIndices; 5084 5085 // Now inspect the stores collected per pointer and look for vectorization 5086 // candidates. For each candidate calculate the reorder index vector and push 5087 // it into `ExternalReorderIndices` 5088 for (const auto &Pair : PtrToStoresMap) { 5089 auto &StoresVec = Pair.second; 5090 // If we have fewer than NumLanes stores, then we can't form a vector. 5091 if (StoresVec.size() != NumLanes) 5092 continue; 5093 5094 // If the stores are not consecutive then abandon this StoresVec. 
5095 OrdersType ReorderIndices; 5096 if (!canFormVector(StoresVec, ReorderIndices)) 5097 continue; 5098 5099 // We now know that the scalars in StoresVec can form a vector instruction, 5100 // so set the reorder indices. 5101 ExternalReorderIndices.push_back(ReorderIndices); 5102 } 5103 return ExternalReorderIndices; 5104 } 5105 5106 void BoUpSLP::buildTree(ArrayRef<Value *> Roots, 5107 const SmallDenseSet<Value *> &UserIgnoreLst) { 5108 deleteTree(); 5109 UserIgnoreList = &UserIgnoreLst; 5110 if (!allSameType(Roots)) 5111 return; 5112 buildTree_rec(Roots, 0, EdgeInfo()); 5113 } 5114 5115 void BoUpSLP::buildTree(ArrayRef<Value *> Roots) { 5116 deleteTree(); 5117 if (!allSameType(Roots)) 5118 return; 5119 buildTree_rec(Roots, 0, EdgeInfo()); 5120 } 5121 5122 /// \return true if the specified list of values has only one instruction that 5123 /// requires scheduling, false otherwise. 5124 #ifndef NDEBUG 5125 static bool needToScheduleSingleInstruction(ArrayRef<Value *> VL) { 5126 Value *NeedsScheduling = nullptr; 5127 for (Value *V : VL) { 5128 if (doesNotNeedToBeScheduled(V)) 5129 continue; 5130 if (!NeedsScheduling) { 5131 NeedsScheduling = V; 5132 continue; 5133 } 5134 return false; 5135 } 5136 return NeedsScheduling; 5137 } 5138 #endif 5139 5140 /// Generates key/subkey pair for the given value to provide effective sorting 5141 /// of the values and better detection of the vectorizable values sequences. The 5142 /// keys/subkeys can be used for better sorting of the values themselves (keys) 5143 /// and in values subgroups (subkeys). 5144 static std::pair<size_t, size_t> generateKeySubkey( 5145 Value *V, const TargetLibraryInfo *TLI, 5146 function_ref<hash_code(size_t, LoadInst *)> LoadsSubkeyGenerator, 5147 bool AllowAlternate) { 5148 hash_code Key = hash_value(V->getValueID() + 2); 5149 hash_code SubKey = hash_value(0); 5150 // Sort the loads by the distance between the pointers. 5151 if (auto *LI = dyn_cast<LoadInst>(V)) { 5152 Key = hash_combine(LI->getType(), hash_value(Instruction::Load), Key); 5153 if (LI->isSimple()) 5154 SubKey = hash_value(LoadsSubkeyGenerator(Key, LI)); 5155 else 5156 Key = SubKey = hash_value(LI); 5157 } else if (isVectorLikeInstWithConstOps(V)) { 5158 // Sort extracts by the vector operands. 5159 if (isa<ExtractElementInst, UndefValue>(V)) 5160 Key = hash_value(Value::UndefValueVal + 1); 5161 if (auto *EI = dyn_cast<ExtractElementInst>(V)) { 5162 if (!isUndefVector(EI->getVectorOperand()).all() && 5163 !isa<UndefValue>(EI->getIndexOperand())) 5164 SubKey = hash_value(EI->getVectorOperand()); 5165 } 5166 } else if (auto *I = dyn_cast<Instruction>(V)) { 5167 // Sort other instructions just by the opcodes except for CMPInst. 5168 // For CMP also sort by the predicate kind. 5169 if ((isa<BinaryOperator, CastInst>(I)) && 5170 isValidForAlternation(I->getOpcode())) { 5171 if (AllowAlternate) 5172 Key = hash_value(isa<BinaryOperator>(I) ? 1 : 0); 5173 else 5174 Key = hash_combine(hash_value(I->getOpcode()), Key); 5175 SubKey = hash_combine( 5176 hash_value(I->getOpcode()), hash_value(I->getType()), 5177 hash_value(isa<BinaryOperator>(I) 5178 ? I->getType() 5179 : cast<CastInst>(I)->getOperand(0)->getType())); 5180 // For casts, look through the only operand to improve compile time. 
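// Folding the operand's key into both the key and the subkey means that,
// for example, sext instructions whose source operands hash alike (such as
// simple loads) tend to land in the same subgroup, which helps group
// ext-of-load sequences.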
5181 if (isa<CastInst>(I)) { 5182 std::pair<size_t, size_t> OpVals = 5183 generateKeySubkey(I->getOperand(0), TLI, LoadsSubkeyGenerator, 5184 /*AllowAlternate=*/true); 5185 Key = hash_combine(OpVals.first, Key); 5186 SubKey = hash_combine(OpVals.first, SubKey); 5187 } 5188 } else if (auto *CI = dyn_cast<CmpInst>(I)) { 5189 CmpInst::Predicate Pred = CI->getPredicate(); 5190 if (CI->isCommutative()) 5191 Pred = std::min(Pred, CmpInst::getInversePredicate(Pred)); 5192 CmpInst::Predicate SwapPred = CmpInst::getSwappedPredicate(Pred); 5193 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Pred), 5194 hash_value(SwapPred), 5195 hash_value(CI->getOperand(0)->getType())); 5196 } else if (auto *Call = dyn_cast<CallInst>(I)) { 5197 Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, TLI); 5198 if (isTriviallyVectorizable(ID)) { 5199 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(ID)); 5200 } else if (!VFDatabase(*Call).getMappings(*Call).empty()) { 5201 SubKey = hash_combine(hash_value(I->getOpcode()), 5202 hash_value(Call->getCalledFunction())); 5203 } else { 5204 Key = hash_combine(hash_value(Call), Key); 5205 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Call)); 5206 } 5207 for (const CallBase::BundleOpInfo &Op : Call->bundle_op_infos()) 5208 SubKey = hash_combine(hash_value(Op.Begin), hash_value(Op.End), 5209 hash_value(Op.Tag), SubKey); 5210 } else if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) { 5211 if (Gep->getNumOperands() == 2 && isa<ConstantInt>(Gep->getOperand(1))) 5212 SubKey = hash_value(Gep->getPointerOperand()); 5213 else 5214 SubKey = hash_value(Gep); 5215 } else if (BinaryOperator::isIntDivRem(I->getOpcode()) && 5216 !isa<ConstantInt>(I->getOperand(1))) { 5217 // Do not try to vectorize instructions with potentially high cost. 5218 SubKey = hash_value(I); 5219 } else { 5220 SubKey = hash_value(I->getOpcode()); 5221 } 5222 Key = hash_combine(hash_value(I->getParent()), Key); 5223 } 5224 return std::make_pair(Key, SubKey); 5225 } 5226 5227 /// Checks if the specified instruction \p I is an alternate operation for 5228 /// the given \p MainOp and \p AltOp instructions. 5229 static bool isAlternateInstruction(const Instruction *I, 5230 const Instruction *MainOp, 5231 const Instruction *AltOp, 5232 const TargetLibraryInfo &TLI); 5233 5234 BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState( 5235 InstructionsState &S, ArrayRef<Value *> VL, bool IsScatterVectorizeUserTE, 5236 OrdersType &CurrentOrder, SmallVectorImpl<Value *> &PointerOps) const { 5237 assert(S.MainOp && "Expected instructions with same/alternate opcodes only."); 5238 5239 unsigned ShuffleOrOp = 5240 S.isAltShuffle() ? (unsigned)Instruction::ShuffleVector : S.getOpcode(); 5241 auto *VL0 = cast<Instruction>(S.OpValue); 5242 switch (ShuffleOrOp) { 5243 case Instruction::PHI: { 5244 // Check for terminator values (e.g. invoke). 
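// A value defined by a block terminator (e.g. the result of an invoke)
// leaves no insertion point immediately after its definition for the
// gathering/shuffle code the vectorized PHI operand may require, so such
// bundles are gathered.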
5245 for (Value *V : VL) 5246 for (Value *Incoming : cast<PHINode>(V)->incoming_values()) { 5247 Instruction *Term = dyn_cast<Instruction>(Incoming); 5248 if (Term && Term->isTerminator()) { 5249 LLVM_DEBUG(dbgs() 5250 << "SLP: Need to swizzle PHINodes (terminator use).\n"); 5251 return TreeEntry::NeedToGather; 5252 } 5253 } 5254 5255 return TreeEntry::Vectorize; 5256 } 5257 case Instruction::ExtractValue: 5258 case Instruction::ExtractElement: { 5259 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder); 5260 if (Reuse || !CurrentOrder.empty()) 5261 return TreeEntry::Vectorize; 5262 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n"); 5263 return TreeEntry::NeedToGather; 5264 } 5265 case Instruction::InsertElement: { 5266 // Check that we have a buildvector and not a shuffle of 2 or more 5267 // different vectors. 5268 ValueSet SourceVectors; 5269 for (Value *V : VL) { 5270 SourceVectors.insert(cast<Instruction>(V)->getOperand(0)); 5271 assert(getInsertIndex(V) != std::nullopt && 5272 "Non-constant or undef index?"); 5273 } 5274 5275 if (count_if(VL, [&SourceVectors](Value *V) { 5276 return !SourceVectors.contains(V); 5277 }) >= 2) { 5278 // Found 2nd source vector - cancel. 5279 LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with " 5280 "different source vectors.\n"); 5281 return TreeEntry::NeedToGather; 5282 } 5283 5284 return TreeEntry::Vectorize; 5285 } 5286 case Instruction::Load: { 5287 // Check that a vectorized load would load the same memory as a scalar 5288 // load. For example, we don't want to vectorize loads that are smaller 5289 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 5290 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 5291 // from such a struct, we read/write packed bits disagreeing with the 5292 // unvectorized version. 
5293 switch (canVectorizeLoads(VL, VL0, *TTI, *DL, *SE, *LI, *TLI, CurrentOrder, 5294 PointerOps)) { 5295 case LoadsState::Vectorize: 5296 return TreeEntry::Vectorize; 5297 case LoadsState::ScatterVectorize: 5298 return TreeEntry::ScatterVectorize; 5299 case LoadsState::PossibleStridedVectorize: 5300 return TreeEntry::PossibleStridedVectorize; 5301 case LoadsState::Gather: 5302 #ifndef NDEBUG 5303 Type *ScalarTy = VL0->getType(); 5304 if (DL->getTypeSizeInBits(ScalarTy) != 5305 DL->getTypeAllocSizeInBits(ScalarTy)) 5306 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n"); 5307 else if (any_of(VL, 5308 [](Value *V) { return !cast<LoadInst>(V)->isSimple(); })) 5309 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n"); 5310 else 5311 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n"); 5312 #endif // NDEBUG 5313 return TreeEntry::NeedToGather; 5314 } 5315 llvm_unreachable("Unexpected state of loads"); 5316 } 5317 case Instruction::ZExt: 5318 case Instruction::SExt: 5319 case Instruction::FPToUI: 5320 case Instruction::FPToSI: 5321 case Instruction::FPExt: 5322 case Instruction::PtrToInt: 5323 case Instruction::IntToPtr: 5324 case Instruction::SIToFP: 5325 case Instruction::UIToFP: 5326 case Instruction::Trunc: 5327 case Instruction::FPTrunc: 5328 case Instruction::BitCast: { 5329 Type *SrcTy = VL0->getOperand(0)->getType(); 5330 for (Value *V : VL) { 5331 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType(); 5332 if (Ty != SrcTy || !isValidElementType(Ty)) { 5333 LLVM_DEBUG( 5334 dbgs() << "SLP: Gathering casts with different src types.\n"); 5335 return TreeEntry::NeedToGather; 5336 } 5337 } 5338 return TreeEntry::Vectorize; 5339 } 5340 case Instruction::ICmp: 5341 case Instruction::FCmp: { 5342 // Check that all of the compares have the same predicate. 5343 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 5344 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0); 5345 Type *ComparedTy = VL0->getOperand(0)->getType(); 5346 for (Value *V : VL) { 5347 CmpInst *Cmp = cast<CmpInst>(V); 5348 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) || 5349 Cmp->getOperand(0)->getType() != ComparedTy) { 5350 LLVM_DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n"); 5351 return TreeEntry::NeedToGather; 5352 } 5353 } 5354 return TreeEntry::Vectorize; 5355 } 5356 case Instruction::Select: 5357 case Instruction::FNeg: 5358 case Instruction::Add: 5359 case Instruction::FAdd: 5360 case Instruction::Sub: 5361 case Instruction::FSub: 5362 case Instruction::Mul: 5363 case Instruction::FMul: 5364 case Instruction::UDiv: 5365 case Instruction::SDiv: 5366 case Instruction::FDiv: 5367 case Instruction::URem: 5368 case Instruction::SRem: 5369 case Instruction::FRem: 5370 case Instruction::Shl: 5371 case Instruction::LShr: 5372 case Instruction::AShr: 5373 case Instruction::And: 5374 case Instruction::Or: 5375 case Instruction::Xor: 5376 return TreeEntry::Vectorize; 5377 case Instruction::GetElementPtr: { 5378 // We don't combine GEPs with complicated (nested) indexing. 5379 for (Value *V : VL) { 5380 auto *I = dyn_cast<GetElementPtrInst>(V); 5381 if (!I) 5382 continue; 5383 if (I->getNumOperands() != 2) { 5384 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n"); 5385 return TreeEntry::NeedToGather; 5386 } 5387 } 5388 5389 // We can't combine several GEPs into one vector if they operate on 5390 // different types. 
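// The source element type determines the scaling applied to the index;
// e.g., "getelementptr i32, ptr %p, i64 1" and "getelementptr i64, ptr %q,
// i64 1" advance the pointer by 4 and 8 bytes respectively, so they cannot
// share a single vector GEP.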
5391 Type *Ty0 = cast<GEPOperator>(VL0)->getSourceElementType(); 5392 for (Value *V : VL) { 5393 auto *GEP = dyn_cast<GEPOperator>(V); 5394 if (!GEP) 5395 continue; 5396 Type *CurTy = GEP->getSourceElementType(); 5397 if (Ty0 != CurTy) { 5398 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n"); 5399 return TreeEntry::NeedToGather; 5400 } 5401 } 5402 5403 // We don't combine GEPs with non-constant indexes. 5404 Type *Ty1 = VL0->getOperand(1)->getType(); 5405 for (Value *V : VL) { 5406 auto *I = dyn_cast<GetElementPtrInst>(V); 5407 if (!I) 5408 continue; 5409 auto *Op = I->getOperand(1); 5410 if ((!IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) || 5411 (Op->getType() != Ty1 && 5412 ((IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) || 5413 Op->getType()->getScalarSizeInBits() > 5414 DL->getIndexSizeInBits( 5415 V->getType()->getPointerAddressSpace())))) { 5416 LLVM_DEBUG( 5417 dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n"); 5418 return TreeEntry::NeedToGather; 5419 } 5420 } 5421 5422 return TreeEntry::Vectorize; 5423 } 5424 case Instruction::Store: { 5425 // Check if the stores are consecutive or if we need to swizzle them. 5426 llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType(); 5427 // Avoid types that are padded when being allocated as scalars, while 5428 // being packed together in a vector (such as i1). 5429 if (DL->getTypeSizeInBits(ScalarTy) != 5430 DL->getTypeAllocSizeInBits(ScalarTy)) { 5431 LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n"); 5432 return TreeEntry::NeedToGather; 5433 } 5434 // Make sure all stores in the bundle are simple - we can't vectorize 5435 // atomic or volatile stores. 5436 for (Value *V : VL) { 5437 auto *SI = cast<StoreInst>(V); 5438 if (!SI->isSimple()) { 5439 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n"); 5440 return TreeEntry::NeedToGather; 5441 } 5442 PointerOps.push_back(SI->getPointerOperand()); 5443 } 5444 5445 // Check the order of pointer operands. 5446 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) { 5447 Value *Ptr0; 5448 Value *PtrN; 5449 if (CurrentOrder.empty()) { 5450 Ptr0 = PointerOps.front(); 5451 PtrN = PointerOps.back(); 5452 } else { 5453 Ptr0 = PointerOps[CurrentOrder.front()]; 5454 PtrN = PointerOps[CurrentOrder.back()]; 5455 } 5456 std::optional<int> Dist = 5457 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE); 5458 // Check that the sorted pointer operands are consecutive. 5459 if (static_cast<unsigned>(*Dist) == VL.size() - 1) 5460 return TreeEntry::Vectorize; 5461 } 5462 5463 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n"); 5464 return TreeEntry::NeedToGather; 5465 } 5466 case Instruction::Call: { 5467 // Check if the calls are all to the same vectorizable intrinsic or 5468 // library function. 
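// For example, a bundle of calls such as
//   %a = call float @llvm.fabs.f32(float %x)
//   %b = call float @llvm.fabs.f32(float %y)
// can later be widened into a single @llvm.fabs.v2f32 call, whereas calls
// to different callees, or calls whose required-scalar operands differ, are
// gathered below.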
5469 CallInst *CI = cast<CallInst>(VL0); 5470 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 5471 5472 VFShape Shape = VFShape::get( 5473 CI->getFunctionType(), 5474 ElementCount::getFixed(static_cast<unsigned int>(VL.size())), 5475 false /*HasGlobalPred*/); 5476 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 5477 5478 if (!VecFunc && !isTriviallyVectorizable(ID)) { 5479 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n"); 5480 return TreeEntry::NeedToGather; 5481 } 5482 Function *F = CI->getCalledFunction(); 5483 unsigned NumArgs = CI->arg_size(); 5484 SmallVector<Value *, 4> ScalarArgs(NumArgs, nullptr); 5485 for (unsigned J = 0; J != NumArgs; ++J) 5486 if (isVectorIntrinsicWithScalarOpAtArg(ID, J)) 5487 ScalarArgs[J] = CI->getArgOperand(J); 5488 for (Value *V : VL) { 5489 CallInst *CI2 = dyn_cast<CallInst>(V); 5490 if (!CI2 || CI2->getCalledFunction() != F || 5491 getVectorIntrinsicIDForCall(CI2, TLI) != ID || 5492 (VecFunc && 5493 VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) || 5494 !CI->hasIdenticalOperandBundleSchema(*CI2)) { 5495 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V 5496 << "\n"); 5497 return TreeEntry::NeedToGather; 5498 } 5499 // Some intrinsics have scalar arguments and should be same in order for 5500 // them to be vectorized. 5501 for (unsigned J = 0; J != NumArgs; ++J) { 5502 if (isVectorIntrinsicWithScalarOpAtArg(ID, J)) { 5503 Value *A1J = CI2->getArgOperand(J); 5504 if (ScalarArgs[J] != A1J) { 5505 LLVM_DEBUG(dbgs() 5506 << "SLP: mismatched arguments in call:" << *CI 5507 << " argument " << ScalarArgs[J] << "!=" << A1J << "\n"); 5508 return TreeEntry::NeedToGather; 5509 } 5510 } 5511 } 5512 // Verify that the bundle operands are identical between the two calls. 5513 if (CI->hasOperandBundles() && 5514 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(), 5515 CI->op_begin() + CI->getBundleOperandsEndIndex(), 5516 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) { 5517 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI 5518 << "!=" << *V << '\n'); 5519 return TreeEntry::NeedToGather; 5520 } 5521 } 5522 5523 return TreeEntry::Vectorize; 5524 } 5525 case Instruction::ShuffleVector: { 5526 // If this is not an alternate sequence of opcode like add-sub 5527 // then do not vectorize this instruction. 5528 if (!S.isAltShuffle()) { 5529 LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n"); 5530 return TreeEntry::NeedToGather; 5531 } 5532 return TreeEntry::Vectorize; 5533 } 5534 default: 5535 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n"); 5536 return TreeEntry::NeedToGather; 5537 } 5538 } 5539 5540 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth, 5541 const EdgeInfo &UserTreeIdx) { 5542 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!"); 5543 5544 SmallVector<int> ReuseShuffleIndicies; 5545 SmallVector<Value *> UniqueValues; 5546 SmallVector<Value *> NonUniqueValueVL; 5547 auto TryToFindDuplicates = [&](const InstructionsState &S, 5548 bool DoNotFail = false) { 5549 // Check that every instruction appears once in this bundle. 5550 DenseMap<Value *, unsigned> UniquePositions(VL.size()); 5551 for (Value *V : VL) { 5552 if (isConstant(V)) { 5553 ReuseShuffleIndicies.emplace_back( 5554 isa<UndefValue>(V) ? 
PoisonMaskElem : UniqueValues.size()); 5555 UniqueValues.emplace_back(V); 5556 continue; 5557 } 5558 auto Res = UniquePositions.try_emplace(V, UniqueValues.size()); 5559 ReuseShuffleIndicies.emplace_back(Res.first->second); 5560 if (Res.second) 5561 UniqueValues.emplace_back(V); 5562 } 5563 size_t NumUniqueScalarValues = UniqueValues.size(); 5564 if (NumUniqueScalarValues == VL.size()) { 5565 ReuseShuffleIndicies.clear(); 5566 } else { 5567 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n"); 5568 if (NumUniqueScalarValues <= 1 || 5569 (UniquePositions.size() == 1 && all_of(UniqueValues, 5570 [](Value *V) { 5571 return isa<UndefValue>(V) || 5572 !isConstant(V); 5573 })) || 5574 !llvm::has_single_bit<uint32_t>(NumUniqueScalarValues)) { 5575 if (DoNotFail && UniquePositions.size() > 1 && 5576 NumUniqueScalarValues > 1 && S.MainOp->isSafeToRemove() && 5577 all_of(UniqueValues, [=](Value *V) { 5578 return isa<ExtractElementInst>(V) || 5579 areAllUsersVectorized(cast<Instruction>(V), 5580 UserIgnoreList); 5581 })) { 5582 unsigned PWSz = PowerOf2Ceil(UniqueValues.size()); 5583 if (PWSz == VL.size()) { 5584 ReuseShuffleIndicies.clear(); 5585 } else { 5586 NonUniqueValueVL.assign(UniqueValues.begin(), UniqueValues.end()); 5587 NonUniqueValueVL.append(PWSz - UniqueValues.size(), 5588 UniqueValues.back()); 5589 VL = NonUniqueValueVL; 5590 } 5591 return true; 5592 } 5593 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n"); 5594 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5595 return false; 5596 } 5597 VL = UniqueValues; 5598 } 5599 return true; 5600 }; 5601 5602 InstructionsState S = getSameOpcode(VL, *TLI); 5603 5604 // Don't vectorize ephemeral values. 5605 if (!EphValues.empty()) { 5606 for (Value *V : VL) { 5607 if (EphValues.count(V)) { 5608 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 5609 << ") is ephemeral.\n"); 5610 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5611 return; 5612 } 5613 } 5614 } 5615 5616 // Gather if we hit the RecursionMaxDepth, unless this is a load (or z/sext of 5617 // a load), in which case peek through to include it in the tree, without 5618 // ballooning over-budget. 5619 if (Depth >= RecursionMaxDepth && 5620 !(S.MainOp && isa<Instruction>(S.MainOp) && S.MainOp == S.AltOp && 5621 VL.size() >= 4 && 5622 (match(S.MainOp, m_Load(m_Value())) || all_of(VL, [&S](const Value *I) { 5623 return match(I, 5624 m_OneUse(m_ZExtOrSExt(m_OneUse(m_Load(m_Value()))))) && 5625 cast<Instruction>(I)->getOpcode() == 5626 cast<Instruction>(S.MainOp)->getOpcode(); 5627 })))) { 5628 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n"); 5629 if (TryToFindDuplicates(S)) 5630 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5631 ReuseShuffleIndicies); 5632 return; 5633 } 5634 5635 // Don't handle scalable vectors 5636 if (S.getOpcode() == Instruction::ExtractElement && 5637 isa<ScalableVectorType>( 5638 cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) { 5639 LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n"); 5640 if (TryToFindDuplicates(S)) 5641 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5642 ReuseShuffleIndicies); 5643 return; 5644 } 5645 5646 // Don't handle vectors. 
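// Bundles of values that are themselves vector-typed would require vector
// concatenation rather than scalar widening, which is not modeled here;
// insertelement-based buildvector sequences are the exception and are
// allowed through.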
5647 if (S.OpValue->getType()->isVectorTy() && 5648 !isa<InsertElementInst>(S.OpValue)) { 5649 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n"); 5650 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5651 return; 5652 } 5653 5654 if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue)) 5655 if (SI->getValueOperand()->getType()->isVectorTy()) { 5656 LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n"); 5657 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5658 return; 5659 } 5660 5661 // If all of the operands are identical or constant we have a simple solution. 5662 // If we deal with insert/extract instructions, they all must have constant 5663 // indices, otherwise we should gather them, not try to vectorize. 5664 // If alternate op node with 2 elements with gathered operands - do not 5665 // vectorize. 5666 auto &&NotProfitableForVectorization = [&S, this, 5667 Depth](ArrayRef<Value *> VL) { 5668 if (!S.getOpcode() || !S.isAltShuffle() || VL.size() > 2) 5669 return false; 5670 if (VectorizableTree.size() < MinTreeSize) 5671 return false; 5672 if (Depth >= RecursionMaxDepth - 1) 5673 return true; 5674 // Check if all operands are extracts, part of vector node or can build a 5675 // regular vectorize node. 5676 SmallVector<unsigned, 2> InstsCount(VL.size(), 0); 5677 for (Value *V : VL) { 5678 auto *I = cast<Instruction>(V); 5679 InstsCount.push_back(count_if(I->operand_values(), [](Value *Op) { 5680 return isa<Instruction>(Op) || isVectorLikeInstWithConstOps(Op); 5681 })); 5682 } 5683 bool IsCommutative = isCommutative(S.MainOp) || isCommutative(S.AltOp); 5684 if ((IsCommutative && 5685 std::accumulate(InstsCount.begin(), InstsCount.end(), 0) < 2) || 5686 (!IsCommutative && 5687 all_of(InstsCount, [](unsigned ICnt) { return ICnt < 2; }))) 5688 return true; 5689 assert(VL.size() == 2 && "Expected only 2 alternate op instructions."); 5690 SmallVector<SmallVector<std::pair<Value *, Value *>>> Candidates; 5691 auto *I1 = cast<Instruction>(VL.front()); 5692 auto *I2 = cast<Instruction>(VL.back()); 5693 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op) 5694 Candidates.emplace_back().emplace_back(I1->getOperand(Op), 5695 I2->getOperand(Op)); 5696 if (static_cast<unsigned>(count_if( 5697 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) { 5698 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat); 5699 })) >= S.MainOp->getNumOperands() / 2) 5700 return false; 5701 if (S.MainOp->getNumOperands() > 2) 5702 return true; 5703 if (IsCommutative) { 5704 // Check permuted operands. 
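// For a commutative 2-wide bundle also try the swapped pairing, i.e. match
// operand Op of the first instruction against operand (Op + 1) % NumOperands
// of the second one, before treating the bundle as unprofitable.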
5705 Candidates.clear(); 5706 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op) 5707 Candidates.emplace_back().emplace_back(I1->getOperand(Op), 5708 I2->getOperand((Op + 1) % E)); 5709 if (any_of( 5710 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) { 5711 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat); 5712 })) 5713 return false; 5714 } 5715 return true; 5716 }; 5717 SmallVector<unsigned> SortedIndices; 5718 BasicBlock *BB = nullptr; 5719 bool IsScatterVectorizeUserTE = 5720 UserTreeIdx.UserTE && 5721 (UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize || 5722 UserTreeIdx.UserTE->State == TreeEntry::PossibleStridedVectorize); 5723 bool AreAllSameInsts = 5724 (S.getOpcode() && allSameBlock(VL)) || 5725 (S.OpValue->getType()->isPointerTy() && IsScatterVectorizeUserTE && 5726 VL.size() > 2 && 5727 all_of(VL, 5728 [&BB](Value *V) { 5729 auto *I = dyn_cast<GetElementPtrInst>(V); 5730 if (!I) 5731 return doesNotNeedToBeScheduled(V); 5732 if (!BB) 5733 BB = I->getParent(); 5734 return BB == I->getParent() && I->getNumOperands() == 2; 5735 }) && 5736 BB && 5737 sortPtrAccesses(VL, UserTreeIdx.UserTE->getMainOp()->getType(), *DL, *SE, 5738 SortedIndices)); 5739 if (!AreAllSameInsts || allConstant(VL) || isSplat(VL) || 5740 (isa<InsertElementInst, ExtractValueInst, ExtractElementInst>( 5741 S.OpValue) && 5742 !all_of(VL, isVectorLikeInstWithConstOps)) || 5743 NotProfitableForVectorization(VL)) { 5744 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O, small shuffle. \n"); 5745 if (TryToFindDuplicates(S)) 5746 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5747 ReuseShuffleIndicies); 5748 return; 5749 } 5750 5751 // We now know that this is a vector of instructions of the same type from 5752 // the same block. 5753 5754 // Check if this is a duplicate of another entry. 5755 if (TreeEntry *E = getTreeEntry(S.OpValue)) { 5756 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n"); 5757 if (!E->isSame(VL)) { 5758 auto It = MultiNodeScalars.find(S.OpValue); 5759 if (It != MultiNodeScalars.end()) { 5760 auto *TEIt = find_if(It->getSecond(), 5761 [&](TreeEntry *ME) { return ME->isSame(VL); }); 5762 if (TEIt != It->getSecond().end()) 5763 E = *TEIt; 5764 else 5765 E = nullptr; 5766 } else { 5767 E = nullptr; 5768 } 5769 } 5770 if (!E) { 5771 if (!doesNotNeedToBeScheduled(S.OpValue)) { 5772 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n"); 5773 if (TryToFindDuplicates(S)) 5774 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5775 ReuseShuffleIndicies); 5776 return; 5777 } 5778 } else { 5779 // Record the reuse of the tree node. FIXME, currently this is only used 5780 // to properly draw the graph rather than for the actual vectorization. 5781 E->UserTreeIndices.push_back(UserTreeIdx); 5782 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue 5783 << ".\n"); 5784 return; 5785 } 5786 } 5787 5788 // Check that none of the instructions in the bundle are already in the tree. 5789 for (Value *V : VL) { 5790 if ((!IsScatterVectorizeUserTE && !isa<Instruction>(V)) || 5791 doesNotNeedToBeScheduled(V)) 5792 continue; 5793 if (getTreeEntry(V)) { 5794 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V 5795 << ") is already in tree.\n"); 5796 if (TryToFindDuplicates(S)) 5797 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5798 ReuseShuffleIndicies); 5799 return; 5800 } 5801 } 5802 5803 // The reduction nodes (stored in UserIgnoreList) also should stay scalar. 
5804 if (UserIgnoreList && !UserIgnoreList->empty()) { 5805 for (Value *V : VL) { 5806 if (UserIgnoreList && UserIgnoreList->contains(V)) { 5807 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n"); 5808 if (TryToFindDuplicates(S)) 5809 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5810 ReuseShuffleIndicies); 5811 return; 5812 } 5813 } 5814 } 5815 5816 // Special processing for sorted pointers for ScatterVectorize node with 5817 // constant indeces only. 5818 if (AreAllSameInsts && UserTreeIdx.UserTE && 5819 (UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize || 5820 UserTreeIdx.UserTE->State == TreeEntry::PossibleStridedVectorize) && 5821 !(S.getOpcode() && allSameBlock(VL))) { 5822 assert(S.OpValue->getType()->isPointerTy() && 5823 count_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }) >= 5824 2 && 5825 "Expected pointers only."); 5826 // Reset S to make it GetElementPtr kind of node. 5827 const auto *It = find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }); 5828 assert(It != VL.end() && "Expected at least one GEP."); 5829 S = getSameOpcode(*It, *TLI); 5830 } 5831 5832 // Check that all of the users of the scalars that we want to vectorize are 5833 // schedulable. 5834 auto *VL0 = cast<Instruction>(S.OpValue); 5835 BB = VL0->getParent(); 5836 5837 if (!DT->isReachableFromEntry(BB)) { 5838 // Don't go into unreachable blocks. They may contain instructions with 5839 // dependency cycles which confuse the final scheduling. 5840 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n"); 5841 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5842 return; 5843 } 5844 5845 // Don't go into catchswitch blocks, which can happen with PHIs. 5846 // Such blocks can only have PHIs and the catchswitch. There is no 5847 // place to insert a shuffle if we need to, so just avoid that issue. 5848 if (isa<CatchSwitchInst>(BB->getTerminator())) { 5849 LLVM_DEBUG(dbgs() << "SLP: bundle in catchswitch block.\n"); 5850 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx); 5851 return; 5852 } 5853 5854 // Check that every instruction appears once in this bundle. 5855 if (!TryToFindDuplicates(S, /*DoNotFail=*/true)) 5856 return; 5857 5858 // Perform specific checks for each particular instruction kind. 5859 OrdersType CurrentOrder; 5860 SmallVector<Value *> PointerOps; 5861 TreeEntry::EntryState State = getScalarsVectorizationState( 5862 S, VL, IsScatterVectorizeUserTE, CurrentOrder, PointerOps); 5863 if (State == TreeEntry::NeedToGather) { 5864 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5865 ReuseShuffleIndicies); 5866 return; 5867 } 5868 5869 auto &BSRef = BlocksSchedules[BB]; 5870 if (!BSRef) 5871 BSRef = std::make_unique<BlockScheduling>(BB); 5872 5873 BlockScheduling &BS = *BSRef; 5874 5875 std::optional<ScheduleData *> Bundle = 5876 BS.tryScheduleBundle(UniqueValues, this, S); 5877 #ifdef EXPENSIVE_CHECKS 5878 // Make sure we didn't break any internal invariants 5879 BS.verify(); 5880 #endif 5881 if (!Bundle) { 5882 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n"); 5883 assert((!BS.getScheduleData(VL0) || 5884 !BS.getScheduleData(VL0)->isPartOfBundle()) && 5885 "tryScheduleBundle should cancelScheduling on failure"); 5886 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx, 5887 ReuseShuffleIndicies); 5888 return; 5889 } 5890 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n"); 5891 5892 unsigned ShuffleOrOp = S.isAltShuffle() ? 
5893 (unsigned) Instruction::ShuffleVector : S.getOpcode(); 5894 switch (ShuffleOrOp) { 5895 case Instruction::PHI: { 5896 auto *PH = cast<PHINode>(VL0); 5897 5898 TreeEntry *TE = 5899 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndicies); 5900 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n"); 5901 5902 // Keeps the reordered operands to avoid code duplication. 5903 SmallVector<ValueList, 2> OperandsVec; 5904 for (unsigned I = 0, E = PH->getNumIncomingValues(); I < E; ++I) { 5905 if (!DT->isReachableFromEntry(PH->getIncomingBlock(I))) { 5906 ValueList Operands(VL.size(), PoisonValue::get(PH->getType())); 5907 TE->setOperand(I, Operands); 5908 OperandsVec.push_back(Operands); 5909 continue; 5910 } 5911 ValueList Operands; 5912 // Prepare the operand vector. 5913 for (Value *V : VL) 5914 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock( 5915 PH->getIncomingBlock(I))); 5916 TE->setOperand(I, Operands); 5917 OperandsVec.push_back(Operands); 5918 } 5919 for (unsigned OpIdx = 0, OpE = OperandsVec.size(); OpIdx != OpE; ++OpIdx) 5920 buildTree_rec(OperandsVec[OpIdx], Depth + 1, {TE, OpIdx}); 5921 return; 5922 } 5923 case Instruction::ExtractValue: 5924 case Instruction::ExtractElement: { 5925 if (CurrentOrder.empty()) { 5926 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n"); 5927 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 5928 ReuseShuffleIndicies); 5929 // This is a special case, as it does not gather, but at the same time 5930 // we are not extending buildTree_rec() towards the operands. 5931 ValueList Op0; 5932 Op0.assign(VL.size(), VL0->getOperand(0)); 5933 VectorizableTree.back()->setOperand(0, Op0); 5934 return; 5935 } 5936 LLVM_DEBUG({ 5937 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence " 5938 "with order"; 5939 for (unsigned Idx : CurrentOrder) 5940 dbgs() << " " << Idx; 5941 dbgs() << "\n"; 5942 }); 5943 fixupOrderingIndices(CurrentOrder); 5944 // Insert new order with initial value 0, if it does not exist, 5945 // otherwise return the iterator to the existing one. 5946 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 5947 ReuseShuffleIndicies, CurrentOrder); 5948 // This is a special case, as it does not gather, but at the same time 5949 // we are not extending buildTree_rec() towards the operands. 
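// All extracts in the bundle are known to read from the same source vector
// (canReuseExtract() checks this), so the single operand list is just
// VL.size() copies of that vector, e.g. for
//   %a = extractelement <4 x i32> %v, i32 2
//   %b = extractelement <4 x i32> %v, i32 3
// operand 0 becomes {%v, %v}.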
5950 ValueList Op0; 5951 Op0.assign(VL.size(), VL0->getOperand(0)); 5952 VectorizableTree.back()->setOperand(0, Op0); 5953 return; 5954 } 5955 case Instruction::InsertElement: { 5956 assert(ReuseShuffleIndicies.empty() && "All inserts should be unique"); 5957 5958 auto OrdCompare = [](const std::pair<int, int> &P1, 5959 const std::pair<int, int> &P2) { 5960 return P1.first > P2.first; 5961 }; 5962 PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>, 5963 decltype(OrdCompare)> 5964 Indices(OrdCompare); 5965 for (int I = 0, E = VL.size(); I < E; ++I) { 5966 unsigned Idx = *getInsertIndex(VL[I]); 5967 Indices.emplace(Idx, I); 5968 } 5969 OrdersType CurrentOrder(VL.size(), VL.size()); 5970 bool IsIdentity = true; 5971 for (int I = 0, E = VL.size(); I < E; ++I) { 5972 CurrentOrder[Indices.top().second] = I; 5973 IsIdentity &= Indices.top().second == I; 5974 Indices.pop(); 5975 } 5976 if (IsIdentity) 5977 CurrentOrder.clear(); 5978 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 5979 std::nullopt, CurrentOrder); 5980 LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n"); 5981 5982 constexpr int NumOps = 2; 5983 ValueList VectorOperands[NumOps]; 5984 for (int I = 0; I < NumOps; ++I) { 5985 for (Value *V : VL) 5986 VectorOperands[I].push_back(cast<Instruction>(V)->getOperand(I)); 5987 5988 TE->setOperand(I, VectorOperands[I]); 5989 } 5990 buildTree_rec(VectorOperands[NumOps - 1], Depth + 1, {TE, NumOps - 1}); 5991 return; 5992 } 5993 case Instruction::Load: { 5994 // Check that a vectorized load would load the same memory as a scalar 5995 // load. For example, we don't want to vectorize loads that are smaller 5996 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM 5997 // treats loading/storing it as an i8 struct. If we vectorize loads/stores 5998 // from such a struct, we read/write packed bits disagreeing with the 5999 // unvectorized version. 6000 TreeEntry *TE = nullptr; 6001 fixupOrderingIndices(CurrentOrder); 6002 switch (State) { 6003 case TreeEntry::Vectorize: 6004 if (CurrentOrder.empty()) { 6005 // Original loads are consecutive and does not require reordering. 6006 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6007 ReuseShuffleIndicies); 6008 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n"); 6009 } else { 6010 // Need to reorder. 6011 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6012 ReuseShuffleIndicies, CurrentOrder); 6013 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n"); 6014 } 6015 TE->setOperandsInOrder(); 6016 break; 6017 case TreeEntry::PossibleStridedVectorize: 6018 // Vectorizing non-consecutive loads with `llvm.masked.gather`. 6019 if (CurrentOrder.empty()) { 6020 TE = newTreeEntry(VL, TreeEntry::PossibleStridedVectorize, Bundle, S, 6021 UserTreeIdx, ReuseShuffleIndicies); 6022 } else { 6023 TE = newTreeEntry(VL, TreeEntry::PossibleStridedVectorize, Bundle, S, 6024 UserTreeIdx, ReuseShuffleIndicies, CurrentOrder); 6025 } 6026 TE->setOperandsInOrder(); 6027 buildTree_rec(PointerOps, Depth + 1, {TE, 0}); 6028 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); 6029 break; 6030 case TreeEntry::ScatterVectorize: 6031 // Vectorizing non-consecutive loads with `llvm.masked.gather`. 
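// E.g. four i32 loads from non-consecutive addresses cannot form a single
// wide load, but can still be emitted as one
//   %g = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, ...)
// which is why the pointer operands themselves are vectorized as well via
// the buildTree_rec() call below.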
6032 TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S, 6033 UserTreeIdx, ReuseShuffleIndicies); 6034 TE->setOperandsInOrder(); 6035 buildTree_rec(PointerOps, Depth + 1, {TE, 0}); 6036 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n"); 6037 break; 6038 case TreeEntry::NeedToGather: 6039 llvm_unreachable("Unexpected loads state."); 6040 } 6041 return; 6042 } 6043 case Instruction::ZExt: 6044 case Instruction::SExt: 6045 case Instruction::FPToUI: 6046 case Instruction::FPToSI: 6047 case Instruction::FPExt: 6048 case Instruction::PtrToInt: 6049 case Instruction::IntToPtr: 6050 case Instruction::SIToFP: 6051 case Instruction::UIToFP: 6052 case Instruction::Trunc: 6053 case Instruction::FPTrunc: 6054 case Instruction::BitCast: { 6055 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6056 ReuseShuffleIndicies); 6057 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n"); 6058 6059 TE->setOperandsInOrder(); 6060 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) { 6061 ValueList Operands; 6062 // Prepare the operand vector. 6063 for (Value *V : VL) 6064 Operands.push_back(cast<Instruction>(V)->getOperand(I)); 6065 6066 buildTree_rec(Operands, Depth + 1, {TE, I}); 6067 } 6068 return; 6069 } 6070 case Instruction::ICmp: 6071 case Instruction::FCmp: { 6072 // Check that all of the compares have the same predicate. 6073 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 6074 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6075 ReuseShuffleIndicies); 6076 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n"); 6077 6078 ValueList Left, Right; 6079 if (cast<CmpInst>(VL0)->isCommutative()) { 6080 // Commutative predicate - collect + sort operands of the instructions 6081 // so that each side is more likely to have the same opcode. 6082 assert(P0 == CmpInst::getSwappedPredicate(P0) && 6083 "Commutative Predicate mismatch"); 6084 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, *this); 6085 } else { 6086 // Collect operands - commute if it uses the swapped predicate. 6087 for (Value *V : VL) { 6088 auto *Cmp = cast<CmpInst>(V); 6089 Value *LHS = Cmp->getOperand(0); 6090 Value *RHS = Cmp->getOperand(1); 6091 if (Cmp->getPredicate() != P0) 6092 std::swap(LHS, RHS); 6093 Left.push_back(LHS); 6094 Right.push_back(RHS); 6095 } 6096 } 6097 TE->setOperand(0, Left); 6098 TE->setOperand(1, Right); 6099 buildTree_rec(Left, Depth + 1, {TE, 0}); 6100 buildTree_rec(Right, Depth + 1, {TE, 1}); 6101 return; 6102 } 6103 case Instruction::Select: 6104 case Instruction::FNeg: 6105 case Instruction::Add: 6106 case Instruction::FAdd: 6107 case Instruction::Sub: 6108 case Instruction::FSub: 6109 case Instruction::Mul: 6110 case Instruction::FMul: 6111 case Instruction::UDiv: 6112 case Instruction::SDiv: 6113 case Instruction::FDiv: 6114 case Instruction::URem: 6115 case Instruction::SRem: 6116 case Instruction::FRem: 6117 case Instruction::Shl: 6118 case Instruction::LShr: 6119 case Instruction::AShr: 6120 case Instruction::And: 6121 case Instruction::Or: 6122 case Instruction::Xor: { 6123 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6124 ReuseShuffleIndicies); 6125 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n"); 6126 6127 // Sort operands of the instructions so that each side is more likely to 6128 // have the same opcode. 
6129 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) { 6130 ValueList Left, Right; 6131 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, *this); 6132 TE->setOperand(0, Left); 6133 TE->setOperand(1, Right); 6134 buildTree_rec(Left, Depth + 1, {TE, 0}); 6135 buildTree_rec(Right, Depth + 1, {TE, 1}); 6136 return; 6137 } 6138 6139 TE->setOperandsInOrder(); 6140 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) { 6141 ValueList Operands; 6142 // Prepare the operand vector. 6143 for (Value *V : VL) 6144 Operands.push_back(cast<Instruction>(V)->getOperand(I)); 6145 6146 buildTree_rec(Operands, Depth + 1, {TE, I}); 6147 } 6148 return; 6149 } 6150 case Instruction::GetElementPtr: { 6151 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6152 ReuseShuffleIndicies); 6153 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n"); 6154 SmallVector<ValueList, 2> Operands(2); 6155 // Prepare the operand vector for pointer operands. 6156 for (Value *V : VL) { 6157 auto *GEP = dyn_cast<GetElementPtrInst>(V); 6158 if (!GEP) { 6159 Operands.front().push_back(V); 6160 continue; 6161 } 6162 Operands.front().push_back(GEP->getPointerOperand()); 6163 } 6164 TE->setOperand(0, Operands.front()); 6165 // Need to cast all indices to the same type before vectorization to 6166 // avoid crash. 6167 // Required to be able to find correct matches between different gather 6168 // nodes and reuse the vectorized values rather than trying to gather them 6169 // again. 6170 int IndexIdx = 1; 6171 Type *VL0Ty = VL0->getOperand(IndexIdx)->getType(); 6172 Type *Ty = all_of(VL, 6173 [VL0Ty, IndexIdx](Value *V) { 6174 auto *GEP = dyn_cast<GetElementPtrInst>(V); 6175 if (!GEP) 6176 return true; 6177 return VL0Ty == GEP->getOperand(IndexIdx)->getType(); 6178 }) 6179 ? VL0Ty 6180 : DL->getIndexType(cast<GetElementPtrInst>(VL0) 6181 ->getPointerOperandType() 6182 ->getScalarType()); 6183 // Prepare the operand vector. 6184 for (Value *V : VL) { 6185 auto *I = dyn_cast<GetElementPtrInst>(V); 6186 if (!I) { 6187 Operands.back().push_back( 6188 ConstantInt::get(Ty, 0, /*isSigned=*/false)); 6189 continue; 6190 } 6191 auto *Op = I->getOperand(IndexIdx); 6192 auto *CI = dyn_cast<ConstantInt>(Op); 6193 if (!CI) 6194 Operands.back().push_back(Op); 6195 else 6196 Operands.back().push_back(ConstantFoldIntegerCast( 6197 CI, Ty, CI->getValue().isSignBitSet(), *DL)); 6198 } 6199 TE->setOperand(IndexIdx, Operands.back()); 6200 6201 for (unsigned I = 0, Ops = Operands.size(); I < Ops; ++I) 6202 buildTree_rec(Operands[I], Depth + 1, {TE, I}); 6203 return; 6204 } 6205 case Instruction::Store: { 6206 // Check if the stores are consecutive or if we need to swizzle them. 6207 ValueList Operands(VL.size()); 6208 auto *OIter = Operands.begin(); 6209 for (Value *V : VL) { 6210 auto *SI = cast<StoreInst>(V); 6211 *OIter = SI->getValueOperand(); 6212 ++OIter; 6213 } 6214 // Check that the sorted pointer operands are consecutive. 6215 if (CurrentOrder.empty()) { 6216 // Original stores are consecutive and does not require reordering. 
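// Only the stored values are vectorized; the addresses are not part of the
// tree. E.g. for
//   store i32 %x, ptr %p
//   store i32 %y, ptr %q   ; %q is the next i32 slot after %p
// the single operand bundle built above is {%x, %y}.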
6217 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6218 ReuseShuffleIndicies); 6219 TE->setOperandsInOrder(); 6220 buildTree_rec(Operands, Depth + 1, {TE, 0}); 6221 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n"); 6222 } else { 6223 fixupOrderingIndices(CurrentOrder); 6224 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6225 ReuseShuffleIndicies, CurrentOrder); 6226 TE->setOperandsInOrder(); 6227 buildTree_rec(Operands, Depth + 1, {TE, 0}); 6228 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n"); 6229 } 6230 return; 6231 } 6232 case Instruction::Call: { 6233 // Check if the calls are all to the same vectorizable intrinsic or 6234 // library function. 6235 CallInst *CI = cast<CallInst>(VL0); 6236 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 6237 6238 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6239 ReuseShuffleIndicies); 6240 TE->setOperandsInOrder(); 6241 for (unsigned I : seq<unsigned>(0, CI->arg_size())) { 6242 // For scalar operands no need to create an entry since no need to 6243 // vectorize it. 6244 if (isVectorIntrinsicWithScalarOpAtArg(ID, I)) 6245 continue; 6246 ValueList Operands; 6247 // Prepare the operand vector. 6248 for (Value *V : VL) { 6249 auto *CI2 = cast<CallInst>(V); 6250 Operands.push_back(CI2->getArgOperand(I)); 6251 } 6252 buildTree_rec(Operands, Depth + 1, {TE, I}); 6253 } 6254 return; 6255 } 6256 case Instruction::ShuffleVector: { 6257 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx, 6258 ReuseShuffleIndicies); 6259 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n"); 6260 6261 // Reorder operands if reordering would enable vectorization. 6262 auto *CI = dyn_cast<CmpInst>(VL0); 6263 if (isa<BinaryOperator>(VL0) || CI) { 6264 ValueList Left, Right; 6265 if (!CI || all_of(VL, [](Value *V) { 6266 return cast<CmpInst>(V)->isCommutative(); 6267 })) { 6268 reorderInputsAccordingToOpcode(VL, Left, Right, *TLI, *DL, *SE, 6269 *this); 6270 } else { 6271 auto *MainCI = cast<CmpInst>(S.MainOp); 6272 auto *AltCI = cast<CmpInst>(S.AltOp); 6273 CmpInst::Predicate MainP = MainCI->getPredicate(); 6274 CmpInst::Predicate AltP = AltCI->getPredicate(); 6275 assert(MainP != AltP && 6276 "Expected different main/alternate predicates."); 6277 // Collect operands - commute if it uses the swapped predicate or 6278 // alternate operation. 6279 for (Value *V : VL) { 6280 auto *Cmp = cast<CmpInst>(V); 6281 Value *LHS = Cmp->getOperand(0); 6282 Value *RHS = Cmp->getOperand(1); 6283 6284 if (isAlternateInstruction(Cmp, MainCI, AltCI, *TLI)) { 6285 if (AltP == CmpInst::getSwappedPredicate(Cmp->getPredicate())) 6286 std::swap(LHS, RHS); 6287 } else { 6288 if (MainP == CmpInst::getSwappedPredicate(Cmp->getPredicate())) 6289 std::swap(LHS, RHS); 6290 } 6291 Left.push_back(LHS); 6292 Right.push_back(RHS); 6293 } 6294 } 6295 TE->setOperand(0, Left); 6296 TE->setOperand(1, Right); 6297 buildTree_rec(Left, Depth + 1, {TE, 0}); 6298 buildTree_rec(Right, Depth + 1, {TE, 1}); 6299 return; 6300 } 6301 6302 TE->setOperandsInOrder(); 6303 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands())) { 6304 ValueList Operands; 6305 // Prepare the operand vector. 
6306 for (Value *V : VL) 6307 Operands.push_back(cast<Instruction>(V)->getOperand(I)); 6308 6309 buildTree_rec(Operands, Depth + 1, {TE, I}); 6310 } 6311 return; 6312 } 6313 default: 6314 break; 6315 } 6316 llvm_unreachable("Unexpected vectorization of the instructions."); 6317 } 6318 6319 unsigned BoUpSLP::canMapToVector(Type *T) const { 6320 unsigned N = 1; 6321 Type *EltTy = T; 6322 6323 while (isa<StructType, ArrayType, FixedVectorType>(EltTy)) { 6324 if (auto *ST = dyn_cast<StructType>(EltTy)) { 6325 // Check that struct is homogeneous. 6326 for (const auto *Ty : ST->elements()) 6327 if (Ty != *ST->element_begin()) 6328 return 0; 6329 N *= ST->getNumElements(); 6330 EltTy = *ST->element_begin(); 6331 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) { 6332 N *= AT->getNumElements(); 6333 EltTy = AT->getElementType(); 6334 } else { 6335 auto *VT = cast<FixedVectorType>(EltTy); 6336 N *= VT->getNumElements(); 6337 EltTy = VT->getElementType(); 6338 } 6339 } 6340 6341 if (!isValidElementType(EltTy)) 6342 return 0; 6343 uint64_t VTSize = DL->getTypeStoreSizeInBits(FixedVectorType::get(EltTy, N)); 6344 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize || 6345 VTSize != DL->getTypeStoreSizeInBits(T)) 6346 return 0; 6347 return N; 6348 } 6349 6350 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue, 6351 SmallVectorImpl<unsigned> &CurrentOrder, 6352 bool ResizeAllowed) const { 6353 const auto *It = find_if(VL, [](Value *V) { 6354 return isa<ExtractElementInst, ExtractValueInst>(V); 6355 }); 6356 assert(It != VL.end() && "Expected at least one extract instruction."); 6357 auto *E0 = cast<Instruction>(*It); 6358 assert(all_of(VL, 6359 [](Value *V) { 6360 return isa<UndefValue, ExtractElementInst, ExtractValueInst>( 6361 V); 6362 }) && 6363 "Invalid opcode"); 6364 // Check if all of the extracts come from the same vector and from the 6365 // correct offset. 6366 Value *Vec = E0->getOperand(0); 6367 6368 CurrentOrder.clear(); 6369 6370 // We have to extract from a vector/aggregate with the same number of elements. 6371 unsigned NElts; 6372 if (E0->getOpcode() == Instruction::ExtractValue) { 6373 NElts = canMapToVector(Vec->getType()); 6374 if (!NElts) 6375 return false; 6376 // Check if load can be rewritten as load of vector. 6377 LoadInst *LI = dyn_cast<LoadInst>(Vec); 6378 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size())) 6379 return false; 6380 } else { 6381 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); 6382 } 6383 6384 unsigned E = VL.size(); 6385 if (!ResizeAllowed && NElts != E) 6386 return false; 6387 SmallVector<int> Indices(E, PoisonMaskElem); 6388 unsigned MinIdx = NElts, MaxIdx = 0; 6389 for (auto [I, V] : enumerate(VL)) { 6390 auto *Inst = dyn_cast<Instruction>(V); 6391 if (!Inst) 6392 continue; 6393 if (Inst->getOperand(0) != Vec) 6394 return false; 6395 if (auto *EE = dyn_cast<ExtractElementInst>(Inst)) 6396 if (isa<UndefValue>(EE->getIndexOperand())) 6397 continue; 6398 std::optional<unsigned> Idx = getExtractIndex(Inst); 6399 if (!Idx) 6400 return false; 6401 const unsigned ExtIdx = *Idx; 6402 if (ExtIdx >= NElts) 6403 continue; 6404 Indices[I] = ExtIdx; 6405 if (MinIdx > ExtIdx) 6406 MinIdx = ExtIdx; 6407 if (MaxIdx < ExtIdx) 6408 MaxIdx = ExtIdx; 6409 } 6410 if (MaxIdx - MinIdx + 1 > E) 6411 return false; 6412 if (MaxIdx + 1 <= E) 6413 MinIdx = 0; 6414 6415 // Check that all of the indices extract from the correct offset. 
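// For example, if VL extracts lanes 1, 0, 3 and 2 of the source vector, the
// loop below records CurrentOrder = {1, 0, 3, 2} and the bundle is treated
// as a reordered extract sequence; if the lanes are already 0, 1, 2, 3 the
// order is cleared and the extracts can be reused in place.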
6416 bool ShouldKeepOrder = true;
6417 // Assign to all items the initial value E so we can check if the extract
6418 // instruction index was used already.
6419 // Also, later we can check that all the indices are used and we have
6420 // consecutive accesses in the extract instructions, by checking that no
6421 // element of CurrentOrder still has the value E.
6422 CurrentOrder.assign(E, E);
6423 for (unsigned I = 0; I < E; ++I) {
6424 if (Indices[I] == PoisonMaskElem)
6425 continue;
6426 const unsigned ExtIdx = Indices[I] - MinIdx;
6427 if (CurrentOrder[ExtIdx] != E) {
6428 CurrentOrder.clear();
6429 return false;
6430 }
6431 ShouldKeepOrder &= ExtIdx == I;
6432 CurrentOrder[ExtIdx] = I;
6433 }
6434 if (ShouldKeepOrder)
6435 CurrentOrder.clear();
6436
6437 return ShouldKeepOrder;
6438 }
6439
6440 bool BoUpSLP::areAllUsersVectorized(
6441 Instruction *I, const SmallDenseSet<Value *> *VectorizedVals) const {
6442 return (I->hasOneUse() && (!VectorizedVals || VectorizedVals->contains(I))) ||
6443 all_of(I->users(), [this](User *U) {
6444 return ScalarToTreeEntry.contains(U) ||
6445 isVectorLikeInstWithConstOps(U) ||
6446 (isa<ExtractElementInst>(U) && MustGather.contains(U));
6447 });
6448 }
6449
6450 static std::pair<InstructionCost, InstructionCost>
6451 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy,
6452 TargetTransformInfo *TTI, TargetLibraryInfo *TLI) {
6453 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
6454
6455 // Calculate the cost of the scalar and vector calls.
6456 SmallVector<Type *, 4> VecTys;
6457 for (Use &Arg : CI->args())
6458 VecTys.push_back(
6459 FixedVectorType::get(Arg->getType(), VecTy->getNumElements()));
6460 FastMathFlags FMF;
6461 if (auto *FPCI = dyn_cast<FPMathOperator>(CI))
6462 FMF = FPCI->getFastMathFlags();
6463 SmallVector<const Value *> Arguments(CI->args());
6464 IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, VecTys, FMF,
6465 dyn_cast<IntrinsicInst>(CI));
6466 auto IntrinsicCost =
6467 TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput);
6468
6469 auto Shape = VFShape::get(CI->getFunctionType(),
6470 ElementCount::getFixed(VecTy->getNumElements()),
6471 false /*HasGlobalPred*/);
6472 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
6473 auto LibCost = IntrinsicCost;
6474 if (!CI->isNoBuiltin() && VecFunc) {
6475 // Calculate the cost of the vector library call.
6476 // If the corresponding vector call is cheaper, return its cost.
6477 LibCost = TTI->getCallInstrCost(nullptr, VecTy, VecTys, 6478 TTI::TCK_RecipThroughput); 6479 } 6480 return {IntrinsicCost, LibCost}; 6481 } 6482 6483 void BoUpSLP::TreeEntry::buildAltOpShuffleMask( 6484 const function_ref<bool(Instruction *)> IsAltOp, SmallVectorImpl<int> &Mask, 6485 SmallVectorImpl<Value *> *OpScalars, 6486 SmallVectorImpl<Value *> *AltScalars) const { 6487 unsigned Sz = Scalars.size(); 6488 Mask.assign(Sz, PoisonMaskElem); 6489 SmallVector<int> OrderMask; 6490 if (!ReorderIndices.empty()) 6491 inversePermutation(ReorderIndices, OrderMask); 6492 for (unsigned I = 0; I < Sz; ++I) { 6493 unsigned Idx = I; 6494 if (!ReorderIndices.empty()) 6495 Idx = OrderMask[I]; 6496 auto *OpInst = cast<Instruction>(Scalars[Idx]); 6497 if (IsAltOp(OpInst)) { 6498 Mask[I] = Sz + Idx; 6499 if (AltScalars) 6500 AltScalars->push_back(OpInst); 6501 } else { 6502 Mask[I] = Idx; 6503 if (OpScalars) 6504 OpScalars->push_back(OpInst); 6505 } 6506 } 6507 if (!ReuseShuffleIndices.empty()) { 6508 SmallVector<int> NewMask(ReuseShuffleIndices.size(), PoisonMaskElem); 6509 transform(ReuseShuffleIndices, NewMask.begin(), [&Mask](int Idx) { 6510 return Idx != PoisonMaskElem ? Mask[Idx] : PoisonMaskElem; 6511 }); 6512 Mask.swap(NewMask); 6513 } 6514 } 6515 6516 static bool isAlternateInstruction(const Instruction *I, 6517 const Instruction *MainOp, 6518 const Instruction *AltOp, 6519 const TargetLibraryInfo &TLI) { 6520 if (auto *MainCI = dyn_cast<CmpInst>(MainOp)) { 6521 auto *AltCI = cast<CmpInst>(AltOp); 6522 CmpInst::Predicate MainP = MainCI->getPredicate(); 6523 CmpInst::Predicate AltP = AltCI->getPredicate(); 6524 assert(MainP != AltP && "Expected different main/alternate predicates."); 6525 auto *CI = cast<CmpInst>(I); 6526 if (isCmpSameOrSwapped(MainCI, CI, TLI)) 6527 return false; 6528 if (isCmpSameOrSwapped(AltCI, CI, TLI)) 6529 return true; 6530 CmpInst::Predicate P = CI->getPredicate(); 6531 CmpInst::Predicate SwappedP = CmpInst::getSwappedPredicate(P); 6532 6533 assert((MainP == P || AltP == P || MainP == SwappedP || AltP == SwappedP) && 6534 "CmpInst expected to match either main or alternate predicate or " 6535 "their swap."); 6536 (void)AltP; 6537 return MainP != P && MainP != SwappedP; 6538 } 6539 return I->getOpcode() == AltOp->getOpcode(); 6540 } 6541 6542 TTI::OperandValueInfo BoUpSLP::getOperandInfo(ArrayRef<Value *> Ops) { 6543 assert(!Ops.empty()); 6544 const auto *Op0 = Ops.front(); 6545 6546 const bool IsConstant = all_of(Ops, [](Value *V) { 6547 // TODO: We should allow undef elements here 6548 return isConstant(V) && !isa<UndefValue>(V); 6549 }); 6550 const bool IsUniform = all_of(Ops, [=](Value *V) { 6551 // TODO: We should allow undef elements here 6552 return V == Op0; 6553 }); 6554 const bool IsPowerOfTwo = all_of(Ops, [](Value *V) { 6555 // TODO: We should allow undef elements here 6556 if (auto *CI = dyn_cast<ConstantInt>(V)) 6557 return CI->getValue().isPowerOf2(); 6558 return false; 6559 }); 6560 const bool IsNegatedPowerOfTwo = all_of(Ops, [](Value *V) { 6561 // TODO: We should allow undef elements here 6562 if (auto *CI = dyn_cast<ConstantInt>(V)) 6563 return CI->getValue().isNegatedPowerOf2(); 6564 return false; 6565 }); 6566 6567 TTI::OperandValueKind VK = TTI::OK_AnyValue; 6568 if (IsConstant && IsUniform) 6569 VK = TTI::OK_UniformConstantValue; 6570 else if (IsConstant) 6571 VK = TTI::OK_NonUniformConstantValue; 6572 else if (IsUniform) 6573 VK = TTI::OK_UniformValue; 6574 6575 TTI::OperandValueProperties VP = TTI::OP_None; 6576 VP = IsPowerOfTwo ? 
TTI::OP_PowerOf2 : VP; 6577 VP = IsNegatedPowerOfTwo ? TTI::OP_NegatedPowerOf2 : VP; 6578 6579 return {VK, VP}; 6580 } 6581 6582 namespace { 6583 /// The base class for shuffle instruction emission and shuffle cost estimation. 6584 class BaseShuffleAnalysis { 6585 protected: 6586 /// Checks if the mask is an identity mask. 6587 /// \param IsStrict if is true the function returns false if mask size does 6588 /// not match vector size. 6589 static bool isIdentityMask(ArrayRef<int> Mask, const FixedVectorType *VecTy, 6590 bool IsStrict) { 6591 int Limit = Mask.size(); 6592 int VF = VecTy->getNumElements(); 6593 int Index = -1; 6594 if (VF == Limit && ShuffleVectorInst::isIdentityMask(Mask, Limit)) 6595 return true; 6596 if (!IsStrict) { 6597 // Consider extract subvector starting from index 0. 6598 if (ShuffleVectorInst::isExtractSubvectorMask(Mask, VF, Index) && 6599 Index == 0) 6600 return true; 6601 // All VF-size submasks are identity (e.g. 6602 // <poison,poison,poison,poison,0,1,2,poison,poison,1,2,3> etc. for VF 4). 6603 if (Limit % VF == 0 && all_of(seq<int>(0, Limit / VF), [=](int Idx) { 6604 ArrayRef<int> Slice = Mask.slice(Idx * VF, VF); 6605 return all_of(Slice, [](int I) { return I == PoisonMaskElem; }) || 6606 ShuffleVectorInst::isIdentityMask(Slice, VF); 6607 })) 6608 return true; 6609 } 6610 return false; 6611 } 6612 6613 /// Tries to combine 2 different masks into single one. 6614 /// \param LocalVF Vector length of the permuted input vector. \p Mask may 6615 /// change the size of the vector, \p LocalVF is the original size of the 6616 /// shuffled vector. 6617 static void combineMasks(unsigned LocalVF, SmallVectorImpl<int> &Mask, 6618 ArrayRef<int> ExtMask) { 6619 unsigned VF = Mask.size(); 6620 SmallVector<int> NewMask(ExtMask.size(), PoisonMaskElem); 6621 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) { 6622 if (ExtMask[I] == PoisonMaskElem) 6623 continue; 6624 int MaskedIdx = Mask[ExtMask[I] % VF]; 6625 NewMask[I] = 6626 MaskedIdx == PoisonMaskElem ? PoisonMaskElem : MaskedIdx % LocalVF; 6627 } 6628 Mask.swap(NewMask); 6629 } 6630 6631 /// Looks through shuffles trying to reduce final number of shuffles in the 6632 /// code. The function looks through the previously emitted shuffle 6633 /// instructions and properly mark indices in mask as undef. 6634 /// For example, given the code 6635 /// \code 6636 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0> 6637 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0> 6638 /// \endcode 6639 /// and if need to emit shuffle of %s1 and %s2 with mask <1, 0, 3, 2>, it will 6640 /// look through %s1 and %s2 and select vectors %0 and %1 with mask 6641 /// <0, 1, 2, 3> for the shuffle. 6642 /// If 2 operands are of different size, the smallest one will be resized and 6643 /// the mask recalculated properly. 6644 /// For example, given the code 6645 /// \code 6646 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0, 1, 0> 6647 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0, 1, 0> 6648 /// \endcode 6649 /// and if need to emit shuffle of %s1 and %s2 with mask <1, 0, 5, 4>, it will 6650 /// look through %s1 and %s2 and select vectors %0 and %1 with mask 6651 /// <0, 1, 2, 3> for the shuffle. 6652 /// So, it tries to transform permutations to simple vector merge, if 6653 /// possible. 6654 /// \param V The input vector which must be shuffled using the given \p Mask. 6655 /// If the better candidate is found, \p V is set to this best candidate 6656 /// vector. 6657 /// \param Mask The input mask for the shuffle. 
If the best candidate is found
6658 /// during the look-through-shuffles attempt, it is updated accordingly.
6659 /// \param SinglePermute true if the shuffle operation is originally a
6660 /// single-value-permutation. In this case the look-through-shuffles procedure
6661 /// may look for resizing shuffles as the best candidates.
6662 /// \return true if the shuffle results in the non-resizing identity shuffle
6663 /// (and thus can be ignored), false - otherwise.
6664 static bool peekThroughShuffles(Value *&V, SmallVectorImpl<int> &Mask,
6665 bool SinglePermute) {
6666 Value *Op = V;
6667 ShuffleVectorInst *IdentityOp = nullptr;
6668 SmallVector<int> IdentityMask;
6669 while (auto *SV = dyn_cast<ShuffleVectorInst>(Op)) {
6670 // Exit if not a fixed vector type or a size-changing shuffle.
6671 auto *SVTy = dyn_cast<FixedVectorType>(SV->getType());
6672 if (!SVTy)
6673 break;
6674 // Remember the identity or broadcast mask, if it is not a resizing
6675 // shuffle. If no better candidates are found, this Op and Mask will be
6676 // used in the final shuffle.
6677 if (isIdentityMask(Mask, SVTy, /*IsStrict=*/false)) {
6678 if (!IdentityOp || !SinglePermute ||
6679 (isIdentityMask(Mask, SVTy, /*IsStrict=*/true) &&
6680 !ShuffleVectorInst::isZeroEltSplatMask(IdentityMask,
6681 IdentityMask.size()))) {
6682 IdentityOp = SV;
6683 // Store the current mask in IdentityMask so that we do not lose this
6684 // info if IdentityOp is later selected as the best candidate for the
6685 // permutation.
6686 IdentityMask.assign(Mask);
6687 }
6688 }
6689 // Remember the broadcast mask. If no better candidates are found, this Op
6690 // and Mask will be used in the final shuffle.
6691 // A zero splat can be used as an identity too, since it might be used with
6692 // mask <0, 1, 2, ...>, i.e. an identity mask without extra reshuffling.
6693 // E.g. if we need to shuffle the vector with the mask <3, 1, 2, 0>, which is
6694 // expensive, and the analysis finds out that the source vector is just a
6695 // broadcast, this original mask can be transformed to the identity mask
6696 // <0, 1, 2, 3>.
6697 // \code
6698 // %0 = shuffle %v, poison, zeroinitializer
6699 // %res = shuffle %0, poison, <3, 1, 2, 0>
6700 // \endcode
6701 // may be transformed to
6702 // \code
6703 // %0 = shuffle %v, poison, zeroinitializer
6704 // %res = shuffle %0, poison, <0, 1, 2, 3>
6705 // \endcode
6706 if (SV->isZeroEltSplat()) {
6707 IdentityOp = SV;
6708 IdentityMask.assign(Mask);
6709 }
6710 int LocalVF = Mask.size();
6711 if (auto *SVOpTy =
6712 dyn_cast<FixedVectorType>(SV->getOperand(0)->getType()))
6713 LocalVF = SVOpTy->getNumElements();
6714 SmallVector<int> ExtMask(Mask.size(), PoisonMaskElem);
6715 for (auto [Idx, I] : enumerate(Mask)) {
6716 if (I == PoisonMaskElem ||
6717 static_cast<unsigned>(I) >= SV->getShuffleMask().size())
6718 continue;
6719 ExtMask[Idx] = SV->getMaskValue(I);
6720 }
6721 bool IsOp1Undef =
6722 isUndefVector(SV->getOperand(0),
6723 buildUseMask(LocalVF, ExtMask, UseMask::FirstArg))
6724 .all();
6725 bool IsOp2Undef =
6726 isUndefVector(SV->getOperand(1),
6727 buildUseMask(LocalVF, ExtMask, UseMask::SecondArg))
6728 .all();
6729 if (!IsOp1Undef && !IsOp2Undef) {
6730 // Update mask and mark undef elems.
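// Both operands of SV are really used, so we cannot step through it; we only
// propagate poison lanes from SV's own mask into Mask and stop. E.g. if SV's
// mask is <0, poison, 2, 3>, every element of Mask that selects lane 1
// becomes poison.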
6731 for (int &I : Mask) { 6732 if (I == PoisonMaskElem) 6733 continue; 6734 if (SV->getMaskValue(I % SV->getShuffleMask().size()) == 6735 PoisonMaskElem) 6736 I = PoisonMaskElem; 6737 } 6738 break; 6739 } 6740 SmallVector<int> ShuffleMask(SV->getShuffleMask().begin(), 6741 SV->getShuffleMask().end()); 6742 combineMasks(LocalVF, ShuffleMask, Mask); 6743 Mask.swap(ShuffleMask); 6744 if (IsOp2Undef) 6745 Op = SV->getOperand(0); 6746 else 6747 Op = SV->getOperand(1); 6748 } 6749 if (auto *OpTy = dyn_cast<FixedVectorType>(Op->getType()); 6750 !OpTy || !isIdentityMask(Mask, OpTy, SinglePermute) || 6751 ShuffleVectorInst::isZeroEltSplatMask(Mask, Mask.size())) { 6752 if (IdentityOp) { 6753 V = IdentityOp; 6754 assert(Mask.size() == IdentityMask.size() && 6755 "Expected masks of same sizes."); 6756 // Clear known poison elements. 6757 for (auto [I, Idx] : enumerate(Mask)) 6758 if (Idx == PoisonMaskElem) 6759 IdentityMask[I] = PoisonMaskElem; 6760 Mask.swap(IdentityMask); 6761 auto *Shuffle = dyn_cast<ShuffleVectorInst>(V); 6762 return SinglePermute && 6763 (isIdentityMask(Mask, cast<FixedVectorType>(V->getType()), 6764 /*IsStrict=*/true) || 6765 (Shuffle && Mask.size() == Shuffle->getShuffleMask().size() && 6766 Shuffle->isZeroEltSplat() && 6767 ShuffleVectorInst::isZeroEltSplatMask(Mask, Mask.size()))); 6768 } 6769 V = Op; 6770 return false; 6771 } 6772 V = Op; 6773 return true; 6774 } 6775 6776 /// Smart shuffle instruction emission, walks through shuffles trees and 6777 /// tries to find the best matching vector for the actual shuffle 6778 /// instruction. 6779 template <typename T, typename ShuffleBuilderTy> 6780 static T createShuffle(Value *V1, Value *V2, ArrayRef<int> Mask, 6781 ShuffleBuilderTy &Builder) { 6782 assert(V1 && "Expected at least one vector value."); 6783 if (V2) 6784 Builder.resizeToMatch(V1, V2); 6785 int VF = Mask.size(); 6786 if (auto *FTy = dyn_cast<FixedVectorType>(V1->getType())) 6787 VF = FTy->getNumElements(); 6788 if (V2 && 6789 !isUndefVector(V2, buildUseMask(VF, Mask, UseMask::SecondArg)).all()) { 6790 // Peek through shuffles. 6791 Value *Op1 = V1; 6792 Value *Op2 = V2; 6793 int VF = 6794 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue(); 6795 SmallVector<int> CombinedMask1(Mask.size(), PoisonMaskElem); 6796 SmallVector<int> CombinedMask2(Mask.size(), PoisonMaskElem); 6797 for (int I = 0, E = Mask.size(); I < E; ++I) { 6798 if (Mask[I] < VF) 6799 CombinedMask1[I] = Mask[I]; 6800 else 6801 CombinedMask2[I] = Mask[I] - VF; 6802 } 6803 Value *PrevOp1; 6804 Value *PrevOp2; 6805 do { 6806 PrevOp1 = Op1; 6807 PrevOp2 = Op2; 6808 (void)peekThroughShuffles(Op1, CombinedMask1, /*SinglePermute=*/false); 6809 (void)peekThroughShuffles(Op2, CombinedMask2, /*SinglePermute=*/false); 6810 // Check if we have 2 resizing shuffles - need to peek through operands 6811 // again. 
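// E.g. both combined operands widen <2 x ty> sources:
//   %s1 = shufflevector <2 x ty> %a, poison, <0, 1, poison, poison>
//   %s2 = shufflevector <2 x ty> %b, poison, <0, 1, poison, poison>
// in which case Op1/Op2 can be narrowed to %a and %b and the masks
// recombined for the smaller sources.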
6812 if (auto *SV1 = dyn_cast<ShuffleVectorInst>(Op1)) 6813 if (auto *SV2 = dyn_cast<ShuffleVectorInst>(Op2)) { 6814 SmallVector<int> ExtMask1(Mask.size(), PoisonMaskElem); 6815 for (auto [Idx, I] : enumerate(CombinedMask1)) { 6816 if (I == PoisonMaskElem) 6817 continue; 6818 ExtMask1[Idx] = SV1->getMaskValue(I); 6819 } 6820 SmallBitVector UseMask1 = buildUseMask( 6821 cast<FixedVectorType>(SV1->getOperand(1)->getType()) 6822 ->getNumElements(), 6823 ExtMask1, UseMask::SecondArg); 6824 SmallVector<int> ExtMask2(CombinedMask2.size(), PoisonMaskElem); 6825 for (auto [Idx, I] : enumerate(CombinedMask2)) { 6826 if (I == PoisonMaskElem) 6827 continue; 6828 ExtMask2[Idx] = SV2->getMaskValue(I); 6829 } 6830 SmallBitVector UseMask2 = buildUseMask( 6831 cast<FixedVectorType>(SV2->getOperand(1)->getType()) 6832 ->getNumElements(), 6833 ExtMask2, UseMask::SecondArg); 6834 if (SV1->getOperand(0)->getType() == 6835 SV2->getOperand(0)->getType() && 6836 SV1->getOperand(0)->getType() != SV1->getType() && 6837 isUndefVector(SV1->getOperand(1), UseMask1).all() && 6838 isUndefVector(SV2->getOperand(1), UseMask2).all()) { 6839 Op1 = SV1->getOperand(0); 6840 Op2 = SV2->getOperand(0); 6841 SmallVector<int> ShuffleMask1(SV1->getShuffleMask().begin(), 6842 SV1->getShuffleMask().end()); 6843 int LocalVF = ShuffleMask1.size(); 6844 if (auto *FTy = dyn_cast<FixedVectorType>(Op1->getType())) 6845 LocalVF = FTy->getNumElements(); 6846 combineMasks(LocalVF, ShuffleMask1, CombinedMask1); 6847 CombinedMask1.swap(ShuffleMask1); 6848 SmallVector<int> ShuffleMask2(SV2->getShuffleMask().begin(), 6849 SV2->getShuffleMask().end()); 6850 LocalVF = ShuffleMask2.size(); 6851 if (auto *FTy = dyn_cast<FixedVectorType>(Op2->getType())) 6852 LocalVF = FTy->getNumElements(); 6853 combineMasks(LocalVF, ShuffleMask2, CombinedMask2); 6854 CombinedMask2.swap(ShuffleMask2); 6855 } 6856 } 6857 } while (PrevOp1 != Op1 || PrevOp2 != Op2); 6858 Builder.resizeToMatch(Op1, Op2); 6859 VF = std::max(cast<VectorType>(Op1->getType()) 6860 ->getElementCount() 6861 .getKnownMinValue(), 6862 cast<VectorType>(Op2->getType()) 6863 ->getElementCount() 6864 .getKnownMinValue()); 6865 for (int I = 0, E = Mask.size(); I < E; ++I) { 6866 if (CombinedMask2[I] != PoisonMaskElem) { 6867 assert(CombinedMask1[I] == PoisonMaskElem && 6868 "Expected undefined mask element"); 6869 CombinedMask1[I] = CombinedMask2[I] + (Op1 == Op2 ? 0 : VF); 6870 } 6871 } 6872 if (Op1 == Op2 && 6873 (ShuffleVectorInst::isIdentityMask(CombinedMask1, VF) || 6874 (ShuffleVectorInst::isZeroEltSplatMask(CombinedMask1, VF) && 6875 isa<ShuffleVectorInst>(Op1) && 6876 cast<ShuffleVectorInst>(Op1)->getShuffleMask() == 6877 ArrayRef(CombinedMask1)))) 6878 return Builder.createIdentity(Op1); 6879 return Builder.createShuffleVector( 6880 Op1, Op1 == Op2 ? PoisonValue::get(Op1->getType()) : Op2, 6881 CombinedMask1); 6882 } 6883 if (isa<PoisonValue>(V1)) 6884 return Builder.createPoison( 6885 cast<VectorType>(V1->getType())->getElementType(), Mask.size()); 6886 SmallVector<int> NewMask(Mask.begin(), Mask.end()); 6887 bool IsIdentity = peekThroughShuffles(V1, NewMask, /*SinglePermute=*/true); 6888 assert(V1 && "Expected non-null value after looking through shuffles."); 6889 6890 if (!IsIdentity) 6891 return Builder.createShuffleVector(V1, NewMask); 6892 return Builder.createIdentity(V1); 6893 } 6894 }; 6895 } // namespace 6896 6897 /// Merges shuffle masks and emits final shuffle instruction, if required. It 6898 /// supports shuffling of 2 input vectors. 
It implements lazy shuffles emission, 6899 /// when the actual shuffle instruction is generated only if this is actually 6900 /// required. Otherwise, the shuffle instruction emission is delayed till the 6901 /// end of the process, to reduce the number of emitted instructions and further 6902 /// analysis/transformations. 6903 class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis { 6904 bool IsFinalized = false; 6905 SmallVector<int> CommonMask; 6906 SmallVector<PointerUnion<Value *, const TreeEntry *>, 2> InVectors; 6907 const TargetTransformInfo &TTI; 6908 InstructionCost Cost = 0; 6909 SmallDenseSet<Value *> VectorizedVals; 6910 BoUpSLP &R; 6911 SmallPtrSetImpl<Value *> &CheckedExtracts; 6912 constexpr static TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6913 /// While set, still trying to estimate the cost for the same nodes and we 6914 /// can delay actual cost estimation (virtual shuffle instruction emission). 6915 /// May help better estimate the cost if same nodes must be permuted + allows 6916 /// to move most of the long shuffles cost estimation to TTI. 6917 bool SameNodesEstimated = true; 6918 6919 static Constant *getAllOnesValue(const DataLayout &DL, Type *Ty) { 6920 if (Ty->getScalarType()->isPointerTy()) { 6921 Constant *Res = ConstantExpr::getIntToPtr( 6922 ConstantInt::getAllOnesValue( 6923 IntegerType::get(Ty->getContext(), 6924 DL.getTypeStoreSizeInBits(Ty->getScalarType()))), 6925 Ty->getScalarType()); 6926 if (auto *VTy = dyn_cast<VectorType>(Ty)) 6927 Res = ConstantVector::getSplat(VTy->getElementCount(), Res); 6928 return Res; 6929 } 6930 return Constant::getAllOnesValue(Ty); 6931 } 6932 6933 InstructionCost getBuildVectorCost(ArrayRef<Value *> VL, Value *Root) { 6934 if ((!Root && allConstant(VL)) || all_of(VL, UndefValue::classof)) 6935 return TTI::TCC_Free; 6936 auto *VecTy = FixedVectorType::get(VL.front()->getType(), VL.size()); 6937 InstructionCost GatherCost = 0; 6938 SmallVector<Value *> Gathers(VL.begin(), VL.end()); 6939 // Improve gather cost for gather of loads, if we can group some of the 6940 // loads into vector loads. 
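// E.g. when gathering eight scalars of which four are consecutive loads, the
// block below prices those four as one <4 x Ty> vector load (plus a subvector
// insert where needed) instead of four scalar loads feeding insertelements.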
6941 InstructionsState S = getSameOpcode(VL, *R.TLI); 6942 const unsigned Sz = R.DL->getTypeSizeInBits(VL.front()->getType()); 6943 unsigned MinVF = R.getMinVF(2 * Sz); 6944 if (VL.size() > 2 && 6945 ((S.getOpcode() == Instruction::Load && !S.isAltShuffle()) || 6946 (InVectors.empty() && 6947 any_of(seq<unsigned>(0, VL.size() / MinVF), 6948 [&](unsigned Idx) { 6949 ArrayRef<Value *> SubVL = VL.slice(Idx * MinVF, MinVF); 6950 InstructionsState S = getSameOpcode(SubVL, *R.TLI); 6951 return S.getOpcode() == Instruction::Load && 6952 !S.isAltShuffle(); 6953 }))) && 6954 !all_of(Gathers, [&](Value *V) { return R.getTreeEntry(V); }) && 6955 !isSplat(Gathers)) { 6956 SetVector<Value *> VectorizedLoads; 6957 SmallVector<LoadInst *> VectorizedStarts; 6958 SmallVector<std::pair<unsigned, unsigned>> ScatterVectorized; 6959 unsigned StartIdx = 0; 6960 unsigned VF = VL.size() / 2; 6961 for (; VF >= MinVF; VF /= 2) { 6962 for (unsigned Cnt = StartIdx, End = VL.size(); Cnt + VF <= End; 6963 Cnt += VF) { 6964 ArrayRef<Value *> Slice = VL.slice(Cnt, VF); 6965 if (S.getOpcode() != Instruction::Load || S.isAltShuffle()) { 6966 InstructionsState SliceS = getSameOpcode(Slice, *R.TLI); 6967 if (SliceS.getOpcode() != Instruction::Load || 6968 SliceS.isAltShuffle()) 6969 continue; 6970 } 6971 if (!VectorizedLoads.count(Slice.front()) && 6972 !VectorizedLoads.count(Slice.back()) && allSameBlock(Slice)) { 6973 SmallVector<Value *> PointerOps; 6974 OrdersType CurrentOrder; 6975 LoadsState LS = 6976 canVectorizeLoads(Slice, Slice.front(), TTI, *R.DL, *R.SE, 6977 *R.LI, *R.TLI, CurrentOrder, PointerOps); 6978 switch (LS) { 6979 case LoadsState::Vectorize: 6980 case LoadsState::ScatterVectorize: 6981 case LoadsState::PossibleStridedVectorize: 6982 // Mark the vectorized loads so that we don't vectorize them 6983 // again. 6984 // TODO: better handling of loads with reorders. 6985 if (LS == LoadsState::Vectorize && CurrentOrder.empty()) 6986 VectorizedStarts.push_back(cast<LoadInst>(Slice.front())); 6987 else 6988 ScatterVectorized.emplace_back(Cnt, VF); 6989 VectorizedLoads.insert(Slice.begin(), Slice.end()); 6990 // If we vectorized initial block, no need to try to vectorize 6991 // it again. 6992 if (Cnt == StartIdx) 6993 StartIdx += VF; 6994 break; 6995 case LoadsState::Gather: 6996 break; 6997 } 6998 } 6999 } 7000 // Check if the whole array was vectorized already - exit. 7001 if (StartIdx >= VL.size()) 7002 break; 7003 // Found vectorizable parts - exit. 7004 if (!VectorizedLoads.empty()) 7005 break; 7006 } 7007 if (!VectorizedLoads.empty()) { 7008 unsigned NumParts = TTI.getNumberOfParts(VecTy); 7009 bool NeedInsertSubvectorAnalysis = 7010 !NumParts || (VL.size() / VF) > NumParts; 7011 // Get the cost for gathered loads. 7012 for (unsigned I = 0, End = VL.size(); I < End; I += VF) { 7013 if (VectorizedLoads.contains(VL[I])) 7014 continue; 7015 GatherCost += getBuildVectorCost(VL.slice(I, VF), Root); 7016 } 7017 // Exclude potentially vectorized loads from list of gathered 7018 // scalars. 7019 Gathers.assign(Gathers.size(), PoisonValue::get(VL.front()->getType())); 7020 // The cost for vectorized loads. 
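// The scalar loads grouped above are assumed to disappear, so their cost is
// accumulated here and subtracted from GatherCost at the end of this block;
// only the vector/gather loads and any subvector inserts remain in the
// estimate.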
7021 InstructionCost ScalarsCost = 0; 7022 for (Value *V : VectorizedLoads) { 7023 auto *LI = cast<LoadInst>(V); 7024 ScalarsCost += 7025 TTI.getMemoryOpCost(Instruction::Load, LI->getType(), 7026 LI->getAlign(), LI->getPointerAddressSpace(), 7027 CostKind, TTI::OperandValueInfo(), LI); 7028 } 7029 auto *LoadTy = FixedVectorType::get(VL.front()->getType(), VF); 7030 for (LoadInst *LI : VectorizedStarts) { 7031 Align Alignment = LI->getAlign(); 7032 GatherCost += 7033 TTI.getMemoryOpCost(Instruction::Load, LoadTy, Alignment, 7034 LI->getPointerAddressSpace(), CostKind, 7035 TTI::OperandValueInfo(), LI); 7036 } 7037 for (std::pair<unsigned, unsigned> P : ScatterVectorized) { 7038 auto *LI0 = cast<LoadInst>(VL[P.first]); 7039 Align CommonAlignment = LI0->getAlign(); 7040 for (Value *V : VL.slice(P.first + 1, VF - 1)) 7041 CommonAlignment = 7042 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 7043 GatherCost += TTI.getGatherScatterOpCost( 7044 Instruction::Load, LoadTy, LI0->getPointerOperand(), 7045 /*VariableMask=*/false, CommonAlignment, CostKind, LI0); 7046 } 7047 if (NeedInsertSubvectorAnalysis) { 7048 // Add the cost for the subvectors insert. 7049 for (int I = VF, E = VL.size(); I < E; I += VF) 7050 GatherCost += TTI.getShuffleCost(TTI::SK_InsertSubvector, VecTy, 7051 std::nullopt, CostKind, I, LoadTy); 7052 } 7053 GatherCost -= ScalarsCost; 7054 } 7055 } else if (!Root && isSplat(VL)) { 7056 // Found the broadcasting of the single scalar, calculate the cost as 7057 // the broadcast. 7058 const auto *It = 7059 find_if(VL, [](Value *V) { return !isa<UndefValue>(V); }); 7060 assert(It != VL.end() && "Expected at least one non-undef value."); 7061 // Add broadcast for non-identity shuffle only. 7062 bool NeedShuffle = 7063 count(VL, *It) > 1 && 7064 (VL.front() != *It || !all_of(VL.drop_front(), UndefValue::classof)); 7065 InstructionCost InsertCost = TTI.getVectorInstrCost( 7066 Instruction::InsertElement, VecTy, CostKind, 7067 NeedShuffle ? 0 : std::distance(VL.begin(), It), 7068 PoisonValue::get(VecTy), *It); 7069 return InsertCost + 7070 (NeedShuffle ? TTI.getShuffleCost( 7071 TargetTransformInfo::SK_Broadcast, VecTy, 7072 /*Mask=*/std::nullopt, CostKind, /*Index=*/0, 7073 /*SubTp=*/nullptr, /*Args=*/*It) 7074 : TTI::TCC_Free); 7075 } 7076 return GatherCost + 7077 (all_of(Gathers, UndefValue::classof) 7078 ? TTI::TCC_Free 7079 : R.getGatherCost(Gathers, !Root && VL.equals(Gathers))); 7080 }; 7081 7082 /// Compute the cost of creating a vector containing the extracted values from 7083 /// \p VL. 7084 InstructionCost 7085 computeExtractCost(ArrayRef<Value *> VL, ArrayRef<int> Mask, 7086 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds, 7087 unsigned NumParts) { 7088 assert(VL.size() > NumParts && "Unexpected scalarized shuffle."); 7089 unsigned NumElts = 7090 std::accumulate(VL.begin(), VL.end(), 0, [](unsigned Sz, Value *V) { 7091 auto *EE = dyn_cast<ExtractElementInst>(V); 7092 if (!EE) 7093 return Sz; 7094 auto *VecTy = cast<FixedVectorType>(EE->getVectorOperandType()); 7095 return std::max(Sz, VecTy->getNumElements()); 7096 }); 7097 unsigned NumSrcRegs = TTI.getNumberOfParts( 7098 FixedVectorType::get(VL.front()->getType(), NumElts)); 7099 if (NumSrcRegs == 0) 7100 NumSrcRegs = 1; 7101 // FIXME: this must be moved to TTI for better estimation. 
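// E.g. with 8 scalars (VL.size() == 8), NumParts == 2, a widest extract
// source of NumElts == 8 elements spanning NumSrcRegs == 2 registers, both
// ratios are 4, so the mask below is analyzed in slices of
// EltsPerVector == 4 elements.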
7102 unsigned EltsPerVector = PowerOf2Ceil(std::max( 7103 divideCeil(VL.size(), NumParts), divideCeil(NumElts, NumSrcRegs))); 7104 auto CheckPerRegistersShuffle = 7105 [&](MutableArrayRef<int> Mask) -> std::optional<TTI::ShuffleKind> { 7106 DenseSet<int> RegIndices; 7107 // Check that if trying to permute same single/2 input vectors. 7108 TTI::ShuffleKind ShuffleKind = TTI::SK_PermuteSingleSrc; 7109 int FirstRegId = -1; 7110 for (int &I : Mask) { 7111 if (I == PoisonMaskElem) 7112 continue; 7113 int RegId = (I / NumElts) * NumParts + (I % NumElts) / EltsPerVector; 7114 if (FirstRegId < 0) 7115 FirstRegId = RegId; 7116 RegIndices.insert(RegId); 7117 if (RegIndices.size() > 2) 7118 return std::nullopt; 7119 if (RegIndices.size() == 2) 7120 ShuffleKind = TTI::SK_PermuteTwoSrc; 7121 I = (I % NumElts) % EltsPerVector + 7122 (RegId == FirstRegId ? 0 : EltsPerVector); 7123 } 7124 return ShuffleKind; 7125 }; 7126 InstructionCost Cost = 0; 7127 7128 // Process extracts in blocks of EltsPerVector to check if the source vector 7129 // operand can be re-used directly. If not, add the cost of creating a 7130 // shuffle to extract the values into a vector register. 7131 for (unsigned Part = 0; Part < NumParts; ++Part) { 7132 if (!ShuffleKinds[Part]) 7133 continue; 7134 ArrayRef<int> MaskSlice = 7135 Mask.slice(Part * EltsPerVector, 7136 (Part == NumParts - 1 && Mask.size() % EltsPerVector != 0) 7137 ? Mask.size() % EltsPerVector 7138 : EltsPerVector); 7139 SmallVector<int> SubMask(EltsPerVector, PoisonMaskElem); 7140 copy(MaskSlice, SubMask.begin()); 7141 std::optional<TTI::ShuffleKind> RegShuffleKind = 7142 CheckPerRegistersShuffle(SubMask); 7143 if (!RegShuffleKind) { 7144 Cost += TTI.getShuffleCost( 7145 *ShuffleKinds[Part], 7146 FixedVectorType::get(VL.front()->getType(), NumElts), MaskSlice); 7147 continue; 7148 } 7149 if (*RegShuffleKind != TTI::SK_PermuteSingleSrc || 7150 !ShuffleVectorInst::isIdentityMask(SubMask, EltsPerVector)) { 7151 Cost += TTI.getShuffleCost( 7152 *RegShuffleKind, 7153 FixedVectorType::get(VL.front()->getType(), EltsPerVector), 7154 SubMask); 7155 } 7156 } 7157 return Cost; 7158 } 7159 /// Transforms mask \p CommonMask per given \p Mask to make proper set after 7160 /// shuffle emission. 7161 static void transformMaskAfterShuffle(MutableArrayRef<int> CommonMask, 7162 ArrayRef<int> Mask) { 7163 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 7164 if (Mask[Idx] != PoisonMaskElem) 7165 CommonMask[Idx] = Idx; 7166 } 7167 /// Adds the cost of reshuffling \p E1 and \p E2 (if present), using given 7168 /// mask \p Mask, register number \p Part, that includes \p SliceSize 7169 /// elements. 7170 void estimateNodesPermuteCost(const TreeEntry &E1, const TreeEntry *E2, 7171 ArrayRef<int> Mask, unsigned Part, 7172 unsigned SliceSize) { 7173 if (SameNodesEstimated) { 7174 // Delay the cost estimation if the same nodes are reshuffling. 7175 // If we already requested the cost of reshuffling of E1 and E2 before, no 7176 // need to estimate another cost with the sub-Mask, instead include this 7177 // sub-Mask into the CommonMask to estimate it later and avoid double cost 7178 // estimation. 
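// E.g. two 4-element parts shuffling the same E1/E2 pair are costed as one
// 8-element permute of E1 and E2 later, rather than as two independent
// 4-element permutes.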
7179 if ((InVectors.size() == 2 && 7180 InVectors.front().get<const TreeEntry *>() == &E1 && 7181 InVectors.back().get<const TreeEntry *>() == E2) || 7182 (!E2 && InVectors.front().get<const TreeEntry *>() == &E1)) { 7183 assert(all_of(ArrayRef(CommonMask).slice(Part * SliceSize, SliceSize), 7184 [](int Idx) { return Idx == PoisonMaskElem; }) && 7185 "Expected all poisoned elements."); 7186 ArrayRef<int> SubMask = 7187 ArrayRef(Mask).slice(Part * SliceSize, SliceSize); 7188 copy(SubMask, std::next(CommonMask.begin(), SliceSize * Part)); 7189 return; 7190 } 7191 // Found non-matching nodes - need to estimate the cost for the matched 7192 // and transform mask. 7193 Cost += createShuffle(InVectors.front(), 7194 InVectors.size() == 1 ? nullptr : InVectors.back(), 7195 CommonMask); 7196 transformMaskAfterShuffle(CommonMask, CommonMask); 7197 } 7198 SameNodesEstimated = false; 7199 Cost += createShuffle(&E1, E2, Mask); 7200 transformMaskAfterShuffle(CommonMask, Mask); 7201 } 7202 7203 class ShuffleCostBuilder { 7204 const TargetTransformInfo &TTI; 7205 7206 static bool isEmptyOrIdentity(ArrayRef<int> Mask, unsigned VF) { 7207 int Index = -1; 7208 return Mask.empty() || 7209 (VF == Mask.size() && 7210 ShuffleVectorInst::isIdentityMask(Mask, VF)) || 7211 (ShuffleVectorInst::isExtractSubvectorMask(Mask, VF, Index) && 7212 Index == 0); 7213 } 7214 7215 public: 7216 ShuffleCostBuilder(const TargetTransformInfo &TTI) : TTI(TTI) {} 7217 ~ShuffleCostBuilder() = default; 7218 InstructionCost createShuffleVector(Value *V1, Value *, 7219 ArrayRef<int> Mask) const { 7220 // Empty mask or identity mask are free. 7221 unsigned VF = 7222 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue(); 7223 if (isEmptyOrIdentity(Mask, VF)) 7224 return TTI::TCC_Free; 7225 return TTI.getShuffleCost(TTI::SK_PermuteTwoSrc, 7226 cast<VectorType>(V1->getType()), Mask); 7227 } 7228 InstructionCost createShuffleVector(Value *V1, ArrayRef<int> Mask) const { 7229 // Empty mask or identity mask are free. 7230 unsigned VF = 7231 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue(); 7232 if (isEmptyOrIdentity(Mask, VF)) 7233 return TTI::TCC_Free; 7234 return TTI.getShuffleCost(TTI::SK_PermuteSingleSrc, 7235 cast<VectorType>(V1->getType()), Mask); 7236 } 7237 InstructionCost createIdentity(Value *) const { return TTI::TCC_Free; } 7238 InstructionCost createPoison(Type *Ty, unsigned VF) const { 7239 return TTI::TCC_Free; 7240 } 7241 void resizeToMatch(Value *&, Value *&) const {} 7242 }; 7243 7244 /// Smart shuffle instruction emission, walks through shuffles trees and 7245 /// tries to find the best matching vector for the actual shuffle 7246 /// instruction. 7247 InstructionCost 7248 createShuffle(const PointerUnion<Value *, const TreeEntry *> &P1, 7249 const PointerUnion<Value *, const TreeEntry *> &P2, 7250 ArrayRef<int> Mask) { 7251 ShuffleCostBuilder Builder(TTI); 7252 SmallVector<int> CommonMask(Mask.begin(), Mask.end()); 7253 Value *V1 = P1.dyn_cast<Value *>(), *V2 = P2.dyn_cast<Value *>(); 7254 unsigned CommonVF = Mask.size(); 7255 if (!V1 && !V2 && !P2.isNull()) { 7256 // Shuffle 2 entry nodes. 
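// Neither vector exists yet, so distinct placeholder constants (all-zeros
// and all-ones) of the common width stand in for the two entries; the
// generic shuffle builder only needs two different operands of the right
// type to price the permute.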
7257 const TreeEntry *E = P1.get<const TreeEntry *>(); 7258 unsigned VF = E->getVectorFactor(); 7259 const TreeEntry *E2 = P2.get<const TreeEntry *>(); 7260 CommonVF = std::max(VF, E2->getVectorFactor()); 7261 assert(all_of(Mask, 7262 [=](int Idx) { 7263 return Idx < 2 * static_cast<int>(CommonVF); 7264 }) && 7265 "All elements in mask must be less than 2 * CommonVF."); 7266 if (E->Scalars.size() == E2->Scalars.size()) { 7267 SmallVector<int> EMask = E->getCommonMask(); 7268 SmallVector<int> E2Mask = E2->getCommonMask(); 7269 if (!EMask.empty() || !E2Mask.empty()) { 7270 for (int &Idx : CommonMask) { 7271 if (Idx == PoisonMaskElem) 7272 continue; 7273 if (Idx < static_cast<int>(CommonVF) && !EMask.empty()) 7274 Idx = EMask[Idx]; 7275 else if (Idx >= static_cast<int>(CommonVF)) 7276 Idx = (E2Mask.empty() ? Idx - CommonVF : E2Mask[Idx - CommonVF]) + 7277 E->Scalars.size(); 7278 } 7279 } 7280 CommonVF = E->Scalars.size(); 7281 } 7282 V1 = Constant::getNullValue( 7283 FixedVectorType::get(E->Scalars.front()->getType(), CommonVF)); 7284 V2 = getAllOnesValue( 7285 *R.DL, FixedVectorType::get(E->Scalars.front()->getType(), CommonVF)); 7286 } else if (!V1 && P2.isNull()) { 7287 // Shuffle single entry node. 7288 const TreeEntry *E = P1.get<const TreeEntry *>(); 7289 unsigned VF = E->getVectorFactor(); 7290 CommonVF = VF; 7291 assert( 7292 all_of(Mask, 7293 [=](int Idx) { return Idx < static_cast<int>(CommonVF); }) && 7294 "All elements in mask must be less than CommonVF."); 7295 if (E->Scalars.size() == Mask.size() && VF != Mask.size()) { 7296 SmallVector<int> EMask = E->getCommonMask(); 7297 assert(!EMask.empty() && "Expected non-empty common mask."); 7298 for (int &Idx : CommonMask) { 7299 if (Idx != PoisonMaskElem) 7300 Idx = EMask[Idx]; 7301 } 7302 CommonVF = E->Scalars.size(); 7303 } 7304 V1 = Constant::getNullValue( 7305 FixedVectorType::get(E->Scalars.front()->getType(), CommonVF)); 7306 } else if (V1 && P2.isNull()) { 7307 // Shuffle single vector. 7308 CommonVF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7309 assert( 7310 all_of(Mask, 7311 [=](int Idx) { return Idx < static_cast<int>(CommonVF); }) && 7312 "All elements in mask must be less than CommonVF."); 7313 } else if (V1 && !V2) { 7314 // Shuffle vector and tree node. 7315 unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7316 const TreeEntry *E2 = P2.get<const TreeEntry *>(); 7317 CommonVF = std::max(VF, E2->getVectorFactor()); 7318 assert(all_of(Mask, 7319 [=](int Idx) { 7320 return Idx < 2 * static_cast<int>(CommonVF); 7321 }) && 7322 "All elements in mask must be less than 2 * CommonVF."); 7323 if (E2->Scalars.size() == VF && VF != CommonVF) { 7324 SmallVector<int> E2Mask = E2->getCommonMask(); 7325 assert(!E2Mask.empty() && "Expected non-empty common mask."); 7326 for (int &Idx : CommonMask) { 7327 if (Idx == PoisonMaskElem) 7328 continue; 7329 if (Idx >= static_cast<int>(CommonVF)) 7330 Idx = E2Mask[Idx - CommonVF] + VF; 7331 } 7332 CommonVF = VF; 7333 } 7334 V1 = Constant::getNullValue( 7335 FixedVectorType::get(E2->Scalars.front()->getType(), CommonVF)); 7336 V2 = getAllOnesValue( 7337 *R.DL, 7338 FixedVectorType::get(E2->Scalars.front()->getType(), CommonVF)); 7339 } else if (!V1 && V2) { 7340 // Shuffle vector and tree node. 
7341 unsigned VF = cast<FixedVectorType>(V2->getType())->getNumElements(); 7342 const TreeEntry *E1 = P1.get<const TreeEntry *>(); 7343 CommonVF = std::max(VF, E1->getVectorFactor()); 7344 assert(all_of(Mask, 7345 [=](int Idx) { 7346 return Idx < 2 * static_cast<int>(CommonVF); 7347 }) && 7348 "All elements in mask must be less than 2 * CommonVF."); 7349 if (E1->Scalars.size() == VF && VF != CommonVF) { 7350 SmallVector<int> E1Mask = E1->getCommonMask(); 7351 assert(!E1Mask.empty() && "Expected non-empty common mask."); 7352 for (int &Idx : CommonMask) { 7353 if (Idx == PoisonMaskElem) 7354 continue; 7355 if (Idx >= static_cast<int>(CommonVF)) 7356 Idx = E1Mask[Idx - CommonVF] + VF; 7357 } 7358 CommonVF = VF; 7359 } 7360 V1 = Constant::getNullValue( 7361 FixedVectorType::get(E1->Scalars.front()->getType(), CommonVF)); 7362 V2 = getAllOnesValue( 7363 *R.DL, 7364 FixedVectorType::get(E1->Scalars.front()->getType(), CommonVF)); 7365 } else { 7366 assert(V1 && V2 && "Expected both vectors."); 7367 unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 7368 CommonVF = 7369 std::max(VF, cast<FixedVectorType>(V2->getType())->getNumElements()); 7370 assert(all_of(Mask, 7371 [=](int Idx) { 7372 return Idx < 2 * static_cast<int>(CommonVF); 7373 }) && 7374 "All elements in mask must be less than 2 * CommonVF."); 7375 if (V1->getType() != V2->getType()) { 7376 V1 = Constant::getNullValue(FixedVectorType::get( 7377 cast<FixedVectorType>(V1->getType())->getElementType(), CommonVF)); 7378 V2 = getAllOnesValue( 7379 *R.DL, FixedVectorType::get( 7380 cast<FixedVectorType>(V1->getType())->getElementType(), 7381 CommonVF)); 7382 } 7383 } 7384 InVectors.front() = Constant::getNullValue(FixedVectorType::get( 7385 cast<FixedVectorType>(V1->getType())->getElementType(), 7386 CommonMask.size())); 7387 if (InVectors.size() == 2) 7388 InVectors.pop_back(); 7389 return BaseShuffleAnalysis::createShuffle<InstructionCost>( 7390 V1, V2, CommonMask, Builder); 7391 } 7392 7393 public: 7394 ShuffleCostEstimator(TargetTransformInfo &TTI, 7395 ArrayRef<Value *> VectorizedVals, BoUpSLP &R, 7396 SmallPtrSetImpl<Value *> &CheckedExtracts) 7397 : TTI(TTI), VectorizedVals(VectorizedVals.begin(), VectorizedVals.end()), 7398 R(R), CheckedExtracts(CheckedExtracts) {} 7399 Value *adjustExtracts(const TreeEntry *E, MutableArrayRef<int> Mask, 7400 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds, 7401 unsigned NumParts, bool &UseVecBaseAsInput) { 7402 UseVecBaseAsInput = false; 7403 if (Mask.empty()) 7404 return nullptr; 7405 Value *VecBase = nullptr; 7406 ArrayRef<Value *> VL = E->Scalars; 7407 // If the resulting type is scalarized, do not adjust the cost. 7408 if (NumParts == VL.size()) 7409 return nullptr; 7410 // Check if it can be considered reused if same extractelements were 7411 // vectorized already. 
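// I.e. look for an earlier tree entry (a gather node or a vectorized
// extractelement node) whose scalars already match this bundle at every
// position used by the mask; if one exists, the extract shuffle has
// effectively been costed already and computeExtractCost() is skipped below.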
    bool PrevNodeFound = any_of(
        ArrayRef(R.VectorizableTree).take_front(E->Idx),
        [&](const std::unique_ptr<TreeEntry> &TE) {
          return ((!TE->isAltShuffle() &&
                   TE->getOpcode() == Instruction::ExtractElement) ||
                  TE->State == TreeEntry::NeedToGather) &&
                 all_of(enumerate(TE->Scalars), [&](auto &&Data) {
                   return VL.size() > Data.index() &&
                          (Mask[Data.index()] == PoisonMaskElem ||
                           isa<UndefValue>(VL[Data.index()]) ||
                           Data.value() == VL[Data.index()]);
                 });
        });
    SmallPtrSet<Value *, 4> UniqueBases;
    unsigned SliceSize = VL.size() / NumParts;
    for (unsigned Part = 0; Part < NumParts; ++Part) {
      ArrayRef<int> SubMask = Mask.slice(Part * SliceSize, SliceSize);
      for (auto [I, V] : enumerate(VL.slice(Part * SliceSize, SliceSize))) {
        // Ignore non-extractelement scalars.
        if (isa<UndefValue>(V) ||
            (!SubMask.empty() && SubMask[I] == PoisonMaskElem))
          continue;
        // If all users of the instruction are going to be vectorized and this
        // instruction itself is not going to be vectorized, consider this
        // instruction as dead and remove its cost from the final cost of the
        // vectorized tree.
        // Also, avoid adjusting the cost for extractelements with multiple uses
        // in different graph entries.
        auto *EE = cast<ExtractElementInst>(V);
        VecBase = EE->getVectorOperand();
        UniqueBases.insert(VecBase);
        const TreeEntry *VE = R.getTreeEntry(V);
        if (!CheckedExtracts.insert(V).second ||
            !R.areAllUsersVectorized(cast<Instruction>(V), &VectorizedVals) ||
            (VE && VE != E))
          continue;
        std::optional<unsigned> EEIdx = getExtractIndex(EE);
        if (!EEIdx)
          continue;
        unsigned Idx = *EEIdx;
        // Take credit for the instruction that will become dead.
        if (EE->hasOneUse() || !PrevNodeFound) {
          Instruction *Ext = EE->user_back();
          if (isa<SExtInst, ZExtInst>(Ext) && all_of(Ext->users(), [](User *U) {
                return isa<GetElementPtrInst>(U);
              })) {
            // Use getExtractWithExtendCost() to calculate the cost of
            // extractelement/ext pair.
            Cost -=
                TTI.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(),
                                             EE->getVectorOperandType(), Idx);
            // Add back the cost of s|zext which is subtracted separately.
            Cost += TTI.getCastInstrCost(
                Ext->getOpcode(), Ext->getType(), EE->getType(),
                TTI::getCastContextHint(Ext), CostKind, Ext);
            continue;
          }
        }
        Cost -= TTI.getVectorInstrCost(*EE, EE->getVectorOperandType(),
                                       CostKind, Idx);
      }
    }
    // Check that the gather of extractelements can be represented as just a
    // shuffle of a single vector or of 2 vectors the scalars are extracted
    // from. We have found a bunch of extractelement instructions that must be
    // gathered into a vector and can be represented as a permutation of
    // elements from a single input vector or from 2 input vectors.
    // The extract cost is not added again if the same extractelements were
    // already vectorized for a previous node (reused).
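    // For example, extracting lanes 0 and 2 from %a and lanes 1 and 3 from %b
    // (both <4 x i32>) gathers into a single two-source shuffle with mask
    // <0, 5, 2, 7>, so only that shuffle cost needs to be added here.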
    if (!PrevNodeFound)
      Cost += computeExtractCost(VL, Mask, ShuffleKinds, NumParts);
    InVectors.assign(1, E);
    CommonMask.assign(Mask.begin(), Mask.end());
    transformMaskAfterShuffle(CommonMask, CommonMask);
    SameNodesEstimated = false;
    if (NumParts != 1 && UniqueBases.size() != 1) {
      UseVecBaseAsInput = true;
      VecBase = Constant::getNullValue(
          FixedVectorType::get(VL.front()->getType(), CommonMask.size()));
    }
    return VecBase;
  }
  /// Checks if the specified entry \p E needs to be delayed because of its
  /// dependency nodes.
  std::optional<InstructionCost>
  needToDelay(const TreeEntry *,
              ArrayRef<SmallVector<const TreeEntry *>>) const {
    // No need to delay the cost estimation during analysis.
    return std::nullopt;
  }
  void add(const TreeEntry &E1, const TreeEntry &E2, ArrayRef<int> Mask) {
    if (&E1 == &E2) {
      assert(all_of(Mask,
                    [&](int Idx) {
                      return Idx < static_cast<int>(E1.getVectorFactor());
                    }) &&
             "Expected single vector shuffle mask.");
      add(E1, Mask);
      return;
    }
    if (InVectors.empty()) {
      CommonMask.assign(Mask.begin(), Mask.end());
      InVectors.assign({&E1, &E2});
      return;
    }
    assert(!CommonMask.empty() && "Expected non-empty common mask.");
    auto *MaskVecTy =
        FixedVectorType::get(E1.Scalars.front()->getType(), Mask.size());
    unsigned NumParts = TTI.getNumberOfParts(MaskVecTy);
    if (NumParts == 0 || NumParts >= Mask.size())
      NumParts = 1;
    unsigned SliceSize = Mask.size() / NumParts;
    const auto *It =
        find_if(Mask, [](int Idx) { return Idx != PoisonMaskElem; });
    unsigned Part = std::distance(Mask.begin(), It) / SliceSize;
    estimateNodesPermuteCost(E1, &E2, Mask, Part, SliceSize);
  }
  void add(const TreeEntry &E1, ArrayRef<int> Mask) {
    if (InVectors.empty()) {
      CommonMask.assign(Mask.begin(), Mask.end());
      InVectors.assign(1, &E1);
      return;
    }
    assert(!CommonMask.empty() && "Expected non-empty common mask.");
    auto *MaskVecTy =
        FixedVectorType::get(E1.Scalars.front()->getType(), Mask.size());
    unsigned NumParts = TTI.getNumberOfParts(MaskVecTy);
    if (NumParts == 0 || NumParts >= Mask.size())
      NumParts = 1;
    unsigned SliceSize = Mask.size() / NumParts;
    const auto *It =
        find_if(Mask, [](int Idx) { return Idx != PoisonMaskElem; });
    unsigned Part = std::distance(Mask.begin(), It) / SliceSize;
    estimateNodesPermuteCost(E1, nullptr, Mask, Part, SliceSize);
    if (!SameNodesEstimated && InVectors.size() == 1)
      InVectors.emplace_back(&E1);
  }
  /// Adds 2 input vectors and the mask for their shuffling.
  void add(Value *V1, Value *V2, ArrayRef<int> Mask) {
    // This may only be reached when shuffling 2 vectors of extractelements
    // that were already handled in adjustExtracts.
    assert(InVectors.size() == 1 &&
           all_of(enumerate(CommonMask),
                  [&](auto P) {
                    if (P.value() == PoisonMaskElem)
                      return Mask[P.index()] == PoisonMaskElem;
                    auto *EI =
                        cast<ExtractElementInst>(InVectors.front()
                                                     .get<const TreeEntry *>()
                                                     ->Scalars[P.index()]);
                    return EI->getVectorOperand() == V1 ||
                           EI->getVectorOperand() == V2;
                  }) &&
           "Expected extractelement vectors.");
  }
  /// Adds one more input vector and the mask for the shuffling.
  void add(Value *V1, ArrayRef<int> Mask, bool ForExtracts = false) {
    if (InVectors.empty()) {
      assert(CommonMask.empty() && !ForExtracts &&
             "Expected empty input mask/vectors.");
      CommonMask.assign(Mask.begin(), Mask.end());
      InVectors.assign(1, V1);
      return;
    }
    if (ForExtracts) {
      // No need to add vectors here, they were already handled in
      // adjustExtracts.
      assert(InVectors.size() == 1 &&
             InVectors.front().is<const TreeEntry *>() && !CommonMask.empty() &&
             all_of(enumerate(CommonMask),
                    [&](auto P) {
                      Value *Scalar = InVectors.front()
                                          .get<const TreeEntry *>()
                                          ->Scalars[P.index()];
                      if (P.value() == PoisonMaskElem)
                        return P.value() == Mask[P.index()] ||
                               isa<UndefValue>(Scalar);
                      if (isa<Constant>(V1))
                        return true;
                      auto *EI = cast<ExtractElementInst>(Scalar);
                      return EI->getVectorOperand() == V1;
                    }) &&
             "Expected only tree entry for extractelement vectors.");
      return;
    }
    assert(!InVectors.empty() && !CommonMask.empty() &&
           "Expected only tree entries from extracts/reused buildvectors.");
    unsigned VF = cast<FixedVectorType>(V1->getType())->getNumElements();
    if (InVectors.size() == 2) {
      Cost += createShuffle(InVectors.front(), InVectors.back(), CommonMask);
      transformMaskAfterShuffle(CommonMask, CommonMask);
      VF = std::max<unsigned>(VF, CommonMask.size());
    } else if (const auto *InTE =
                   InVectors.front().dyn_cast<const TreeEntry *>()) {
      VF = std::max(VF, InTE->getVectorFactor());
    } else {
      VF = std::max(
          VF, cast<FixedVectorType>(InVectors.front().get<Value *>()->getType())
                  ->getNumElements());
    }
    InVectors.push_back(V1);
    for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
      if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem)
        CommonMask[Idx] = Mask[Idx] + VF;
  }
  Value *gather(ArrayRef<Value *> VL, unsigned MaskVF = 0,
                Value *Root = nullptr) {
    Cost += getBuildVectorCost(VL, Root);
    if (!Root) {
      // FIXME: Need to find a way to avoid use of getNullValue here.
      SmallVector<Constant *> Vals;
      unsigned VF = VL.size();
      if (MaskVF != 0)
        VF = std::min(VF, MaskVF);
      for (Value *V : VL.take_front(VF)) {
        if (isa<UndefValue>(V)) {
          Vals.push_back(cast<Constant>(V));
          continue;
        }
        Vals.push_back(Constant::getNullValue(V->getType()));
      }
      return ConstantVector::get(Vals);
    }
    return ConstantVector::getSplat(
        ElementCount::getFixed(
            cast<FixedVectorType>(Root->getType())->getNumElements()),
        getAllOnesValue(*R.DL, VL.front()->getType()));
  }
  InstructionCost createFreeze(InstructionCost Cost) { return Cost; }
  /// Finalize emission of the shuffles.
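  /// Applies the accumulated common mask (extended by \p ExtMask), adds the
  /// cost of any still-pending shuffle of the collected input vectors and
  /// returns the total estimated cost. If \p Action is provided, it is run on
  /// the intermediate vector before the external mask is applied.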
  InstructionCost
  finalize(ArrayRef<int> ExtMask, unsigned VF = 0,
           function_ref<void(Value *&, SmallVectorImpl<int> &)> Action = {}) {
    IsFinalized = true;
    if (Action) {
      const PointerUnion<Value *, const TreeEntry *> &Vec = InVectors.front();
      if (InVectors.size() == 2)
        Cost += createShuffle(Vec, InVectors.back(), CommonMask);
      else
        Cost += createShuffle(Vec, nullptr, CommonMask);
      for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
        if (CommonMask[Idx] != PoisonMaskElem)
          CommonMask[Idx] = Idx;
      assert(VF > 0 &&
             "Expected vector length for the final value before action.");
      Value *V = Vec.get<Value *>();
      Action(V, CommonMask);
      InVectors.front() = V;
    }
    ::addMask(CommonMask, ExtMask, /*ExtendingManyInputs=*/true);
    if (CommonMask.empty()) {
      assert(InVectors.size() == 1 && "Expected only one vector with no mask");
      return Cost;
    }
    return Cost +
           createShuffle(InVectors.front(),
                         InVectors.size() == 2 ? InVectors.back() : nullptr,
                         CommonMask);
  }

  ~ShuffleCostEstimator() {
    assert((IsFinalized || CommonMask.empty()) &&
           "Shuffle construction must be finalized.");
  }
};

const BoUpSLP::TreeEntry *BoUpSLP::getOperandEntry(const TreeEntry *E,
                                                   unsigned Idx) const {
  Value *Op = E->getOperand(Idx).front();
  if (const TreeEntry *TE = getTreeEntry(Op)) {
    if (find_if(TE->UserTreeIndices, [&](const EdgeInfo &EI) {
          return EI.EdgeIdx == Idx && EI.UserTE == E;
        }) != TE->UserTreeIndices.end())
      return TE;
    auto MIt = MultiNodeScalars.find(Op);
    if (MIt != MultiNodeScalars.end()) {
      for (const TreeEntry *TE : MIt->second) {
        if (find_if(TE->UserTreeIndices, [&](const EdgeInfo &EI) {
              return EI.EdgeIdx == Idx && EI.UserTE == E;
            }) != TE->UserTreeIndices.end())
          return TE;
      }
    }
  }
  const auto *It =
      find_if(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
        return TE->State == TreeEntry::NeedToGather &&
               find_if(TE->UserTreeIndices, [&](const EdgeInfo &EI) {
                 return EI.EdgeIdx == Idx && EI.UserTE == E;
               }) != TE->UserTreeIndices.end();
      });
  assert(It != VectorizableTree.end() && "Expected vectorizable entry.");
  return It->get();
}

InstructionCost
BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
                      SmallPtrSetImpl<Value *> &CheckedExtracts) {
  ArrayRef<Value *> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (E->State != TreeEntry::NeedToGather) {
    if (auto *SI = dyn_cast<StoreInst>(VL[0]))
      ScalarTy = SI->getValueOperand()->getType();
    else if (auto *CI = dyn_cast<CmpInst>(VL[0]))
      ScalarTy = CI->getOperand(0)->getType();
    else if (auto *IE = dyn_cast<InsertElementInst>(VL[0]))
      ScalarTy = IE->getOperand(1)->getType();
  }
  if (!FixedVectorType::isValidElementType(ScalarTy))
    return InstructionCost::getInvalid();
  auto *VecTy = FixedVectorType::get(ScalarTy, VL.size());
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  // If we have computed a smaller type for the expression, update VecTy so
  // that the costs will be accurate.
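  // For example, if minimum-bitwidth analysis has recorded 8 bits for this
  // entry, a bundle of four i32 operations is costed as a <4 x i8> operation
  // (with the extend/truncate accounted for elsewhere) rather than <4 x i32>.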
  auto It = MinBWs.find(E);
  if (It != MinBWs.end()) {
    ScalarTy = IntegerType::get(F->getContext(), It->second.first);
    VecTy = FixedVectorType::get(ScalarTy, VL.size());
  }
  unsigned EntryVF = E->getVectorFactor();
  auto *FinalVecTy = FixedVectorType::get(ScalarTy, EntryVF);

  bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
  if (E->State == TreeEntry::NeedToGather) {
    if (allConstant(VL))
      return 0;
    if (isa<InsertElementInst>(VL[0]))
      return InstructionCost::getInvalid();
    return processBuildVector<ShuffleCostEstimator, InstructionCost>(
        E, *TTI, VectorizedVals, *this, CheckedExtracts);
  }
  InstructionCost CommonCost = 0;
  SmallVector<int> Mask;
  if (!E->ReorderIndices.empty() &&
      E->State != TreeEntry::PossibleStridedVectorize) {
    SmallVector<int> NewMask;
    if (E->getOpcode() == Instruction::Store) {
      // For stores the order is actually a mask.
      NewMask.resize(E->ReorderIndices.size());
      copy(E->ReorderIndices, NewMask.begin());
    } else {
      inversePermutation(E->ReorderIndices, NewMask);
    }
    ::addMask(Mask, NewMask);
  }
  if (NeedToShuffleReuses)
    ::addMask(Mask, E->ReuseShuffleIndices);
  if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask, Mask.size()))
    CommonCost =
        TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FinalVecTy, Mask);
  assert((E->State == TreeEntry::Vectorize ||
          E->State == TreeEntry::ScatterVectorize ||
          E->State == TreeEntry::PossibleStridedVectorize) &&
         "Unhandled state");
  assert(E->getOpcode() &&
         ((allSameType(VL) && allSameBlock(VL)) ||
          (E->getOpcode() == Instruction::GetElementPtr &&
           E->getMainOp()->getType()->isPointerTy())) &&
         "Invalid VL");
  Instruction *VL0 = E->getMainOp();
  unsigned ShuffleOrOp =
      E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
  SetVector<Value *> UniqueValues(VL.begin(), VL.end());
  const unsigned Sz = UniqueValues.size();
  SmallBitVector UsedScalars(Sz, false);
  for (unsigned I = 0; I < Sz; ++I) {
    if (getTreeEntry(UniqueValues[I]) == E)
      continue;
    UsedScalars.set(I);
  }
  auto GetCastContextHint = [&](Value *V) {
    if (const TreeEntry *OpTE = getTreeEntry(V)) {
      if (OpTE->State == TreeEntry::ScatterVectorize)
        return TTI::CastContextHint::GatherScatter;
      if (OpTE->State == TreeEntry::Vectorize &&
          OpTE->getOpcode() == Instruction::Load && !OpTE->isAltShuffle()) {
        if (OpTE->ReorderIndices.empty())
          return TTI::CastContextHint::Normal;
        SmallVector<int> Mask;
        inversePermutation(OpTE->ReorderIndices, Mask);
        if (ShuffleVectorInst::isReverseMask(Mask, Mask.size()))
          return TTI::CastContextHint::Reversed;
      }
    } else {
      InstructionsState SrcState = getSameOpcode(E->getOperand(0), *TLI);
      if (SrcState.getOpcode() == Instruction::Load && !SrcState.isAltShuffle())
        return TTI::CastContextHint::GatherScatter;
    }
    return TTI::CastContextHint::None;
  };
  auto GetCostDiff =
      [=](function_ref<InstructionCost(unsigned)> ScalarEltCost,
          function_ref<InstructionCost(InstructionCost)> VectorCost) {
        // Calculate the cost of this instruction.
        InstructionCost ScalarCost = 0;
        if (isa<CastInst, CmpInst, SelectInst, CallInst>(VL0)) {
          // For some of the instructions there is no need to calculate the
          // cost for each particular instruction; we can use the cost of a
          // single instruction x the total number of scalar instructions.
          ScalarCost = (Sz - UsedScalars.count()) * ScalarEltCost(0);
        } else {
          for (unsigned I = 0; I < Sz; ++I) {
            if (UsedScalars.test(I))
              continue;
            ScalarCost += ScalarEltCost(I);
          }
        }

        InstructionCost VecCost = VectorCost(CommonCost);
        // Check if the current node must be resized, if the parent node is not
        // resized.
        if (!UnaryInstruction::isCast(E->getOpcode()) && E->Idx != 0) {
          const EdgeInfo &EI = E->UserTreeIndices.front();
          if ((EI.UserTE->getOpcode() != Instruction::Select ||
               EI.EdgeIdx != 0) &&
              It != MinBWs.end()) {
            auto UserBWIt = MinBWs.find(EI.UserTE);
            Type *UserScalarTy =
                EI.UserTE->getOperand(EI.EdgeIdx).front()->getType();
            if (UserBWIt != MinBWs.end())
              UserScalarTy = IntegerType::get(ScalarTy->getContext(),
                                              UserBWIt->second.first);
            if (ScalarTy != UserScalarTy) {
              unsigned BWSz = DL->getTypeSizeInBits(ScalarTy);
              unsigned SrcBWSz = DL->getTypeSizeInBits(UserScalarTy);
              unsigned VecOpcode;
              auto *SrcVecTy =
                  FixedVectorType::get(UserScalarTy, E->getVectorFactor());
              if (BWSz > SrcBWSz)
                VecOpcode = Instruction::Trunc;
              else
                VecOpcode =
                    It->second.second ? Instruction::SExt : Instruction::ZExt;
              TTI::CastContextHint CCH = GetCastContextHint(VL0);
              VecCost += TTI->getCastInstrCost(VecOpcode, VecTy, SrcVecTy, CCH,
                                               CostKind);
              ScalarCost +=
                  Sz * TTI->getCastInstrCost(VecOpcode, ScalarTy, UserScalarTy,
                                             CCH, CostKind);
            }
          }
        }
        LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost - CommonCost,
                                 ScalarCost, "Calculated costs for Tree"));
        return VecCost - ScalarCost;
      };
  // Calculate the cost difference from vectorizing a set of GEPs.
  // A negative value means vectorizing is profitable.
  auto GetGEPCostDiff = [=](ArrayRef<Value *> Ptrs, Value *BasePtr) {
    InstructionCost ScalarCost = 0;
    InstructionCost VecCost = 0;
    // Here we differentiate two cases: (1) when Ptrs represent a regular
    // vectorization tree node (as they are pointer arguments of scattered
    // loads) or (2) when Ptrs are the arguments of loads or stores being
    // vectorized as a plain wide unit-stride load/store since all the
    // loads/stores are known to be from/to adjacent locations.
    assert(E->State == TreeEntry::Vectorize &&
           "Entry state expected to be Vectorize here.");
    if (isa<LoadInst, StoreInst>(VL0)) {
      // Case 2: estimate costs for pointer related costs when vectorizing to
      // a wide load/store.
      // The scalar cost is estimated as a set of pointers with known
      // relationship between them.
      // For vector code we will use BasePtr as the argument for the wide
      // load/store, but we also need to account for all the instructions which
      // are going to stay in vectorized code due to uses outside of these
      // scalar loads/stores.
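      // For example, for four consecutive scalar loads the vector code keeps
      // only the base pointer for one wide load, so any GEPs that feed just
      // these loads are expected to disappear and their cost is credited back.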
      ScalarCost = TTI->getPointersChainCost(
          Ptrs, BasePtr, TTI::PointersChainInfo::getUnitStride(), ScalarTy,
          CostKind);

      SmallVector<const Value *> PtrsRetainedInVecCode;
      for (Value *V : Ptrs) {
        if (V == BasePtr) {
          PtrsRetainedInVecCode.push_back(V);
          continue;
        }
        auto *Ptr = dyn_cast<GetElementPtrInst>(V);
        // For simplicity, assume Ptr stays in vectorized code if it's not a
        // GEP instruction. We don't care since its cost is considered free.
        // TODO: We should check for any uses outside of the vectorizable tree
        // rather than just a single use.
        if (!Ptr || !Ptr->hasOneUse())
          PtrsRetainedInVecCode.push_back(V);
      }

      if (PtrsRetainedInVecCode.size() == Ptrs.size()) {
        // If all pointers stay in vectorized code then we don't have
        // any savings on that.
        LLVM_DEBUG(dumpTreeCosts(E, 0, ScalarCost, ScalarCost,
                                 "Calculated GEPs cost for Tree"));
        return InstructionCost{TTI::TCC_Free};
      }
      VecCost = TTI->getPointersChainCost(
          PtrsRetainedInVecCode, BasePtr,
          TTI::PointersChainInfo::getKnownStride(), VecTy, CostKind);
    } else {
      // Case 1: Ptrs are the arguments of loads that we are going to transform
      // into a masked gather load intrinsic.
      // All the scalar GEPs will be removed as a result of vectorization.
      // For any external uses of some lanes extract element instructions will
      // be generated (whose cost is estimated separately).
      TTI::PointersChainInfo PtrsInfo =
          all_of(Ptrs,
                 [](const Value *V) {
                   auto *Ptr = dyn_cast<GetElementPtrInst>(V);
                   return Ptr && !Ptr->hasAllConstantIndices();
                 })
              ? TTI::PointersChainInfo::getUnknownStride()
              : TTI::PointersChainInfo::getKnownStride();

      ScalarCost = TTI->getPointersChainCost(Ptrs, BasePtr, PtrsInfo, ScalarTy,
                                             CostKind);
      if (auto *BaseGEP = dyn_cast<GEPOperator>(BasePtr)) {
        SmallVector<const Value *> Indices(BaseGEP->indices());
        VecCost = TTI->getGEPCost(BaseGEP->getSourceElementType(),
                                  BaseGEP->getPointerOperand(), Indices, VecTy,
                                  CostKind);
      }
    }

    LLVM_DEBUG(dumpTreeCosts(E, 0, VecCost, ScalarCost,
                             "Calculated GEPs cost for Tree"));

    return VecCost - ScalarCost;
  };

  switch (ShuffleOrOp) {
  case Instruction::PHI: {
    // Count reused scalars.
    InstructionCost ScalarCost = 0;
    SmallPtrSet<const TreeEntry *, 4> CountedOps;
    for (Value *V : UniqueValues) {
      auto *PHI = dyn_cast<PHINode>(V);
      if (!PHI)
        continue;

      ValueList Operands(PHI->getNumIncomingValues(), nullptr);
      for (unsigned I = 0, N = PHI->getNumIncomingValues(); I < N; ++I) {
        Value *Op = PHI->getIncomingValue(I);
        Operands[I] = Op;
      }
      if (const TreeEntry *OpTE = getTreeEntry(Operands.front()))
        if (OpTE->isSame(Operands) && CountedOps.insert(OpTE).second)
          if (!OpTE->ReuseShuffleIndices.empty())
            ScalarCost += TTI::TCC_Basic * (OpTE->ReuseShuffleIndices.size() -
                                            OpTE->Scalars.size());
    }

    return CommonCost - ScalarCost;
  }
  case Instruction::ExtractValue:
  case Instruction::ExtractElement: {
    auto GetScalarCost = [&](unsigned Idx) {
      auto *I = cast<Instruction>(UniqueValues[Idx]);
      VectorType *SrcVecTy;
      if (ShuffleOrOp == Instruction::ExtractElement) {
        auto *EE = cast<ExtractElementInst>(I);
        SrcVecTy = EE->getVectorOperandType();
      } else {
        auto *EV = cast<ExtractValueInst>(I);
        Type *AggregateTy = EV->getAggregateOperand()->getType();
        unsigned NumElts;
        if (auto *ATy = dyn_cast<ArrayType>(AggregateTy))
          NumElts = ATy->getNumElements();
        else
          NumElts = AggregateTy->getStructNumElements();
        SrcVecTy = FixedVectorType::get(ScalarTy, NumElts);
      }
      if (I->hasOneUse()) {
        Instruction *Ext = I->user_back();
        if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
            all_of(Ext->users(),
                   [](User *U) { return isa<GetElementPtrInst>(U); })) {
          // Use getExtractWithExtendCost() to calculate the cost of
          // extractelement/ext pair.
          InstructionCost Cost = TTI->getExtractWithExtendCost(
              Ext->getOpcode(), Ext->getType(), SrcVecTy, *getExtractIndex(I));
          // Subtract the cost of s|zext which is subtracted separately.
          Cost -= TTI->getCastInstrCost(
              Ext->getOpcode(), Ext->getType(), I->getType(),
              TTI::getCastContextHint(Ext), CostKind, Ext);
          return Cost;
        }
      }
      return TTI->getVectorInstrCost(Instruction::ExtractElement, SrcVecTy,
                                     CostKind, *getExtractIndex(I));
    };
    auto GetVectorCost = [](InstructionCost CommonCost) { return CommonCost; };
    return GetCostDiff(GetScalarCost, GetVectorCost);
  }
  case Instruction::InsertElement: {
    assert(E->ReuseShuffleIndices.empty() &&
           "Unique insertelements only are expected.");
    auto *SrcVecTy = cast<FixedVectorType>(VL0->getType());
    unsigned const NumElts = SrcVecTy->getNumElements();
    unsigned const NumScalars = VL.size();

    unsigned NumOfParts = TTI->getNumberOfParts(SrcVecTy);

    SmallVector<int> InsertMask(NumElts, PoisonMaskElem);
    unsigned OffsetBeg = *getInsertIndex(VL.front());
    unsigned OffsetEnd = OffsetBeg;
    InsertMask[OffsetBeg] = 0;
    for (auto [I, V] : enumerate(VL.drop_front())) {
      unsigned Idx = *getInsertIndex(V);
      if (OffsetBeg > Idx)
        OffsetBeg = Idx;
      else if (OffsetEnd < Idx)
        OffsetEnd = Idx;
      InsertMask[Idx] = I + 1;
    }
    unsigned VecScalarsSz = PowerOf2Ceil(NumElts);
    if (NumOfParts > 0)
      VecScalarsSz = PowerOf2Ceil((NumElts + NumOfParts - 1) / NumOfParts);
    unsigned VecSz = (1 + OffsetEnd / VecScalarsSz - OffsetBeg / VecScalarsSz) *
                     VecScalarsSz;
    unsigned Offset = VecScalarsSz * (OffsetBeg / VecScalarsSz);
    unsigned InsertVecSz = std::min<unsigned>(
        PowerOf2Ceil(OffsetEnd - OffsetBeg + 1),
        ((OffsetEnd - OffsetBeg + VecScalarsSz) / VecScalarsSz) * VecScalarsSz);
    bool IsWholeSubvector =
        OffsetBeg == Offset && ((OffsetEnd + 1) % VecScalarsSz == 0);
    // Check if we can safely insert a subvector. If it is not possible, just
    // generate a whole-sized vector and shuffle the source vector and the new
    // subvector.
    if (OffsetBeg + InsertVecSz > VecSz) {
      // Align OffsetBeg to generate correct mask.
      OffsetBeg = alignDown(OffsetBeg, VecSz, Offset);
      InsertVecSz = VecSz;
    }

    APInt DemandedElts = APInt::getZero(NumElts);
    // TODO: Add support for Instruction::InsertValue.
    SmallVector<int> Mask;
    if (!E->ReorderIndices.empty()) {
      inversePermutation(E->ReorderIndices, Mask);
      Mask.append(InsertVecSz - Mask.size(), PoisonMaskElem);
    } else {
      Mask.assign(VecSz, PoisonMaskElem);
      std::iota(Mask.begin(), std::next(Mask.begin(), InsertVecSz), 0);
    }
    bool IsIdentity = true;
    SmallVector<int> PrevMask(InsertVecSz, PoisonMaskElem);
    Mask.swap(PrevMask);
    for (unsigned I = 0; I < NumScalars; ++I) {
      unsigned InsertIdx = *getInsertIndex(VL[PrevMask[I]]);
      DemandedElts.setBit(InsertIdx);
      IsIdentity &= InsertIdx - OffsetBeg == I;
      Mask[InsertIdx - OffsetBeg] = I;
    }
    assert(Offset < NumElts && "Failed to find vector index offset");

    InstructionCost Cost = 0;
    Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts,
                                          /*Insert*/ true, /*Extract*/ false,
                                          CostKind);

    // First cost - resize to actual vector size if not identity shuffle or
    // need to shift the vector.
    // Do not calculate the cost if the actual size is the register size and
    // we can merge this shuffle with the following SK_Select.
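    // For example, inserting into lanes 2..5 of an <8 x float> destination
    // gives OffsetBeg = 2, OffsetEnd = 5 and InsertVecSz = 4: the new scalars
    // are first arranged within a <4 x float> subvector here, and then blended
    // into the original vector at offset 2 by the second cost below.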
    auto *InsertVecTy = FixedVectorType::get(ScalarTy, InsertVecSz);
    if (!IsIdentity)
      Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
                                  InsertVecTy, Mask);
    auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) {
      return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0));
    }));
    // Second cost - permutation with subvector, if some elements are from the
    // initial vector or inserting a subvector.
    // TODO: Implement the analysis of the FirstInsert->getOperand(0)
    // subvector of ActualVecTy.
    SmallBitVector InMask =
        isUndefVector(FirstInsert->getOperand(0),
                      buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask));
    if (!InMask.all() && NumScalars != NumElts && !IsWholeSubvector) {
      if (InsertVecSz != VecSz) {
        auto *ActualVecTy = FixedVectorType::get(ScalarTy, VecSz);
        Cost += TTI->getShuffleCost(TTI::SK_InsertSubvector, ActualVecTy,
                                    std::nullopt, CostKind, OffsetBeg - Offset,
                                    InsertVecTy);
      } else {
        for (unsigned I = 0, End = OffsetBeg - Offset; I < End; ++I)
          Mask[I] = InMask.test(I) ? PoisonMaskElem : I;
        for (unsigned I = OffsetBeg - Offset, End = OffsetEnd - Offset;
             I <= End; ++I)
          if (Mask[I] != PoisonMaskElem)
            Mask[I] = I + VecSz;
        for (unsigned I = OffsetEnd + 1 - Offset; I < VecSz; ++I)
          Mask[I] =
              ((I >= InMask.size()) || InMask.test(I)) ? PoisonMaskElem : I;
        Cost += TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, InsertVecTy, Mask);
      }
    }
    return Cost;
  }
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast: {
    auto SrcIt = MinBWs.find(getOperandEntry(E, 0));
    Type *SrcScalarTy = VL0->getOperand(0)->getType();
    auto *SrcVecTy = FixedVectorType::get(SrcScalarTy, VL.size());
    unsigned Opcode = ShuffleOrOp;
    unsigned VecOpcode = Opcode;
    if (!ScalarTy->isFloatingPointTy() && !SrcScalarTy->isFloatingPointTy() &&
        (SrcIt != MinBWs.end() || It != MinBWs.end())) {
      // Check if the values are candidates to demote.
      unsigned SrcBWSz = DL->getTypeSizeInBits(SrcScalarTy);
      if (SrcIt != MinBWs.end()) {
        SrcBWSz = SrcIt->second.first;
        SrcScalarTy = IntegerType::get(F->getContext(), SrcBWSz);
        SrcVecTy = FixedVectorType::get(SrcScalarTy, VL.size());
      }
      unsigned BWSz = DL->getTypeSizeInBits(ScalarTy);
      if (BWSz == SrcBWSz) {
        VecOpcode = Instruction::BitCast;
      } else if (BWSz < SrcBWSz) {
        VecOpcode = Instruction::Trunc;
      } else if (It != MinBWs.end()) {
        assert(BWSz > SrcBWSz && "Invalid cast!");
        VecOpcode = It->second.second ? Instruction::SExt : Instruction::ZExt;
      }
    }
    auto GetScalarCost = [&](unsigned Idx) -> InstructionCost {
      // Do not count cost here if minimum bitwidth is in effect and it is just
      // a bitcast (here it is just a noop).
      if (VecOpcode != Opcode && VecOpcode == Instruction::BitCast)
        return TTI::TCC_Free;
      auto *VI = VL0->getOpcode() == Opcode
                     ? cast<Instruction>(UniqueValues[Idx])
                     : nullptr;
      return TTI->getCastInstrCost(Opcode, VL0->getType(),
                                   VL0->getOperand(0)->getType(),
                                   TTI::getCastContextHint(VI), CostKind, VI);
    };
    auto GetVectorCost = [=](InstructionCost CommonCost) {
      // Do not count cost here if minimum bitwidth is in effect and it is just
      // a bitcast (here it is just a noop).
      if (VecOpcode != Opcode && VecOpcode == Instruction::BitCast)
        return CommonCost;
      auto *VI = VL0->getOpcode() == Opcode ? VL0 : nullptr;
      TTI::CastContextHint CCH = GetCastContextHint(VL0->getOperand(0));
      return CommonCost +
             TTI->getCastInstrCost(VecOpcode, VecTy, SrcVecTy, CCH, CostKind,
                                   VecOpcode == Opcode ? VI : nullptr);
    };
    return GetCostDiff(GetScalarCost, GetVectorCost);
  }
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select: {
    CmpInst::Predicate VecPred, SwappedVecPred;
    auto MatchCmp = m_Cmp(VecPred, m_Value(), m_Value());
    if (match(VL0, m_Select(MatchCmp, m_Value(), m_Value())) ||
        match(VL0, MatchCmp))
      SwappedVecPred = CmpInst::getSwappedPredicate(VecPred);
    else
      SwappedVecPred = VecPred = ScalarTy->isFloatingPointTy()
                                     ? CmpInst::BAD_FCMP_PREDICATE
                                     : CmpInst::BAD_ICMP_PREDICATE;
    auto GetScalarCost = [&](unsigned Idx) {
      auto *VI = cast<Instruction>(UniqueValues[Idx]);
      CmpInst::Predicate CurrentPred = ScalarTy->isFloatingPointTy()
                                           ? CmpInst::BAD_FCMP_PREDICATE
                                           : CmpInst::BAD_ICMP_PREDICATE;
      auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value());
      if ((!match(VI, m_Select(MatchCmp, m_Value(), m_Value())) &&
           !match(VI, MatchCmp)) ||
          (CurrentPred != VecPred && CurrentPred != SwappedVecPred))
        VecPred = SwappedVecPred = ScalarTy->isFloatingPointTy()
                                       ? CmpInst::BAD_FCMP_PREDICATE
                                       : CmpInst::BAD_ICMP_PREDICATE;

      return TTI->getCmpSelInstrCost(E->getOpcode(), ScalarTy,
                                     Builder.getInt1Ty(), CurrentPred, CostKind,
                                     VI);
    };
    auto GetVectorCost = [&](InstructionCost CommonCost) {
      auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size());

      InstructionCost VecCost = TTI->getCmpSelInstrCost(
          E->getOpcode(), VecTy, MaskTy, VecPred, CostKind, VL0);
      // Check if it is possible and profitable to use min/max for selects
      // in VL.
      auto IntrinsicAndUse = canConvertToMinOrMaxIntrinsic(VL);
      if (IntrinsicAndUse.first != Intrinsic::not_intrinsic) {
        IntrinsicCostAttributes CostAttrs(IntrinsicAndUse.first, VecTy,
                                          {VecTy, VecTy});
        InstructionCost IntrinsicCost =
            TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
        // If the selects are the only uses of the compares, they will be
        // dead and we can adjust the cost by removing their cost.
        if (IntrinsicAndUse.second)
          IntrinsicCost -= TTI->getCmpSelInstrCost(Instruction::ICmp, VecTy,
                                                   MaskTy, VecPred, CostKind);
        VecCost = std::min(VecCost, IntrinsicCost);
      }
      return VecCost + CommonCost;
    };
    return GetCostDiff(GetScalarCost, GetVectorCost);
  }
  case Instruction::FNeg:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    auto GetScalarCost = [&](unsigned Idx) {
      auto *VI = cast<Instruction>(UniqueValues[Idx]);
      unsigned OpIdx = isa<UnaryOperator>(VI) ? 0 : 1;
      TTI::OperandValueInfo Op1Info = TTI::getOperandInfo(VI->getOperand(0));
      TTI::OperandValueInfo Op2Info =
          TTI::getOperandInfo(VI->getOperand(OpIdx));
      SmallVector<const Value *> Operands(VI->operand_values());
      return TTI->getArithmeticInstrCost(ShuffleOrOp, ScalarTy, CostKind,
                                         Op1Info, Op2Info, Operands, VI);
    };
    auto GetVectorCost = [=](InstructionCost CommonCost) {
      unsigned OpIdx = isa<UnaryOperator>(VL0) ? 0 : 1;
      TTI::OperandValueInfo Op1Info = getOperandInfo(E->getOperand(0));
      TTI::OperandValueInfo Op2Info = getOperandInfo(E->getOperand(OpIdx));
      return TTI->getArithmeticInstrCost(ShuffleOrOp, VecTy, CostKind, Op1Info,
                                         Op2Info) +
             CommonCost;
    };
    return GetCostDiff(GetScalarCost, GetVectorCost);
  }
  case Instruction::GetElementPtr: {
    return CommonCost + GetGEPCostDiff(VL, VL0);
  }
  case Instruction::Load: {
    auto GetScalarCost = [&](unsigned Idx) {
      auto *VI = cast<LoadInst>(UniqueValues[Idx]);
      return TTI->getMemoryOpCost(Instruction::Load, ScalarTy, VI->getAlign(),
                                  VI->getPointerAddressSpace(), CostKind,
                                  TTI::OperandValueInfo(), VI);
    };
    auto *LI0 = cast<LoadInst>(VL0);
    auto GetVectorCost = [&](InstructionCost CommonCost) {
      InstructionCost VecLdCost;
      if (E->State == TreeEntry::Vectorize) {
        VecLdCost = TTI->getMemoryOpCost(
            Instruction::Load, VecTy, LI0->getAlign(),
            LI0->getPointerAddressSpace(), CostKind, TTI::OperandValueInfo());
      } else {
        assert((E->State == TreeEntry::ScatterVectorize ||
                E->State == TreeEntry::PossibleStridedVectorize) &&
               "Unknown EntryState");
        Align CommonAlignment = LI0->getAlign();
        for (Value *V : UniqueValues)
          CommonAlignment =
              std::min(CommonAlignment, cast<LoadInst>(V)->getAlign());
        VecLdCost = TTI->getGatherScatterOpCost(
            Instruction::Load, VecTy, LI0->getPointerOperand(),
            /*VariableMask=*/false, CommonAlignment, CostKind);
      }
      return VecLdCost + CommonCost;
    };

    InstructionCost Cost = GetCostDiff(GetScalarCost, GetVectorCost);
    // If this node generates a masked gather load then it is not a terminal
    // node. Hence the address operand cost is estimated separately.
    if (E->State == TreeEntry::ScatterVectorize ||
        E->State == TreeEntry::PossibleStridedVectorize)
      return Cost;

    // Estimate the cost of the GEPs since this tree node is a terminator.
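    // For example, for loads of a[i], a[i+1], a[i+2], a[i+3] the scalar GEPs
    // typically fold into the single wide load's base pointer, so the returned
    // difference is usually negative (a saving) for such chains.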
    SmallVector<Value *> PointerOps(VL.size());
    for (auto [I, V] : enumerate(VL))
      PointerOps[I] = cast<LoadInst>(V)->getPointerOperand();
    return Cost + GetGEPCostDiff(PointerOps, LI0->getPointerOperand());
  }
  case Instruction::Store: {
    bool IsReorder = !E->ReorderIndices.empty();
    auto GetScalarCost = [=](unsigned Idx) {
      auto *VI = cast<StoreInst>(VL[Idx]);
      TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(VI->getValueOperand());
      return TTI->getMemoryOpCost(Instruction::Store, ScalarTy, VI->getAlign(),
                                  VI->getPointerAddressSpace(), CostKind,
                                  OpInfo, VI);
    };
    auto *BaseSI =
        cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0);
    auto GetVectorCost = [=](InstructionCost CommonCost) {
      // We know that we can merge the stores. Calculate the cost.
      TTI::OperandValueInfo OpInfo = getOperandInfo(E->getOperand(0));
      return TTI->getMemoryOpCost(Instruction::Store, VecTy, BaseSI->getAlign(),
                                  BaseSI->getPointerAddressSpace(), CostKind,
                                  OpInfo) +
             CommonCost;
    };
    SmallVector<Value *> PointerOps(VL.size());
    for (auto [I, V] : enumerate(VL)) {
      unsigned Idx = IsReorder ? E->ReorderIndices[I] : I;
      PointerOps[Idx] = cast<StoreInst>(V)->getPointerOperand();
    }

    return GetCostDiff(GetScalarCost, GetVectorCost) +
           GetGEPCostDiff(PointerOps, BaseSI->getPointerOperand());
  }
  case Instruction::Call: {
    auto GetScalarCost = [&](unsigned Idx) {
      auto *CI = cast<CallInst>(UniqueValues[Idx]);
      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      if (ID != Intrinsic::not_intrinsic) {
        IntrinsicCostAttributes CostAttrs(ID, *CI, 1);
        return TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
      }
      return TTI->getCallInstrCost(CI->getCalledFunction(),
                                   CI->getFunctionType()->getReturnType(),
                                   CI->getFunctionType()->params(), CostKind);
    };
    auto GetVectorCost = [=](InstructionCost CommonCost) {
      auto *CI = cast<CallInst>(VL0);
      auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI);
      return std::min(VecCallCosts.first, VecCallCosts.second) + CommonCost;
    };
    return GetCostDiff(GetScalarCost, GetVectorCost);
  }
  case Instruction::ShuffleVector: {
    assert(E->isAltShuffle() &&
           ((Instruction::isBinaryOp(E->getOpcode()) &&
             Instruction::isBinaryOp(E->getAltOpcode())) ||
            (Instruction::isCast(E->getOpcode()) &&
             Instruction::isCast(E->getAltOpcode())) ||
            (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) &&
           "Invalid Shuffle Vector Operand");
    // Try to find the previous shuffle node with the same operands and same
    // main/alternate ops.
    auto TryFindNodeWithEqualOperands = [=]() {
      for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
        if (TE.get() == E)
          break;
        if (TE->isAltShuffle() &&
            ((TE->getOpcode() == E->getOpcode() &&
              TE->getAltOpcode() == E->getAltOpcode()) ||
             (TE->getOpcode() == E->getAltOpcode() &&
              TE->getAltOpcode() == E->getOpcode())) &&
            TE->hasEqualOperands(*E))
          return true;
      }
      return false;
    };
    auto GetScalarCost = [&](unsigned Idx) {
      auto *VI = cast<Instruction>(UniqueValues[Idx]);
      assert(E->isOpcodeOrAlt(VI) && "Unexpected main/alternate opcode");
      (void)E;
      return TTI->getInstructionCost(VI, CostKind);
    };
    // FIXME: Workaround for syntax error reported by MSVC buildbots.
    TargetTransformInfo &TTIRef = *TTI;
    // Need to clear CommonCost since the final shuffle cost is included in the
    // vector cost.
    auto GetVectorCost = [&](InstructionCost) {
      // VecCost is equal to the sum of the cost of creating 2 vectors
      // and the cost of creating the shuffle.
      InstructionCost VecCost = 0;
      if (TryFindNodeWithEqualOperands()) {
        LLVM_DEBUG({
          dbgs() << "SLP: diamond match for alternate node found.\n";
          E->dump();
        });
        // No need to add new vector costs here since we're going to reuse
        // same main/alternate vector ops, just do different shuffling.
      } else if (Instruction::isBinaryOp(E->getOpcode())) {
        VecCost =
            TTIRef.getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind);
        VecCost +=
            TTIRef.getArithmeticInstrCost(E->getAltOpcode(), VecTy, CostKind);
      } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) {
        auto *MaskTy = FixedVectorType::get(Builder.getInt1Ty(), VL.size());
        VecCost = TTIRef.getCmpSelInstrCost(E->getOpcode(), VecTy, MaskTy,
                                            CI0->getPredicate(), CostKind, VL0);
        VecCost += TTIRef.getCmpSelInstrCost(
            E->getOpcode(), VecTy, MaskTy,
            cast<CmpInst>(E->getAltOp())->getPredicate(), CostKind,
            E->getAltOp());
      } else {
        Type *Src0SclTy = E->getMainOp()->getOperand(0)->getType();
        Type *Src1SclTy = E->getAltOp()->getOperand(0)->getType();
        auto *Src0Ty = FixedVectorType::get(Src0SclTy, VL.size());
        auto *Src1Ty = FixedVectorType::get(Src1SclTy, VL.size());
        VecCost = TTIRef.getCastInstrCost(E->getOpcode(), VecTy, Src0Ty,
                                          TTI::CastContextHint::None, CostKind);
        VecCost +=
            TTIRef.getCastInstrCost(E->getAltOpcode(), VecTy, Src1Ty,
                                    TTI::CastContextHint::None, CostKind);
      }
      SmallVector<int> Mask;
      E->buildAltOpShuffleMask(
          [E](Instruction *I) {
            assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
            return I->getOpcode() == E->getAltOpcode();
          },
          Mask);
      VecCost += TTIRef.getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc,
                                       FinalVecTy, Mask);
      // Patterns like [fadd,fsub] can be combined into a single instruction
      // in x86. Reordering them into [fsub,fadd] blocks this pattern. So we
      // need to take into account their order when looking for the most used
      // order.
      unsigned Opcode0 = E->getOpcode();
      unsigned Opcode1 = E->getAltOpcode();
      // The opcode mask selects between the two opcodes.
      SmallBitVector OpcodeMask(E->Scalars.size(), false);
      for (unsigned Lane : seq<unsigned>(0, E->Scalars.size()))
        if (cast<Instruction>(E->Scalars[Lane])->getOpcode() == Opcode1)
          OpcodeMask.set(Lane);
      // If this pattern is supported by the target then we consider the
      // order.
      if (TTIRef.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask)) {
        InstructionCost AltVecCost = TTIRef.getAltInstrCost(
            VecTy, Opcode0, Opcode1, OpcodeMask, CostKind);
        return AltVecCost < VecCost ? AltVecCost : VecCost;
      }
      // TODO: Check the reverse order too.
      return VecCost;
    };
    return GetCostDiff(GetScalarCost, GetVectorCost);
  }
  default:
    llvm_unreachable("Unknown instruction");
  }
}

bool BoUpSLP::isFullyVectorizableTinyTree(bool ForReduction) const {
  LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
                    << VectorizableTree.size() << " is fully vectorizable.\n");

  auto &&AreVectorizableGathers = [this](const TreeEntry *TE, unsigned Limit) {
    SmallVector<int> Mask;
    return TE->State == TreeEntry::NeedToGather &&
           !any_of(TE->Scalars,
                   [this](Value *V) { return EphValues.contains(V); }) &&
           (allConstant(TE->Scalars) || isSplat(TE->Scalars) ||
            TE->Scalars.size() < Limit ||
            ((TE->getOpcode() == Instruction::ExtractElement ||
              all_of(TE->Scalars,
                     [](Value *V) {
                       return isa<ExtractElementInst, UndefValue>(V);
                     })) &&
             isFixedVectorShuffle(TE->Scalars, Mask)) ||
            (TE->State == TreeEntry::NeedToGather &&
             TE->getOpcode() == Instruction::Load && !TE->isAltShuffle()));
  };

  // We only handle trees of heights 1 and 2.
  if (VectorizableTree.size() == 1 &&
      (VectorizableTree[0]->State == TreeEntry::Vectorize ||
       (ForReduction &&
        AreVectorizableGathers(VectorizableTree[0].get(),
                               VectorizableTree[0]->Scalars.size()) &&
        VectorizableTree[0]->getVectorFactor() > 2)))
    return true;

  if (VectorizableTree.size() != 2)
    return false;

  // Handle splat and all-constants stores. Also try to vectorize tiny trees
  // with the second gather nodes if they have fewer scalar operands than the
  // initial tree element (it may be profitable to shuffle the second gather)
  // or they are extractelements, which form a shuffle.
  SmallVector<int> Mask;
  if (VectorizableTree[0]->State == TreeEntry::Vectorize &&
      AreVectorizableGathers(VectorizableTree[1].get(),
                             VectorizableTree[0]->Scalars.size()))
    return true;

  // Gathering cost would be too much for tiny trees.
  if (VectorizableTree[0]->State == TreeEntry::NeedToGather ||
      (VectorizableTree[1]->State == TreeEntry::NeedToGather &&
       VectorizableTree[0]->State != TreeEntry::ScatterVectorize &&
       VectorizableTree[0]->State != TreeEntry::PossibleStridedVectorize))
    return false;

  return true;
}

static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts,
                                       TargetTransformInfo *TTI,
                                       bool MustMatchOrInst) {
  // Look past the root to find a source value. Arbitrarily follow the
  // path through operand 0 of any 'or'. Also, peek through optional
  // shift-left-by-multiple-of-8-bits.
  Value *ZextLoad = Root;
  const APInt *ShAmtC;
  bool FoundOr = false;
  while (!isa<ConstantExpr>(ZextLoad) &&
         (match(ZextLoad, m_Or(m_Value(), m_Value())) ||
          (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) &&
           ShAmtC->urem(8) == 0))) {
    auto *BinOp = cast<BinaryOperator>(ZextLoad);
    ZextLoad = BinOp->getOperand(0);
    if (BinOp->getOpcode() == Instruction::Or)
      FoundOr = true;
  }
  // Check if the input is an extended load of the required or/shift expression.
  Value *Load;
  if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root ||
      !match(ZextLoad, m_ZExt(m_Value(Load))) || !isa<LoadInst>(Load))
    return false;

  // Require that the total load bit width is a legal integer type.
  // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target.
  // But <16 x i8> --> i128 is not, so the backend probably can't reduce it.
  Type *SrcTy = Load->getType();
  unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts;
  if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth)))
    return false;

  // Everything matched - assume that we can fold the whole sequence using
  // load combining.
  LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at "
                    << *(cast<Instruction>(Root)) << "\n");

  return true;
}

bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const {
  if (RdxKind != RecurKind::Or)
    return false;

  unsigned NumElts = VectorizableTree[0]->Scalars.size();
  Value *FirstReduced = VectorizableTree[0]->Scalars[0];
  return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI,
                                    /* MatchOr */ false);
}

bool BoUpSLP::isLoadCombineCandidate() const {
  // Peek through a final sequence of stores and check if all operations are
  // likely to be load-combined.
  unsigned NumElts = VectorizableTree[0]->Scalars.size();
  for (Value *Scalar : VectorizableTree[0]->Scalars) {
    Value *X;
    if (!match(Scalar, m_Store(m_Value(X), m_Value())) ||
        !isLoadCombineCandidateImpl(X, NumElts, TTI, /* MatchOr */ true))
      return false;
  }
  return true;
}

bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const {
  // No need to vectorize inserts of gathered values.
  if (VectorizableTree.size() == 2 &&
      isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) &&
      VectorizableTree[1]->State == TreeEntry::NeedToGather &&
      (VectorizableTree[1]->getVectorFactor() <= 2 ||
       !(isSplat(VectorizableTree[1]->Scalars) ||
         allConstant(VectorizableTree[1]->Scalars))))
    return true;

  // If the graph includes only PHI nodes and gathers, it is definitely not
  // profitable for vectorization and we can skip it if the cost threshold is
  // at its default. The cost of vectorized PHI nodes is almost always 0 + the
  // cost of gathers/buildvectors.
  constexpr int Limit = 4;
  if (!ForReduction && !SLPCostThreshold.getNumOccurrences() &&
      !VectorizableTree.empty() &&
      all_of(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
        return (TE->State == TreeEntry::NeedToGather &&
                TE->getOpcode() != Instruction::ExtractElement &&
                count_if(TE->Scalars,
                         [](Value *V) { return isa<ExtractElementInst>(V); }) <=
                    Limit) ||
               TE->getOpcode() == Instruction::PHI;
      }))
    return true;

  // We can vectorize the tree if its size is greater than or equal to the
  // minimum size specified by the MinTreeSize command line option.
  if (VectorizableTree.size() >= MinTreeSize)
    return false;

  // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
  // can vectorize it if we can prove it fully vectorizable.
  if (isFullyVectorizableTinyTree(ForReduction))
    return false;

  assert(VectorizableTree.empty()
             ? ExternalUses.empty()
             : true && "We shouldn't have any external users");

  // Otherwise, we can't vectorize the tree. It is both tiny and not fully
  // vectorizable.
  return true;
}

InstructionCost BoUpSLP::getSpillCost() const {
  // Walk from the bottom of the tree to the top, tracking which values are
  // live.
  // When we see a call instruction that is not part of our tree, query TTI to
  // see if there is a cost to keeping values live over it (for example, if
  // spills and fills are required).
  unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
  InstructionCost Cost = 0;

  SmallPtrSet<Instruction *, 4> LiveValues;
  Instruction *PrevInst = nullptr;

  // The entries in VectorizableTree are not necessarily ordered by their
  // position in basic blocks. Collect them and order them by dominance so
  // later instructions are guaranteed to be visited first. For instructions
  // in different basic blocks, we only scan to the beginning of the block, so
  // their order does not matter, as long as all instructions in a basic block
  // are grouped together. Using dominance ensures a deterministic order.
  SmallVector<Instruction *, 16> OrderedScalars;
  for (const auto &TEPtr : VectorizableTree) {
    if (TEPtr->State != TreeEntry::Vectorize)
      continue;
    Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]);
    if (!Inst)
      continue;
    OrderedScalars.push_back(Inst);
  }
  llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) {
    auto *NodeA = DT->getNode(A->getParent());
    auto *NodeB = DT->getNode(B->getParent());
    assert(NodeA && "Should only process reachable instructions");
    assert(NodeB && "Should only process reachable instructions");
    assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
           "Different nodes should have different DFS numbers");
    if (NodeA != NodeB)
      return NodeA->getDFSNumIn() > NodeB->getDFSNumIn();
    return B->comesBefore(A);
  });

  for (Instruction *Inst : OrderedScalars) {
    if (!PrevInst) {
      PrevInst = Inst;
      continue;
    }

    // Update LiveValues.
    LiveValues.erase(PrevInst);
    for (auto &J : PrevInst->operands()) {
      if (isa<Instruction>(&*J) && getTreeEntry(&*J))
        LiveValues.insert(cast<Instruction>(&*J));
    }

    LLVM_DEBUG({
      dbgs() << "SLP: #LV: " << LiveValues.size();
      for (auto *X : LiveValues)
        dbgs() << " " << X->getName();
      dbgs() << ", Looking at ";
      Inst->dump();
    });

    // Now find the sequence of instructions between PrevInst and Inst.
    unsigned NumCalls = 0;
    BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
                                 PrevInstIt =
                                     PrevInst->getIterator().getReverse();
    while (InstIt != PrevInstIt) {
      if (PrevInstIt == PrevInst->getParent()->rend()) {
        PrevInstIt = Inst->getParent()->rbegin();
        continue;
      }

      auto NoCallIntrinsic = [this](Instruction *I) {
        if (auto *II = dyn_cast<IntrinsicInst>(I)) {
          if (II->isAssumeLikeIntrinsic())
            return true;
          FastMathFlags FMF;
          SmallVector<Type *, 4> Tys;
          for (auto &ArgOp : II->args())
            Tys.push_back(ArgOp->getType());
          if (auto *FPMO = dyn_cast<FPMathOperator>(II))
            FMF = FPMO->getFastMathFlags();
          IntrinsicCostAttributes ICA(II->getIntrinsicID(), II->getType(), Tys,
                                      FMF);
          InstructionCost IntrCost =
              TTI->getIntrinsicInstrCost(ICA, TTI::TCK_RecipThroughput);
          InstructionCost CallCost = TTI->getCallInstrCost(
              nullptr, II->getType(), Tys, TTI::TCK_RecipThroughput);
          if (IntrCost < CallCost)
            return true;
        }
        return false;
      };

      // Debug information does not impact spill cost.
      if (isa<CallBase>(&*PrevInstIt) && !NoCallIntrinsic(&*PrevInstIt) &&
          &*PrevInstIt != PrevInst)
        NumCalls++;

      ++PrevInstIt;
    }

    if (NumCalls) {
      SmallVector<Type *, 4> V;
      for (auto *II : LiveValues) {
        auto *ScalarTy = II->getType();
        if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy))
          ScalarTy = VectorTy->getElementType();
        V.push_back(FixedVectorType::get(ScalarTy, BundleWidth));
      }
      Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
    }

    PrevInst = Inst;
  }

  return Cost;
}

/// Checks if the \p IE1 instruction is followed by the \p IE2 instruction in
/// the buildvector sequence.
static bool isFirstInsertElement(const InsertElementInst *IE1,
                                 const InsertElementInst *IE2) {
  if (IE1 == IE2)
    return false;
  const auto *I1 = IE1;
  const auto *I2 = IE2;
  const InsertElementInst *PrevI1;
  const InsertElementInst *PrevI2;
  unsigned Idx1 = *getInsertIndex(IE1);
  unsigned Idx2 = *getInsertIndex(IE2);
  do {
    if (I2 == IE1)
      return true;
    if (I1 == IE2)
      return false;
    PrevI1 = I1;
    PrevI2 = I2;
    if (I1 && (I1 == IE1 || I1->hasOneUse()) &&
        getInsertIndex(I1).value_or(Idx2) != Idx2)
      I1 = dyn_cast<InsertElementInst>(I1->getOperand(0));
    if (I2 && ((I2 == IE2 || I2->hasOneUse())) &&
        getInsertIndex(I2).value_or(Idx1) != Idx1)
      I2 = dyn_cast<InsertElementInst>(I2->getOperand(0));
  } while ((I1 && PrevI1 != I1) || (I2 && PrevI2 != I2));
  llvm_unreachable("Two different buildvectors not expected.");
}

namespace {
/// Returns the incoming Value *, if the requested type is Value * too, or a
/// default value otherwise.
struct ValueSelect {
  template <typename U>
  static std::enable_if_t<std::is_same_v<Value *, U>, Value *> get(Value *V) {
    return V;
  }
  template <typename U>
  static std::enable_if_t<!std::is_same_v<Value *, U>, U> get(Value *) {
    return U();
  }
};
} // namespace

/// Does the analysis of the provided shuffle masks and performs the requested
/// actions on the vectors with the given shuffle masks. It tries to do it in
/// several steps.
/// 1. If the Base vector is not an undef vector, resize the very first mask to
/// have a common VF and perform the action for 2 input vectors (including the
/// non-undef Base). Other shuffle masks are combined with the result of the
/// first stage and processed as a shuffle of 2 elements.
/// 2. If the Base is an undef vector and there is only 1 shuffle mask, perform
/// the action only for 1 vector with the given mask, if it is not the identity
/// mask.
/// 3. If > 2 masks are used, perform the remaining shuffle actions for 2
/// vectors, combining the masks properly between the steps.
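/// For example, with an undef Base and three masks, the first two vectors are
/// shuffled together using the combined first/second mask, and the third
/// vector is then shuffled against that intermediate result with the
/// re-indexed third mask.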
template <typename T>
static T *performExtractsShuffleAction(
    MutableArrayRef<std::pair<T *, SmallVector<int>>> ShuffleMask, Value *Base,
    function_ref<unsigned(T *)> GetVF,
    function_ref<std::pair<T *, bool>(T *, ArrayRef<int>, bool)> ResizeAction,
    function_ref<T *(ArrayRef<int>, ArrayRef<T *>)> Action) {
  assert(!ShuffleMask.empty() && "Empty list of shuffles for inserts.");
  SmallVector<int> Mask(ShuffleMask.begin()->second);
  auto VMIt = std::next(ShuffleMask.begin());
  T *Prev = nullptr;
  SmallBitVector UseMask =
      buildUseMask(Mask.size(), Mask, UseMask::UndefsAsMask);
  SmallBitVector IsBaseUndef = isUndefVector(Base, UseMask);
  if (!IsBaseUndef.all()) {
    // Base is not undef, need to combine it with the next subvectors.
    std::pair<T *, bool> Res =
        ResizeAction(ShuffleMask.begin()->first, Mask, /*ForSingleMask=*/false);
    SmallBitVector IsBasePoison = isUndefVector<true>(Base, UseMask);
    for (unsigned Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
      if (Mask[Idx] == PoisonMaskElem)
        Mask[Idx] = IsBasePoison.test(Idx) ? PoisonMaskElem : Idx;
      else
        Mask[Idx] = (Res.second ? Idx : Mask[Idx]) + VF;
    }
    auto *V = ValueSelect::get<T *>(Base);
    (void)V;
    assert((!V || GetVF(V) == Mask.size()) &&
           "Expected base vector of VF number of elements.");
    Prev = Action(Mask, {nullptr, Res.first});
  } else if (ShuffleMask.size() == 1) {
    // Base is undef and only 1 vector is shuffled - perform the action only
    // for a single vector, if the mask is not the identity mask.
    std::pair<T *, bool> Res = ResizeAction(ShuffleMask.begin()->first, Mask,
                                            /*ForSingleMask=*/true);
    if (Res.second)
      // Identity mask is found.
      Prev = Res.first;
    else
      Prev = Action(Mask, {ShuffleMask.begin()->first});
  } else {
    // Base is undef and at least 2 input vectors are shuffled - perform
    // shuffles of 2 vectors step by step, combining the shuffles between the
    // steps.
    unsigned Vec1VF = GetVF(ShuffleMask.begin()->first);
    unsigned Vec2VF = GetVF(VMIt->first);
    if (Vec1VF == Vec2VF) {
      // No need to resize the input vectors since they are of the same size,
      // we can shuffle them directly.
      ArrayRef<int> SecMask = VMIt->second;
      for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) {
        if (SecMask[I] != PoisonMaskElem) {
          assert(Mask[I] == PoisonMaskElem && "Multiple uses of scalars.");
          Mask[I] = SecMask[I] + Vec1VF;
        }
      }
      Prev = Action(Mask, {ShuffleMask.begin()->first, VMIt->first});
    } else {
      // Vectors of different sizes - resize and reshuffle.
      std::pair<T *, bool> Res1 = ResizeAction(ShuffleMask.begin()->first, Mask,
                                               /*ForSingleMask=*/false);
      std::pair<T *, bool> Res2 =
          ResizeAction(VMIt->first, VMIt->second, /*ForSingleMask=*/false);
      ArrayRef<int> SecMask = VMIt->second;
      for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) {
        if (Mask[I] != PoisonMaskElem) {
          assert(SecMask[I] == PoisonMaskElem && "Multiple uses of scalars.");
          if (Res1.second)
            Mask[I] = I;
        } else if (SecMask[I] != PoisonMaskElem) {
          assert(Mask[I] == PoisonMaskElem && "Multiple uses of scalars.");
I : SecMask[I]) + VF; 8866 } 8867 } 8868 Prev = Action(Mask, {Res1.first, Res2.first}); 8869 } 8870 VMIt = std::next(VMIt); 8871 } 8872 bool IsBaseNotUndef = !IsBaseUndef.all(); 8873 (void)IsBaseNotUndef; 8874 // Perform requested actions for the remaining masks/vectors. 8875 for (auto E = ShuffleMask.end(); VMIt != E; ++VMIt) { 8876 // Shuffle other input vectors, if any. 8877 std::pair<T *, bool> Res = 8878 ResizeAction(VMIt->first, VMIt->second, /*ForSingleMask=*/false); 8879 ArrayRef<int> SecMask = VMIt->second; 8880 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) { 8881 if (SecMask[I] != PoisonMaskElem) { 8882 assert((Mask[I] == PoisonMaskElem || IsBaseNotUndef) && 8883 "Multiple uses of scalars."); 8884 Mask[I] = (Res.second ? I : SecMask[I]) + VF; 8885 } else if (Mask[I] != PoisonMaskElem) { 8886 Mask[I] = I; 8887 } 8888 } 8889 Prev = Action(Mask, {Prev, Res.first}); 8890 } 8891 return Prev; 8892 } 8893 8894 InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) { 8895 InstructionCost Cost = 0; 8896 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size " 8897 << VectorizableTree.size() << ".\n"); 8898 8899 unsigned BundleWidth = VectorizableTree[0]->Scalars.size(); 8900 8901 SmallPtrSet<Value *, 4> CheckedExtracts; 8902 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) { 8903 TreeEntry &TE = *VectorizableTree[I]; 8904 if (TE.State == TreeEntry::NeedToGather) { 8905 if (const TreeEntry *E = getTreeEntry(TE.getMainOp()); 8906 E && E->getVectorFactor() == TE.getVectorFactor() && 8907 E->isSame(TE.Scalars)) { 8908 // Some gather nodes might be absolutely the same as some vectorizable 8909 // nodes after reordering, need to handle it. 8910 LLVM_DEBUG(dbgs() << "SLP: Adding cost 0 for bundle " 8911 << shortBundleName(TE.Scalars) << ".\n" 8912 << "SLP: Current total cost = " << Cost << "\n"); 8913 continue; 8914 } 8915 } 8916 8917 InstructionCost C = getEntryCost(&TE, VectorizedVals, CheckedExtracts); 8918 Cost += C; 8919 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle " 8920 << shortBundleName(TE.Scalars) << ".\n" 8921 << "SLP: Current total cost = " << Cost << "\n"); 8922 } 8923 8924 SmallPtrSet<Value *, 16> ExtractCostCalculated; 8925 InstructionCost ExtractCost = 0; 8926 SmallVector<MapVector<const TreeEntry *, SmallVector<int>>> ShuffleMasks; 8927 SmallVector<std::pair<Value *, const TreeEntry *>> FirstUsers; 8928 SmallVector<APInt> DemandedElts; 8929 SmallDenseSet<Value *, 4> UsedInserts; 8930 DenseSet<Value *> VectorCasts; 8931 for (ExternalUser &EU : ExternalUses) { 8932 // We only add extract cost once for the same scalar. 8933 if (!isa_and_nonnull<InsertElementInst>(EU.User) && 8934 !ExtractCostCalculated.insert(EU.Scalar).second) 8935 continue; 8936 8937 // Uses by ephemeral values are free (because the ephemeral value will be 8938 // removed prior to code generation, and so the extraction will be 8939 // removed as well). 8940 if (EphValues.count(EU.User)) 8941 continue; 8942 8943 // No extract cost for vector "scalar" 8944 if (isa<FixedVectorType>(EU.Scalar->getType())) 8945 continue; 8946 8947 // If found user is an insertelement, do not calculate extract cost but try 8948 // to detect it as a final shuffled/identity match. 
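    // Illustrative sketch (hypothetical IR): a buildvector chain such as
    //   %i0 = insertelement <4 x i32> poison, i32 %a, i32 0
    //   %i1 = insertelement <4 x i32> %i0, i32 %b, i32 1
    // where %a and %b are lanes of an already vectorized node may be modeled
    // as a shuffle of that vectorized value instead of paying for the
    // individual extracts.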
8949 if (auto *VU = dyn_cast_or_null<InsertElementInst>(EU.User)) { 8950 if (auto *FTy = dyn_cast<FixedVectorType>(VU->getType())) { 8951 if (!UsedInserts.insert(VU).second) 8952 continue; 8953 std::optional<unsigned> InsertIdx = getInsertIndex(VU); 8954 if (InsertIdx) { 8955 const TreeEntry *ScalarTE = getTreeEntry(EU.Scalar); 8956 auto *It = find_if( 8957 FirstUsers, 8958 [this, VU](const std::pair<Value *, const TreeEntry *> &Pair) { 8959 return areTwoInsertFromSameBuildVector( 8960 VU, cast<InsertElementInst>(Pair.first), 8961 [this](InsertElementInst *II) -> Value * { 8962 Value *Op0 = II->getOperand(0); 8963 if (getTreeEntry(II) && !getTreeEntry(Op0)) 8964 return nullptr; 8965 return Op0; 8966 }); 8967 }); 8968 int VecId = -1; 8969 if (It == FirstUsers.end()) { 8970 (void)ShuffleMasks.emplace_back(); 8971 SmallVectorImpl<int> &Mask = ShuffleMasks.back()[ScalarTE]; 8972 if (Mask.empty()) 8973 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 8974 // Find the insertvector, vectorized in tree, if any. 8975 Value *Base = VU; 8976 while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) { 8977 if (IEBase != EU.User && 8978 (!IEBase->hasOneUse() || 8979 getInsertIndex(IEBase).value_or(*InsertIdx) == *InsertIdx)) 8980 break; 8981 // Build the mask for the vectorized insertelement instructions. 8982 if (const TreeEntry *E = getTreeEntry(IEBase)) { 8983 VU = IEBase; 8984 do { 8985 IEBase = cast<InsertElementInst>(Base); 8986 int Idx = *getInsertIndex(IEBase); 8987 assert(Mask[Idx] == PoisonMaskElem && 8988 "InsertElementInstruction used already."); 8989 Mask[Idx] = Idx; 8990 Base = IEBase->getOperand(0); 8991 } while (E == getTreeEntry(Base)); 8992 break; 8993 } 8994 Base = cast<InsertElementInst>(Base)->getOperand(0); 8995 } 8996 FirstUsers.emplace_back(VU, ScalarTE); 8997 DemandedElts.push_back(APInt::getZero(FTy->getNumElements())); 8998 VecId = FirstUsers.size() - 1; 8999 auto It = MinBWs.find(ScalarTE); 9000 if (It != MinBWs.end() && VectorCasts.insert(EU.Scalar).second) { 9001 unsigned BWSz = It->second.second; 9002 unsigned SrcBWSz = DL->getTypeSizeInBits(FTy->getElementType()); 9003 unsigned VecOpcode; 9004 if (BWSz < SrcBWSz) 9005 VecOpcode = Instruction::Trunc; 9006 else 9007 VecOpcode = 9008 It->second.second ? Instruction::SExt : Instruction::ZExt; 9009 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 9010 InstructionCost C = TTI->getCastInstrCost( 9011 VecOpcode, FTy, 9012 FixedVectorType::get( 9013 IntegerType::get(FTy->getContext(), It->second.first), 9014 FTy->getNumElements()), 9015 TTI::CastContextHint::None, CostKind); 9016 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 9017 << " for extending externally used vector with " 9018 "non-equal minimum bitwidth.\n"); 9019 Cost += C; 9020 } 9021 } else { 9022 if (isFirstInsertElement(VU, cast<InsertElementInst>(It->first))) 9023 It->first = VU; 9024 VecId = std::distance(FirstUsers.begin(), It); 9025 } 9026 int InIdx = *InsertIdx; 9027 SmallVectorImpl<int> &Mask = ShuffleMasks[VecId][ScalarTE]; 9028 if (Mask.empty()) 9029 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 9030 Mask[InIdx] = EU.Lane; 9031 DemandedElts[VecId].setBit(InIdx); 9032 continue; 9033 } 9034 } 9035 } 9036 9037 // If we plan to rewrite the tree in a smaller type, we will need to sign 9038 // extend the extracted value back to the original type. Here, we account 9039 // for the extract and the added cost of the sign extend if needed. 
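    // For illustration only (hypothetical types): if the tree was demoted to
    // i8 but the external user still expects i32, the extract is modeled as
    //   %e = extractelement <8 x i8> %vec, i32 %lane
    //   %x = sext i8 %e to i32
    // (or zext, depending on the recorded signedness) and is costed via
    // getExtractWithExtendCost below rather than as a plain extractelement.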
9040 auto *VecTy = FixedVectorType::get(EU.Scalar->getType(), BundleWidth); 9041 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 9042 auto It = MinBWs.find(getTreeEntry(EU.Scalar)); 9043 if (It != MinBWs.end()) { 9044 auto *MinTy = IntegerType::get(F->getContext(), It->second.first); 9045 unsigned Extend = 9046 It->second.second ? Instruction::SExt : Instruction::ZExt; 9047 VecTy = FixedVectorType::get(MinTy, BundleWidth); 9048 ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(), 9049 VecTy, EU.Lane); 9050 } else { 9051 ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, 9052 CostKind, EU.Lane); 9053 } 9054 } 9055 // Add reduced value cost, if resized. 9056 if (!VectorizedVals.empty()) { 9057 auto BWIt = MinBWs.find(VectorizableTree.front().get()); 9058 if (BWIt != MinBWs.end()) { 9059 Type *DstTy = VectorizableTree.front()->Scalars.front()->getType(); 9060 unsigned OriginalSz = DL->getTypeSizeInBits(DstTy); 9061 unsigned Opcode = Instruction::Trunc; 9062 if (OriginalSz < BWIt->second.first) 9063 Opcode = BWIt->second.second ? Instruction::SExt : Instruction::ZExt; 9064 Type *SrcTy = IntegerType::get(DstTy->getContext(), BWIt->second.first); 9065 Cost += TTI->getCastInstrCost(Opcode, DstTy, SrcTy, 9066 TTI::CastContextHint::None, 9067 TTI::TCK_RecipThroughput); 9068 } 9069 } 9070 9071 InstructionCost SpillCost = getSpillCost(); 9072 Cost += SpillCost + ExtractCost; 9073 auto &&ResizeToVF = [this, &Cost](const TreeEntry *TE, ArrayRef<int> Mask, 9074 bool) { 9075 InstructionCost C = 0; 9076 unsigned VF = Mask.size(); 9077 unsigned VecVF = TE->getVectorFactor(); 9078 if (VF != VecVF && 9079 (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); }) || 9080 !ShuffleVectorInst::isIdentityMask(Mask, VF))) { 9081 SmallVector<int> OrigMask(VecVF, PoisonMaskElem); 9082 std::copy(Mask.begin(), std::next(Mask.begin(), std::min(VF, VecVF)), 9083 OrigMask.begin()); 9084 C = TTI->getShuffleCost( 9085 TTI::SK_PermuteSingleSrc, 9086 FixedVectorType::get(TE->getMainOp()->getType(), VecVF), OrigMask); 9087 LLVM_DEBUG( 9088 dbgs() << "SLP: Adding cost " << C 9089 << " for final shuffle of insertelement external users.\n"; 9090 TE->dump(); dbgs() << "SLP: Current total cost = " << Cost << "\n"); 9091 Cost += C; 9092 return std::make_pair(TE, true); 9093 } 9094 return std::make_pair(TE, false); 9095 }; 9096 // Calculate the cost of the reshuffled vectors, if any. 
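  // Illustrative sketch (hypothetical sizes): if a tree entry was vectorized
  // with VF 8 but the insertelement users only need a non-identity VF-4 mask
  // such as <1, 0, 3, 2>, ResizeToVF above charges a single-source permute of
  // the wider vector, conceptually
  //   shufflevector <8 x i32> %vec, <8 x i32> poison, <1, 0, 3, 2, u, u, u, u>
  // before the final shuffle for the external users is costed.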
9097 for (int I = 0, E = FirstUsers.size(); I < E; ++I) { 9098 Value *Base = cast<Instruction>(FirstUsers[I].first)->getOperand(0); 9099 auto Vector = ShuffleMasks[I].takeVector(); 9100 unsigned VF = 0; 9101 auto EstimateShufflesCost = [&](ArrayRef<int> Mask, 9102 ArrayRef<const TreeEntry *> TEs) { 9103 assert((TEs.size() == 1 || TEs.size() == 2) && 9104 "Expected exactly 1 or 2 tree entries."); 9105 if (TEs.size() == 1) { 9106 if (VF == 0) 9107 VF = TEs.front()->getVectorFactor(); 9108 auto *FTy = 9109 FixedVectorType::get(TEs.back()->Scalars.front()->getType(), VF); 9110 if (!ShuffleVectorInst::isIdentityMask(Mask, VF) && 9111 !all_of(enumerate(Mask), [=](const auto &Data) { 9112 return Data.value() == PoisonMaskElem || 9113 (Data.index() < VF && 9114 static_cast<int>(Data.index()) == Data.value()); 9115 })) { 9116 InstructionCost C = 9117 TTI->getShuffleCost(TTI::SK_PermuteSingleSrc, FTy, Mask); 9118 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 9119 << " for final shuffle of insertelement " 9120 "external users.\n"; 9121 TEs.front()->dump(); 9122 dbgs() << "SLP: Current total cost = " << Cost << "\n"); 9123 Cost += C; 9124 } 9125 } else { 9126 if (VF == 0) { 9127 if (TEs.front() && 9128 TEs.front()->getVectorFactor() == TEs.back()->getVectorFactor()) 9129 VF = TEs.front()->getVectorFactor(); 9130 else 9131 VF = Mask.size(); 9132 } 9133 auto *FTy = 9134 FixedVectorType::get(TEs.back()->Scalars.front()->getType(), VF); 9135 InstructionCost C = 9136 TTI->getShuffleCost(TTI::SK_PermuteTwoSrc, FTy, Mask); 9137 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C 9138 << " for final shuffle of vector node and external " 9139 "insertelement users.\n"; 9140 if (TEs.front()) { TEs.front()->dump(); } TEs.back()->dump(); 9141 dbgs() << "SLP: Current total cost = " << Cost << "\n"); 9142 Cost += C; 9143 } 9144 VF = Mask.size(); 9145 return TEs.back(); 9146 }; 9147 (void)performExtractsShuffleAction<const TreeEntry>( 9148 MutableArrayRef(Vector.data(), Vector.size()), Base, 9149 [](const TreeEntry *E) { return E->getVectorFactor(); }, ResizeToVF, 9150 EstimateShufflesCost); 9151 InstructionCost InsertCost = TTI->getScalarizationOverhead( 9152 cast<FixedVectorType>(FirstUsers[I].first->getType()), DemandedElts[I], 9153 /*Insert*/ true, /*Extract*/ false, TTI::TCK_RecipThroughput); 9154 Cost -= InsertCost; 9155 } 9156 9157 #ifndef NDEBUG 9158 SmallString<256> Str; 9159 { 9160 raw_svector_ostream OS(Str); 9161 OS << "SLP: Spill Cost = " << SpillCost << ".\n" 9162 << "SLP: Extract Cost = " << ExtractCost << ".\n" 9163 << "SLP: Total Cost = " << Cost << ".\n"; 9164 } 9165 LLVM_DEBUG(dbgs() << Str); 9166 if (ViewSLPTree) 9167 ViewGraph(this, "SLP" + F->getName(), false, Str); 9168 #endif 9169 9170 return Cost; 9171 } 9172 9173 /// Tries to find extractelement instructions with constant indices from fixed 9174 /// vector type and gather such instructions into a bunch, which highly likely 9175 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt was 9176 /// successful, the matched scalars are replaced by poison values in \p VL for 9177 /// future analysis. 9178 std::optional<TTI::ShuffleKind> 9179 BoUpSLP::tryToGatherSingleRegisterExtractElements( 9180 MutableArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) const { 9181 // Scan list of gathered scalars for extractelements that can be represented 9182 // as shuffles. 
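  // Illustrative sketch (hypothetical IR): a gathered list such as
  //   %x0 = extractelement <4 x i32> %a, i32 0
  //   %x1 = extractelement <4 x i32> %b, i32 1
  //   %x2 = extractelement <4 x i32> %a, i32 2
  //   %x3 = extractelement <4 x i32> %b, i32 3
  // can be covered by a single two-source shuffle of %a and %b with mask
  // <0, 5, 2, 7>, which is what this routine tries to detect.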
9183 MapVector<Value *, SmallVector<int>> VectorOpToIdx; 9184 SmallVector<int> UndefVectorExtracts; 9185 for (int I = 0, E = VL.size(); I < E; ++I) { 9186 auto *EI = dyn_cast<ExtractElementInst>(VL[I]); 9187 if (!EI) { 9188 if (isa<UndefValue>(VL[I])) 9189 UndefVectorExtracts.push_back(I); 9190 continue; 9191 } 9192 auto *VecTy = dyn_cast<FixedVectorType>(EI->getVectorOperandType()); 9193 if (!VecTy || !isa<ConstantInt, UndefValue>(EI->getIndexOperand())) 9194 continue; 9195 std::optional<unsigned> Idx = getExtractIndex(EI); 9196 // Undefined index. 9197 if (!Idx) { 9198 UndefVectorExtracts.push_back(I); 9199 continue; 9200 } 9201 SmallBitVector ExtractMask(VecTy->getNumElements(), true); 9202 ExtractMask.reset(*Idx); 9203 if (isUndefVector(EI->getVectorOperand(), ExtractMask).all()) { 9204 UndefVectorExtracts.push_back(I); 9205 continue; 9206 } 9207 VectorOpToIdx[EI->getVectorOperand()].push_back(I); 9208 } 9209 // Sort the vector operands by the maximum number of uses in extractelements. 9210 MapVector<unsigned, SmallVector<Value *>> VFToVector; 9211 for (const auto &Data : VectorOpToIdx) 9212 VFToVector[cast<FixedVectorType>(Data.first->getType())->getNumElements()] 9213 .push_back(Data.first); 9214 for (auto &Data : VFToVector) { 9215 stable_sort(Data.second, [&VectorOpToIdx](Value *V1, Value *V2) { 9216 return VectorOpToIdx.find(V1)->second.size() > 9217 VectorOpToIdx.find(V2)->second.size(); 9218 }); 9219 } 9220 // Find the best pair of the vectors with the same number of elements or a 9221 // single vector. 9222 const int UndefSz = UndefVectorExtracts.size(); 9223 unsigned SingleMax = 0; 9224 Value *SingleVec = nullptr; 9225 unsigned PairMax = 0; 9226 std::pair<Value *, Value *> PairVec(nullptr, nullptr); 9227 for (auto &Data : VFToVector) { 9228 Value *V1 = Data.second.front(); 9229 if (SingleMax < VectorOpToIdx[V1].size() + UndefSz) { 9230 SingleMax = VectorOpToIdx[V1].size() + UndefSz; 9231 SingleVec = V1; 9232 } 9233 Value *V2 = nullptr; 9234 if (Data.second.size() > 1) 9235 V2 = *std::next(Data.second.begin()); 9236 if (V2 && PairMax < VectorOpToIdx[V1].size() + VectorOpToIdx[V2].size() + 9237 UndefSz) { 9238 PairMax = VectorOpToIdx[V1].size() + VectorOpToIdx[V2].size() + UndefSz; 9239 PairVec = std::make_pair(V1, V2); 9240 } 9241 } 9242 if (SingleMax == 0 && PairMax == 0 && UndefSz == 0) 9243 return std::nullopt; 9244 // Check if better to perform a shuffle of 2 vectors or just of a single 9245 // vector. 9246 SmallVector<Value *> SavedVL(VL.begin(), VL.end()); 9247 SmallVector<Value *> GatheredExtracts( 9248 VL.size(), PoisonValue::get(VL.front()->getType())); 9249 if (SingleMax >= PairMax && SingleMax) { 9250 for (int Idx : VectorOpToIdx[SingleVec]) 9251 std::swap(GatheredExtracts[Idx], VL[Idx]); 9252 } else { 9253 for (Value *V : {PairVec.first, PairVec.second}) 9254 for (int Idx : VectorOpToIdx[V]) 9255 std::swap(GatheredExtracts[Idx], VL[Idx]); 9256 } 9257 // Add extracts from undefs too. 9258 for (int Idx : UndefVectorExtracts) 9259 std::swap(GatheredExtracts[Idx], VL[Idx]); 9260 // Check that gather of extractelements can be represented as just a 9261 // shuffle of a single/two vectors the scalars are extracted from. 9262 std::optional<TTI::ShuffleKind> Res = 9263 isFixedVectorShuffle(GatheredExtracts, Mask); 9264 if (!Res) { 9265 // TODO: try to check other subsets if possible. 9266 // Restore the original VL if attempt was not successful. 
9267 copy(SavedVL, VL.begin()); 9268 return std::nullopt; 9269 } 9270 // Restore unused scalars from mask, if some of the extractelements were not 9271 // selected for shuffle. 9272 for (int I = 0, E = GatheredExtracts.size(); I < E; ++I) { 9273 if (Mask[I] == PoisonMaskElem && !isa<PoisonValue>(GatheredExtracts[I]) && 9274 isa<UndefValue>(GatheredExtracts[I])) { 9275 std::swap(VL[I], GatheredExtracts[I]); 9276 continue; 9277 } 9278 auto *EI = dyn_cast<ExtractElementInst>(VL[I]); 9279 if (!EI || !isa<FixedVectorType>(EI->getVectorOperandType()) || 9280 !isa<ConstantInt, UndefValue>(EI->getIndexOperand()) || 9281 is_contained(UndefVectorExtracts, I)) 9282 continue; 9283 } 9284 return Res; 9285 } 9286 9287 /// Tries to find extractelement instructions with constant indices from fixed 9288 /// vector type and gather such instructions into a bunch, which highly likely 9289 /// might be detected as a shuffle of 1 or 2 input vectors. If this attempt was 9290 /// successful, the matched scalars are replaced by poison values in \p VL for 9291 /// future analysis. 9292 SmallVector<std::optional<TTI::ShuffleKind>> 9293 BoUpSLP::tryToGatherExtractElements(SmallVectorImpl<Value *> &VL, 9294 SmallVectorImpl<int> &Mask, 9295 unsigned NumParts) const { 9296 assert(NumParts > 0 && "NumParts expected be greater than or equal to 1."); 9297 SmallVector<std::optional<TTI::ShuffleKind>> ShufflesRes(NumParts); 9298 Mask.assign(VL.size(), PoisonMaskElem); 9299 unsigned SliceSize = VL.size() / NumParts; 9300 for (unsigned Part = 0; Part < NumParts; ++Part) { 9301 // Scan list of gathered scalars for extractelements that can be represented 9302 // as shuffles. 9303 MutableArrayRef<Value *> SubVL = 9304 MutableArrayRef(VL).slice(Part * SliceSize, SliceSize); 9305 SmallVector<int> SubMask; 9306 std::optional<TTI::ShuffleKind> Res = 9307 tryToGatherSingleRegisterExtractElements(SubVL, SubMask); 9308 ShufflesRes[Part] = Res; 9309 copy(SubMask, std::next(Mask.begin(), Part * SliceSize)); 9310 } 9311 if (none_of(ShufflesRes, [](const std::optional<TTI::ShuffleKind> &Res) { 9312 return Res.has_value(); 9313 })) 9314 ShufflesRes.clear(); 9315 return ShufflesRes; 9316 } 9317 9318 std::optional<TargetTransformInfo::ShuffleKind> 9319 BoUpSLP::isGatherShuffledSingleRegisterEntry( 9320 const TreeEntry *TE, ArrayRef<Value *> VL, MutableArrayRef<int> Mask, 9321 SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part) { 9322 Entries.clear(); 9323 // TODO: currently checking only for Scalars in the tree entry, need to count 9324 // reused elements too for better cost estimation. 9325 const EdgeInfo &TEUseEI = TE->UserTreeIndices.front(); 9326 const Instruction *TEInsertPt = &getLastInstructionInBundle(TEUseEI.UserTE); 9327 const BasicBlock *TEInsertBlock = nullptr; 9328 // Main node of PHI entries keeps the correct order of operands/incoming 9329 // blocks. 9330 if (auto *PHI = dyn_cast<PHINode>(TEUseEI.UserTE->getMainOp())) { 9331 TEInsertBlock = PHI->getIncomingBlock(TEUseEI.EdgeIdx); 9332 TEInsertPt = TEInsertBlock->getTerminator(); 9333 } else { 9334 TEInsertBlock = TEInsertPt->getParent(); 9335 } 9336 auto *NodeUI = DT->getNode(TEInsertBlock); 9337 assert(NodeUI && "Should only process reachable instructions"); 9338 SmallPtrSet<Value *, 4> GatheredScalars(VL.begin(), VL.end()); 9339 auto CheckOrdering = [&](const Instruction *InsertPt) { 9340 // Argument InsertPt is an instruction where vector code for some other 9341 // tree entry (one that shares one or more scalars with TE) is going to be 9342 // generated. 
This lambda returns true if insertion point of vector code 9343 // for the TE dominates that point (otherwise dependency is the other way 9344 // around). The other node is not limited to be of a gather kind. Gather 9345 // nodes are not scheduled and their vector code is inserted before their 9346 // first user. If user is PHI, that is supposed to be at the end of a 9347 // predecessor block. Otherwise it is the last instruction among scalars of 9348 // the user node. So, instead of checking dependency between instructions 9349 // themselves, we check dependency between their insertion points for vector 9350 // code (since each scalar instruction ends up as a lane of a vector 9351 // instruction). 9352 const BasicBlock *InsertBlock = InsertPt->getParent(); 9353 auto *NodeEUI = DT->getNode(InsertBlock); 9354 if (!NodeEUI) 9355 return false; 9356 assert((NodeUI == NodeEUI) == 9357 (NodeUI->getDFSNumIn() == NodeEUI->getDFSNumIn()) && 9358 "Different nodes should have different DFS numbers"); 9359 // Check the order of the gather nodes users. 9360 if (TEInsertPt->getParent() != InsertBlock && 9361 (DT->dominates(NodeUI, NodeEUI) || !DT->dominates(NodeEUI, NodeUI))) 9362 return false; 9363 if (TEInsertPt->getParent() == InsertBlock && 9364 TEInsertPt->comesBefore(InsertPt)) 9365 return false; 9366 return true; 9367 }; 9368 // Find all tree entries used by the gathered values. If no common entries 9369 // found - not a shuffle. 9370 // Here we build a set of tree nodes for each gathered value and trying to 9371 // find the intersection between these sets. If we have at least one common 9372 // tree node for each gathered value - we have just a permutation of the 9373 // single vector. If we have 2 different sets, we're in situation where we 9374 // have a permutation of 2 input vectors. 9375 SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs; 9376 DenseMap<Value *, int> UsedValuesEntry; 9377 for (Value *V : VL) { 9378 if (isConstant(V)) 9379 continue; 9380 // Build a list of tree entries where V is used. 9381 SmallPtrSet<const TreeEntry *, 4> VToTEs; 9382 for (const TreeEntry *TEPtr : ValueToGatherNodes.find(V)->second) { 9383 if (TEPtr == TE) 9384 continue; 9385 assert(any_of(TEPtr->Scalars, 9386 [&](Value *V) { return GatheredScalars.contains(V); }) && 9387 "Must contain at least single gathered value."); 9388 assert(TEPtr->UserTreeIndices.size() == 1 && 9389 "Expected only single user of a gather node."); 9390 const EdgeInfo &UseEI = TEPtr->UserTreeIndices.front(); 9391 9392 PHINode *UserPHI = dyn_cast<PHINode>(UseEI.UserTE->getMainOp()); 9393 const Instruction *InsertPt = 9394 UserPHI ? UserPHI->getIncomingBlock(UseEI.EdgeIdx)->getTerminator() 9395 : &getLastInstructionInBundle(UseEI.UserTE); 9396 if (TEInsertPt == InsertPt) { 9397 // If 2 gathers are operands of the same entry (regardless of whether 9398 // user is PHI or else), compare operands indices, use the earlier one 9399 // as the base. 9400 if (TEUseEI.UserTE == UseEI.UserTE && TEUseEI.EdgeIdx < UseEI.EdgeIdx) 9401 continue; 9402 // If the user instruction is used for some reason in different 9403 // vectorized nodes - make it depend on index. 9404 if (TEUseEI.UserTE != UseEI.UserTE && 9405 TEUseEI.UserTE->Idx < UseEI.UserTE->Idx) 9406 continue; 9407 } 9408 9409 // Check if the user node of the TE comes after user node of TEPtr, 9410 // otherwise TEPtr depends on TE. 
9411 if ((TEInsertBlock != InsertPt->getParent() || 9412 TEUseEI.EdgeIdx < UseEI.EdgeIdx || TEUseEI.UserTE != UseEI.UserTE) && 9413 !CheckOrdering(InsertPt)) 9414 continue; 9415 VToTEs.insert(TEPtr); 9416 } 9417 if (const TreeEntry *VTE = getTreeEntry(V)) { 9418 Instruction &LastBundleInst = getLastInstructionInBundle(VTE); 9419 if (&LastBundleInst == TEInsertPt || !CheckOrdering(&LastBundleInst)) 9420 continue; 9421 auto It = MinBWs.find(VTE); 9422 // If vectorize node is demoted - do not match. 9423 if (It != MinBWs.end() && 9424 It->second.first != DL->getTypeSizeInBits(V->getType())) 9425 continue; 9426 VToTEs.insert(VTE); 9427 } 9428 if (VToTEs.empty()) 9429 continue; 9430 if (UsedTEs.empty()) { 9431 // The first iteration, just insert the list of nodes to vector. 9432 UsedTEs.push_back(VToTEs); 9433 UsedValuesEntry.try_emplace(V, 0); 9434 } else { 9435 // Need to check if there are any previously used tree nodes which use V. 9436 // If there are no such nodes, consider that we have another one input 9437 // vector. 9438 SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs); 9439 unsigned Idx = 0; 9440 for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) { 9441 // Do we have a non-empty intersection of previously listed tree entries 9442 // and tree entries using current V? 9443 set_intersect(VToTEs, Set); 9444 if (!VToTEs.empty()) { 9445 // Yes, write the new subset and continue analysis for the next 9446 // scalar. 9447 Set.swap(VToTEs); 9448 break; 9449 } 9450 VToTEs = SavedVToTEs; 9451 ++Idx; 9452 } 9453 // No non-empty intersection found - need to add a second set of possible 9454 // source vectors. 9455 if (Idx == UsedTEs.size()) { 9456 // If the number of input vectors is greater than 2 - not a permutation, 9457 // fallback to the regular gather. 9458 // TODO: support multiple reshuffled nodes. 9459 if (UsedTEs.size() == 2) 9460 continue; 9461 UsedTEs.push_back(SavedVToTEs); 9462 Idx = UsedTEs.size() - 1; 9463 } 9464 UsedValuesEntry.try_emplace(V, Idx); 9465 } 9466 } 9467 9468 if (UsedTEs.empty()) { 9469 Entries.clear(); 9470 return std::nullopt; 9471 } 9472 9473 unsigned VF = 0; 9474 if (UsedTEs.size() == 1) { 9475 // Keep the order to avoid non-determinism. 9476 SmallVector<const TreeEntry *> FirstEntries(UsedTEs.front().begin(), 9477 UsedTEs.front().end()); 9478 sort(FirstEntries, [](const TreeEntry *TE1, const TreeEntry *TE2) { 9479 return TE1->Idx < TE2->Idx; 9480 }); 9481 // Try to find the perfect match in another gather node at first. 9482 auto *It = find_if(FirstEntries, [=](const TreeEntry *EntryPtr) { 9483 return EntryPtr->isSame(VL) || EntryPtr->isSame(TE->Scalars); 9484 }); 9485 if (It != FirstEntries.end() && 9486 ((*It)->getVectorFactor() == VL.size() || 9487 ((*It)->getVectorFactor() == TE->Scalars.size() && 9488 TE->ReuseShuffleIndices.size() == VL.size() && 9489 (*It)->isSame(TE->Scalars)))) { 9490 Entries.push_back(*It); 9491 if ((*It)->getVectorFactor() == VL.size()) { 9492 std::iota(std::next(Mask.begin(), Part * VL.size()), 9493 std::next(Mask.begin(), (Part + 1) * VL.size()), 0); 9494 } else { 9495 SmallVector<int> CommonMask = TE->getCommonMask(); 9496 copy(CommonMask, Mask.begin()); 9497 } 9498 // Clear undef scalars. 9499 for (int I = 0, Sz = VL.size(); I < Sz; ++I) 9500 if (isa<PoisonValue>(VL[I])) 9501 Mask[I] = PoisonMaskElem; 9502 return TargetTransformInfo::SK_PermuteSingleSrc; 9503 } 9504 // No perfect match, just shuffle, so choose the first tree node from the 9505 // tree. 
9506 Entries.push_back(FirstEntries.front()); 9507 } else { 9508 // Try to find nodes with the same vector factor. 9509 assert(UsedTEs.size() == 2 && "Expected at max 2 permuted entries."); 9510 // Keep the order of tree nodes to avoid non-determinism. 9511 DenseMap<int, const TreeEntry *> VFToTE; 9512 for (const TreeEntry *TE : UsedTEs.front()) { 9513 unsigned VF = TE->getVectorFactor(); 9514 auto It = VFToTE.find(VF); 9515 if (It != VFToTE.end()) { 9516 if (It->second->Idx > TE->Idx) 9517 It->getSecond() = TE; 9518 continue; 9519 } 9520 VFToTE.try_emplace(VF, TE); 9521 } 9522 // Same, keep the order to avoid non-determinism. 9523 SmallVector<const TreeEntry *> SecondEntries(UsedTEs.back().begin(), 9524 UsedTEs.back().end()); 9525 sort(SecondEntries, [](const TreeEntry *TE1, const TreeEntry *TE2) { 9526 return TE1->Idx < TE2->Idx; 9527 }); 9528 for (const TreeEntry *TE : SecondEntries) { 9529 auto It = VFToTE.find(TE->getVectorFactor()); 9530 if (It != VFToTE.end()) { 9531 VF = It->first; 9532 Entries.push_back(It->second); 9533 Entries.push_back(TE); 9534 break; 9535 } 9536 } 9537 // No 2 source vectors with the same vector factor - just choose 2 with max 9538 // index. 9539 if (Entries.empty()) { 9540 Entries.push_back( 9541 *std::max_element(UsedTEs.front().begin(), UsedTEs.front().end(), 9542 [](const TreeEntry *TE1, const TreeEntry *TE2) { 9543 return TE1->Idx < TE2->Idx; 9544 })); 9545 Entries.push_back(SecondEntries.front()); 9546 VF = std::max(Entries.front()->getVectorFactor(), 9547 Entries.back()->getVectorFactor()); 9548 } 9549 } 9550 9551 bool IsSplatOrUndefs = isSplat(VL) || all_of(VL, UndefValue::classof); 9552 // Checks if the 2 PHIs are compatible in terms of high possibility to be 9553 // vectorized. 9554 auto AreCompatiblePHIs = [&](Value *V, Value *V1) { 9555 auto *PHI = cast<PHINode>(V); 9556 auto *PHI1 = cast<PHINode>(V1); 9557 // Check that all incoming values are compatible/from same parent (if they 9558 // are instructions). 9559 // The incoming values are compatible if they all are constants, or 9560 // instruction with the same/alternate opcodes from the same basic block. 9561 for (int I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) { 9562 Value *In = PHI->getIncomingValue(I); 9563 Value *In1 = PHI1->getIncomingValue(I); 9564 if (isConstant(In) && isConstant(In1)) 9565 continue; 9566 if (!getSameOpcode({In, In1}, *TLI).getOpcode()) 9567 return false; 9568 if (cast<Instruction>(In)->getParent() != 9569 cast<Instruction>(In1)->getParent()) 9570 return false; 9571 } 9572 return true; 9573 }; 9574 // Check if the value can be ignored during analysis for shuffled gathers. 9575 // We suppose it is better to ignore instruction, which do not form splats, 9576 // are not vectorized/not extractelements (these instructions will be handled 9577 // by extractelements processing) or may form vector node in future. 9578 auto MightBeIgnored = [=](Value *V) { 9579 auto *I = dyn_cast<Instruction>(V); 9580 return I && !IsSplatOrUndefs && !ScalarToTreeEntry.count(I) && 9581 !isVectorLikeInstWithConstOps(I) && 9582 !areAllUsersVectorized(I, UserIgnoreList) && isSimple(I); 9583 }; 9584 // Check that the neighbor instruction may form a full vector node with the 9585 // current instruction V. It is possible, if they have same/alternate opcode 9586 // and same parent basic block. 
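  // Illustrative sketch (hypothetical IR): in a gathered list { %a0, %a1, %x, %y }
  // where %a0 and %a1 are adds from the same basic block that are not part of
  // the tree yet, the pair may later form its own vector node, so such
  // neighbors are preferably left out of the shuffled-gather match.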
9587 auto NeighborMightBeIgnored = [&](Value *V, int Idx) { 9588 Value *V1 = VL[Idx]; 9589 bool UsedInSameVTE = false; 9590 auto It = UsedValuesEntry.find(V1); 9591 if (It != UsedValuesEntry.end()) 9592 UsedInSameVTE = It->second == UsedValuesEntry.find(V)->second; 9593 return V != V1 && MightBeIgnored(V1) && !UsedInSameVTE && 9594 getSameOpcode({V, V1}, *TLI).getOpcode() && 9595 cast<Instruction>(V)->getParent() == 9596 cast<Instruction>(V1)->getParent() && 9597 (!isa<PHINode>(V1) || AreCompatiblePHIs(V, V1)); 9598 }; 9599 // Build a shuffle mask for better cost estimation and vector emission. 9600 SmallBitVector UsedIdxs(Entries.size()); 9601 SmallVector<std::pair<unsigned, int>> EntryLanes; 9602 for (int I = 0, E = VL.size(); I < E; ++I) { 9603 Value *V = VL[I]; 9604 auto It = UsedValuesEntry.find(V); 9605 if (It == UsedValuesEntry.end()) 9606 continue; 9607 // Do not try to shuffle scalars, if they are constants, or instructions 9608 // that can be vectorized as a result of the following vector build 9609 // vectorization. 9610 if (isConstant(V) || (MightBeIgnored(V) && 9611 ((I > 0 && NeighborMightBeIgnored(V, I - 1)) || 9612 (I != E - 1 && NeighborMightBeIgnored(V, I + 1))))) 9613 continue; 9614 unsigned Idx = It->second; 9615 EntryLanes.emplace_back(Idx, I); 9616 UsedIdxs.set(Idx); 9617 } 9618 // Iterate through all shuffled scalars and select entries, which can be used 9619 // for final shuffle. 9620 SmallVector<const TreeEntry *> TempEntries; 9621 for (unsigned I = 0, Sz = Entries.size(); I < Sz; ++I) { 9622 if (!UsedIdxs.test(I)) 9623 continue; 9624 // Fix the entry number for the given scalar. If it is the first entry, set 9625 // Pair.first to 0, otherwise to 1 (currently select at max 2 nodes). 9626 // These indices are used when calculating final shuffle mask as the vector 9627 // offset. 9628 for (std::pair<unsigned, int> &Pair : EntryLanes) 9629 if (Pair.first == I) 9630 Pair.first = TempEntries.size(); 9631 TempEntries.push_back(Entries[I]); 9632 } 9633 Entries.swap(TempEntries); 9634 if (EntryLanes.size() == Entries.size() && 9635 !VL.equals(ArrayRef(TE->Scalars) 9636 .slice(Part * VL.size(), 9637 std::min<int>(VL.size(), TE->Scalars.size())))) { 9638 // We may have here 1 or 2 entries only. If the number of scalars is equal 9639 // to the number of entries, no need to do the analysis, it is not very 9640 // profitable. Since VL is not the same as TE->Scalars, it means we already 9641 // have some shuffles before. Cut off not profitable case. 9642 Entries.clear(); 9643 return std::nullopt; 9644 } 9645 // Build the final mask, check for the identity shuffle, if possible. 9646 bool IsIdentity = Entries.size() == 1; 9647 // Pair.first is the offset to the vector, while Pair.second is the index of 9648 // scalar in the list. 9649 for (const std::pair<unsigned, int> &Pair : EntryLanes) { 9650 unsigned Idx = Part * VL.size() + Pair.second; 9651 Mask[Idx] = Pair.first * VF + 9652 Entries[Pair.first]->findLaneForValue(VL[Pair.second]); 9653 IsIdentity &= Mask[Idx] == Pair.second; 9654 } 9655 switch (Entries.size()) { 9656 case 1: 9657 if (IsIdentity || EntryLanes.size() > 1 || VL.size() <= 2) 9658 return TargetTransformInfo::SK_PermuteSingleSrc; 9659 break; 9660 case 2: 9661 if (EntryLanes.size() > 2 || VL.size() <= 2) 9662 return TargetTransformInfo::SK_PermuteTwoSrc; 9663 break; 9664 default: 9665 break; 9666 } 9667 Entries.clear(); 9668 // Clear the corresponding mask elements. 
9669 std::fill(std::next(Mask.begin(), Part * VL.size()), 9670 std::next(Mask.begin(), (Part + 1) * VL.size()), PoisonMaskElem); 9671 return std::nullopt; 9672 } 9673 9674 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> 9675 BoUpSLP::isGatherShuffledEntry( 9676 const TreeEntry *TE, ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask, 9677 SmallVectorImpl<SmallVector<const TreeEntry *>> &Entries, 9678 unsigned NumParts) { 9679 assert(NumParts > 0 && NumParts < VL.size() && 9680 "Expected positive number of registers."); 9681 Entries.clear(); 9682 // No need to check for the topmost gather node. 9683 if (TE == VectorizableTree.front().get()) 9684 return {}; 9685 Mask.assign(VL.size(), PoisonMaskElem); 9686 assert(TE->UserTreeIndices.size() == 1 && 9687 "Expected only single user of the gather node."); 9688 assert(VL.size() % NumParts == 0 && 9689 "Number of scalars must be divisible by NumParts."); 9690 unsigned SliceSize = VL.size() / NumParts; 9691 SmallVector<std::optional<TTI::ShuffleKind>> Res; 9692 for (unsigned Part = 0; Part < NumParts; ++Part) { 9693 ArrayRef<Value *> SubVL = VL.slice(Part * SliceSize, SliceSize); 9694 SmallVectorImpl<const TreeEntry *> &SubEntries = Entries.emplace_back(); 9695 std::optional<TTI::ShuffleKind> SubRes = 9696 isGatherShuffledSingleRegisterEntry(TE, SubVL, Mask, SubEntries, Part); 9697 if (!SubRes) 9698 SubEntries.clear(); 9699 Res.push_back(SubRes); 9700 if (SubEntries.size() == 1 && *SubRes == TTI::SK_PermuteSingleSrc && 9701 SubEntries.front()->getVectorFactor() == VL.size() && 9702 (SubEntries.front()->isSame(TE->Scalars) || 9703 SubEntries.front()->isSame(VL))) { 9704 SmallVector<const TreeEntry *> LocalSubEntries; 9705 LocalSubEntries.swap(SubEntries); 9706 Entries.clear(); 9707 Res.clear(); 9708 std::iota(Mask.begin(), Mask.end(), 0); 9709 // Clear undef scalars. 9710 for (int I = 0, Sz = VL.size(); I < Sz; ++I) 9711 if (isa<PoisonValue>(VL[I])) 9712 Mask[I] = PoisonMaskElem; 9713 Entries.emplace_back(1, LocalSubEntries.front()); 9714 Res.push_back(TargetTransformInfo::SK_PermuteSingleSrc); 9715 return Res; 9716 } 9717 } 9718 if (all_of(Res, 9719 [](const std::optional<TTI::ShuffleKind> &SK) { return !SK; })) { 9720 Entries.clear(); 9721 return {}; 9722 } 9723 return Res; 9724 } 9725 9726 InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL, 9727 bool ForPoisonSrc) const { 9728 // Find the type of the operands in VL. 9729 Type *ScalarTy = VL[0]->getType(); 9730 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0])) 9731 ScalarTy = SI->getValueOperand()->getType(); 9732 auto *VecTy = FixedVectorType::get(ScalarTy, VL.size()); 9733 bool DuplicateNonConst = false; 9734 // Find the cost of inserting/extracting values from the vector. 9735 // Check if the same elements are inserted several times and count them as 9736 // shuffle candidates. 9737 APInt ShuffledElements = APInt::getZero(VL.size()); 9738 DenseSet<Value *> UniqueElements; 9739 constexpr TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 9740 InstructionCost Cost; 9741 auto EstimateInsertCost = [&](unsigned I, Value *V) { 9742 if (!ForPoisonSrc) 9743 Cost += 9744 TTI->getVectorInstrCost(Instruction::InsertElement, VecTy, CostKind, 9745 I, Constant::getNullValue(VecTy), V); 9746 }; 9747 for (unsigned I = 0, E = VL.size(); I < E; ++I) { 9748 Value *V = VL[I]; 9749 // No need to shuffle duplicates for constants. 
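    // Illustrative sketch (hypothetical values): for VL = { %a, 7, %a, undef }
    // with a poison source, only lane 0 contributes an insert cost (via the
    // scalarization overhead below); the duplicate %a in lane 2 is expected to
    // come from the final single-source permute, and the constant and undef
    // lanes are excluded from the overhead mask.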
9750 if ((ForPoisonSrc && isConstant(V)) || isa<UndefValue>(V)) { 9751 ShuffledElements.setBit(I); 9752 continue; 9753 } 9754 if (!UniqueElements.insert(V).second) { 9755 DuplicateNonConst = true; 9756 ShuffledElements.setBit(I); 9757 continue; 9758 } 9759 EstimateInsertCost(I, V); 9760 } 9761 if (ForPoisonSrc) 9762 Cost = 9763 TTI->getScalarizationOverhead(VecTy, ~ShuffledElements, /*Insert*/ true, 9764 /*Extract*/ false, CostKind); 9765 if (DuplicateNonConst) 9766 Cost += 9767 TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy); 9768 return Cost; 9769 } 9770 9771 // Perform operand reordering on the instructions in VL and return the reordered 9772 // operands in Left and Right. 9773 void BoUpSLP::reorderInputsAccordingToOpcode( 9774 ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left, 9775 SmallVectorImpl<Value *> &Right, const TargetLibraryInfo &TLI, 9776 const DataLayout &DL, ScalarEvolution &SE, const BoUpSLP &R) { 9777 if (VL.empty()) 9778 return; 9779 VLOperands Ops(VL, TLI, DL, SE, R); 9780 // Reorder the operands in place. 9781 Ops.reorder(); 9782 Left = Ops.getVL(0); 9783 Right = Ops.getVL(1); 9784 } 9785 9786 Instruction &BoUpSLP::getLastInstructionInBundle(const TreeEntry *E) { 9787 auto &Res = EntryToLastInstruction.FindAndConstruct(E); 9788 if (Res.second) 9789 return *Res.second; 9790 // Get the basic block this bundle is in. All instructions in the bundle 9791 // should be in this block (except for extractelement-like instructions with 9792 // constant indeces). 9793 auto *Front = E->getMainOp(); 9794 auto *BB = Front->getParent(); 9795 assert(llvm::all_of(E->Scalars, [=](Value *V) -> bool { 9796 if (E->getOpcode() == Instruction::GetElementPtr && 9797 !isa<GetElementPtrInst>(V)) 9798 return true; 9799 auto *I = cast<Instruction>(V); 9800 return !E->isOpcodeOrAlt(I) || I->getParent() == BB || 9801 isVectorLikeInstWithConstOps(I); 9802 })); 9803 9804 auto FindLastInst = [&]() { 9805 Instruction *LastInst = Front; 9806 for (Value *V : E->Scalars) { 9807 auto *I = dyn_cast<Instruction>(V); 9808 if (!I) 9809 continue; 9810 if (LastInst->getParent() == I->getParent()) { 9811 if (LastInst->comesBefore(I)) 9812 LastInst = I; 9813 continue; 9814 } 9815 assert(((E->getOpcode() == Instruction::GetElementPtr && 9816 !isa<GetElementPtrInst>(I)) || 9817 (isVectorLikeInstWithConstOps(LastInst) && 9818 isVectorLikeInstWithConstOps(I))) && 9819 "Expected vector-like or non-GEP in GEP node insts only."); 9820 if (!DT->isReachableFromEntry(LastInst->getParent())) { 9821 LastInst = I; 9822 continue; 9823 } 9824 if (!DT->isReachableFromEntry(I->getParent())) 9825 continue; 9826 auto *NodeA = DT->getNode(LastInst->getParent()); 9827 auto *NodeB = DT->getNode(I->getParent()); 9828 assert(NodeA && "Should only process reachable instructions"); 9829 assert(NodeB && "Should only process reachable instructions"); 9830 assert((NodeA == NodeB) == 9831 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 9832 "Different nodes should have different DFS numbers"); 9833 if (NodeA->getDFSNumIn() < NodeB->getDFSNumIn()) 9834 LastInst = I; 9835 } 9836 BB = LastInst->getParent(); 9837 return LastInst; 9838 }; 9839 9840 auto FindFirstInst = [&]() { 9841 Instruction *FirstInst = Front; 9842 for (Value *V : E->Scalars) { 9843 auto *I = dyn_cast<Instruction>(V); 9844 if (!I) 9845 continue; 9846 if (FirstInst->getParent() == I->getParent()) { 9847 if (I->comesBefore(FirstInst)) 9848 FirstInst = I; 9849 continue; 9850 } 9851 assert(((E->getOpcode() == Instruction::GetElementPtr && 9852 
!isa<GetElementPtrInst>(I)) || 9853 (isVectorLikeInstWithConstOps(FirstInst) && 9854 isVectorLikeInstWithConstOps(I))) && 9855 "Expected vector-like or non-GEP in GEP node insts only."); 9856 if (!DT->isReachableFromEntry(FirstInst->getParent())) { 9857 FirstInst = I; 9858 continue; 9859 } 9860 if (!DT->isReachableFromEntry(I->getParent())) 9861 continue; 9862 auto *NodeA = DT->getNode(FirstInst->getParent()); 9863 auto *NodeB = DT->getNode(I->getParent()); 9864 assert(NodeA && "Should only process reachable instructions"); 9865 assert(NodeB && "Should only process reachable instructions"); 9866 assert((NodeA == NodeB) == 9867 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) && 9868 "Different nodes should have different DFS numbers"); 9869 if (NodeA->getDFSNumIn() > NodeB->getDFSNumIn()) 9870 FirstInst = I; 9871 } 9872 return FirstInst; 9873 }; 9874 9875 // Set the insert point to the beginning of the basic block if the entry 9876 // should not be scheduled. 9877 if (doesNotNeedToSchedule(E->Scalars) || 9878 (E->State != TreeEntry::NeedToGather && 9879 all_of(E->Scalars, isVectorLikeInstWithConstOps))) { 9880 if ((E->getOpcode() == Instruction::GetElementPtr && 9881 any_of(E->Scalars, 9882 [](Value *V) { 9883 return !isa<GetElementPtrInst>(V) && isa<Instruction>(V); 9884 })) || 9885 all_of(E->Scalars, [](Value *V) { 9886 return !isVectorLikeInstWithConstOps(V) && isUsedOutsideBlock(V); 9887 })) 9888 Res.second = FindLastInst(); 9889 else 9890 Res.second = FindFirstInst(); 9891 return *Res.second; 9892 } 9893 9894 // Find the last instruction. The common case should be that BB has been 9895 // scheduled, and the last instruction is VL.back(). So we start with 9896 // VL.back() and iterate over schedule data until we reach the end of the 9897 // bundle. The end of the bundle is marked by null ScheduleData. 9898 if (BlocksSchedules.count(BB)) { 9899 Value *V = E->isOneOf(E->Scalars.back()); 9900 if (doesNotNeedToBeScheduled(V)) 9901 V = *find_if_not(E->Scalars, doesNotNeedToBeScheduled); 9902 auto *Bundle = BlocksSchedules[BB]->getScheduleData(V); 9903 if (Bundle && Bundle->isPartOfBundle()) 9904 for (; Bundle; Bundle = Bundle->NextInBundle) 9905 if (Bundle->OpValue == Bundle->Inst) 9906 Res.second = Bundle->Inst; 9907 } 9908 9909 // LastInst can still be null at this point if there's either not an entry 9910 // for BB in BlocksSchedules or there's no ScheduleData available for 9911 // VL.back(). This can be the case if buildTree_rec aborts for various 9912 // reasons (e.g., the maximum recursion depth is reached, the maximum region 9913 // size is reached, etc.). ScheduleData is initialized in the scheduling 9914 // "dry-run". 9915 // 9916 // If this happens, we can still find the last instruction by brute force. We 9917 // iterate forwards from Front (inclusive) until we either see all 9918 // instructions in the bundle or reach the end of the block. If Front is the 9919 // last instruction in program order, LastInst will be set to Front, and we 9920 // will visit all the remaining instructions in the block. 9921 // 9922 // One of the reasons we exit early from buildTree_rec is to place an upper 9923 // bound on compile-time. Thus, taking an additional compile-time hit here is 9924 // not ideal. However, this should be exceedingly rare since it requires that 9925 // we both exit early from buildTree_rec and that the bundle be out-of-order 9926 // (causing us to iterate all the way to the end of the block). 
9927 if (!Res.second) 9928 Res.second = FindLastInst(); 9929 assert(Res.second && "Failed to find last instruction in bundle"); 9930 return *Res.second; 9931 } 9932 9933 void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) { 9934 auto *Front = E->getMainOp(); 9935 Instruction *LastInst = &getLastInstructionInBundle(E); 9936 assert(LastInst && "Failed to find last instruction in bundle"); 9937 BasicBlock::iterator LastInstIt = LastInst->getIterator(); 9938 // If the instruction is PHI, set the insert point after all the PHIs. 9939 bool IsPHI = isa<PHINode>(LastInst); 9940 if (IsPHI) 9941 LastInstIt = LastInst->getParent()->getFirstNonPHIIt(); 9942 if (IsPHI || (E->State != TreeEntry::NeedToGather && 9943 doesNotNeedToSchedule(E->Scalars))) { 9944 Builder.SetInsertPoint(LastInst->getParent(), LastInstIt); 9945 } else { 9946 // Set the insertion point after the last instruction in the bundle. Set the 9947 // debug location to Front. 9948 Builder.SetInsertPoint( 9949 LastInst->getParent(), 9950 LastInst->getNextNonDebugInstruction()->getIterator()); 9951 } 9952 Builder.SetCurrentDebugLocation(Front->getDebugLoc()); 9953 } 9954 9955 Value *BoUpSLP::gather(ArrayRef<Value *> VL, Value *Root) { 9956 // List of instructions/lanes from current block and/or the blocks which are 9957 // part of the current loop. These instructions will be inserted at the end to 9958 // make it possible to optimize loops and hoist invariant instructions out of 9959 // the loops body with better chances for success. 9960 SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts; 9961 SmallSet<int, 4> PostponedIndices; 9962 Loop *L = LI->getLoopFor(Builder.GetInsertBlock()); 9963 auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) { 9964 SmallPtrSet<BasicBlock *, 4> Visited; 9965 while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second) 9966 InsertBB = InsertBB->getSinglePredecessor(); 9967 return InsertBB && InsertBB == InstBB; 9968 }; 9969 for (int I = 0, E = VL.size(); I < E; ++I) { 9970 if (auto *Inst = dyn_cast<Instruction>(VL[I])) 9971 if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) || 9972 getTreeEntry(Inst) || 9973 (L && (!Root || L->isLoopInvariant(Root)) && L->contains(Inst))) && 9974 PostponedIndices.insert(I).second) 9975 PostponedInsts.emplace_back(Inst, I); 9976 } 9977 9978 auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos) { 9979 Vec = Builder.CreateInsertElement(Vec, V, Builder.getInt32(Pos)); 9980 auto *InsElt = dyn_cast<InsertElementInst>(Vec); 9981 if (!InsElt) 9982 return Vec; 9983 GatherShuffleExtractSeq.insert(InsElt); 9984 CSEBlocks.insert(InsElt->getParent()); 9985 // Add to our 'need-to-extract' list. 9986 if (isa<Instruction>(V)) { 9987 if (TreeEntry *Entry = getTreeEntry(V)) { 9988 // Find which lane we need to extract. 9989 unsigned FoundLane = Entry->findLaneForValue(V); 9990 ExternalUses.emplace_back(V, InsElt, FoundLane); 9991 } 9992 } 9993 return Vec; 9994 }; 9995 Value *Val0 = 9996 isa<StoreInst>(VL[0]) ? cast<StoreInst>(VL[0])->getValueOperand() : VL[0]; 9997 FixedVectorType *VecTy = FixedVectorType::get(Val0->getType(), VL.size()); 9998 Value *Vec = Root ? Root : PoisonValue::get(VecTy); 9999 SmallVector<int> NonConsts; 10000 // Insert constant values at first. 
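  // Illustrative sketch (hypothetical values): for VL = { %x, 4, %inloop },
  // where %inloop is defined inside the current loop, the constant 4 is
  // inserted first, then %x, and the insert of %inloop is postponed to the end
  // of the sequence so that the loop-invariant part of the buildvector can be
  // hoisted.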
  for (int I = 0, E = VL.size(); I < E; ++I) {
    if (PostponedIndices.contains(I))
      continue;
    if (!isConstant(VL[I])) {
      NonConsts.push_back(I);
      continue;
    }
    if (Root) {
      if (!isa<UndefValue>(VL[I])) {
        NonConsts.push_back(I);
        continue;
      }
      if (isa<PoisonValue>(VL[I]))
        continue;
      if (auto *SV = dyn_cast<ShuffleVectorInst>(Root)) {
        if (SV->getMaskValue(I) == PoisonMaskElem)
          continue;
      }
    }
    Vec = CreateInsertElement(Vec, VL[I], I);
  }
  // Insert non-constant values.
  for (int I : NonConsts)
    Vec = CreateInsertElement(Vec, VL[I], I);
  // Append instructions, which are/may be part of the loop, at the end to make
  // it possible to hoist non-loop-based instructions.
  for (const std::pair<Value *, unsigned> &Pair : PostponedInsts)
    Vec = CreateInsertElement(Vec, Pair.first, Pair.second);

  return Vec;
}

/// Merges shuffle masks and emits the final shuffle instruction, if required.
/// It supports shuffling of 2 input vectors. It implements lazy shuffle
/// emission: the actual shuffle instruction is generated only if it is really
/// required. Otherwise, the shuffle instruction emission is delayed till the
/// end of the process, to reduce the number of emitted instructions and
/// further analysis/transformations.
/// The class will also look through the previously emitted shuffle
/// instructions and properly mark indices in the mask as undef.
/// For example, given the code
/// \code
/// %s1 = shufflevector <2 x ty> %0, poison, <1, 0>
/// %s2 = shufflevector <2 x ty> %1, poison, <1, 0>
/// \endcode
/// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 3, 2>, it
/// will look through %s1 and %s2 and emit
/// \code
/// %res = shufflevector <2 x ty> %0, %1, <0, 1, 2, 3>
/// \endcode
/// instead.
/// If the 2 operands are of different sizes, the smaller one will be resized
/// and the mask recalculated properly.
/// For example, given the code
/// \code
/// %s1 = shufflevector <2 x ty> %0, poison, <1, 0, 1, 0>
/// %s2 = shufflevector <2 x ty> %1, poison, <1, 0, 1, 0>
/// \endcode
/// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 5, 4>, it
/// will look through %s1 and %s2 and emit
/// \code
/// %res = shufflevector <2 x ty> %0, %1, <0, 1, 2, 3>
/// \endcode
/// instead.
class BoUpSLP::ShuffleInstructionBuilder final : public BaseShuffleAnalysis {
  bool IsFinalized = false;
  /// Combined mask for all applied operands and masks. It is built during
  /// analysis and actual emission of shuffle vector instructions.
  SmallVector<int> CommonMask;
  /// List of operands for the shuffle vector instruction. It holds at most 2
  /// operands. If a 3rd one is going to be added, the first 2 are combined
  /// into a shuffle with the \p CommonMask mask, the first operand is set to
  /// be the resulting shuffle and the second operand is set to be the newly
  /// added operand. The \p CommonMask is transformed in the proper way after
  /// that.
  SmallVector<Value *, 2> InVectors;
  IRBuilderBase &Builder;
  BoUpSLP &R;

  class ShuffleIRBuilder {
    IRBuilderBase &Builder;
    /// Holds all of the instructions that we gathered.
10082 SetVector<Instruction *> &GatherShuffleExtractSeq; 10083 /// A list of blocks that we are going to CSE. 10084 DenseSet<BasicBlock *> &CSEBlocks; 10085 10086 public: 10087 ShuffleIRBuilder(IRBuilderBase &Builder, 10088 SetVector<Instruction *> &GatherShuffleExtractSeq, 10089 DenseSet<BasicBlock *> &CSEBlocks) 10090 : Builder(Builder), GatherShuffleExtractSeq(GatherShuffleExtractSeq), 10091 CSEBlocks(CSEBlocks) {} 10092 ~ShuffleIRBuilder() = default; 10093 /// Creates shufflevector for the 2 operands with the given mask. 10094 Value *createShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask) { 10095 Value *Vec = Builder.CreateShuffleVector(V1, V2, Mask); 10096 if (auto *I = dyn_cast<Instruction>(Vec)) { 10097 GatherShuffleExtractSeq.insert(I); 10098 CSEBlocks.insert(I->getParent()); 10099 } 10100 return Vec; 10101 } 10102 /// Creates permutation of the single vector operand with the given mask, if 10103 /// it is not identity mask. 10104 Value *createShuffleVector(Value *V1, ArrayRef<int> Mask) { 10105 if (Mask.empty()) 10106 return V1; 10107 unsigned VF = Mask.size(); 10108 unsigned LocalVF = cast<FixedVectorType>(V1->getType())->getNumElements(); 10109 if (VF == LocalVF && ShuffleVectorInst::isIdentityMask(Mask, VF)) 10110 return V1; 10111 Value *Vec = Builder.CreateShuffleVector(V1, Mask); 10112 if (auto *I = dyn_cast<Instruction>(Vec)) { 10113 GatherShuffleExtractSeq.insert(I); 10114 CSEBlocks.insert(I->getParent()); 10115 } 10116 return Vec; 10117 } 10118 Value *createIdentity(Value *V) { return V; } 10119 Value *createPoison(Type *Ty, unsigned VF) { 10120 return PoisonValue::get(FixedVectorType::get(Ty, VF)); 10121 } 10122 /// Resizes 2 input vector to match the sizes, if the they are not equal 10123 /// yet. The smallest vector is resized to the size of the larger vector. 10124 void resizeToMatch(Value *&V1, Value *&V2) { 10125 if (V1->getType() == V2->getType()) 10126 return; 10127 int V1VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 10128 int V2VF = cast<FixedVectorType>(V2->getType())->getNumElements(); 10129 int VF = std::max(V1VF, V2VF); 10130 int MinVF = std::min(V1VF, V2VF); 10131 SmallVector<int> IdentityMask(VF, PoisonMaskElem); 10132 std::iota(IdentityMask.begin(), std::next(IdentityMask.begin(), MinVF), 10133 0); 10134 Value *&Op = MinVF == V1VF ? V1 : V2; 10135 Op = Builder.CreateShuffleVector(Op, IdentityMask); 10136 if (auto *I = dyn_cast<Instruction>(Op)) { 10137 GatherShuffleExtractSeq.insert(I); 10138 CSEBlocks.insert(I->getParent()); 10139 } 10140 if (MinVF == V1VF) 10141 V1 = Op; 10142 else 10143 V2 = Op; 10144 } 10145 }; 10146 10147 /// Smart shuffle instruction emission, walks through shuffles trees and 10148 /// tries to find the best matching vector for the actual shuffle 10149 /// instruction. 10150 Value *createShuffle(Value *V1, Value *V2, ArrayRef<int> Mask) { 10151 assert(V1 && "Expected at least one vector value."); 10152 ShuffleIRBuilder ShuffleBuilder(Builder, R.GatherShuffleExtractSeq, 10153 R.CSEBlocks); 10154 return BaseShuffleAnalysis::createShuffle<Value *>(V1, V2, Mask, 10155 ShuffleBuilder); 10156 } 10157 10158 /// Transforms mask \p CommonMask per given \p Mask to make proper set after 10159 /// shuffle emission. 
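  /// For illustration only: after the shuffle applying \p Mask has been
  /// emitted, every lane that \p Mask defines is referenced by an identity
  /// index, e.g. a fully defined CommonMask <1, 0, 3, 2> becomes <0, 1, 2, 3>,
  /// since the permutation is now materialized in the emitted shuffle.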
10160 static void transformMaskAfterShuffle(MutableArrayRef<int> CommonMask, 10161 ArrayRef<int> Mask) { 10162 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10163 if (Mask[Idx] != PoisonMaskElem) 10164 CommonMask[Idx] = Idx; 10165 } 10166 10167 public: 10168 ShuffleInstructionBuilder(IRBuilderBase &Builder, BoUpSLP &R) 10169 : Builder(Builder), R(R) {} 10170 10171 /// Adjusts extractelements after reusing them. 10172 Value *adjustExtracts(const TreeEntry *E, MutableArrayRef<int> Mask, 10173 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds, 10174 unsigned NumParts, bool &UseVecBaseAsInput) { 10175 UseVecBaseAsInput = false; 10176 SmallPtrSet<Value *, 4> UniqueBases; 10177 Value *VecBase = nullptr; 10178 for (int I = 0, Sz = Mask.size(); I < Sz; ++I) { 10179 int Idx = Mask[I]; 10180 if (Idx == PoisonMaskElem) 10181 continue; 10182 auto *EI = cast<ExtractElementInst>(E->Scalars[I]); 10183 VecBase = EI->getVectorOperand(); 10184 if (const TreeEntry *TE = R.getTreeEntry(VecBase)) 10185 VecBase = TE->VectorizedValue; 10186 assert(VecBase && "Expected vectorized value."); 10187 UniqueBases.insert(VecBase); 10188 // If the only one use is vectorized - can delete the extractelement 10189 // itself. 10190 if (!EI->hasOneUse() || any_of(EI->users(), [&](User *U) { 10191 return !R.ScalarToTreeEntry.count(U); 10192 })) 10193 continue; 10194 R.eraseInstruction(EI); 10195 } 10196 if (NumParts == 1 || UniqueBases.size() == 1) 10197 return VecBase; 10198 UseVecBaseAsInput = true; 10199 auto TransformToIdentity = [](MutableArrayRef<int> Mask) { 10200 for (auto [I, Idx] : enumerate(Mask)) 10201 if (Idx != PoisonMaskElem) 10202 Idx = I; 10203 }; 10204 // Perform multi-register vector shuffle, joining them into a single virtual 10205 // long vector. 10206 // Need to shuffle each part independently and then insert all this parts 10207 // into a long virtual vector register, forming the original vector. 10208 Value *Vec = nullptr; 10209 SmallVector<int> VecMask(Mask.size(), PoisonMaskElem); 10210 unsigned SliceSize = E->Scalars.size() / NumParts; 10211 for (unsigned Part = 0; Part < NumParts; ++Part) { 10212 ArrayRef<Value *> VL = 10213 ArrayRef(E->Scalars).slice(Part * SliceSize, SliceSize); 10214 MutableArrayRef<int> SubMask = Mask.slice(Part * SliceSize, SliceSize); 10215 constexpr int MaxBases = 2; 10216 SmallVector<Value *, MaxBases> Bases(MaxBases); 10217 #ifndef NDEBUG 10218 int PrevSize = 0; 10219 #endif // NDEBUG 10220 for (const auto [I, V]: enumerate(VL)) { 10221 if (SubMask[I] == PoisonMaskElem) 10222 continue; 10223 Value *VecOp = cast<ExtractElementInst>(V)->getVectorOperand(); 10224 if (const TreeEntry *TE = R.getTreeEntry(VecOp)) 10225 VecOp = TE->VectorizedValue; 10226 assert(VecOp && "Expected vectorized value."); 10227 const int Size = 10228 cast<FixedVectorType>(VecOp->getType())->getNumElements(); 10229 #ifndef NDEBUG 10230 assert((PrevSize == Size || PrevSize == 0) && 10231 "Expected vectors of the same size."); 10232 PrevSize = Size; 10233 #endif // NDEBUG 10234 Bases[SubMask[I] < Size ? 
0 : 1] = VecOp; 10235 } 10236 if (!Bases.front()) 10237 continue; 10238 Value *SubVec; 10239 if (Bases.back()) { 10240 SubVec = createShuffle(Bases.front(), Bases.back(), SubMask); 10241 TransformToIdentity(SubMask); 10242 } else { 10243 SubVec = Bases.front(); 10244 } 10245 if (!Vec) { 10246 Vec = SubVec; 10247 assert((Part == 0 || all_of(seq<unsigned>(0, Part), 10248 [&](unsigned P) { 10249 ArrayRef<int> SubMask = 10250 Mask.slice(P * SliceSize, SliceSize); 10251 return all_of(SubMask, [](int Idx) { 10252 return Idx == PoisonMaskElem; 10253 }); 10254 })) && 10255 "Expected first part or all previous parts masked."); 10256 copy(SubMask, std::next(VecMask.begin(), Part * SliceSize)); 10257 } else { 10258 unsigned VF = cast<FixedVectorType>(Vec->getType())->getNumElements(); 10259 if (Vec->getType() != SubVec->getType()) { 10260 unsigned SubVecVF = 10261 cast<FixedVectorType>(SubVec->getType())->getNumElements(); 10262 VF = std::max(VF, SubVecVF); 10263 } 10264 // Adjust SubMask. 10265 for (auto [I, Idx] : enumerate(SubMask)) 10266 if (Idx != PoisonMaskElem) 10267 Idx += VF; 10268 copy(SubMask, std::next(VecMask.begin(), Part * SliceSize)); 10269 Vec = createShuffle(Vec, SubVec, VecMask); 10270 TransformToIdentity(VecMask); 10271 } 10272 } 10273 copy(VecMask, Mask.begin()); 10274 return Vec; 10275 } 10276 /// Checks if the specified entry \p E needs to be delayed because of its 10277 /// dependency nodes. 10278 std::optional<Value *> 10279 needToDelay(const TreeEntry *E, 10280 ArrayRef<SmallVector<const TreeEntry *>> Deps) const { 10281 // No need to delay emission if all deps are ready. 10282 if (all_of(Deps, [](ArrayRef<const TreeEntry *> TEs) { 10283 return all_of( 10284 TEs, [](const TreeEntry *TE) { return TE->VectorizedValue; }); 10285 })) 10286 return std::nullopt; 10287 // Postpone gather emission, will be emitted after the end of the 10288 // process to keep correct order. 10289 auto *VecTy = FixedVectorType::get(E->Scalars.front()->getType(), 10290 E->getVectorFactor()); 10291 return Builder.CreateAlignedLoad( 10292 VecTy, PoisonValue::get(PointerType::getUnqual(VecTy->getContext())), 10293 MaybeAlign()); 10294 } 10295 /// Adds 2 input vectors (in form of tree entries) and the mask for their 10296 /// shuffling. 10297 void add(const TreeEntry &E1, const TreeEntry &E2, ArrayRef<int> Mask) { 10298 add(E1.VectorizedValue, E2.VectorizedValue, Mask); 10299 } 10300 /// Adds single input vector (in form of tree entry) and the mask for its 10301 /// shuffling. 10302 void add(const TreeEntry &E1, ArrayRef<int> Mask) { 10303 add(E1.VectorizedValue, Mask); 10304 } 10305 /// Adds 2 input vectors and the mask for their shuffling. 
10306 void add(Value *V1, Value *V2, ArrayRef<int> Mask) { 10307 assert(V1 && V2 && !Mask.empty() && "Expected non-empty input vectors."); 10308 if (InVectors.empty()) { 10309 InVectors.push_back(V1); 10310 InVectors.push_back(V2); 10311 CommonMask.assign(Mask.begin(), Mask.end()); 10312 return; 10313 } 10314 Value *Vec = InVectors.front(); 10315 if (InVectors.size() == 2) { 10316 Vec = createShuffle(Vec, InVectors.back(), CommonMask); 10317 transformMaskAfterShuffle(CommonMask, CommonMask); 10318 } else if (cast<FixedVectorType>(Vec->getType())->getNumElements() != 10319 Mask.size()) { 10320 Vec = createShuffle(Vec, nullptr, CommonMask); 10321 transformMaskAfterShuffle(CommonMask, CommonMask); 10322 } 10323 V1 = createShuffle(V1, V2, Mask); 10324 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10325 if (Mask[Idx] != PoisonMaskElem) 10326 CommonMask[Idx] = Idx + Sz; 10327 InVectors.front() = Vec; 10328 if (InVectors.size() == 2) 10329 InVectors.back() = V1; 10330 else 10331 InVectors.push_back(V1); 10332 } 10333 /// Adds another one input vector and the mask for the shuffling. 10334 void add(Value *V1, ArrayRef<int> Mask, bool = false) { 10335 if (InVectors.empty()) { 10336 if (!isa<FixedVectorType>(V1->getType())) { 10337 V1 = createShuffle(V1, nullptr, CommonMask); 10338 CommonMask.assign(Mask.size(), PoisonMaskElem); 10339 transformMaskAfterShuffle(CommonMask, Mask); 10340 } 10341 InVectors.push_back(V1); 10342 CommonMask.assign(Mask.begin(), Mask.end()); 10343 return; 10344 } 10345 const auto *It = find(InVectors, V1); 10346 if (It == InVectors.end()) { 10347 if (InVectors.size() == 2 || 10348 InVectors.front()->getType() != V1->getType() || 10349 !isa<FixedVectorType>(V1->getType())) { 10350 Value *V = InVectors.front(); 10351 if (InVectors.size() == 2) { 10352 V = createShuffle(InVectors.front(), InVectors.back(), CommonMask); 10353 transformMaskAfterShuffle(CommonMask, CommonMask); 10354 } else if (cast<FixedVectorType>(V->getType())->getNumElements() != 10355 CommonMask.size()) { 10356 V = createShuffle(InVectors.front(), nullptr, CommonMask); 10357 transformMaskAfterShuffle(CommonMask, CommonMask); 10358 } 10359 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10360 if (CommonMask[Idx] == PoisonMaskElem && Mask[Idx] != PoisonMaskElem) 10361 CommonMask[Idx] = 10362 V->getType() != V1->getType() 10363 ? Idx + Sz 10364 : Mask[Idx] + cast<FixedVectorType>(V1->getType()) 10365 ->getNumElements(); 10366 if (V->getType() != V1->getType()) 10367 V1 = createShuffle(V1, nullptr, Mask); 10368 InVectors.front() = V; 10369 if (InVectors.size() == 2) 10370 InVectors.back() = V1; 10371 else 10372 InVectors.push_back(V1); 10373 return; 10374 } 10375 // Check if second vector is required if the used elements are already 10376 // used from the first one. 10377 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10378 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem) { 10379 InVectors.push_back(V1); 10380 break; 10381 } 10382 } 10383 int VF = CommonMask.size(); 10384 if (auto *FTy = dyn_cast<FixedVectorType>(V1->getType())) 10385 VF = FTy->getNumElements(); 10386 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10387 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem) 10388 CommonMask[Idx] = Mask[Idx] + (It == InVectors.begin() ? 0 : VF); 10389 } 10390 /// Adds another one input vector and the mask for the shuffling. 
10391 void addOrdered(Value *V1, ArrayRef<unsigned> Order) { 10392 SmallVector<int> NewMask; 10393 inversePermutation(Order, NewMask); 10394 add(V1, NewMask); 10395 } 10396 Value *gather(ArrayRef<Value *> VL, unsigned MaskVF = 0, 10397 Value *Root = nullptr) { 10398 return R.gather(VL, Root); 10399 } 10400 Value *createFreeze(Value *V) { return Builder.CreateFreeze(V); } 10401 /// Finalize emission of the shuffles. 10402 /// \param Action the action (if any) to be performed before final applying of 10403 /// the \p ExtMask mask. 10404 Value * 10405 finalize(ArrayRef<int> ExtMask, unsigned VF = 0, 10406 function_ref<void(Value *&, SmallVectorImpl<int> &)> Action = {}) { 10407 IsFinalized = true; 10408 if (Action) { 10409 Value *Vec = InVectors.front(); 10410 if (InVectors.size() == 2) { 10411 Vec = createShuffle(Vec, InVectors.back(), CommonMask); 10412 InVectors.pop_back(); 10413 } else { 10414 Vec = createShuffle(Vec, nullptr, CommonMask); 10415 } 10416 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx) 10417 if (CommonMask[Idx] != PoisonMaskElem) 10418 CommonMask[Idx] = Idx; 10419 assert(VF > 0 && 10420 "Expected vector length for the final value before action."); 10421 unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements(); 10422 if (VecVF < VF) { 10423 SmallVector<int> ResizeMask(VF, PoisonMaskElem); 10424 std::iota(ResizeMask.begin(), std::next(ResizeMask.begin(), VecVF), 0); 10425 Vec = createShuffle(Vec, nullptr, ResizeMask); 10426 } 10427 Action(Vec, CommonMask); 10428 InVectors.front() = Vec; 10429 } 10430 if (!ExtMask.empty()) { 10431 if (CommonMask.empty()) { 10432 CommonMask.assign(ExtMask.begin(), ExtMask.end()); 10433 } else { 10434 SmallVector<int> NewMask(ExtMask.size(), PoisonMaskElem); 10435 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) { 10436 if (ExtMask[I] == PoisonMaskElem) 10437 continue; 10438 NewMask[I] = CommonMask[ExtMask[I]]; 10439 } 10440 CommonMask.swap(NewMask); 10441 } 10442 } 10443 if (CommonMask.empty()) { 10444 assert(InVectors.size() == 1 && "Expected only one vector with no mask"); 10445 return InVectors.front(); 10446 } 10447 if (InVectors.size() == 2) 10448 return createShuffle(InVectors.front(), InVectors.back(), CommonMask); 10449 return createShuffle(InVectors.front(), nullptr, CommonMask); 10450 } 10451 10452 ~ShuffleInstructionBuilder() { 10453 assert((IsFinalized || CommonMask.empty()) && 10454 "Shuffle construction must be finalized."); 10455 } 10456 }; 10457 10458 Value *BoUpSLP::vectorizeOperand(TreeEntry *E, unsigned NodeIdx, 10459 bool PostponedPHIs) { 10460 ValueList &VL = E->getOperand(NodeIdx); 10461 if (E->State == TreeEntry::PossibleStridedVectorize && 10462 !E->ReorderIndices.empty()) { 10463 SmallVector<int> Mask(E->ReorderIndices.begin(), E->ReorderIndices.end()); 10464 reorderScalars(VL, Mask); 10465 } 10466 const unsigned VF = VL.size(); 10467 InstructionsState S = getSameOpcode(VL, *TLI); 10468 // Special processing for GEPs bundle, which may include non-gep values. 
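  // Such a bundle can mix getelementptr instructions with other pointer
  // values; in that case take the opcode from the first real getelementptr so
  // the operand list is still handled as a GEP bundle below.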
10469 if (!S.getOpcode() && VL.front()->getType()->isPointerTy()) { 10470 const auto *It = 10471 find_if(VL, [](Value *V) { return isa<GetElementPtrInst>(V); }); 10472 if (It != VL.end()) 10473 S = getSameOpcode(*It, *TLI); 10474 } 10475 if (S.getOpcode()) { 10476 auto CheckSameVE = [&](const TreeEntry *VE) { 10477 return VE->isSame(VL) && 10478 (any_of(VE->UserTreeIndices, 10479 [E, NodeIdx](const EdgeInfo &EI) { 10480 return EI.UserTE == E && EI.EdgeIdx == NodeIdx; 10481 }) || 10482 any_of(VectorizableTree, 10483 [E, NodeIdx, VE](const std::unique_ptr<TreeEntry> &TE) { 10484 return TE->isOperandGatherNode({E, NodeIdx}) && 10485 VE->isSame(TE->Scalars); 10486 })); 10487 }; 10488 TreeEntry *VE = getTreeEntry(S.OpValue); 10489 bool IsSameVE = VE && CheckSameVE(VE); 10490 if (!IsSameVE) { 10491 auto It = MultiNodeScalars.find(S.OpValue); 10492 if (It != MultiNodeScalars.end()) { 10493 auto *I = find_if(It->getSecond(), [&](const TreeEntry *TE) { 10494 return TE != VE && CheckSameVE(TE); 10495 }); 10496 if (I != It->getSecond().end()) { 10497 VE = *I; 10498 IsSameVE = true; 10499 } 10500 } 10501 } 10502 if (IsSameVE) { 10503 auto FinalShuffle = [&](Value *V, ArrayRef<int> Mask) { 10504 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this); 10505 ShuffleBuilder.add(V, Mask); 10506 return ShuffleBuilder.finalize(std::nullopt); 10507 }; 10508 Value *V = vectorizeTree(VE, PostponedPHIs); 10509 if (VF != cast<FixedVectorType>(V->getType())->getNumElements()) { 10510 if (!VE->ReuseShuffleIndices.empty()) { 10511 // Reshuffle to get only unique values. 10512 // If some of the scalars are duplicated in the vectorization 10513 // tree entry, we do not vectorize them but instead generate a 10514 // mask for the reuses. But if there are several users of the 10515 // same entry, they may have different vectorization factors. 10516 // This is especially important for PHI nodes. In this case, we 10517 // need to adapt the resulting instruction for the user 10518 // vectorization factor and have to reshuffle it again to take 10519 // only unique elements of the vector. Without this code the 10520 // function incorrectly returns reduced vector instruction with 10521 // the same elements, not with the unique ones. 10522 10523 // block: 10524 // %phi = phi <2 x > { .., %entry} {%shuffle, %block} 10525 // %2 = shuffle <2 x > %phi, poison, <4 x > <1, 1, 0, 0> 10526 // ... (use %2) 10527 // %shuffle = shuffle <2 x> %2, poison, <2 x> {2, 0} 10528 // br %block 10529 SmallVector<int> UniqueIdxs(VF, PoisonMaskElem); 10530 SmallSet<int, 4> UsedIdxs; 10531 int Pos = 0; 10532 for (int Idx : VE->ReuseShuffleIndices) { 10533 if (Idx != static_cast<int>(VF) && Idx != PoisonMaskElem && 10534 UsedIdxs.insert(Idx).second) 10535 UniqueIdxs[Idx] = Pos; 10536 ++Pos; 10537 } 10538 assert(VF >= UsedIdxs.size() && "Expected vectorization factor " 10539 "less than original vector size."); 10540 UniqueIdxs.append(VF - UsedIdxs.size(), PoisonMaskElem); 10541 V = FinalShuffle(V, UniqueIdxs); 10542 } else { 10543 assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() && 10544 "Expected vectorization factor less " 10545 "than original vector size."); 10546 SmallVector<int> UniformMask(VF, 0); 10547 std::iota(UniformMask.begin(), UniformMask.end(), 0); 10548 V = FinalShuffle(V, UniformMask); 10549 } 10550 } 10551 // Need to update the operand gather node, if actually the operand is not a 10552 // vectorized node, but the buildvector/gather node, which matches one of 10553 // the vectorized nodes. 
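      // The lookup below finds that gather node through its (user, edge index)
      // pair and records the reused vector there, so the operand is treated as
      // already materialized from now on.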
10554       if (find_if(VE->UserTreeIndices, [&](const EdgeInfo &EI) {
10555             return EI.UserTE == E && EI.EdgeIdx == NodeIdx;
10556           }) == VE->UserTreeIndices.end()) {
10557         auto *It = find_if(
10558             VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
10559               return TE->State == TreeEntry::NeedToGather &&
10560                      TE->UserTreeIndices.front().UserTE == E &&
10561                      TE->UserTreeIndices.front().EdgeIdx == NodeIdx;
10562             });
10563         assert(It != VectorizableTree.end() && "Expected gather node operand.");
10564         (*It)->VectorizedValue = V;
10565       }
10566       return V;
10567     }
10568   }
10569 
10570   // Find the corresponding gather entry and vectorize it.
10571   // This allows being more accurate about tree/graph transformations and, in
10572   // many cases, checks the correctness of those transformations.
10573   auto *I = find_if(VectorizableTree,
10574                     [E, NodeIdx](const std::unique_ptr<TreeEntry> &TE) {
10575                       return TE->isOperandGatherNode({E, NodeIdx});
10576                     });
10577   assert(I != VectorizableTree.end() && "Gather node is not in the graph.");
10578   assert(I->get()->UserTreeIndices.size() == 1 &&
10579          "Expected only single user for the gather node.");
10580   assert(I->get()->isSame(VL) && "Expected same list of scalars.");
10581   return vectorizeTree(I->get(), PostponedPHIs);
10582 }
10583 
10584 template <typename BVTy, typename ResTy, typename... Args>
10585 ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Args &...Params) {
10586   assert(E->State == TreeEntry::NeedToGather && "Expected gather node.");
10587   unsigned VF = E->getVectorFactor();
10588 
10589   bool NeedFreeze = false;
10590   SmallVector<int> ReuseShuffleIndicies(E->ReuseShuffleIndices.begin(),
10591                                         E->ReuseShuffleIndices.end());
10592   SmallVector<Value *> GatheredScalars(E->Scalars.begin(), E->Scalars.end());
10593   // Build a mask out of the reorder indices and reorder scalars per this
10594   // mask.
10595 SmallVector<int> ReorderMask; 10596 inversePermutation(E->ReorderIndices, ReorderMask); 10597 if (!ReorderMask.empty()) 10598 reorderScalars(GatheredScalars, ReorderMask); 10599 auto FindReusedSplat = [&](MutableArrayRef<int> Mask, unsigned InputVF, 10600 unsigned I, unsigned SliceSize) { 10601 if (!isSplat(E->Scalars) || none_of(E->Scalars, [](Value *V) { 10602 return isa<UndefValue>(V) && !isa<PoisonValue>(V); 10603 })) 10604 return false; 10605 TreeEntry *UserTE = E->UserTreeIndices.back().UserTE; 10606 unsigned EdgeIdx = E->UserTreeIndices.back().EdgeIdx; 10607 if (UserTE->getNumOperands() != 2) 10608 return false; 10609 auto *It = 10610 find_if(VectorizableTree, [=](const std::unique_ptr<TreeEntry> &TE) { 10611 return find_if(TE->UserTreeIndices, [=](const EdgeInfo &EI) { 10612 return EI.UserTE == UserTE && EI.EdgeIdx != EdgeIdx; 10613 }) != TE->UserTreeIndices.end(); 10614 }); 10615 if (It == VectorizableTree.end()) 10616 return false; 10617 int Idx; 10618 if ((Mask.size() < InputVF && 10619 ShuffleVectorInst::isExtractSubvectorMask(Mask, InputVF, Idx) && 10620 Idx == 0) || 10621 (Mask.size() == InputVF && 10622 ShuffleVectorInst::isIdentityMask(Mask, Mask.size()))) { 10623 std::iota(std::next(Mask.begin(), I * SliceSize), 10624 std::next(Mask.begin(), (I + 1) * SliceSize), 0); 10625 } else { 10626 unsigned IVal = 10627 *find_if_not(Mask, [](int Idx) { return Idx == PoisonMaskElem; }); 10628 std::fill(std::next(Mask.begin(), I * SliceSize), 10629 std::next(Mask.begin(), (I + 1) * SliceSize), IVal); 10630 } 10631 return true; 10632 }; 10633 BVTy ShuffleBuilder(Params...); 10634 ResTy Res = ResTy(); 10635 SmallVector<int> Mask; 10636 SmallVector<int> ExtractMask(GatheredScalars.size(), PoisonMaskElem); 10637 SmallVector<std::optional<TTI::ShuffleKind>> ExtractShuffles; 10638 Value *ExtractVecBase = nullptr; 10639 bool UseVecBaseAsInput = false; 10640 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> GatherShuffles; 10641 SmallVector<SmallVector<const TreeEntry *>> Entries; 10642 Type *ScalarTy = GatheredScalars.front()->getType(); 10643 auto *VecTy = FixedVectorType::get(ScalarTy, GatheredScalars.size()); 10644 unsigned NumParts = TTI->getNumberOfParts(VecTy); 10645 if (NumParts == 0 || NumParts >= GatheredScalars.size()) 10646 NumParts = 1; 10647 if (!all_of(GatheredScalars, UndefValue::classof)) { 10648 // Check for gathered extracts. 10649 bool Resized = false; 10650 ExtractShuffles = 10651 tryToGatherExtractElements(GatheredScalars, ExtractMask, NumParts); 10652 if (!ExtractShuffles.empty()) { 10653 SmallVector<const TreeEntry *> ExtractEntries; 10654 for (auto [Idx, I] : enumerate(ExtractMask)) { 10655 if (I == PoisonMaskElem) 10656 continue; 10657 if (const auto *TE = getTreeEntry( 10658 cast<ExtractElementInst>(E->Scalars[Idx])->getVectorOperand())) 10659 ExtractEntries.push_back(TE); 10660 } 10661 if (std::optional<ResTy> Delayed = 10662 ShuffleBuilder.needToDelay(E, ExtractEntries)) { 10663 // Delay emission of gathers which are not ready yet. 10664 PostponedGathers.insert(E); 10665 // Postpone gather emission, will be emitted after the end of the 10666 // process to keep correct order. 
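        // The returned value is only a stub standing in for the delayed
        // gather (for codegen this is the dummy load created by needToDelay);
        // vectorizeTree() later replaces it with the real vector once all
        // dependent entries have been emitted.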
10667         return *Delayed;
10668       }
10669       if (Value *VecBase = ShuffleBuilder.adjustExtracts(
10670               E, ExtractMask, ExtractShuffles, NumParts, UseVecBaseAsInput)) {
10671         ExtractVecBase = VecBase;
10672         if (auto *VecBaseTy = dyn_cast<FixedVectorType>(VecBase->getType()))
10673           if (VF == VecBaseTy->getNumElements() &&
10674               GatheredScalars.size() != VF) {
10675             Resized = true;
10676             GatheredScalars.append(VF - GatheredScalars.size(),
10677                                    PoisonValue::get(ScalarTy));
10678           }
10679       }
10680     }
10681     // Gather extracts after we check for full matched gathers only.
10682     if (!ExtractShuffles.empty() || E->getOpcode() != Instruction::Load ||
10683         E->isAltShuffle() ||
10684         all_of(E->Scalars, [this](Value *V) { return getTreeEntry(V); }) ||
10685         isSplat(E->Scalars) ||
10686         (E->Scalars != GatheredScalars && GatheredScalars.size() <= 2)) {
10687       GatherShuffles =
10688           isGatherShuffledEntry(E, GatheredScalars, Mask, Entries, NumParts);
10689     }
10690     if (!GatherShuffles.empty()) {
10691       if (std::optional<ResTy> Delayed =
10692               ShuffleBuilder.needToDelay(E, Entries)) {
10693         // Delay emission of gathers which are not ready yet.
10694         PostponedGathers.insert(E);
10695         // Postpone gather emission, will be emitted after the end of the
10696         // process to keep correct order.
10697         return *Delayed;
10698       }
10699       if (GatherShuffles.size() == 1 &&
10700           *GatherShuffles.front() == TTI::SK_PermuteSingleSrc &&
10701           Entries.front().front()->isSame(E->Scalars)) {
10702         // Perfect match in the graph, will reuse the previously vectorized
10703         // node. Cost is 0.
10704         LLVM_DEBUG(
10705             dbgs()
10706             << "SLP: perfect diamond match for gather bundle "
10707             << shortBundleName(E->Scalars) << ".\n");
10708         // Restore the mask for previous partially matched values.
10709         Mask.resize(E->Scalars.size());
10710         const TreeEntry *FrontTE = Entries.front().front();
10711         if (FrontTE->ReorderIndices.empty() &&
10712             ((FrontTE->ReuseShuffleIndices.empty() &&
10713               E->Scalars.size() == FrontTE->Scalars.size()) ||
10714              (E->Scalars.size() == FrontTE->ReuseShuffleIndices.size()))) {
10715           std::iota(Mask.begin(), Mask.end(), 0);
10716         } else {
10717           for (auto [I, V] : enumerate(E->Scalars)) {
10718             if (isa<PoisonValue>(V)) {
10719               Mask[I] = PoisonMaskElem;
10720               continue;
10721             }
10722             Mask[I] = FrontTE->findLaneForValue(V);
10723           }
10724         }
10725         ShuffleBuilder.add(*FrontTE, Mask);
10726         Res = ShuffleBuilder.finalize(E->getCommonMask());
10727         return Res;
10728       }
10729       if (!Resized) {
10730         if (GatheredScalars.size() != VF &&
10731             any_of(Entries, [&](ArrayRef<const TreeEntry *> TEs) {
10732               return any_of(TEs, [&](const TreeEntry *TE) {
10733                 return TE->getVectorFactor() == VF;
10734               });
10735             }))
10736           GatheredScalars.append(VF - GatheredScalars.size(),
10737                                  PoisonValue::get(ScalarTy));
10738       }
10739       // Remove shuffled elements from list of gathers.
10740       for (int I = 0, Sz = Mask.size(); I < Sz; ++I) {
10741         if (Mask[I] != PoisonMaskElem)
10742           GatheredScalars[I] = PoisonValue::get(ScalarTy);
10743       }
10744     }
10745   }
10746   auto TryPackScalars = [&](SmallVectorImpl<Value *> &Scalars,
10747                             SmallVectorImpl<int> &ReuseMask,
10748                             bool IsRootPoison) {
10749     // For splats we can emit broadcasts instead of gathers, so try to find
10750     // such sequences.
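    // This lambda compacts the gathered scalars in place: duplicates are
    // routed through ReuseMask, a single non-constant value keeps a dedicated
    // insert position, and remaining undefs are either remapped onto a lane
    // known not to be poison or replaced by poison (which forces a freeze of
    // the final vector).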
10751 bool IsSplat = IsRootPoison && isSplat(Scalars) && 10752 (Scalars.size() > 2 || Scalars.front() == Scalars.back()); 10753 Scalars.append(VF - Scalars.size(), PoisonValue::get(ScalarTy)); 10754 SmallVector<int> UndefPos; 10755 DenseMap<Value *, unsigned> UniquePositions; 10756 // Gather unique non-const values and all constant values. 10757 // For repeated values, just shuffle them. 10758 int NumNonConsts = 0; 10759 int SinglePos = 0; 10760 for (auto [I, V] : enumerate(Scalars)) { 10761 if (isa<UndefValue>(V)) { 10762 if (!isa<PoisonValue>(V)) { 10763 ReuseMask[I] = I; 10764 UndefPos.push_back(I); 10765 } 10766 continue; 10767 } 10768 if (isConstant(V)) { 10769 ReuseMask[I] = I; 10770 continue; 10771 } 10772 ++NumNonConsts; 10773 SinglePos = I; 10774 Value *OrigV = V; 10775 Scalars[I] = PoisonValue::get(ScalarTy); 10776 if (IsSplat) { 10777 Scalars.front() = OrigV; 10778 ReuseMask[I] = 0; 10779 } else { 10780 const auto Res = UniquePositions.try_emplace(OrigV, I); 10781 Scalars[Res.first->second] = OrigV; 10782 ReuseMask[I] = Res.first->second; 10783 } 10784 } 10785 if (NumNonConsts == 1) { 10786 // Restore single insert element. 10787 if (IsSplat) { 10788 ReuseMask.assign(VF, PoisonMaskElem); 10789 std::swap(Scalars.front(), Scalars[SinglePos]); 10790 if (!UndefPos.empty() && UndefPos.front() == 0) 10791 Scalars.front() = UndefValue::get(ScalarTy); 10792 } 10793 ReuseMask[SinglePos] = SinglePos; 10794 } else if (!UndefPos.empty() && IsSplat) { 10795 // For undef values, try to replace them with the simple broadcast. 10796 // We can do it if the broadcasted value is guaranteed to be 10797 // non-poisonous, or by freezing the incoming scalar value first. 10798 auto *It = find_if(Scalars, [this, E](Value *V) { 10799 return !isa<UndefValue>(V) && 10800 (getTreeEntry(V) || isGuaranteedNotToBePoison(V) || 10801 (E->UserTreeIndices.size() == 1 && 10802 any_of(V->uses(), [E](const Use &U) { 10803 // Check if the value already used in the same operation in 10804 // one of the nodes already. 10805 return E->UserTreeIndices.front().EdgeIdx != 10806 U.getOperandNo() && 10807 is_contained( 10808 E->UserTreeIndices.front().UserTE->Scalars, 10809 U.getUser()); 10810 }))); 10811 }); 10812 if (It != Scalars.end()) { 10813 // Replace undefs by the non-poisoned scalars and emit broadcast. 10814 int Pos = std::distance(Scalars.begin(), It); 10815 for (int I : UndefPos) { 10816 // Set the undef position to the non-poisoned scalar. 10817 ReuseMask[I] = Pos; 10818 // Replace the undef by the poison, in the mask it is replaced by 10819 // non-poisoned scalar already. 10820 if (I != Pos) 10821 Scalars[I] = PoisonValue::get(ScalarTy); 10822 } 10823 } else { 10824 // Replace undefs by the poisons, emit broadcast and then emit 10825 // freeze. 10826 for (int I : UndefPos) { 10827 ReuseMask[I] = PoisonMaskElem; 10828 if (isa<UndefValue>(Scalars[I])) 10829 Scalars[I] = PoisonValue::get(ScalarTy); 10830 } 10831 NeedFreeze = true; 10832 } 10833 } 10834 }; 10835 if (!ExtractShuffles.empty() || !GatherShuffles.empty()) { 10836 bool IsNonPoisoned = true; 10837 bool IsUsedInExpr = true; 10838 Value *Vec1 = nullptr; 10839 if (!ExtractShuffles.empty()) { 10840 // Gather of extractelements can be represented as just a shuffle of 10841 // a single/two vectors the scalars are extracted from. 10842 // Find input vectors. 
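      // At most two distinct source vectors can feed the extractelements of a
      // part; if adjustExtracts() has already merged the bases into one wide
      // vector (UseVecBaseAsInput), that merged value is used directly.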
10843 Value *Vec2 = nullptr; 10844 for (unsigned I = 0, Sz = ExtractMask.size(); I < Sz; ++I) { 10845 if (!Mask.empty() && Mask[I] != PoisonMaskElem) 10846 ExtractMask[I] = PoisonMaskElem; 10847 } 10848 if (UseVecBaseAsInput) { 10849 Vec1 = ExtractVecBase; 10850 } else { 10851 for (unsigned I = 0, Sz = ExtractMask.size(); I < Sz; ++I) { 10852 if (ExtractMask[I] == PoisonMaskElem) 10853 continue; 10854 if (isa<UndefValue>(E->Scalars[I])) 10855 continue; 10856 auto *EI = cast<ExtractElementInst>(E->Scalars[I]); 10857 Value *VecOp = EI->getVectorOperand(); 10858 if (const auto *TE = getTreeEntry(VecOp)) 10859 if (TE->VectorizedValue) 10860 VecOp = TE->VectorizedValue; 10861 if (!Vec1) { 10862 Vec1 = VecOp; 10863 } else if (Vec1 != EI->getVectorOperand()) { 10864 assert((!Vec2 || Vec2 == EI->getVectorOperand()) && 10865 "Expected only 1 or 2 vectors shuffle."); 10866 Vec2 = VecOp; 10867 } 10868 } 10869 } 10870 if (Vec2) { 10871 IsUsedInExpr = false; 10872 IsNonPoisoned &= 10873 isGuaranteedNotToBePoison(Vec1) && isGuaranteedNotToBePoison(Vec2); 10874 ShuffleBuilder.add(Vec1, Vec2, ExtractMask); 10875 } else if (Vec1) { 10876 IsUsedInExpr &= FindReusedSplat( 10877 ExtractMask, 10878 cast<FixedVectorType>(Vec1->getType())->getNumElements(), 0, 10879 ExtractMask.size()); 10880 ShuffleBuilder.add(Vec1, ExtractMask, /*ForExtracts=*/true); 10881 IsNonPoisoned &= isGuaranteedNotToBePoison(Vec1); 10882 } else { 10883 IsUsedInExpr = false; 10884 ShuffleBuilder.add(PoisonValue::get(FixedVectorType::get( 10885 ScalarTy, GatheredScalars.size())), 10886 ExtractMask, /*ForExtracts=*/true); 10887 } 10888 } 10889 if (!GatherShuffles.empty()) { 10890 unsigned SliceSize = E->Scalars.size() / NumParts; 10891 SmallVector<int> VecMask(Mask.size(), PoisonMaskElem); 10892 for (const auto [I, TEs] : enumerate(Entries)) { 10893 if (TEs.empty()) { 10894 assert(!GatherShuffles[I] && 10895 "No shuffles with empty entries list expected."); 10896 continue; 10897 } 10898 assert((TEs.size() == 1 || TEs.size() == 2) && 10899 "Expected shuffle of 1 or 2 entries."); 10900 auto SubMask = ArrayRef(Mask).slice(I * SliceSize, SliceSize); 10901 VecMask.assign(VecMask.size(), PoisonMaskElem); 10902 copy(SubMask, std::next(VecMask.begin(), I * SliceSize)); 10903 if (TEs.size() == 1) { 10904 IsUsedInExpr &= 10905 FindReusedSplat(VecMask, TEs.front()->getVectorFactor(), I, SliceSize); 10906 ShuffleBuilder.add(*TEs.front(), VecMask); 10907 if (TEs.front()->VectorizedValue) 10908 IsNonPoisoned &= 10909 isGuaranteedNotToBePoison(TEs.front()->VectorizedValue); 10910 } else { 10911 IsUsedInExpr = false; 10912 ShuffleBuilder.add(*TEs.front(), *TEs.back(), VecMask); 10913 if (TEs.front()->VectorizedValue && TEs.back()->VectorizedValue) 10914 IsNonPoisoned &= 10915 isGuaranteedNotToBePoison(TEs.front()->VectorizedValue) && 10916 isGuaranteedNotToBePoison(TEs.back()->VectorizedValue); 10917 } 10918 } 10919 } 10920 // Try to figure out best way to combine values: build a shuffle and insert 10921 // elements or just build several shuffles. 10922 // Insert non-constant scalars. 10923 SmallVector<Value *> NonConstants(GatheredScalars); 10924 int EMSz = ExtractMask.size(); 10925 int MSz = Mask.size(); 10926 // Try to build constant vector and shuffle with it only if currently we 10927 // have a single permutation and more than 1 scalar constants. 
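    // The flags computed below pick one of two strategies: fold all constant
    // scalars into one extra build vector that is shuffled in here, or leave
    // them to be inserted together with the non-constant scalars in the
    // finalize() action.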
10928 bool IsSingleShuffle = ExtractShuffles.empty() || GatherShuffles.empty(); 10929 bool IsIdentityShuffle = 10930 ((UseVecBaseAsInput || 10931 all_of(ExtractShuffles, 10932 [](const std::optional<TTI::ShuffleKind> &SK) { 10933 return SK.value_or(TTI::SK_PermuteTwoSrc) == 10934 TTI::SK_PermuteSingleSrc; 10935 })) && 10936 none_of(ExtractMask, [&](int I) { return I >= EMSz; }) && 10937 ShuffleVectorInst::isIdentityMask(ExtractMask, EMSz)) || 10938 (!GatherShuffles.empty() && 10939 all_of(GatherShuffles, 10940 [](const std::optional<TTI::ShuffleKind> &SK) { 10941 return SK.value_or(TTI::SK_PermuteTwoSrc) == 10942 TTI::SK_PermuteSingleSrc; 10943 }) && 10944 none_of(Mask, [&](int I) { return I >= MSz; }) && 10945 ShuffleVectorInst::isIdentityMask(Mask, MSz)); 10946 bool EnoughConstsForShuffle = 10947 IsSingleShuffle && 10948 (none_of(GatheredScalars, 10949 [](Value *V) { 10950 return isa<UndefValue>(V) && !isa<PoisonValue>(V); 10951 }) || 10952 any_of(GatheredScalars, 10953 [](Value *V) { 10954 return isa<Constant>(V) && !isa<UndefValue>(V); 10955 })) && 10956 (!IsIdentityShuffle || 10957 (GatheredScalars.size() == 2 && 10958 any_of(GatheredScalars, 10959 [](Value *V) { return !isa<UndefValue>(V); })) || 10960 count_if(GatheredScalars, [](Value *V) { 10961 return isa<Constant>(V) && !isa<PoisonValue>(V); 10962 }) > 1); 10963 // NonConstants array contains just non-constant values, GatheredScalars 10964 // contains only constant to build final vector and then shuffle. 10965 for (int I = 0, Sz = GatheredScalars.size(); I < Sz; ++I) { 10966 if (EnoughConstsForShuffle && isa<Constant>(GatheredScalars[I])) 10967 NonConstants[I] = PoisonValue::get(ScalarTy); 10968 else 10969 GatheredScalars[I] = PoisonValue::get(ScalarTy); 10970 } 10971 // Generate constants for final shuffle and build a mask for them. 10972 if (!all_of(GatheredScalars, PoisonValue::classof)) { 10973 SmallVector<int> BVMask(GatheredScalars.size(), PoisonMaskElem); 10974 TryPackScalars(GatheredScalars, BVMask, /*IsRootPoison=*/true); 10975 Value *BV = ShuffleBuilder.gather(GatheredScalars, BVMask.size()); 10976 ShuffleBuilder.add(BV, BVMask); 10977 } 10978 if (all_of(NonConstants, [=](Value *V) { 10979 return isa<PoisonValue>(V) || 10980 (IsSingleShuffle && ((IsIdentityShuffle && 10981 IsNonPoisoned) || IsUsedInExpr) && isa<UndefValue>(V)); 10982 })) 10983 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices); 10984 else 10985 Res = ShuffleBuilder.finalize( 10986 E->ReuseShuffleIndices, E->Scalars.size(), 10987 [&](Value *&Vec, SmallVectorImpl<int> &Mask) { 10988 TryPackScalars(NonConstants, Mask, /*IsRootPoison=*/false); 10989 Vec = ShuffleBuilder.gather(NonConstants, Mask.size(), Vec); 10990 }); 10991 } else if (!allConstant(GatheredScalars)) { 10992 // Gather unique scalars and all constants. 10993 SmallVector<int> ReuseMask(GatheredScalars.size(), PoisonMaskElem); 10994 TryPackScalars(GatheredScalars, ReuseMask, /*IsRootPoison=*/true); 10995 Value *BV = ShuffleBuilder.gather(GatheredScalars, ReuseMask.size()); 10996 ShuffleBuilder.add(BV, ReuseMask); 10997 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices); 10998 } else { 10999 // Gather all constants. 
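    // Every gathered scalar is a constant here, so the emitted "gather" folds
    // to a plain constant vector; the mask below simply keeps the non-poison
    // lanes in place.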
11000 SmallVector<int> Mask(E->Scalars.size(), PoisonMaskElem); 11001 for (auto [I, V] : enumerate(E->Scalars)) { 11002 if (!isa<PoisonValue>(V)) 11003 Mask[I] = I; 11004 } 11005 Value *BV = ShuffleBuilder.gather(E->Scalars); 11006 ShuffleBuilder.add(BV, Mask); 11007 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices); 11008 } 11009 11010 if (NeedFreeze) 11011 Res = ShuffleBuilder.createFreeze(Res); 11012 return Res; 11013 } 11014 11015 Value *BoUpSLP::createBuildVector(const TreeEntry *E) { 11016 return processBuildVector<ShuffleInstructionBuilder, Value *>(E, Builder, 11017 *this); 11018 } 11019 11020 Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) { 11021 IRBuilder<>::InsertPointGuard Guard(Builder); 11022 11023 if (E->VectorizedValue && 11024 (E->State != TreeEntry::Vectorize || E->getOpcode() != Instruction::PHI || 11025 E->isAltShuffle())) { 11026 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n"); 11027 return E->VectorizedValue; 11028 } 11029 11030 if (E->State == TreeEntry::NeedToGather) { 11031 // Set insert point for non-reduction initial nodes. 11032 if (E->getMainOp() && E->Idx == 0 && !UserIgnoreList) 11033 setInsertPointAfterBundle(E); 11034 Value *Vec = createBuildVector(E); 11035 E->VectorizedValue = Vec; 11036 return Vec; 11037 } 11038 11039 auto FinalShuffle = [&](Value *V, const TreeEntry *E, VectorType *VecTy, 11040 bool IsSigned) { 11041 if (V->getType() != VecTy) 11042 V = Builder.CreateIntCast(V, VecTy, IsSigned); 11043 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this); 11044 if (E->getOpcode() == Instruction::Store) { 11045 ArrayRef<int> Mask = 11046 ArrayRef(reinterpret_cast<const int *>(E->ReorderIndices.begin()), 11047 E->ReorderIndices.size()); 11048 ShuffleBuilder.add(V, Mask); 11049 } else if (E->State == TreeEntry::PossibleStridedVectorize) { 11050 ShuffleBuilder.addOrdered(V, std::nullopt); 11051 } else { 11052 ShuffleBuilder.addOrdered(V, E->ReorderIndices); 11053 } 11054 return ShuffleBuilder.finalize(E->ReuseShuffleIndices); 11055 }; 11056 11057 assert((E->State == TreeEntry::Vectorize || 11058 E->State == TreeEntry::ScatterVectorize || 11059 E->State == TreeEntry::PossibleStridedVectorize) && 11060 "Unhandled state"); 11061 unsigned ShuffleOrOp = 11062 E->isAltShuffle() ? 
(unsigned)Instruction::ShuffleVector : E->getOpcode(); 11063 Instruction *VL0 = E->getMainOp(); 11064 Type *ScalarTy = VL0->getType(); 11065 if (auto *Store = dyn_cast<StoreInst>(VL0)) 11066 ScalarTy = Store->getValueOperand()->getType(); 11067 else if (auto *IE = dyn_cast<InsertElementInst>(VL0)) 11068 ScalarTy = IE->getOperand(1)->getType(); 11069 bool IsSigned = false; 11070 auto It = MinBWs.find(E); 11071 if (It != MinBWs.end()) { 11072 ScalarTy = IntegerType::get(F->getContext(), It->second.first); 11073 IsSigned = It->second.second; 11074 } 11075 auto *VecTy = FixedVectorType::get(ScalarTy, E->Scalars.size()); 11076 switch (ShuffleOrOp) { 11077 case Instruction::PHI: { 11078 assert((E->ReorderIndices.empty() || 11079 E != VectorizableTree.front().get() || 11080 !E->UserTreeIndices.empty()) && 11081 "PHI reordering is free."); 11082 if (PostponedPHIs && E->VectorizedValue) 11083 return E->VectorizedValue; 11084 auto *PH = cast<PHINode>(VL0); 11085 Builder.SetInsertPoint(PH->getParent(), 11086 PH->getParent()->getFirstNonPHIIt()); 11087 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 11088 if (PostponedPHIs || !E->VectorizedValue) { 11089 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues()); 11090 E->PHI = NewPhi; 11091 Value *V = NewPhi; 11092 11093 // Adjust insertion point once all PHI's have been generated. 11094 Builder.SetInsertPoint(PH->getParent(), 11095 PH->getParent()->getFirstInsertionPt()); 11096 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 11097 11098 V = FinalShuffle(V, E, VecTy, IsSigned); 11099 11100 E->VectorizedValue = V; 11101 if (PostponedPHIs) 11102 return V; 11103 } 11104 PHINode *NewPhi = cast<PHINode>(E->PHI); 11105 // If phi node is fully emitted - exit. 11106 if (NewPhi->getNumIncomingValues() != 0) 11107 return NewPhi; 11108 11109 // PHINodes may have multiple entries from the same block. We want to 11110 // visit every block once. 11111 SmallPtrSet<BasicBlock *, 4> VisitedBBs; 11112 11113 for (unsigned I : seq<unsigned>(0, PH->getNumIncomingValues())) { 11114 ValueList Operands; 11115 BasicBlock *IBB = PH->getIncomingBlock(I); 11116 11117 // Stop emission if all incoming values are generated. 
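      // A block that feeds the PHI through several edges gets the same
      // incoming vector for each of them (see the VisitedBBs check below);
      // otherwise the operand is vectorized right before that block's
      // terminator.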
11118 if (NewPhi->getNumIncomingValues() == PH->getNumIncomingValues()) { 11119 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11120 return NewPhi; 11121 } 11122 11123 if (!VisitedBBs.insert(IBB).second) { 11124 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB); 11125 continue; 11126 } 11127 11128 Builder.SetInsertPoint(IBB->getTerminator()); 11129 Builder.SetCurrentDebugLocation(PH->getDebugLoc()); 11130 Value *Vec = vectorizeOperand(E, I, /*PostponedPHIs=*/true); 11131 if (VecTy != Vec->getType()) { 11132 assert(MinBWs.contains(getOperandEntry(E, I)) && 11133 "Expected item in MinBWs."); 11134 Vec = Builder.CreateIntCast(Vec, VecTy, It->second.second); 11135 } 11136 NewPhi->addIncoming(Vec, IBB); 11137 } 11138 11139 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() && 11140 "Invalid number of incoming values"); 11141 return NewPhi; 11142 } 11143 11144 case Instruction::ExtractElement: { 11145 Value *V = E->getSingleOperand(0); 11146 if (const TreeEntry *TE = getTreeEntry(V)) 11147 V = TE->VectorizedValue; 11148 setInsertPointAfterBundle(E); 11149 V = FinalShuffle(V, E, VecTy, IsSigned); 11150 E->VectorizedValue = V; 11151 return V; 11152 } 11153 case Instruction::ExtractValue: { 11154 auto *LI = cast<LoadInst>(E->getSingleOperand(0)); 11155 Builder.SetInsertPoint(LI); 11156 Value *Ptr = LI->getPointerOperand(); 11157 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign()); 11158 Value *NewV = propagateMetadata(V, E->Scalars); 11159 NewV = FinalShuffle(NewV, E, VecTy, IsSigned); 11160 E->VectorizedValue = NewV; 11161 return NewV; 11162 } 11163 case Instruction::InsertElement: { 11164 assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique"); 11165 Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back())); 11166 Value *V = vectorizeOperand(E, 1, PostponedPHIs); 11167 ArrayRef<Value *> Op = E->getOperand(1); 11168 Type *ScalarTy = Op.front()->getType(); 11169 if (cast<VectorType>(V->getType())->getElementType() != ScalarTy) { 11170 assert(ScalarTy->isIntegerTy() && "Expected item in MinBWs."); 11171 std::pair<unsigned, bool> Res = MinBWs.lookup(getOperandEntry(E, 1)); 11172 assert(Res.first > 0 && "Expected item in MinBWs."); 11173 V = Builder.CreateIntCast( 11174 V, 11175 FixedVectorType::get( 11176 ScalarTy, 11177 cast<FixedVectorType>(V->getType())->getNumElements()), 11178 Res.second); 11179 } 11180 11181 // Create InsertVector shuffle if necessary 11182 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) { 11183 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0)); 11184 })); 11185 const unsigned NumElts = 11186 cast<FixedVectorType>(FirstInsert->getType())->getNumElements(); 11187 const unsigned NumScalars = E->Scalars.size(); 11188 11189 unsigned Offset = *getInsertIndex(VL0); 11190 assert(Offset < NumElts && "Failed to find vector index offset"); 11191 11192 // Create shuffle to resize vector 11193 SmallVector<int> Mask; 11194 if (!E->ReorderIndices.empty()) { 11195 inversePermutation(E->ReorderIndices, Mask); 11196 Mask.append(NumElts - NumScalars, PoisonMaskElem); 11197 } else { 11198 Mask.assign(NumElts, PoisonMaskElem); 11199 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0); 11200 } 11201 // Create InsertVector shuffle if necessary 11202 bool IsIdentity = true; 11203 SmallVector<int> PrevMask(NumElts, PoisonMaskElem); 11204 Mask.swap(PrevMask); 11205 for (unsigned I = 0; I < NumScalars; ++I) { 11206 Value *Scalar = E->Scalars[PrevMask[I]]; 11207 
unsigned InsertIdx = *getInsertIndex(Scalar); 11208 IsIdentity &= InsertIdx - Offset == I; 11209 Mask[InsertIdx - Offset] = I; 11210 } 11211 if (!IsIdentity || NumElts != NumScalars) { 11212 Value *V2 = nullptr; 11213 bool IsVNonPoisonous = isGuaranteedNotToBePoison(V) && !isConstant(V); 11214 SmallVector<int> InsertMask(Mask); 11215 if (NumElts != NumScalars && Offset == 0) { 11216 // Follow all insert element instructions from the current buildvector 11217 // sequence. 11218 InsertElementInst *Ins = cast<InsertElementInst>(VL0); 11219 do { 11220 std::optional<unsigned> InsertIdx = getInsertIndex(Ins); 11221 if (!InsertIdx) 11222 break; 11223 if (InsertMask[*InsertIdx] == PoisonMaskElem) 11224 InsertMask[*InsertIdx] = *InsertIdx; 11225 if (!Ins->hasOneUse()) 11226 break; 11227 Ins = dyn_cast_or_null<InsertElementInst>( 11228 Ins->getUniqueUndroppableUser()); 11229 } while (Ins); 11230 SmallBitVector UseMask = 11231 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask); 11232 SmallBitVector IsFirstPoison = 11233 isUndefVector<true>(FirstInsert->getOperand(0), UseMask); 11234 SmallBitVector IsFirstUndef = 11235 isUndefVector(FirstInsert->getOperand(0), UseMask); 11236 if (!IsFirstPoison.all()) { 11237 unsigned Idx = 0; 11238 for (unsigned I = 0; I < NumElts; I++) { 11239 if (InsertMask[I] == PoisonMaskElem && !IsFirstPoison.test(I) && 11240 IsFirstUndef.test(I)) { 11241 if (IsVNonPoisonous) { 11242 InsertMask[I] = I < NumScalars ? I : 0; 11243 continue; 11244 } 11245 if (!V2) 11246 V2 = UndefValue::get(V->getType()); 11247 if (Idx >= NumScalars) 11248 Idx = NumScalars - 1; 11249 InsertMask[I] = NumScalars + Idx; 11250 ++Idx; 11251 } else if (InsertMask[I] != PoisonMaskElem && 11252 Mask[I] == PoisonMaskElem) { 11253 InsertMask[I] = PoisonMaskElem; 11254 } 11255 } 11256 } else { 11257 InsertMask = Mask; 11258 } 11259 } 11260 if (!V2) 11261 V2 = PoisonValue::get(V->getType()); 11262 V = Builder.CreateShuffleVector(V, V2, InsertMask); 11263 if (auto *I = dyn_cast<Instruction>(V)) { 11264 GatherShuffleExtractSeq.insert(I); 11265 CSEBlocks.insert(I->getParent()); 11266 } 11267 } 11268 11269 SmallVector<int> InsertMask(NumElts, PoisonMaskElem); 11270 for (unsigned I = 0; I < NumElts; I++) { 11271 if (Mask[I] != PoisonMaskElem) 11272 InsertMask[Offset + I] = I; 11273 } 11274 SmallBitVector UseMask = 11275 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask); 11276 SmallBitVector IsFirstUndef = 11277 isUndefVector(FirstInsert->getOperand(0), UseMask); 11278 if ((!IsIdentity || Offset != 0 || !IsFirstUndef.all()) && 11279 NumElts != NumScalars) { 11280 if (IsFirstUndef.all()) { 11281 if (!ShuffleVectorInst::isIdentityMask(InsertMask, NumElts)) { 11282 SmallBitVector IsFirstPoison = 11283 isUndefVector<true>(FirstInsert->getOperand(0), UseMask); 11284 if (!IsFirstPoison.all()) { 11285 for (unsigned I = 0; I < NumElts; I++) { 11286 if (InsertMask[I] == PoisonMaskElem && !IsFirstPoison.test(I)) 11287 InsertMask[I] = I + NumElts; 11288 } 11289 } 11290 V = Builder.CreateShuffleVector( 11291 V, 11292 IsFirstPoison.all() ? 
PoisonValue::get(V->getType()) 11293 : FirstInsert->getOperand(0), 11294 InsertMask, cast<Instruction>(E->Scalars.back())->getName()); 11295 if (auto *I = dyn_cast<Instruction>(V)) { 11296 GatherShuffleExtractSeq.insert(I); 11297 CSEBlocks.insert(I->getParent()); 11298 } 11299 } 11300 } else { 11301 SmallBitVector IsFirstPoison = 11302 isUndefVector<true>(FirstInsert->getOperand(0), UseMask); 11303 for (unsigned I = 0; I < NumElts; I++) { 11304 if (InsertMask[I] == PoisonMaskElem) 11305 InsertMask[I] = IsFirstPoison.test(I) ? PoisonMaskElem : I; 11306 else 11307 InsertMask[I] += NumElts; 11308 } 11309 V = Builder.CreateShuffleVector( 11310 FirstInsert->getOperand(0), V, InsertMask, 11311 cast<Instruction>(E->Scalars.back())->getName()); 11312 if (auto *I = dyn_cast<Instruction>(V)) { 11313 GatherShuffleExtractSeq.insert(I); 11314 CSEBlocks.insert(I->getParent()); 11315 } 11316 } 11317 } 11318 11319 ++NumVectorInstructions; 11320 E->VectorizedValue = V; 11321 return V; 11322 } 11323 case Instruction::ZExt: 11324 case Instruction::SExt: 11325 case Instruction::FPToUI: 11326 case Instruction::FPToSI: 11327 case Instruction::FPExt: 11328 case Instruction::PtrToInt: 11329 case Instruction::IntToPtr: 11330 case Instruction::SIToFP: 11331 case Instruction::UIToFP: 11332 case Instruction::Trunc: 11333 case Instruction::FPTrunc: 11334 case Instruction::BitCast: { 11335 setInsertPointAfterBundle(E); 11336 11337 Value *InVec = vectorizeOperand(E, 0, PostponedPHIs); 11338 if (E->VectorizedValue) { 11339 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11340 return E->VectorizedValue; 11341 } 11342 11343 auto *CI = cast<CastInst>(VL0); 11344 Instruction::CastOps VecOpcode = CI->getOpcode(); 11345 Type *SrcScalarTy = VL0->getOperand(0)->getType(); 11346 auto SrcIt = MinBWs.find(getOperandEntry(E, 0)); 11347 if (!ScalarTy->isFloatingPointTy() && !SrcScalarTy->isFloatingPointTy() && 11348 (SrcIt != MinBWs.end() || It != MinBWs.end())) { 11349 // Check if the values are candidates to demote. 11350 unsigned SrcBWSz = DL->getTypeSizeInBits(SrcScalarTy); 11351 if (SrcIt != MinBWs.end()) 11352 SrcBWSz = SrcIt->second.first; 11353 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy); 11354 if (BWSz == SrcBWSz) { 11355 VecOpcode = Instruction::BitCast; 11356 } else if (BWSz < SrcBWSz) { 11357 VecOpcode = Instruction::Trunc; 11358 } else if (It != MinBWs.end()) { 11359 assert(BWSz > SrcBWSz && "Invalid cast!"); 11360 VecOpcode = It->second.second ? Instruction::SExt : Instruction::ZExt; 11361 } 11362 } 11363 Value *V = (VecOpcode != ShuffleOrOp && VecOpcode == Instruction::BitCast) 11364 ? 
InVec 11365 : Builder.CreateCast(VecOpcode, InVec, VecTy); 11366 V = FinalShuffle(V, E, VecTy, IsSigned); 11367 11368 E->VectorizedValue = V; 11369 ++NumVectorInstructions; 11370 return V; 11371 } 11372 case Instruction::FCmp: 11373 case Instruction::ICmp: { 11374 setInsertPointAfterBundle(E); 11375 11376 Value *L = vectorizeOperand(E, 0, PostponedPHIs); 11377 if (E->VectorizedValue) { 11378 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11379 return E->VectorizedValue; 11380 } 11381 Value *R = vectorizeOperand(E, 1, PostponedPHIs); 11382 if (E->VectorizedValue) { 11383 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11384 return E->VectorizedValue; 11385 } 11386 if (L->getType() != R->getType()) { 11387 assert((MinBWs.contains(getOperandEntry(E, 0)) || 11388 MinBWs.contains(getOperandEntry(E, 1))) && 11389 "Expected item in MinBWs."); 11390 L = Builder.CreateIntCast(L, VecTy, IsSigned); 11391 R = Builder.CreateIntCast(R, VecTy, IsSigned); 11392 } 11393 11394 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate(); 11395 Value *V = Builder.CreateCmp(P0, L, R); 11396 propagateIRFlags(V, E->Scalars, VL0); 11397 // Do not cast for cmps. 11398 VecTy = cast<FixedVectorType>(V->getType()); 11399 V = FinalShuffle(V, E, VecTy, IsSigned); 11400 11401 E->VectorizedValue = V; 11402 ++NumVectorInstructions; 11403 return V; 11404 } 11405 case Instruction::Select: { 11406 setInsertPointAfterBundle(E); 11407 11408 Value *Cond = vectorizeOperand(E, 0, PostponedPHIs); 11409 if (E->VectorizedValue) { 11410 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11411 return E->VectorizedValue; 11412 } 11413 Value *True = vectorizeOperand(E, 1, PostponedPHIs); 11414 if (E->VectorizedValue) { 11415 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11416 return E->VectorizedValue; 11417 } 11418 Value *False = vectorizeOperand(E, 2, PostponedPHIs); 11419 if (E->VectorizedValue) { 11420 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11421 return E->VectorizedValue; 11422 } 11423 if (True->getType() != False->getType()) { 11424 assert((MinBWs.contains(getOperandEntry(E, 1)) || 11425 MinBWs.contains(getOperandEntry(E, 2))) && 11426 "Expected item in MinBWs."); 11427 True = Builder.CreateIntCast(True, VecTy, IsSigned); 11428 False = Builder.CreateIntCast(False, VecTy, IsSigned); 11429 } 11430 11431 Value *V = Builder.CreateSelect(Cond, True, False); 11432 V = FinalShuffle(V, E, VecTy, IsSigned); 11433 11434 E->VectorizedValue = V; 11435 ++NumVectorInstructions; 11436 return V; 11437 } 11438 case Instruction::FNeg: { 11439 setInsertPointAfterBundle(E); 11440 11441 Value *Op = vectorizeOperand(E, 0, PostponedPHIs); 11442 11443 if (E->VectorizedValue) { 11444 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11445 return E->VectorizedValue; 11446 } 11447 11448 Value *V = Builder.CreateUnOp( 11449 static_cast<Instruction::UnaryOps>(E->getOpcode()), Op); 11450 propagateIRFlags(V, E->Scalars, VL0); 11451 if (auto *I = dyn_cast<Instruction>(V)) 11452 V = propagateMetadata(I, E->Scalars); 11453 11454 V = FinalShuffle(V, E, VecTy, IsSigned); 11455 11456 E->VectorizedValue = V; 11457 ++NumVectorInstructions; 11458 11459 return V; 11460 } 11461 case Instruction::Add: 11462 case Instruction::FAdd: 11463 case Instruction::Sub: 11464 case Instruction::FSub: 11465 case Instruction::Mul: 11466 case Instruction::FMul: 11467 case Instruction::UDiv: 11468 case Instruction::SDiv: 11469 case Instruction::FDiv: 11470 case 
Instruction::URem: 11471 case Instruction::SRem: 11472 case Instruction::FRem: 11473 case Instruction::Shl: 11474 case Instruction::LShr: 11475 case Instruction::AShr: 11476 case Instruction::And: 11477 case Instruction::Or: 11478 case Instruction::Xor: { 11479 setInsertPointAfterBundle(E); 11480 11481 Value *LHS = vectorizeOperand(E, 0, PostponedPHIs); 11482 if (E->VectorizedValue) { 11483 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11484 return E->VectorizedValue; 11485 } 11486 Value *RHS = vectorizeOperand(E, 1, PostponedPHIs); 11487 if (E->VectorizedValue) { 11488 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11489 return E->VectorizedValue; 11490 } 11491 if (LHS->getType() != RHS->getType()) { 11492 assert((MinBWs.contains(getOperandEntry(E, 0)) || 11493 MinBWs.contains(getOperandEntry(E, 1))) && 11494 "Expected item in MinBWs."); 11495 LHS = Builder.CreateIntCast(LHS, VecTy, IsSigned); 11496 RHS = Builder.CreateIntCast(RHS, VecTy, IsSigned); 11497 } 11498 11499 Value *V = Builder.CreateBinOp( 11500 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, 11501 RHS); 11502 propagateIRFlags(V, E->Scalars, VL0, !MinBWs.contains(E)); 11503 if (auto *I = dyn_cast<Instruction>(V)) 11504 V = propagateMetadata(I, E->Scalars); 11505 11506 V = FinalShuffle(V, E, VecTy, IsSigned); 11507 11508 E->VectorizedValue = V; 11509 ++NumVectorInstructions; 11510 11511 return V; 11512 } 11513 case Instruction::Load: { 11514 // Loads are inserted at the head of the tree because we don't want to 11515 // sink them all the way down past store instructions. 11516 setInsertPointAfterBundle(E); 11517 11518 LoadInst *LI = cast<LoadInst>(VL0); 11519 Instruction *NewLI; 11520 Value *PO = LI->getPointerOperand(); 11521 if (E->State == TreeEntry::Vectorize) { 11522 NewLI = Builder.CreateAlignedLoad(VecTy, PO, LI->getAlign()); 11523 } else { 11524 assert((E->State == TreeEntry::ScatterVectorize || 11525 E->State == TreeEntry::PossibleStridedVectorize) && 11526 "Unhandled state"); 11527 Value *VecPtr = vectorizeOperand(E, 0, PostponedPHIs); 11528 if (E->VectorizedValue) { 11529 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11530 return E->VectorizedValue; 11531 } 11532 // Use the minimum alignment of the gathered loads. 
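      // Non-consecutive (scatter or possibly strided) loads are emitted as a
      // masked gather over the vector of pointers; taking the smallest scalar
      // alignment keeps the access conservatively correct for every lane.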
11533 Align CommonAlignment = LI->getAlign(); 11534 for (Value *V : E->Scalars) 11535 CommonAlignment = 11536 std::min(CommonAlignment, cast<LoadInst>(V)->getAlign()); 11537 NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment); 11538 } 11539 Value *V = propagateMetadata(NewLI, E->Scalars); 11540 11541 V = FinalShuffle(V, E, VecTy, IsSigned); 11542 E->VectorizedValue = V; 11543 ++NumVectorInstructions; 11544 return V; 11545 } 11546 case Instruction::Store: { 11547 auto *SI = cast<StoreInst>(VL0); 11548 11549 setInsertPointAfterBundle(E); 11550 11551 Value *VecValue = vectorizeOperand(E, 0, PostponedPHIs); 11552 VecValue = FinalShuffle(VecValue, E, VecTy, IsSigned); 11553 11554 Value *Ptr = SI->getPointerOperand(); 11555 StoreInst *ST = 11556 Builder.CreateAlignedStore(VecValue, Ptr, SI->getAlign()); 11557 11558 Value *V = propagateMetadata(ST, E->Scalars); 11559 11560 E->VectorizedValue = V; 11561 ++NumVectorInstructions; 11562 return V; 11563 } 11564 case Instruction::GetElementPtr: { 11565 auto *GEP0 = cast<GetElementPtrInst>(VL0); 11566 setInsertPointAfterBundle(E); 11567 11568 Value *Op0 = vectorizeOperand(E, 0, PostponedPHIs); 11569 if (E->VectorizedValue) { 11570 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11571 return E->VectorizedValue; 11572 } 11573 11574 SmallVector<Value *> OpVecs; 11575 for (int J = 1, N = GEP0->getNumOperands(); J < N; ++J) { 11576 Value *OpVec = vectorizeOperand(E, J, PostponedPHIs); 11577 if (E->VectorizedValue) { 11578 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11579 return E->VectorizedValue; 11580 } 11581 OpVecs.push_back(OpVec); 11582 } 11583 11584 Value *V = Builder.CreateGEP(GEP0->getSourceElementType(), Op0, OpVecs); 11585 if (Instruction *I = dyn_cast<GetElementPtrInst>(V)) { 11586 SmallVector<Value *> GEPs; 11587 for (Value *V : E->Scalars) { 11588 if (isa<GetElementPtrInst>(V)) 11589 GEPs.push_back(V); 11590 } 11591 V = propagateMetadata(I, GEPs); 11592 } 11593 11594 V = FinalShuffle(V, E, VecTy, IsSigned); 11595 11596 E->VectorizedValue = V; 11597 ++NumVectorInstructions; 11598 11599 return V; 11600 } 11601 case Instruction::Call: { 11602 CallInst *CI = cast<CallInst>(VL0); 11603 setInsertPointAfterBundle(E); 11604 11605 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 11606 11607 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI); 11608 bool UseIntrinsic = ID != Intrinsic::not_intrinsic && 11609 VecCallCosts.first <= VecCallCosts.second; 11610 11611 Value *ScalarArg = nullptr; 11612 SmallVector<Value *> OpVecs; 11613 SmallVector<Type *, 2> TysForDecl; 11614 // Add return type if intrinsic is overloaded on it. 11615 if (UseIntrinsic && isVectorIntrinsicWithOverloadTypeAtArg(ID, -1)) 11616 TysForDecl.push_back( 11617 FixedVectorType::get(CI->getType(), E->Scalars.size())); 11618 for (unsigned I : seq<unsigned>(0, CI->arg_size())) { 11619 ValueList OpVL; 11620 // Some intrinsics have scalar arguments. This argument should not be 11621 // vectorized. 
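      // (For example, the i32 exponent of llvm.powi stays scalar even when
      // the base operand is vectorized.)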
11622 if (UseIntrinsic && isVectorIntrinsicWithScalarOpAtArg(ID, I)) { 11623 CallInst *CEI = cast<CallInst>(VL0); 11624 ScalarArg = CEI->getArgOperand(I); 11625 OpVecs.push_back(CEI->getArgOperand(I)); 11626 if (isVectorIntrinsicWithOverloadTypeAtArg(ID, I)) 11627 TysForDecl.push_back(ScalarArg->getType()); 11628 continue; 11629 } 11630 11631 Value *OpVec = vectorizeOperand(E, I, PostponedPHIs); 11632 if (E->VectorizedValue) { 11633 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11634 return E->VectorizedValue; 11635 } 11636 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << I << "]: " << *OpVec << "\n"); 11637 OpVecs.push_back(OpVec); 11638 if (UseIntrinsic && isVectorIntrinsicWithOverloadTypeAtArg(ID, I)) 11639 TysForDecl.push_back(OpVec->getType()); 11640 } 11641 11642 Function *CF; 11643 if (!UseIntrinsic) { 11644 VFShape Shape = 11645 VFShape::get(CI->getFunctionType(), 11646 ElementCount::getFixed( 11647 static_cast<unsigned>(VecTy->getNumElements())), 11648 false /*HasGlobalPred*/); 11649 CF = VFDatabase(*CI).getVectorizedFunction(Shape); 11650 } else { 11651 CF = Intrinsic::getDeclaration(F->getParent(), ID, TysForDecl); 11652 } 11653 11654 SmallVector<OperandBundleDef, 1> OpBundles; 11655 CI->getOperandBundlesAsDefs(OpBundles); 11656 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles); 11657 11658 propagateIRFlags(V, E->Scalars, VL0); 11659 V = FinalShuffle(V, E, VecTy, IsSigned); 11660 11661 E->VectorizedValue = V; 11662 ++NumVectorInstructions; 11663 return V; 11664 } 11665 case Instruction::ShuffleVector: { 11666 assert(E->isAltShuffle() && 11667 ((Instruction::isBinaryOp(E->getOpcode()) && 11668 Instruction::isBinaryOp(E->getAltOpcode())) || 11669 (Instruction::isCast(E->getOpcode()) && 11670 Instruction::isCast(E->getAltOpcode())) || 11671 (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) && 11672 "Invalid Shuffle Vector Operand"); 11673 11674 Value *LHS = nullptr, *RHS = nullptr; 11675 if (Instruction::isBinaryOp(E->getOpcode()) || isa<CmpInst>(VL0)) { 11676 setInsertPointAfterBundle(E); 11677 LHS = vectorizeOperand(E, 0, PostponedPHIs); 11678 if (E->VectorizedValue) { 11679 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11680 return E->VectorizedValue; 11681 } 11682 RHS = vectorizeOperand(E, 1, PostponedPHIs); 11683 } else { 11684 setInsertPointAfterBundle(E); 11685 LHS = vectorizeOperand(E, 0, PostponedPHIs); 11686 } 11687 if (E->VectorizedValue) { 11688 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n"); 11689 return E->VectorizedValue; 11690 } 11691 if (LHS && RHS && LHS->getType() != RHS->getType()) { 11692 assert((MinBWs.contains(getOperandEntry(E, 0)) || 11693 MinBWs.contains(getOperandEntry(E, 1))) && 11694 "Expected item in MinBWs."); 11695 LHS = Builder.CreateIntCast(LHS, VecTy, IsSigned); 11696 RHS = Builder.CreateIntCast(RHS, VecTy, IsSigned); 11697 } 11698 11699 Value *V0, *V1; 11700 if (Instruction::isBinaryOp(E->getOpcode())) { 11701 V0 = Builder.CreateBinOp( 11702 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS); 11703 V1 = Builder.CreateBinOp( 11704 static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS); 11705 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) { 11706 V0 = Builder.CreateCmp(CI0->getPredicate(), LHS, RHS); 11707 auto *AltCI = cast<CmpInst>(E->getAltOp()); 11708 CmpInst::Predicate AltPred = AltCI->getPredicate(); 11709 V1 = Builder.CreateCmp(AltPred, LHS, RHS); 11710 } else { 11711 V0 = Builder.CreateCast( 11712 static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy); 
11713 V1 = Builder.CreateCast( 11714 static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy); 11715 } 11716 // Add V0 and V1 to later analysis to try to find and remove matching 11717 // instruction, if any. 11718 for (Value *V : {V0, V1}) { 11719 if (auto *I = dyn_cast<Instruction>(V)) { 11720 GatherShuffleExtractSeq.insert(I); 11721 CSEBlocks.insert(I->getParent()); 11722 } 11723 } 11724 11725 // Create shuffle to take alternate operations from the vector. 11726 // Also, gather up main and alt scalar ops to propagate IR flags to 11727 // each vector operation. 11728 ValueList OpScalars, AltScalars; 11729 SmallVector<int> Mask; 11730 E->buildAltOpShuffleMask( 11731 [E, this](Instruction *I) { 11732 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode"); 11733 return isAlternateInstruction(I, E->getMainOp(), E->getAltOp(), 11734 *TLI); 11735 }, 11736 Mask, &OpScalars, &AltScalars); 11737 11738 propagateIRFlags(V0, OpScalars); 11739 propagateIRFlags(V1, AltScalars); 11740 11741 Value *V = Builder.CreateShuffleVector(V0, V1, Mask); 11742 if (auto *I = dyn_cast<Instruction>(V)) { 11743 V = propagateMetadata(I, E->Scalars); 11744 GatherShuffleExtractSeq.insert(I); 11745 CSEBlocks.insert(I->getParent()); 11746 } 11747 11748 if (V->getType() != VecTy && !isa<CmpInst>(VL0)) 11749 V = Builder.CreateIntCast( 11750 V, FixedVectorType::get(ScalarTy, E->getVectorFactor()), IsSigned); 11751 E->VectorizedValue = V; 11752 ++NumVectorInstructions; 11753 11754 return V; 11755 } 11756 default: 11757 llvm_unreachable("unknown inst"); 11758 } 11759 return nullptr; 11760 } 11761 11762 Value *BoUpSLP::vectorizeTree() { 11763 ExtraValueToDebugLocsMap ExternallyUsedValues; 11764 SmallVector<std::pair<Value *, Value *>> ReplacedExternals; 11765 return vectorizeTree(ExternallyUsedValues, ReplacedExternals); 11766 } 11767 11768 namespace { 11769 /// Data type for handling buildvector sequences with the reused scalars from 11770 /// other tree entries. 11771 struct ShuffledInsertData { 11772 /// List of insertelements to be replaced by shuffles. 11773 SmallVector<InsertElementInst *> InsertElements; 11774 /// The parent vectors and shuffle mask for the given list of inserts. 11775 MapVector<Value *, SmallVector<int>> ValueMasks; 11776 }; 11777 } // namespace 11778 11779 Value *BoUpSLP::vectorizeTree( 11780 const ExtraValueToDebugLocsMap &ExternallyUsedValues, 11781 SmallVectorImpl<std::pair<Value *, Value *>> &ReplacedExternals, 11782 Instruction *ReductionRoot) { 11783 // All blocks must be scheduled before any instructions are inserted. 11784 for (auto &BSIter : BlocksSchedules) { 11785 scheduleBlock(BSIter.second.get()); 11786 } 11787 // Clean Entry-to-LastInstruction table. It can be affected after scheduling, 11788 // need to rebuild it. 11789 EntryToLastInstruction.clear(); 11790 11791 if (ReductionRoot) 11792 Builder.SetInsertPoint(ReductionRoot->getParent(), 11793 ReductionRoot->getIterator()); 11794 else 11795 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin()); 11796 11797 // Postpone emission of PHIs operands to avoid cyclic dependencies issues. 
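  // The first pass below creates the vector PHIs without incoming values; the
  // loop that follows fills in the incoming vectors in a second pass, which
  // breaks cycles between mutually dependent PHI nodes.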
  (void)vectorizeTree(VectorizableTree[0].get(), /*PostponedPHIs=*/true);
  for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree)
    if (TE->State == TreeEntry::Vectorize &&
        TE->getOpcode() == Instruction::PHI && !TE->isAltShuffle() &&
        TE->VectorizedValue)
      (void)vectorizeTree(TE.get(), /*PostponedPHIs=*/false);
  // Run through the list of postponed gathers and emit them, replacing the
  // temporarily emitted allocas with actual vector instructions.
  ArrayRef<const TreeEntry *> PostponedNodes = PostponedGathers.getArrayRef();
  DenseMap<Value *, SmallVector<TreeEntry *>> PostponedValues;
  for (const TreeEntry *E : PostponedNodes) {
    auto *TE = const_cast<TreeEntry *>(E);
    if (auto *VecTE = getTreeEntry(TE->Scalars.front()))
      if (VecTE->isSame(TE->UserTreeIndices.front().UserTE->getOperand(
              TE->UserTreeIndices.front().EdgeIdx)))
        // Found a gather node which is exactly the same as one of the
        // vectorized nodes. It may happen after reordering.
        continue;
    auto *PrevVec = cast<Instruction>(TE->VectorizedValue);
    TE->VectorizedValue = nullptr;
    auto *UserI =
        cast<Instruction>(TE->UserTreeIndices.front().UserTE->VectorizedValue);
    // If the user is a PHI node, its vector code has to be inserted right
    // before the block terminator. Since the node was delayed, there were some
    // unresolved dependencies at the moment when the stub instruction was
    // emitted. If any of these dependencies turns out to be an operand of
    // another PHI coming from this same block, the position of the stub
    // instruction becomes invalid, because the source vector that is supposed
    // to feed this gather node was inserted at the end of the block [after the
    // stub instruction]. So we need to adjust the insertion point again to the
    // end of the block.
    if (isa<PHINode>(UserI)) {
      // Insert before all users.
      Instruction *InsertPt = PrevVec->getParent()->getTerminator();
      for (User *U : PrevVec->users()) {
        if (U == UserI)
          continue;
        auto *UI = dyn_cast<Instruction>(U);
        if (!UI || isa<PHINode>(UI) || UI->getParent() != InsertPt->getParent())
          continue;
        if (UI->comesBefore(InsertPt))
          InsertPt = UI;
      }
      Builder.SetInsertPoint(InsertPt);
    } else {
      Builder.SetInsertPoint(PrevVec);
    }
    Builder.SetCurrentDebugLocation(UserI->getDebugLoc());
    Value *Vec = vectorizeTree(TE, /*PostponedPHIs=*/false);
    PrevVec->replaceAllUsesWith(Vec);
    PostponedValues.try_emplace(Vec).first->second.push_back(TE);
    // Replace the stub vector node, if it was already used for one of the
    // buildvector nodes.
    auto It = PostponedValues.find(PrevVec);
    if (It != PostponedValues.end()) {
      for (TreeEntry *VTE : It->getSecond())
        VTE->VectorizedValue = Vec;
    }
    eraseInstruction(PrevVec);
  }

  LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
                    << " values.\n");

  SmallVector<ShuffledInsertData> ShuffledInserts;
  // Maps a vector instruction to the original insertelement instruction.
  DenseMap<Value *, InsertElementInst *> VectorToInsertElement;
  // Maps an extracted Scalar to the corresponding extractelement instruction
  // in the basic block. Only one extractelement per block should be emitted.
11866 DenseMap<Value *, DenseMap<BasicBlock *, Instruction *>> ScalarToEEs; 11867 SmallDenseSet<Value *, 4> UsedInserts; 11868 DenseMap<Value *, Value *> VectorCasts; 11869 SmallDenseSet<Value *, 4> ScalarsWithNullptrUser; 11870 // Extract all of the elements with the external uses. 11871 for (const auto &ExternalUse : ExternalUses) { 11872 Value *Scalar = ExternalUse.Scalar; 11873 llvm::User *User = ExternalUse.User; 11874 11875 // Skip users that we already RAUW. This happens when one instruction 11876 // has multiple uses of the same value. 11877 if (User && !is_contained(Scalar->users(), User)) 11878 continue; 11879 TreeEntry *E = getTreeEntry(Scalar); 11880 assert(E && "Invalid scalar"); 11881 assert(E->State != TreeEntry::NeedToGather && 11882 "Extracting from a gather list"); 11883 // Non-instruction pointers are not deleted, just skip them. 11884 if (E->getOpcode() == Instruction::GetElementPtr && 11885 !isa<GetElementPtrInst>(Scalar)) 11886 continue; 11887 11888 Value *Vec = E->VectorizedValue; 11889 assert(Vec && "Can't find vectorizable value"); 11890 11891 Value *Lane = Builder.getInt32(ExternalUse.Lane); 11892 auto ExtractAndExtendIfNeeded = [&](Value *Vec) { 11893 if (Scalar->getType() != Vec->getType()) { 11894 Value *Ex = nullptr; 11895 auto It = ScalarToEEs.find(Scalar); 11896 if (It != ScalarToEEs.end()) { 11897 // No need to emit many extracts, just move the only one in the 11898 // current block. 11899 auto EEIt = It->second.find(Builder.GetInsertBlock()); 11900 if (EEIt != It->second.end()) { 11901 Instruction *I = EEIt->second; 11902 if (Builder.GetInsertPoint() != Builder.GetInsertBlock()->end() && 11903 Builder.GetInsertPoint()->comesBefore(I)) 11904 I->moveBefore(*Builder.GetInsertPoint()->getParent(), 11905 Builder.GetInsertPoint()); 11906 Ex = I; 11907 } 11908 } 11909 if (!Ex) { 11910 // "Reuse" the existing extract to improve final codegen. 11911 if (auto *ES = dyn_cast<ExtractElementInst>(Scalar)) { 11912 Value *V = ES->getVectorOperand(); 11913 if (const TreeEntry *ETE = getTreeEntry(V)) 11914 V = ETE->VectorizedValue; 11915 Ex = Builder.CreateExtractElement(V, ES->getIndexOperand()); 11916 } else { 11917 Ex = Builder.CreateExtractElement(Vec, Lane); 11918 } 11919 if (auto *I = dyn_cast<Instruction>(Ex)) 11920 ScalarToEEs[Scalar].try_emplace(Builder.GetInsertBlock(), I); 11921 } 11922 // The then branch of the previous if may produce constants, since 0 11923 // operand might be a constant. 11924 if (auto *ExI = dyn_cast<Instruction>(Ex)) { 11925 GatherShuffleExtractSeq.insert(ExI); 11926 CSEBlocks.insert(ExI->getParent()); 11927 } 11928 // If necessary, sign-extend or zero-extend ScalarRoot 11929 // to the larger type. 11930 if (Scalar->getType() != Ex->getType()) 11931 return Builder.CreateIntCast(Ex, Scalar->getType(), 11932 MinBWs.find(E)->second.second); 11933 return Ex; 11934 } 11935 assert(isa<FixedVectorType>(Scalar->getType()) && 11936 isa<InsertElementInst>(Scalar) && 11937 "In-tree scalar of vector type is not insertelement?"); 11938 auto *IE = cast<InsertElementInst>(Scalar); 11939 VectorToInsertElement.try_emplace(Vec, IE); 11940 return Vec; 11941 }; 11942 // If User == nullptr, the Scalar remains as scalar in vectorized 11943 // instructions or is used as extra arg. Generate ExtractElement instruction 11944 // and update the record for this scalar in ExternallyUsedValues. 
11945 if (!User) { 11946 if (!ScalarsWithNullptrUser.insert(Scalar).second) 11947 continue; 11948 assert((ExternallyUsedValues.count(Scalar) || 11949 any_of(Scalar->users(), 11950 [&](llvm::User *U) { 11951 TreeEntry *UseEntry = getTreeEntry(U); 11952 return UseEntry && 11953 UseEntry->State == TreeEntry::Vectorize && 11954 E->State == TreeEntry::Vectorize && 11955 doesInTreeUserNeedToExtract( 11956 Scalar, 11957 cast<Instruction>(UseEntry->Scalars.front()), 11958 TLI); 11959 })) && 11960 "Scalar with nullptr User must be registered in " 11961 "ExternallyUsedValues map or remain as scalar in vectorized " 11962 "instructions"); 11963 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 11964 if (auto *PHI = dyn_cast<PHINode>(VecI)) 11965 Builder.SetInsertPoint(PHI->getParent(), 11966 PHI->getParent()->getFirstNonPHIIt()); 11967 else 11968 Builder.SetInsertPoint(VecI->getParent(), 11969 std::next(VecI->getIterator())); 11970 } else { 11971 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin()); 11972 } 11973 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 11974 // Required to update internally referenced instructions. 11975 Scalar->replaceAllUsesWith(NewInst); 11976 ReplacedExternals.emplace_back(Scalar, NewInst); 11977 continue; 11978 } 11979 11980 if (auto *VU = dyn_cast<InsertElementInst>(User)) { 11981 // Skip if the scalar is another vector op or Vec is not an instruction. 11982 if (!Scalar->getType()->isVectorTy() && isa<Instruction>(Vec)) { 11983 if (auto *FTy = dyn_cast<FixedVectorType>(User->getType())) { 11984 if (!UsedInserts.insert(VU).second) 11985 continue; 11986 // Need to use original vector, if the root is truncated. 11987 auto BWIt = MinBWs.find(E); 11988 if (BWIt != MinBWs.end() && Vec->getType() != VU->getType()) { 11989 auto VecIt = VectorCasts.find(Scalar); 11990 if (VecIt == VectorCasts.end()) { 11991 IRBuilder<>::InsertPointGuard Guard(Builder); 11992 if (auto *IVec = dyn_cast<Instruction>(Vec)) 11993 Builder.SetInsertPoint(IVec->getNextNonDebugInstruction()); 11994 Vec = Builder.CreateIntCast(Vec, VU->getType(), 11995 BWIt->second.second); 11996 VectorCasts.try_emplace(Scalar, Vec); 11997 } else { 11998 Vec = VecIt->second; 11999 } 12000 } 12001 12002 std::optional<unsigned> InsertIdx = getInsertIndex(VU); 12003 if (InsertIdx) { 12004 auto *It = 12005 find_if(ShuffledInserts, [VU](const ShuffledInsertData &Data) { 12006 // Checks if 2 insertelements are from the same buildvector. 12007 InsertElementInst *VecInsert = Data.InsertElements.front(); 12008 return areTwoInsertFromSameBuildVector( 12009 VU, VecInsert, 12010 [](InsertElementInst *II) { return II->getOperand(0); }); 12011 }); 12012 unsigned Idx = *InsertIdx; 12013 if (It == ShuffledInserts.end()) { 12014 (void)ShuffledInserts.emplace_back(); 12015 It = std::next(ShuffledInserts.begin(), 12016 ShuffledInserts.size() - 1); 12017 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec]; 12018 if (Mask.empty()) 12019 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 12020 // Find the insertvector, vectorized in tree, if any. 12021 Value *Base = VU; 12022 while (auto *IEBase = dyn_cast<InsertElementInst>(Base)) { 12023 if (IEBase != User && 12024 (!IEBase->hasOneUse() || 12025 getInsertIndex(IEBase).value_or(Idx) == Idx)) 12026 break; 12027 // Build the mask for the vectorized insertelement instructions. 
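              // If the insertelement chain is itself a vectorized tree entry,
              // the loop below records identity entries (lane -> same lane)
              // for the lanes that chain defines, so the final shuffle takes
              // them from the vectorized base vector.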
12028 if (const TreeEntry *E = getTreeEntry(IEBase)) { 12029 do { 12030 IEBase = cast<InsertElementInst>(Base); 12031 int IEIdx = *getInsertIndex(IEBase); 12032 assert(Mask[Idx] == PoisonMaskElem && 12033 "InsertElementInstruction used already."); 12034 Mask[IEIdx] = IEIdx; 12035 Base = IEBase->getOperand(0); 12036 } while (E == getTreeEntry(Base)); 12037 break; 12038 } 12039 Base = cast<InsertElementInst>(Base)->getOperand(0); 12040 // After the vectorization the def-use chain has changed, need 12041 // to look through original insertelement instructions, if they 12042 // get replaced by vector instructions. 12043 auto It = VectorToInsertElement.find(Base); 12044 if (It != VectorToInsertElement.end()) 12045 Base = It->second; 12046 } 12047 } 12048 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec]; 12049 if (Mask.empty()) 12050 Mask.assign(FTy->getNumElements(), PoisonMaskElem); 12051 Mask[Idx] = ExternalUse.Lane; 12052 It->InsertElements.push_back(cast<InsertElementInst>(User)); 12053 continue; 12054 } 12055 } 12056 } 12057 } 12058 12059 // Generate extracts for out-of-tree users. 12060 // Find the insertion point for the extractelement lane. 12061 if (auto *VecI = dyn_cast<Instruction>(Vec)) { 12062 if (PHINode *PH = dyn_cast<PHINode>(User)) { 12063 for (unsigned I : seq<unsigned>(0, PH->getNumIncomingValues())) { 12064 if (PH->getIncomingValue(I) == Scalar) { 12065 Instruction *IncomingTerminator = 12066 PH->getIncomingBlock(I)->getTerminator(); 12067 if (isa<CatchSwitchInst>(IncomingTerminator)) { 12068 Builder.SetInsertPoint(VecI->getParent(), 12069 std::next(VecI->getIterator())); 12070 } else { 12071 Builder.SetInsertPoint(PH->getIncomingBlock(I)->getTerminator()); 12072 } 12073 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 12074 PH->setOperand(I, NewInst); 12075 } 12076 } 12077 } else { 12078 Builder.SetInsertPoint(cast<Instruction>(User)); 12079 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 12080 User->replaceUsesOfWith(Scalar, NewInst); 12081 } 12082 } else { 12083 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin()); 12084 Value *NewInst = ExtractAndExtendIfNeeded(Vec); 12085 User->replaceUsesOfWith(Scalar, NewInst); 12086 } 12087 12088 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n"); 12089 } 12090 12091 auto CreateShuffle = [&](Value *V1, Value *V2, ArrayRef<int> Mask) { 12092 SmallVector<int> CombinedMask1(Mask.size(), PoisonMaskElem); 12093 SmallVector<int> CombinedMask2(Mask.size(), PoisonMaskElem); 12094 int VF = cast<FixedVectorType>(V1->getType())->getNumElements(); 12095 for (int I = 0, E = Mask.size(); I < E; ++I) { 12096 if (Mask[I] < VF) 12097 CombinedMask1[I] = Mask[I]; 12098 else 12099 CombinedMask2[I] = Mask[I] - VF; 12100 } 12101 ShuffleInstructionBuilder ShuffleBuilder(Builder, *this); 12102 ShuffleBuilder.add(V1, CombinedMask1); 12103 if (V2) 12104 ShuffleBuilder.add(V2, CombinedMask2); 12105 return ShuffleBuilder.finalize(std::nullopt); 12106 }; 12107 12108 auto &&ResizeToVF = [&CreateShuffle](Value *Vec, ArrayRef<int> Mask, 12109 bool ForSingleMask) { 12110 unsigned VF = Mask.size(); 12111 unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements(); 12112 if (VF != VecVF) { 12113 if (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); })) { 12114 Vec = CreateShuffle(Vec, nullptr, Mask); 12115 return std::make_pair(Vec, true); 12116 } 12117 if (!ForSingleMask) { 12118 SmallVector<int> ResizeMask(VF, PoisonMaskElem); 12119 for (unsigned I = 0; I < VF; ++I) { 12120 if (Mask[I] != PoisonMaskElem) 12121 
ResizeMask[Mask[I]] = Mask[I]; 12122 } 12123 Vec = CreateShuffle(Vec, nullptr, ResizeMask); 12124 } 12125 } 12126 12127 return std::make_pair(Vec, false); 12128 }; 12129 // Perform shuffling of the vectorize tree entries for better handling of 12130 // external extracts. 12131 for (int I = 0, E = ShuffledInserts.size(); I < E; ++I) { 12132 // Find the first and the last instruction in the list of insertelements. 12133 sort(ShuffledInserts[I].InsertElements, isFirstInsertElement); 12134 InsertElementInst *FirstInsert = ShuffledInserts[I].InsertElements.front(); 12135 InsertElementInst *LastInsert = ShuffledInserts[I].InsertElements.back(); 12136 Builder.SetInsertPoint(LastInsert); 12137 auto Vector = ShuffledInserts[I].ValueMasks.takeVector(); 12138 Value *NewInst = performExtractsShuffleAction<Value>( 12139 MutableArrayRef(Vector.data(), Vector.size()), 12140 FirstInsert->getOperand(0), 12141 [](Value *Vec) { 12142 return cast<VectorType>(Vec->getType()) 12143 ->getElementCount() 12144 .getKnownMinValue(); 12145 }, 12146 ResizeToVF, 12147 [FirstInsert, &CreateShuffle](ArrayRef<int> Mask, 12148 ArrayRef<Value *> Vals) { 12149 assert((Vals.size() == 1 || Vals.size() == 2) && 12150 "Expected exactly 1 or 2 input values."); 12151 if (Vals.size() == 1) { 12152 // Do not create shuffle if the mask is a simple identity 12153 // non-resizing mask. 12154 if (Mask.size() != cast<FixedVectorType>(Vals.front()->getType()) 12155 ->getNumElements() || 12156 !ShuffleVectorInst::isIdentityMask(Mask, Mask.size())) 12157 return CreateShuffle(Vals.front(), nullptr, Mask); 12158 return Vals.front(); 12159 } 12160 return CreateShuffle(Vals.front() ? Vals.front() 12161 : FirstInsert->getOperand(0), 12162 Vals.back(), Mask); 12163 }); 12164 auto It = ShuffledInserts[I].InsertElements.rbegin(); 12165 // Rebuild buildvector chain. 12166 InsertElementInst *II = nullptr; 12167 if (It != ShuffledInserts[I].InsertElements.rend()) 12168 II = *It; 12169 SmallVector<Instruction *> Inserts; 12170 while (It != ShuffledInserts[I].InsertElements.rend()) { 12171 assert(II && "Must be an insertelement instruction."); 12172 if (*It == II) 12173 ++It; 12174 else 12175 Inserts.push_back(cast<Instruction>(II)); 12176 II = dyn_cast<InsertElementInst>(II->getOperand(0)); 12177 } 12178 for (Instruction *II : reverse(Inserts)) { 12179 II->replaceUsesOfWith(II->getOperand(0), NewInst); 12180 if (auto *NewI = dyn_cast<Instruction>(NewInst)) 12181 if (II->getParent() == NewI->getParent() && II->comesBefore(NewI)) 12182 II->moveAfter(NewI); 12183 NewInst = II; 12184 } 12185 LastInsert->replaceAllUsesWith(NewInst); 12186 for (InsertElementInst *IE : reverse(ShuffledInserts[I].InsertElements)) { 12187 IE->replaceUsesOfWith(IE->getOperand(0), 12188 PoisonValue::get(IE->getOperand(0)->getType())); 12189 IE->replaceUsesOfWith(IE->getOperand(1), 12190 PoisonValue::get(IE->getOperand(1)->getType())); 12191 eraseInstruction(IE); 12192 } 12193 CSEBlocks.insert(LastInsert->getParent()); 12194 } 12195 12196 SmallVector<Instruction *> RemovedInsts; 12197 // For each vectorized value: 12198 for (auto &TEPtr : VectorizableTree) { 12199 TreeEntry *Entry = TEPtr.get(); 12200 12201 // No need to handle users of gathered values. 
12202 if (Entry->State == TreeEntry::NeedToGather) 12203 continue; 12204 12205 assert(Entry->VectorizedValue && "Can't find vectorizable value"); 12206 12207 // For each lane: 12208 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) { 12209 Value *Scalar = Entry->Scalars[Lane]; 12210 12211 if (Entry->getOpcode() == Instruction::GetElementPtr && 12212 !isa<GetElementPtrInst>(Scalar)) 12213 continue; 12214 #ifndef NDEBUG 12215 Type *Ty = Scalar->getType(); 12216 if (!Ty->isVoidTy()) { 12217 for (User *U : Scalar->users()) { 12218 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n"); 12219 12220 // It is legal to delete users in the ignorelist. 12221 assert((getTreeEntry(U) || 12222 (UserIgnoreList && UserIgnoreList->contains(U)) || 12223 (isa_and_nonnull<Instruction>(U) && 12224 isDeleted(cast<Instruction>(U)))) && 12225 "Deleting out-of-tree value"); 12226 } 12227 } 12228 #endif 12229 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n"); 12230 eraseInstruction(cast<Instruction>(Scalar)); 12231 // Retain to-be-deleted instructions for some debug-info 12232 // bookkeeping. NOTE: eraseInstruction only marks the instruction for 12233 // deletion - instructions are not deleted until later. 12234 RemovedInsts.push_back(cast<Instruction>(Scalar)); 12235 } 12236 } 12237 12238 // Merge the DIAssignIDs from the about-to-be-deleted instructions into the 12239 // new vector instruction. 12240 if (auto *V = dyn_cast<Instruction>(VectorizableTree[0]->VectorizedValue)) 12241 V->mergeDIAssignID(RemovedInsts); 12242 12243 Builder.ClearInsertionPoint(); 12244 InstrElementSize.clear(); 12245 12246 return VectorizableTree[0]->VectorizedValue; 12247 } 12248 12249 void BoUpSLP::optimizeGatherSequence() { 12250 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherShuffleExtractSeq.size() 12251 << " gather sequences instructions.\n"); 12252 // LICM InsertElementInst sequences. 12253 for (Instruction *I : GatherShuffleExtractSeq) { 12254 if (isDeleted(I)) 12255 continue; 12256 12257 // Check if this block is inside a loop. 12258 Loop *L = LI->getLoopFor(I->getParent()); 12259 if (!L) 12260 continue; 12261 12262 // Check if it has a preheader. 12263 BasicBlock *PreHeader = L->getLoopPreheader(); 12264 if (!PreHeader) 12265 continue; 12266 12267 // If the vector or the element that we insert into it are 12268 // instructions that are defined in this basic block then we can't 12269 // hoist this instruction. 12270 if (any_of(I->operands(), [L](Value *V) { 12271 auto *OpI = dyn_cast<Instruction>(V); 12272 return OpI && L->contains(OpI); 12273 })) 12274 continue; 12275 12276 // We can hoist this instruction. Move it to the pre-header. 12277 I->moveBefore(PreHeader->getTerminator()); 12278 CSEBlocks.insert(PreHeader); 12279 } 12280 12281 // Make a list of all reachable blocks in our CSE queue. 12282 SmallVector<const DomTreeNode *, 8> CSEWorkList; 12283 CSEWorkList.reserve(CSEBlocks.size()); 12284 for (BasicBlock *BB : CSEBlocks) 12285 if (DomTreeNode *N = DT->getNode(BB)) { 12286 assert(DT->isReachableFromEntry(N)); 12287 CSEWorkList.push_back(N); 12288 } 12289 12290 // Sort blocks by domination. This ensures we visit a block after all blocks 12291 // dominating it are visited. 
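  // The DFS-in numbers come from the dominator tree (DT->updateDFSNumbers()
  // has already been called), so a dominating block always has a smaller
  // number than the blocks it dominates.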
12292 llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) { 12293 assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) && 12294 "Different nodes should have different DFS numbers"); 12295 return A->getDFSNumIn() < B->getDFSNumIn(); 12296 }); 12297 12298 // Less defined shuffles can be replaced by the more defined copies. 12299 // Between two shuffles one is less defined if it has the same vector operands 12300 // and its mask indeces are the same as in the first one or undefs. E.g. 12301 // shuffle %0, poison, <0, 0, 0, undef> is less defined than shuffle %0, 12302 // poison, <0, 0, 0, 0>. 12303 auto &&IsIdenticalOrLessDefined = [this](Instruction *I1, Instruction *I2, 12304 SmallVectorImpl<int> &NewMask) { 12305 if (I1->getType() != I2->getType()) 12306 return false; 12307 auto *SI1 = dyn_cast<ShuffleVectorInst>(I1); 12308 auto *SI2 = dyn_cast<ShuffleVectorInst>(I2); 12309 if (!SI1 || !SI2) 12310 return I1->isIdenticalTo(I2); 12311 if (SI1->isIdenticalTo(SI2)) 12312 return true; 12313 for (int I = 0, E = SI1->getNumOperands(); I < E; ++I) 12314 if (SI1->getOperand(I) != SI2->getOperand(I)) 12315 return false; 12316 // Check if the second instruction is more defined than the first one. 12317 NewMask.assign(SI2->getShuffleMask().begin(), SI2->getShuffleMask().end()); 12318 ArrayRef<int> SM1 = SI1->getShuffleMask(); 12319 // Count trailing undefs in the mask to check the final number of used 12320 // registers. 12321 unsigned LastUndefsCnt = 0; 12322 for (int I = 0, E = NewMask.size(); I < E; ++I) { 12323 if (SM1[I] == PoisonMaskElem) 12324 ++LastUndefsCnt; 12325 else 12326 LastUndefsCnt = 0; 12327 if (NewMask[I] != PoisonMaskElem && SM1[I] != PoisonMaskElem && 12328 NewMask[I] != SM1[I]) 12329 return false; 12330 if (NewMask[I] == PoisonMaskElem) 12331 NewMask[I] = SM1[I]; 12332 } 12333 // Check if the last undefs actually change the final number of used vector 12334 // registers. 12335 return SM1.size() - LastUndefsCnt > 1 && 12336 TTI->getNumberOfParts(SI1->getType()) == 12337 TTI->getNumberOfParts( 12338 FixedVectorType::get(SI1->getType()->getElementType(), 12339 SM1.size() - LastUndefsCnt)); 12340 }; 12341 // Perform O(N^2) search over the gather/shuffle sequences and merge identical 12342 // instructions. TODO: We can further optimize this scan if we split the 12343 // instructions into different buckets based on the insert lane. 12344 SmallVector<Instruction *, 16> Visited; 12345 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) { 12346 assert(*I && 12347 (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) && 12348 "Worklist not sorted properly!"); 12349 BasicBlock *BB = (*I)->getBlock(); 12350 // For all instructions in blocks containing gather sequences: 12351 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 12352 if (isDeleted(&In)) 12353 continue; 12354 if (!isa<InsertElementInst, ExtractElementInst, ShuffleVectorInst>(&In) && 12355 !GatherShuffleExtractSeq.contains(&In)) 12356 continue; 12357 12358 // Check if we can replace this instruction with any of the 12359 // visited instructions. 
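      // Two directions are possible: replace In with an earlier, identical or
      // more defined instruction V that dominates it, or replace the earlier
      // gather shuffle V with In when In's block dominates V's block. In both
      // cases the surviving shuffle may receive the merged, more defined mask.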
12360 bool Replaced = false; 12361 for (Instruction *&V : Visited) { 12362 SmallVector<int> NewMask; 12363 if (IsIdenticalOrLessDefined(&In, V, NewMask) && 12364 DT->dominates(V->getParent(), In.getParent())) { 12365 In.replaceAllUsesWith(V); 12366 eraseInstruction(&In); 12367 if (auto *SI = dyn_cast<ShuffleVectorInst>(V)) 12368 if (!NewMask.empty()) 12369 SI->setShuffleMask(NewMask); 12370 Replaced = true; 12371 break; 12372 } 12373 if (isa<ShuffleVectorInst>(In) && isa<ShuffleVectorInst>(V) && 12374 GatherShuffleExtractSeq.contains(V) && 12375 IsIdenticalOrLessDefined(V, &In, NewMask) && 12376 DT->dominates(In.getParent(), V->getParent())) { 12377 In.moveAfter(V); 12378 V->replaceAllUsesWith(&In); 12379 eraseInstruction(V); 12380 if (auto *SI = dyn_cast<ShuffleVectorInst>(&In)) 12381 if (!NewMask.empty()) 12382 SI->setShuffleMask(NewMask); 12383 V = &In; 12384 Replaced = true; 12385 break; 12386 } 12387 } 12388 if (!Replaced) { 12389 assert(!is_contained(Visited, &In)); 12390 Visited.push_back(&In); 12391 } 12392 } 12393 } 12394 CSEBlocks.clear(); 12395 GatherShuffleExtractSeq.clear(); 12396 } 12397 12398 BoUpSLP::ScheduleData * 12399 BoUpSLP::BlockScheduling::buildBundle(ArrayRef<Value *> VL) { 12400 ScheduleData *Bundle = nullptr; 12401 ScheduleData *PrevInBundle = nullptr; 12402 for (Value *V : VL) { 12403 if (doesNotNeedToBeScheduled(V)) 12404 continue; 12405 ScheduleData *BundleMember = getScheduleData(V); 12406 assert(BundleMember && 12407 "no ScheduleData for bundle member " 12408 "(maybe not in same basic block)"); 12409 assert(BundleMember->isSchedulingEntity() && 12410 "bundle member already part of other bundle"); 12411 if (PrevInBundle) { 12412 PrevInBundle->NextInBundle = BundleMember; 12413 } else { 12414 Bundle = BundleMember; 12415 } 12416 12417 // Group the instructions to a bundle. 12418 BundleMember->FirstInBundle = Bundle; 12419 PrevInBundle = BundleMember; 12420 } 12421 assert(Bundle && "Failed to find schedule bundle"); 12422 return Bundle; 12423 } 12424 12425 // Groups the instructions to a bundle (which is then a single scheduling entity) 12426 // and schedules instructions until the bundle gets ready. 12427 std::optional<BoUpSLP::ScheduleData *> 12428 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP, 12429 const InstructionsState &S) { 12430 // No need to schedule PHIs, insertelement, extractelement and extractvalue 12431 // instructions. 12432 if (isa<PHINode>(S.OpValue) || isVectorLikeInstWithConstOps(S.OpValue) || 12433 doesNotNeedToSchedule(VL)) 12434 return nullptr; 12435 12436 // Initialize the instruction bundle. 12437 Instruction *OldScheduleEnd = ScheduleEnd; 12438 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n"); 12439 12440 auto TryScheduleBundleImpl = [this, OldScheduleEnd, SLP](bool ReSchedule, 12441 ScheduleData *Bundle) { 12442 // The scheduling region got new instructions at the lower end (or it is a 12443 // new region for the first bundle). This makes it necessary to 12444 // recalculate all dependencies. 12445 // It is seldom that this needs to be done a second time after adding the 12446 // initial bundle to the region. 
    if (ScheduleEnd != OldScheduleEnd) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode())
        doForAllOpcodes(I, [](ScheduleData *SD) { SD->clearDependencies(); });
      ReSchedule = true;
    }
    if (Bundle) {
      LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle
                        << " in block " << BB->getName() << "\n");
      calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP);
    }

    if (ReSchedule) {
      resetSchedule();
      initialFillReadyList(ReadyInsts);
    }

    // Now try to schedule the new bundle or (if no bundle) just calculate
    // dependencies. Once the bundle is "ready", there are no cyclic
    // dependencies and we can schedule it. Note that it's important that we
    // don't "schedule" the bundle yet (see cancelScheduling).
    while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) &&
           !ReadyInsts.empty()) {
      ScheduleData *Picked = ReadyInsts.pop_back_val();
      assert(Picked->isSchedulingEntity() && Picked->isReady() &&
             "must be ready to schedule");
      schedule(Picked, ReadyInsts);
    }
  };

  // Make sure that the scheduling region contains all
  // instructions of the bundle.
  for (Value *V : VL) {
    if (doesNotNeedToBeScheduled(V))
      continue;
    if (!extendSchedulingRegion(V, S)) {
      // The scheduling region may already have got new instructions at the
      // lower end (or this may be a new region for the first bundle), which
      // makes it necessary to recalculate all dependencies before bailing out.
      // Otherwise the compiler may crash trying to calculate dependencies
      // incorrectly and emit instructions in the wrong order during the actual
      // scheduling.
      TryScheduleBundleImpl(/*ReSchedule=*/false, nullptr);
      return std::nullopt;
    }
  }

  bool ReSchedule = false;
  for (Value *V : VL) {
    if (doesNotNeedToBeScheduled(V))
      continue;
    ScheduleData *BundleMember = getScheduleData(V);
    assert(BundleMember &&
           "no ScheduleData for bundle member (maybe not in same basic block)");

    // Make sure we don't leave the pieces of the bundle in the ready list when
    // the whole bundle might not be ready.
    ReadyInsts.remove(BundleMember);

    if (!BundleMember->IsScheduled)
      continue;
    // A bundle member was scheduled as a single instruction before and now
    // needs to be scheduled as part of the bundle. We just get rid of the
    // existing schedule.
12510 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember 12511 << " was already scheduled\n"); 12512 ReSchedule = true; 12513 } 12514 12515 auto *Bundle = buildBundle(VL); 12516 TryScheduleBundleImpl(ReSchedule, Bundle); 12517 if (!Bundle->isReady()) { 12518 cancelScheduling(VL, S.OpValue); 12519 return std::nullopt; 12520 } 12521 return Bundle; 12522 } 12523 12524 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL, 12525 Value *OpValue) { 12526 if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue) || 12527 doesNotNeedToSchedule(VL)) 12528 return; 12529 12530 if (doesNotNeedToBeScheduled(OpValue)) 12531 OpValue = *find_if_not(VL, doesNotNeedToBeScheduled); 12532 ScheduleData *Bundle = getScheduleData(OpValue); 12533 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n"); 12534 assert(!Bundle->IsScheduled && 12535 "Can't cancel bundle which is already scheduled"); 12536 assert(Bundle->isSchedulingEntity() && 12537 (Bundle->isPartOfBundle() || needToScheduleSingleInstruction(VL)) && 12538 "tried to unbundle something which is not a bundle"); 12539 12540 // Remove the bundle from the ready list. 12541 if (Bundle->isReady()) 12542 ReadyInsts.remove(Bundle); 12543 12544 // Un-bundle: make single instructions out of the bundle. 12545 ScheduleData *BundleMember = Bundle; 12546 while (BundleMember) { 12547 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links"); 12548 BundleMember->FirstInBundle = BundleMember; 12549 ScheduleData *Next = BundleMember->NextInBundle; 12550 BundleMember->NextInBundle = nullptr; 12551 BundleMember->TE = nullptr; 12552 if (BundleMember->unscheduledDepsInBundle() == 0) { 12553 ReadyInsts.insert(BundleMember); 12554 } 12555 BundleMember = Next; 12556 } 12557 } 12558 12559 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() { 12560 // Allocate a new ScheduleData for the instruction. 12561 if (ChunkPos >= ChunkSize) { 12562 ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize)); 12563 ChunkPos = 0; 12564 } 12565 return &(ScheduleDataChunks.back()[ChunkPos++]); 12566 } 12567 12568 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V, 12569 const InstructionsState &S) { 12570 if (getScheduleData(V, isOneOf(S, V))) 12571 return true; 12572 Instruction *I = dyn_cast<Instruction>(V); 12573 assert(I && "bundle member must be an instruction"); 12574 assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) && 12575 !doesNotNeedToBeScheduled(I) && 12576 "phi nodes/insertelements/extractelements/extractvalues don't need to " 12577 "be scheduled"); 12578 auto &&CheckScheduleForI = [this, &S](Instruction *I) -> bool { 12579 ScheduleData *ISD = getScheduleData(I); 12580 if (!ISD) 12581 return false; 12582 assert(isInSchedulingRegion(ISD) && 12583 "ScheduleData not in scheduling region"); 12584 ScheduleData *SD = allocateScheduleDataChunks(); 12585 SD->Inst = I; 12586 SD->init(SchedulingRegionID, S.OpValue); 12587 ExtraScheduleDataMap[I][S.OpValue] = SD; 12588 return true; 12589 }; 12590 if (CheckScheduleForI(I)) 12591 return true; 12592 if (!ScheduleStart) { 12593 // It's the first instruction in the new region. 
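    // The scheduling region is the half-open range [ScheduleStart,
    // ScheduleEnd); here it initially covers just I.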
12594 initScheduleData(I, I->getNextNode(), nullptr, nullptr); 12595 ScheduleStart = I; 12596 ScheduleEnd = I->getNextNode(); 12597 if (isOneOf(S, I) != I) 12598 CheckScheduleForI(I); 12599 assert(ScheduleEnd && "tried to vectorize a terminator?"); 12600 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n"); 12601 return true; 12602 } 12603 // Search up and down at the same time, because we don't know if the new 12604 // instruction is above or below the existing scheduling region. 12605 // Ignore debug info (and other "AssumeLike" intrinsics) so that's not counted 12606 // against the budget. Otherwise debug info could affect codegen. 12607 BasicBlock::reverse_iterator UpIter = 12608 ++ScheduleStart->getIterator().getReverse(); 12609 BasicBlock::reverse_iterator UpperEnd = BB->rend(); 12610 BasicBlock::iterator DownIter = ScheduleEnd->getIterator(); 12611 BasicBlock::iterator LowerEnd = BB->end(); 12612 auto IsAssumeLikeIntr = [](const Instruction &I) { 12613 if (auto *II = dyn_cast<IntrinsicInst>(&I)) 12614 return II->isAssumeLikeIntrinsic(); 12615 return false; 12616 }; 12617 UpIter = std::find_if_not(UpIter, UpperEnd, IsAssumeLikeIntr); 12618 DownIter = std::find_if_not(DownIter, LowerEnd, IsAssumeLikeIntr); 12619 while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I && 12620 &*DownIter != I) { 12621 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) { 12622 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n"); 12623 return false; 12624 } 12625 12626 ++UpIter; 12627 ++DownIter; 12628 12629 UpIter = std::find_if_not(UpIter, UpperEnd, IsAssumeLikeIntr); 12630 DownIter = std::find_if_not(DownIter, LowerEnd, IsAssumeLikeIntr); 12631 } 12632 if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) { 12633 assert(I->getParent() == ScheduleStart->getParent() && 12634 "Instruction is in wrong basic block."); 12635 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion); 12636 ScheduleStart = I; 12637 if (isOneOf(S, I) != I) 12638 CheckScheduleForI(I); 12639 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I 12640 << "\n"); 12641 return true; 12642 } 12643 assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) && 12644 "Expected to reach top of the basic block or instruction down the " 12645 "lower end."); 12646 assert(I->getParent() == ScheduleEnd->getParent() && 12647 "Instruction is in wrong basic block."); 12648 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion, 12649 nullptr); 12650 ScheduleEnd = I->getNextNode(); 12651 if (isOneOf(S, I) != I) 12652 CheckScheduleForI(I); 12653 assert(ScheduleEnd && "tried to vectorize a terminator?"); 12654 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n"); 12655 return true; 12656 } 12657 12658 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI, 12659 Instruction *ToI, 12660 ScheduleData *PrevLoadStore, 12661 ScheduleData *NextLoadStore) { 12662 ScheduleData *CurrentLoadStore = PrevLoadStore; 12663 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) { 12664 // No need to allocate data for non-schedulable instructions. 
12665 if (doesNotNeedToBeScheduled(I)) 12666 continue; 12667 ScheduleData *SD = ScheduleDataMap.lookup(I); 12668 if (!SD) { 12669 SD = allocateScheduleDataChunks(); 12670 ScheduleDataMap[I] = SD; 12671 SD->Inst = I; 12672 } 12673 assert(!isInSchedulingRegion(SD) && 12674 "new ScheduleData already in scheduling region"); 12675 SD->init(SchedulingRegionID, I); 12676 12677 if (I->mayReadOrWriteMemory() && 12678 (!isa<IntrinsicInst>(I) || 12679 (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect && 12680 cast<IntrinsicInst>(I)->getIntrinsicID() != 12681 Intrinsic::pseudoprobe))) { 12682 // Update the linked list of memory accessing instructions. 12683 if (CurrentLoadStore) { 12684 CurrentLoadStore->NextLoadStore = SD; 12685 } else { 12686 FirstLoadStoreInRegion = SD; 12687 } 12688 CurrentLoadStore = SD; 12689 } 12690 12691 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) || 12692 match(I, m_Intrinsic<Intrinsic::stackrestore>())) 12693 RegionHasStackSave = true; 12694 } 12695 if (NextLoadStore) { 12696 if (CurrentLoadStore) 12697 CurrentLoadStore->NextLoadStore = NextLoadStore; 12698 } else { 12699 LastLoadStoreInRegion = CurrentLoadStore; 12700 } 12701 } 12702 12703 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD, 12704 bool InsertInReadyList, 12705 BoUpSLP *SLP) { 12706 assert(SD->isSchedulingEntity()); 12707 12708 SmallVector<ScheduleData *, 10> WorkList; 12709 WorkList.push_back(SD); 12710 12711 while (!WorkList.empty()) { 12712 ScheduleData *SD = WorkList.pop_back_val(); 12713 for (ScheduleData *BundleMember = SD; BundleMember; 12714 BundleMember = BundleMember->NextInBundle) { 12715 assert(isInSchedulingRegion(BundleMember)); 12716 if (BundleMember->hasValidDependencies()) 12717 continue; 12718 12719 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember 12720 << "\n"); 12721 BundleMember->Dependencies = 0; 12722 BundleMember->resetUnscheduledDeps(); 12723 12724 // Handle def-use chain dependencies. 12725 if (BundleMember->OpValue != BundleMember->Inst) { 12726 if (ScheduleData *UseSD = getScheduleData(BundleMember->Inst)) { 12727 BundleMember->Dependencies++; 12728 ScheduleData *DestBundle = UseSD->FirstInBundle; 12729 if (!DestBundle->IsScheduled) 12730 BundleMember->incrementUnscheduledDeps(1); 12731 if (!DestBundle->hasValidDependencies()) 12732 WorkList.push_back(DestBundle); 12733 } 12734 } else { 12735 for (User *U : BundleMember->Inst->users()) { 12736 if (ScheduleData *UseSD = getScheduleData(cast<Instruction>(U))) { 12737 BundleMember->Dependencies++; 12738 ScheduleData *DestBundle = UseSD->FirstInBundle; 12739 if (!DestBundle->IsScheduled) 12740 BundleMember->incrementUnscheduledDeps(1); 12741 if (!DestBundle->hasValidDependencies()) 12742 WorkList.push_back(DestBundle); 12743 } 12744 } 12745 } 12746 12747 auto MakeControlDependent = [&](Instruction *I) { 12748 auto *DepDest = getScheduleData(I); 12749 assert(DepDest && "must be in schedule window"); 12750 DepDest->ControlDependencies.push_back(BundleMember); 12751 BundleMember->Dependencies++; 12752 ScheduleData *DestBundle = DepDest->FirstInBundle; 12753 if (!DestBundle->IsScheduled) 12754 BundleMember->incrementUnscheduledDeps(1); 12755 if (!DestBundle->hasValidDependencies()) 12756 WorkList.push_back(DestBundle); 12757 }; 12758 12759 // Any instruction which isn't safe to speculate at the beginning of the 12760 // block is control dependend on any early exit or non-willreturn call 12761 // which proceeds it. 
      if (!isGuaranteedToTransferExecutionToSuccessor(BundleMember->Inst)) {
        for (Instruction *I = BundleMember->Inst->getNextNode();
             I != ScheduleEnd; I = I->getNextNode()) {
          if (isSafeToSpeculativelyExecute(I, &*BB->begin(), SLP->AC))
            continue;

          // Add the dependency
          MakeControlDependent(I);

          if (!isGuaranteedToTransferExecutionToSuccessor(I))
            // Everything past here must be control dependent on I.
            break;
        }
      }

      if (RegionHasStackSave) {
        // If we have an inalloca alloca instruction, it needs to be scheduled
        // after any preceding stacksave. We also need to prevent any alloca
        // from reordering above a preceding stackrestore.
        if (match(BundleMember->Inst, m_Intrinsic<Intrinsic::stacksave>()) ||
            match(BundleMember->Inst, m_Intrinsic<Intrinsic::stackrestore>())) {
          for (Instruction *I = BundleMember->Inst->getNextNode();
               I != ScheduleEnd; I = I->getNextNode()) {
            if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
                match(I, m_Intrinsic<Intrinsic::stackrestore>()))
              // Any allocas past here must be control dependent on I, and I
              // must be memory dependent on BundleMember->Inst.
              break;

            if (!isa<AllocaInst>(I))
              continue;

            // Add the dependency
            MakeControlDependent(I);
          }
        }

        // In addition to the cases handled just above, we need to prevent
        // allocas and loads/stores from moving below a stacksave or a
        // stackrestore. Preventing allocas from moving below a stackrestore is
        // currently thought to be merely conservative. Moving loads/stores
        // below a stackrestore can lead to incorrect code.
        if (isa<AllocaInst>(BundleMember->Inst) ||
            BundleMember->Inst->mayReadOrWriteMemory()) {
          for (Instruction *I = BundleMember->Inst->getNextNode();
               I != ScheduleEnd; I = I->getNextNode()) {
            if (!match(I, m_Intrinsic<Intrinsic::stacksave>()) &&
                !match(I, m_Intrinsic<Intrinsic::stackrestore>()))
              continue;

            // Add the dependency
            MakeControlDependent(I);
            break;
          }
        }
      }

      // Handle the memory dependencies (if any).
      ScheduleData *DepDest = BundleMember->NextLoadStore;
      if (!DepDest)
        continue;
      Instruction *SrcInst = BundleMember->Inst;
      assert(SrcInst->mayReadOrWriteMemory() &&
             "NextLoadStore list for non-memory-affecting bundle?");
      MemoryLocation SrcLoc = getLocation(SrcInst);
      bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
      unsigned NumAliased = 0;
      unsigned DistToSrc = 1;

      for (; DepDest; DepDest = DepDest->NextLoadStore) {
        assert(isInSchedulingRegion(DepDest));

        // We have two limits to reduce the complexity:
        // 1) AliasedCheckLimit: It's a small limit to reduce calls to
        //    SLP->isAliased (which is the expensive part in this loop).
        // 2) MaxMemDepDistance: It's for very large blocks and it aborts
        //    the whole loop (even if the loop is fast, it's quadratic).
        //    It's important for the loop break condition (see below) to
        //    check this limit even between two read-only instructions.
12841 if (DistToSrc >= MaxMemDepDistance || 12842 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) && 12843 (NumAliased >= AliasedCheckLimit || 12844 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) { 12845 12846 // We increment the counter only if the locations are aliased 12847 // (instead of counting all alias checks). This gives a better 12848 // balance between reduced runtime and accurate dependencies. 12849 NumAliased++; 12850 12851 DepDest->MemoryDependencies.push_back(BundleMember); 12852 BundleMember->Dependencies++; 12853 ScheduleData *DestBundle = DepDest->FirstInBundle; 12854 if (!DestBundle->IsScheduled) { 12855 BundleMember->incrementUnscheduledDeps(1); 12856 } 12857 if (!DestBundle->hasValidDependencies()) { 12858 WorkList.push_back(DestBundle); 12859 } 12860 } 12861 12862 // Example, explaining the loop break condition: Let's assume our 12863 // starting instruction is i0 and MaxMemDepDistance = 3. 12864 // 12865 // +--------v--v--v 12866 // i0,i1,i2,i3,i4,i5,i6,i7,i8 12867 // +--------^--^--^ 12868 // 12869 // MaxMemDepDistance let us stop alias-checking at i3 and we add 12870 // dependencies from i0 to i3,i4,.. (even if they are not aliased). 12871 // Previously we already added dependencies from i3 to i6,i7,i8 12872 // (because of MaxMemDepDistance). As we added a dependency from 12873 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8 12874 // and we can abort this loop at i6. 12875 if (DistToSrc >= 2 * MaxMemDepDistance) 12876 break; 12877 DistToSrc++; 12878 } 12879 } 12880 if (InsertInReadyList && SD->isReady()) { 12881 ReadyInsts.insert(SD); 12882 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst 12883 << "\n"); 12884 } 12885 } 12886 } 12887 12888 void BoUpSLP::BlockScheduling::resetSchedule() { 12889 assert(ScheduleStart && 12890 "tried to reset schedule on block which has not been scheduled"); 12891 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) { 12892 doForAllOpcodes(I, [&](ScheduleData *SD) { 12893 assert(isInSchedulingRegion(SD) && 12894 "ScheduleData not in scheduling region"); 12895 SD->IsScheduled = false; 12896 SD->resetUnscheduledDeps(); 12897 }); 12898 } 12899 ReadyInsts.clear(); 12900 } 12901 12902 void BoUpSLP::scheduleBlock(BlockScheduling *BS) { 12903 if (!BS->ScheduleStart) 12904 return; 12905 12906 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n"); 12907 12908 // A key point - if we got here, pre-scheduling was able to find a valid 12909 // scheduling of the sub-graph of the scheduling window which consists 12910 // of all vector bundles and their transitive users. As such, we do not 12911 // need to reschedule anything *outside of* that subgraph. 12912 12913 BS->resetSchedule(); 12914 12915 // For the real scheduling we use a more sophisticated ready-list: it is 12916 // sorted by the original instruction location. This lets the final schedule 12917 // be as close as possible to the original instruction order. 12918 // WARNING: If changing this order causes a correctness issue, that means 12919 // there is some missing dependence edge in the schedule data graph. 12920 struct ScheduleDataCompare { 12921 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const { 12922 return SD2->SchedulingPriority < SD1->SchedulingPriority; 12923 } 12924 }; 12925 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts; 12926 12927 // Ensure that all dependency data is updated (for nodes in the sub-graph) 12928 // and fill the ready-list with initial instructions. 
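  // SchedulingPriority is assigned below in original top-to-bottom order;
  // combined with the comparator above, the ready bundle that appears latest
  // in the original block is picked first, which matches the bottom-up
  // placement in front of LastScheduledInst.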
12929 int Idx = 0; 12930 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; 12931 I = I->getNextNode()) { 12932 BS->doForAllOpcodes(I, [this, &Idx, BS](ScheduleData *SD) { 12933 TreeEntry *SDTE = getTreeEntry(SD->Inst); 12934 (void)SDTE; 12935 assert((isVectorLikeInstWithConstOps(SD->Inst) || 12936 SD->isPartOfBundle() == 12937 (SDTE && !doesNotNeedToSchedule(SDTE->Scalars))) && 12938 "scheduler and vectorizer bundle mismatch"); 12939 SD->FirstInBundle->SchedulingPriority = Idx++; 12940 12941 if (SD->isSchedulingEntity() && SD->isPartOfBundle()) 12942 BS->calculateDependencies(SD, false, this); 12943 }); 12944 } 12945 BS->initialFillReadyList(ReadyInsts); 12946 12947 Instruction *LastScheduledInst = BS->ScheduleEnd; 12948 12949 // Do the "real" scheduling. 12950 while (!ReadyInsts.empty()) { 12951 ScheduleData *Picked = *ReadyInsts.begin(); 12952 ReadyInsts.erase(ReadyInsts.begin()); 12953 12954 // Move the scheduled instruction(s) to their dedicated places, if not 12955 // there yet. 12956 for (ScheduleData *BundleMember = Picked; BundleMember; 12957 BundleMember = BundleMember->NextInBundle) { 12958 Instruction *PickedInst = BundleMember->Inst; 12959 if (PickedInst->getNextNode() != LastScheduledInst) 12960 PickedInst->moveBefore(LastScheduledInst); 12961 LastScheduledInst = PickedInst; 12962 } 12963 12964 BS->schedule(Picked, ReadyInsts); 12965 } 12966 12967 // Check that we didn't break any of our invariants. 12968 #ifdef EXPENSIVE_CHECKS 12969 BS->verify(); 12970 #endif 12971 12972 #if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS) 12973 // Check that all schedulable entities got scheduled 12974 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; I = I->getNextNode()) { 12975 BS->doForAllOpcodes(I, [&](ScheduleData *SD) { 12976 if (SD->isSchedulingEntity() && SD->hasValidDependencies()) { 12977 assert(SD->IsScheduled && "must be scheduled at this point"); 12978 } 12979 }); 12980 } 12981 #endif 12982 12983 // Avoid duplicate scheduling of the block. 12984 BS->ScheduleStart = nullptr; 12985 } 12986 12987 unsigned BoUpSLP::getVectorElementSize(Value *V) { 12988 // If V is a store, just return the width of the stored value (or value 12989 // truncated just before storing) without traversing the expression tree. 12990 // This is the common case. 12991 if (auto *Store = dyn_cast<StoreInst>(V)) 12992 return DL->getTypeSizeInBits(Store->getValueOperand()->getType()); 12993 12994 if (auto *IEI = dyn_cast<InsertElementInst>(V)) 12995 return getVectorElementSize(IEI->getOperand(1)); 12996 12997 auto E = InstrElementSize.find(V); 12998 if (E != InstrElementSize.end()) 12999 return E->second; 13000 13001 // If V is not a store, we can traverse the expression tree to find loads 13002 // that feed it. The type of the loaded value may indicate a more suitable 13003 // width than V's type. We want to base the vector element size on the width 13004 // of memory operations where possible. 13005 SmallVector<std::pair<Instruction *, BasicBlock *>, 16> Worklist; 13006 SmallPtrSet<Instruction *, 16> Visited; 13007 if (auto *I = dyn_cast<Instruction>(V)) { 13008 Worklist.emplace_back(I, I->getParent()); 13009 Visited.insert(I); 13010 } 13011 13012 // Traverse the expression tree in bottom-up order looking for loads. If we 13013 // encounter an instruction we don't yet handle, we give up. 
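  // Width stays zero until a load/extract is seen; if it is still zero after
  // the walk, we fall back to the width of V's own type below.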
13014 auto Width = 0u; 13015 while (!Worklist.empty()) { 13016 Instruction *I; 13017 BasicBlock *Parent; 13018 std::tie(I, Parent) = Worklist.pop_back_val(); 13019 13020 // We should only be looking at scalar instructions here. If the current 13021 // instruction has a vector type, skip. 13022 auto *Ty = I->getType(); 13023 if (isa<VectorType>(Ty)) 13024 continue; 13025 13026 // If the current instruction is a load, update MaxWidth to reflect the 13027 // width of the loaded value. 13028 if (isa<LoadInst, ExtractElementInst, ExtractValueInst>(I)) 13029 Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty)); 13030 13031 // Otherwise, we need to visit the operands of the instruction. We only 13032 // handle the interesting cases from buildTree here. If an operand is an 13033 // instruction we haven't yet visited and from the same basic block as the 13034 // user or the use is a PHI node, we add it to the worklist. 13035 else if (isa<PHINode, CastInst, GetElementPtrInst, CmpInst, SelectInst, 13036 BinaryOperator, UnaryOperator>(I)) { 13037 for (Use &U : I->operands()) 13038 if (auto *J = dyn_cast<Instruction>(U.get())) 13039 if (Visited.insert(J).second && 13040 (isa<PHINode>(I) || J->getParent() == Parent)) 13041 Worklist.emplace_back(J, J->getParent()); 13042 } else { 13043 break; 13044 } 13045 } 13046 13047 // If we didn't encounter a memory access in the expression tree, or if we 13048 // gave up for some reason, just return the width of V. Otherwise, return the 13049 // maximum width we found. 13050 if (!Width) { 13051 if (auto *CI = dyn_cast<CmpInst>(V)) 13052 V = CI->getOperand(0); 13053 Width = DL->getTypeSizeInBits(V->getType()); 13054 } 13055 13056 for (Instruction *I : Visited) 13057 InstrElementSize[I] = Width; 13058 13059 return Width; 13060 } 13061 13062 // Determine if a value V in a vectorizable expression Expr can be demoted to a 13063 // smaller type with a truncation. We collect the values that will be demoted 13064 // in ToDemote and additional roots that require investigating in Roots. 13065 bool BoUpSLP::collectValuesToDemote( 13066 Value *V, SmallVectorImpl<Value *> &ToDemote, 13067 DenseMap<Instruction *, SmallVector<unsigned>> &DemotedConsts, 13068 SmallVectorImpl<Value *> &Roots, DenseSet<Value *> &Visited) const { 13069 // We can always demote constants. 13070 if (isa<Constant>(V)) 13071 return true; 13072 13073 // If the value is not a vectorized instruction in the expression with only 13074 // one use, it cannot be demoted. 13075 auto *I = dyn_cast<Instruction>(V); 13076 if (!I || !I->hasOneUse() || !getTreeEntry(I) || !Visited.insert(I).second) 13077 return false; 13078 13079 unsigned Start = 0; 13080 unsigned End = I->getNumOperands(); 13081 switch (I->getOpcode()) { 13082 13083 // We can always demote truncations and extensions. Since truncations can 13084 // seed additional demotion, we save the truncated value. 13085 case Instruction::Trunc: 13086 Roots.push_back(I->getOperand(0)); 13087 break; 13088 case Instruction::ZExt: 13089 case Instruction::SExt: 13090 if (isa<ExtractElementInst, InsertElementInst>(I->getOperand(0))) 13091 return false; 13092 break; 13093 13094 // We can demote certain binary operations if we can demote both of their 13095 // operands. 
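  // For example, an i32 add whose operands can themselves be demoted may be
  // performed in a narrower type; whether enough leading bits are actually
  // redundant is decided later in computeMinimumValueSizes().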
13096 case Instruction::Add: 13097 case Instruction::Sub: 13098 case Instruction::Mul: 13099 case Instruction::And: 13100 case Instruction::Or: 13101 case Instruction::Xor: 13102 if (!collectValuesToDemote(I->getOperand(0), ToDemote, DemotedConsts, Roots, 13103 Visited) || 13104 !collectValuesToDemote(I->getOperand(1), ToDemote, DemotedConsts, Roots, 13105 Visited)) 13106 return false; 13107 break; 13108 13109 // We can demote selects if we can demote their true and false values. 13110 case Instruction::Select: { 13111 Start = 1; 13112 SelectInst *SI = cast<SelectInst>(I); 13113 if (!collectValuesToDemote(SI->getTrueValue(), ToDemote, DemotedConsts, 13114 Roots, Visited) || 13115 !collectValuesToDemote(SI->getFalseValue(), ToDemote, DemotedConsts, 13116 Roots, Visited)) 13117 return false; 13118 break; 13119 } 13120 13121 // We can demote phis if we can demote all their incoming operands. Note that 13122 // we don't need to worry about cycles since we ensure single use above. 13123 case Instruction::PHI: { 13124 PHINode *PN = cast<PHINode>(I); 13125 for (Value *IncValue : PN->incoming_values()) 13126 if (!collectValuesToDemote(IncValue, ToDemote, DemotedConsts, Roots, 13127 Visited)) 13128 return false; 13129 break; 13130 } 13131 13132 // Otherwise, conservatively give up. 13133 default: 13134 return false; 13135 } 13136 13137 // Gather demoted constant operands. 13138 for (unsigned Idx : seq<unsigned>(Start, End)) 13139 if (isa<Constant>(I->getOperand(Idx))) 13140 DemotedConsts.try_emplace(I).first->getSecond().push_back(Idx); 13141 // Record the value that we can demote. 13142 ToDemote.push_back(V); 13143 return true; 13144 } 13145 13146 void BoUpSLP::computeMinimumValueSizes() { 13147 // If there are no external uses, the expression tree must be rooted by a 13148 // store. We can't demote in-memory values, so there is nothing to do here. 13149 if (ExternalUses.empty()) 13150 return; 13151 13152 // We only attempt to truncate integer expressions. 13153 auto &TreeRoot = VectorizableTree[0]->Scalars; 13154 auto *TreeRootIT = dyn_cast<IntegerType>(TreeRoot[0]->getType()); 13155 if (!TreeRootIT) 13156 return; 13157 13158 // Ensure the roots of the vectorizable tree don't form a cycle. 13159 if (!VectorizableTree.front()->UserTreeIndices.empty()) 13160 return; 13161 13162 // Conservatively determine if we can actually truncate the roots of the 13163 // expression. Collect the values that can be demoted in ToDemote and 13164 // additional roots that require investigating in Roots. 13165 SmallVector<Value *, 32> ToDemote; 13166 DenseMap<Instruction *, SmallVector<unsigned>> DemotedConsts; 13167 SmallVector<Value *, 4> Roots; 13168 for (auto *Root : TreeRoot) { 13169 DenseSet<Value *> Visited; 13170 if (!collectValuesToDemote(Root, ToDemote, DemotedConsts, Roots, Visited)) 13171 return; 13172 } 13173 13174 // The maximum bit width required to represent all the values that can be 13175 // demoted without loss of precision. It would be safe to truncate the roots 13176 // of the expression to this width. 13177 auto MaxBitWidth = 1u; 13178 13179 // We first check if all the bits of the roots are demanded. If they're not, 13180 // we can truncate the roots to this narrower type. 13181 for (auto *Root : TreeRoot) { 13182 auto Mask = DB->getDemandedBits(cast<Instruction>(Root)); 13183 MaxBitWidth = std::max<unsigned>(Mask.getBitWidth() - Mask.countl_zero(), 13184 MaxBitWidth); 13185 } 13186 13187 // True if the roots can be zero-extended back to their original type, rather 13188 // than sign-extended. 
  // We know that if the leading bits are not demanded, we can safely
  // zero-extend. So we initialize IsKnownPositive to true.
  bool IsKnownPositive = true;

  // If all the bits of the roots are demanded, we can try a little harder to
  // compute a narrower type. This can happen, for example, if the roots are
  // getelementptr indices. InstCombine promotes these indices to the pointer
  // width. Thus, all their bits are technically demanded even though the
  // address computation might be vectorized in a smaller type.
  //
  // We start by looking at each entry that can be demoted. We compute the
  // maximum bit width required to store the scalar by using ValueTracking to
  // compute the number of high-order bits we can truncate.
  if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
      all_of(TreeRoot, [](Value *V) {
        return all_of(V->users(),
                      [](User *U) { return isa<GetElementPtrInst>(U); });
      })) {
    MaxBitWidth = 8u;

    // Determine if the sign bit of all the roots is known to be zero. If not,
    // IsKnownPositive is set to False.
    IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
      KnownBits Known = computeKnownBits(R, *DL);
      return Known.isNonNegative();
    });

    // Determine the maximum number of bits required to store the scalar
    // values.
    for (auto *Scalar : ToDemote) {
      auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
      auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
      MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
    }

    // If we can't prove that the sign bit is zero, we must add one to the
    // maximum bit width to account for the unknown sign bit. This preserves
    // the existing sign bit so we can safely sign-extend the root back to the
    // original type. Otherwise, if we know the sign bit is zero, we will
    // zero-extend the root instead.
    //
    // FIXME: This is somewhat suboptimal, as there will be cases where adding
    //        one to the maximum bit width will yield a larger-than-necessary
    //        type. In general, we need to add an extra bit only if we can't
    //        prove that the upper bit of the original type is equal to the
    //        upper bit of the proposed smaller type. If these two bits are the
    //        same (either zero or one) we know that sign-extending from the
    //        smaller type will result in the same value. Here, since we can't
    //        yet prove this, we are just making the proposed smaller type
    //        larger to ensure correctness.
    if (!IsKnownPositive)
      ++MaxBitWidth;
  }

  // Round MaxBitWidth up to the next power-of-two.
  MaxBitWidth = llvm::bit_ceil(MaxBitWidth);

  // If the maximum bit width we compute is less than the width of the roots'
  // type, we can proceed with the narrowing. Otherwise, do nothing.
  if (MaxBitWidth >= TreeRootIT->getBitWidth())
    return;

  // If we can truncate the root, we must collect additional values that might
  // be demoted as a result. That is, those seeded by truncations we will
  // modify.
  while (!Roots.empty()) {
    DenseSet<Value *> Visited;
    collectValuesToDemote(Roots.pop_back_val(), ToDemote, DemotedConsts, Roots,
                          Visited);
  }

  // Finally, map the values we can demote to the maximum bit width we
  // computed.
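  // MinBWs is consulted later by vectorizeTree() when emitting the narrowed
  // operations and the extension back to the original scalar type.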
13260 for (auto *Scalar : ToDemote) { 13261 auto *TE = getTreeEntry(Scalar); 13262 assert(TE && "Expected vectorized scalar."); 13263 if (MinBWs.contains(TE)) 13264 continue; 13265 bool IsSigned = any_of(TE->Scalars, [&](Value *R) { 13266 KnownBits Known = computeKnownBits(R, *DL); 13267 return !Known.isNonNegative(); 13268 }); 13269 MinBWs.try_emplace(TE, MaxBitWidth, IsSigned); 13270 const auto *I = cast<Instruction>(Scalar); 13271 auto DCIt = DemotedConsts.find(I); 13272 if (DCIt != DemotedConsts.end()) { 13273 for (unsigned Idx : DCIt->getSecond()) { 13274 // Check that all instructions operands are demoted. 13275 if (all_of(TE->Scalars, [&](Value *V) { 13276 auto SIt = DemotedConsts.find(cast<Instruction>(V)); 13277 return SIt != DemotedConsts.end() && 13278 is_contained(SIt->getSecond(), Idx); 13279 })) { 13280 const TreeEntry *CTE = getOperandEntry(TE, Idx); 13281 MinBWs.try_emplace(CTE, MaxBitWidth, IsSigned); 13282 } 13283 } 13284 } 13285 } 13286 } 13287 13288 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) { 13289 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F); 13290 auto *TTI = &AM.getResult<TargetIRAnalysis>(F); 13291 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F); 13292 auto *AA = &AM.getResult<AAManager>(F); 13293 auto *LI = &AM.getResult<LoopAnalysis>(F); 13294 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 13295 auto *AC = &AM.getResult<AssumptionAnalysis>(F); 13296 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F); 13297 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 13298 13299 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE); 13300 if (!Changed) 13301 return PreservedAnalyses::all(); 13302 13303 PreservedAnalyses PA; 13304 PA.preserveSet<CFGAnalyses>(); 13305 return PA; 13306 } 13307 13308 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_, 13309 TargetTransformInfo *TTI_, 13310 TargetLibraryInfo *TLI_, AAResults *AA_, 13311 LoopInfo *LI_, DominatorTree *DT_, 13312 AssumptionCache *AC_, DemandedBits *DB_, 13313 OptimizationRemarkEmitter *ORE_) { 13314 if (!RunSLPVectorization) 13315 return false; 13316 SE = SE_; 13317 TTI = TTI_; 13318 TLI = TLI_; 13319 AA = AA_; 13320 LI = LI_; 13321 DT = DT_; 13322 AC = AC_; 13323 DB = DB_; 13324 DL = &F.getParent()->getDataLayout(); 13325 13326 Stores.clear(); 13327 GEPs.clear(); 13328 bool Changed = false; 13329 13330 // If the target claims to have no vector registers don't attempt 13331 // vectorization. 13332 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) { 13333 LLVM_DEBUG( 13334 dbgs() << "SLP: Didn't find any vector registers for target, abort.\n"); 13335 return false; 13336 } 13337 13338 // Don't vectorize when the attribute NoImplicitFloat is used. 13339 if (F.hasFnAttribute(Attribute::NoImplicitFloat)) 13340 return false; 13341 13342 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n"); 13343 13344 // Use the bottom up slp vectorizer to construct chains that start with 13345 // store instructions. 13346 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_); 13347 13348 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to 13349 // delete instructions. 13350 13351 // Update DFS numbers now so that we can use them for ordering. 13352 DT->updateDFSNumbers(); 13353 13354 // Scan the blocks in the function in post order. 13355 for (auto *BB : post_order(&F.getEntryBlock())) { 13356 // Start new block - clear the list of reduction roots. 
13357 R.clearReductionData(); 13358 collectSeedInstructions(BB); 13359 13360 // Vectorize trees that end at stores. 13361 if (!Stores.empty()) { 13362 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size() 13363 << " underlying objects.\n"); 13364 Changed |= vectorizeStoreChains(R); 13365 } 13366 13367 // Vectorize trees that end at reductions. 13368 Changed |= vectorizeChainsInBlock(BB, R); 13369 13370 // Vectorize the index computations of getelementptr instructions. This 13371 // is primarily intended to catch gather-like idioms ending at 13372 // non-consecutive loads. 13373 if (!GEPs.empty()) { 13374 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size() 13375 << " underlying objects.\n"); 13376 Changed |= vectorizeGEPIndices(BB, R); 13377 } 13378 } 13379 13380 if (Changed) { 13381 R.optimizeGatherSequence(); 13382 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n"); 13383 } 13384 return Changed; 13385 } 13386 13387 bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R, 13388 unsigned Idx, unsigned MinVF) { 13389 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size() 13390 << "\n"); 13391 const unsigned Sz = R.getVectorElementSize(Chain[0]); 13392 unsigned VF = Chain.size(); 13393 13394 if (!isPowerOf2_32(Sz) || !isPowerOf2_32(VF) || VF < 2 || VF < MinVF) 13395 return false; 13396 13397 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx 13398 << "\n"); 13399 13400 R.buildTree(Chain); 13401 if (R.isTreeTinyAndNotFullyVectorizable()) 13402 return false; 13403 if (R.isLoadCombineCandidate()) 13404 return false; 13405 R.reorderTopToBottom(); 13406 R.reorderBottomToTop(); 13407 R.buildExternalUses(); 13408 13409 R.computeMinimumValueSizes(); 13410 13411 InstructionCost Cost = R.getTreeCost(); 13412 13413 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF=" << VF << "\n"); 13414 if (Cost < -SLPCostThreshold) { 13415 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n"); 13416 13417 using namespace ore; 13418 13419 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized", 13420 cast<StoreInst>(Chain[0])) 13421 << "Stores SLP vectorized with cost " << NV("Cost", Cost) 13422 << " and with tree size " 13423 << NV("TreeSize", R.getTreeSize())); 13424 13425 R.vectorizeTree(); 13426 return true; 13427 } 13428 13429 return false; 13430 } 13431 13432 bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores, 13433 BoUpSLP &R) { 13434 // We may run into multiple chains that merge into a single chain. We mark the 13435 // stores that we vectorized so that we don't visit the same store twice. 13436 BoUpSLP::ValueSet VectorizedStores; 13437 bool Changed = false; 13438 13439 // Stores the pair of stores (first_store, last_store) in a range, that were 13440 // already tried to be vectorized. Allows to skip the store ranges that were 13441 // already tried to be vectorized but the attempts were unsuccessful. 13442 DenseSet<std::pair<Value *, Value *>> TriedSequences; 13443 struct StoreDistCompare { 13444 bool operator()(const std::pair<unsigned, int> &Op1, 13445 const std::pair<unsigned, int> &Op2) const { 13446 return Op1.second < Op2.second; 13447 } 13448 }; 13449 // A set of pairs (index of store in Stores array ref, Distance of the store 13450 // address relative to base store address in units). 
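  // Illustrative example (not tied to a particular input): for a base store to
  // %p and further stores to %p+1 and %p+2 (distances in element units), the
  // set would hold {<BaseIdx, 0>, <Idx1, 1>, <Idx2, 2>}, ordered by distance so
  // consecutive chains can be peeled off in order.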
13451 using StoreIndexToDistSet = 13452 std::set<std::pair<unsigned, int>, StoreDistCompare>; 13453 auto TryToVectorize = [&](const StoreIndexToDistSet &Set) { 13454 int PrevDist = -1; 13455 BoUpSLP::ValueList Operands; 13456 // Collect the chain into a list. 13457 for (auto [Idx, Data] : enumerate(Set)) { 13458 if (Operands.empty() || Data.second - PrevDist == 1) { 13459 Operands.push_back(Stores[Data.first]); 13460 PrevDist = Data.second; 13461 if (Idx != Set.size() - 1) 13462 continue; 13463 } 13464 if (Operands.size() <= 1) { 13465 Operands.clear(); 13466 Operands.push_back(Stores[Data.first]); 13467 PrevDist = Data.second; 13468 continue; 13469 } 13470 13471 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 13472 unsigned EltSize = R.getVectorElementSize(Operands[0]); 13473 unsigned MaxElts = llvm::bit_floor(MaxVecRegSize / EltSize); 13474 13475 unsigned MaxVF = 13476 std::min(R.getMaximumVF(EltSize, Instruction::Store), MaxElts); 13477 auto *Store = cast<StoreInst>(Operands[0]); 13478 Type *StoreTy = Store->getValueOperand()->getType(); 13479 Type *ValueTy = StoreTy; 13480 if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand())) 13481 ValueTy = Trunc->getSrcTy(); 13482 unsigned MinVF = TTI->getStoreMinimumVF( 13483 R.getMinVF(DL->getTypeSizeInBits(ValueTy)), StoreTy, ValueTy); 13484 13485 if (MaxVF <= MinVF) { 13486 LLVM_DEBUG(dbgs() << "SLP: Vectorization infeasible as MaxVF (" << MaxVF 13487 << ") <= " 13488 << "MinVF (" << MinVF << ")\n"); 13489 } 13490 13491 // FIXME: Is division-by-2 the correct step? Should we assert that the 13492 // register size is a power-of-2? 13493 unsigned StartIdx = 0; 13494 for (unsigned Size = MaxVF; Size >= MinVF; Size /= 2) { 13495 for (unsigned Cnt = StartIdx, E = Operands.size(); Cnt + Size <= E;) { 13496 ArrayRef<Value *> Slice = ArrayRef(Operands).slice(Cnt, Size); 13497 assert( 13498 all_of( 13499 Slice, 13500 [&](Value *V) { 13501 return cast<StoreInst>(V)->getValueOperand()->getType() == 13502 cast<StoreInst>(Slice.front()) 13503 ->getValueOperand() 13504 ->getType(); 13505 }) && 13506 "Expected all operands of same type."); 13507 if (!VectorizedStores.count(Slice.front()) && 13508 !VectorizedStores.count(Slice.back()) && 13509 TriedSequences.insert(std::make_pair(Slice.front(), Slice.back())) 13510 .second && 13511 vectorizeStoreChain(Slice, R, Cnt, MinVF)) { 13512 // Mark the vectorized stores so that we don't vectorize them again. 13513 VectorizedStores.insert(Slice.begin(), Slice.end()); 13514 Changed = true; 13515 // If we vectorized initial block, no need to try to vectorize it 13516 // again. 13517 if (Cnt == StartIdx) 13518 StartIdx += Size; 13519 Cnt += Size; 13520 continue; 13521 } 13522 ++Cnt; 13523 } 13524 // Check if the whole array was vectorized already - exit. 13525 if (StartIdx >= Operands.size()) 13526 break; 13527 } 13528 Operands.clear(); 13529 Operands.push_back(Stores[Data.first]); 13530 PrevDist = Data.second; 13531 } 13532 }; 13533 13534 // Stores pair (first: index of the store into Stores array ref, address of 13535 // which taken as base, second: sorted set of pairs {index, dist}, which are 13536 // indices of stores in the set and their store location distances relative to 13537 // the base address). 13538 13539 // Need to store the index of the very first store separately, since the set 13540 // may be reordered after the insertion and the first store may be moved. This 13541 // container allows to reduce number of calls of getPointersDiff() function. 
13542 SmallVector<std::pair<unsigned, StoreIndexToDistSet>> SortedStores; 13543 // Inserts the specified store SI with the given index Idx to the set of the 13544 // stores. If the store with the same distance is found already - stop 13545 // insertion, try to vectorize already found stores. If some stores from this 13546 // sequence were not vectorized - try to vectorize them with the new store 13547 // later. But this logic is applied only to the stores, that come before the 13548 // previous store with the same distance. 13549 // Example: 13550 // 1. store x, %p 13551 // 2. store y, %p+1 13552 // 3. store z, %p+2 13553 // 4. store a, %p 13554 // 5. store b, %p+3 13555 // - Scan this from the last to first store. The very first bunch of stores is 13556 // {5, {{4, -3}, {2, -2}, {3, -1}, {5, 0}}} (the element in SortedStores 13557 // vector). 13558 // - The next store in the list - #1 - has the same distance from store #5 as 13559 // the store #4. 13560 // - Try to vectorize sequence of stores 4,2,3,5. 13561 // - If all these stores are vectorized - just drop them. 13562 // - If some of them are not vectorized (say, #3 and #5), do extra analysis. 13563 // - Start new stores sequence. 13564 // The new bunch of stores is {1, {1, 0}}. 13565 // - Add the stores from previous sequence, that were not vectorized. 13566 // Here we consider the stores in the reversed order, rather they are used in 13567 // the IR (Stores are reversed already, see vectorizeStoreChains() function). 13568 // Store #3 can be added -> comes after store #4 with the same distance as 13569 // store #1. 13570 // Store #5 cannot be added - comes before store #4. 13571 // This logic allows to improve the compile time, we assume that the stores 13572 // after previous store with the same distance most likely have memory 13573 // dependencies and no need to waste compile time to try to vectorize them. 13574 // - Try to vectorize the sequence {1, {1, 0}, {3, 2}}. 13575 auto FillStoresSet = [&](unsigned Idx, StoreInst *SI) { 13576 for (std::pair<unsigned, StoreIndexToDistSet> &Set : SortedStores) { 13577 std::optional<int> Diff = getPointersDiff( 13578 Stores[Set.first]->getValueOperand()->getType(), 13579 Stores[Set.first]->getPointerOperand(), 13580 SI->getValueOperand()->getType(), SI->getPointerOperand(), *DL, *SE, 13581 /*StrictCheck=*/true); 13582 if (!Diff) 13583 continue; 13584 auto It = Set.second.find(std::make_pair(Idx, *Diff)); 13585 if (It == Set.second.end()) { 13586 Set.second.emplace(Idx, *Diff); 13587 return; 13588 } 13589 // Try to vectorize the first found set to avoid duplicate analysis. 13590 TryToVectorize(Set.second); 13591 StoreIndexToDistSet PrevSet; 13592 PrevSet.swap(Set.second); 13593 Set.first = Idx; 13594 Set.second.emplace(Idx, 0); 13595 // Insert stores that followed previous match to try to vectorize them 13596 // with this store. 13597 unsigned StartIdx = It->first + 1; 13598 SmallBitVector UsedStores(Idx - StartIdx); 13599 // Distances to previously found dup store (or this store, since they 13600 // store to the same addresses). 13601 SmallVector<int> Dists(Idx - StartIdx, 0); 13602 for (const std::pair<unsigned, int> &Pair : reverse(PrevSet)) { 13603 // Do not try to vectorize sequences, we already tried. 
13604 if (Pair.first <= It->first || 13605 VectorizedStores.contains(Stores[Pair.first])) 13606 break; 13607 unsigned BI = Pair.first - StartIdx; 13608 UsedStores.set(BI); 13609 Dists[BI] = Pair.second - It->second; 13610 } 13611 for (unsigned I = StartIdx; I < Idx; ++I) { 13612 unsigned BI = I - StartIdx; 13613 if (UsedStores.test(BI)) 13614 Set.second.emplace(I, Dists[BI]); 13615 } 13616 return; 13617 } 13618 auto &Res = SortedStores.emplace_back(); 13619 Res.first = Idx; 13620 Res.second.emplace(Idx, 0); 13621 }; 13622 StoreInst *PrevStore = Stores.front(); 13623 for (auto [I, SI] : enumerate(Stores)) { 13624 // Check that we do not try to vectorize stores of different types. 13625 if (PrevStore->getValueOperand()->getType() != 13626 SI->getValueOperand()->getType()) { 13627 for (auto &Set : SortedStores) 13628 TryToVectorize(Set.second); 13629 SortedStores.clear(); 13630 PrevStore = SI; 13631 } 13632 FillStoresSet(I, SI); 13633 } 13634 13635 // Final vectorization attempt. 13636 for (auto &Set : SortedStores) 13637 TryToVectorize(Set.second); 13638 13639 return Changed; 13640 } 13641 13642 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) { 13643 // Initialize the collections. We will make a single pass over the block. 13644 Stores.clear(); 13645 GEPs.clear(); 13646 13647 // Visit the store and getelementptr instructions in BB and organize them in 13648 // Stores and GEPs according to the underlying objects of their pointer 13649 // operands. 13650 for (Instruction &I : *BB) { 13651 // Ignore store instructions that are volatile or have a pointer operand 13652 // that doesn't point to a scalar type. 13653 if (auto *SI = dyn_cast<StoreInst>(&I)) { 13654 if (!SI->isSimple()) 13655 continue; 13656 if (!isValidElementType(SI->getValueOperand()->getType())) 13657 continue; 13658 Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI); 13659 } 13660 13661 // Ignore getelementptr instructions that have more than one index, a 13662 // constant index, or a pointer operand that doesn't point to a scalar 13663 // type. 13664 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 13665 if (GEP->getNumIndices() != 1) 13666 continue; 13667 Value *Idx = GEP->idx_begin()->get(); 13668 if (isa<Constant>(Idx)) 13669 continue; 13670 if (!isValidElementType(Idx->getType())) 13671 continue; 13672 if (GEP->getType()->isVectorTy()) 13673 continue; 13674 GEPs[GEP->getPointerOperand()].push_back(GEP); 13675 } 13676 } 13677 } 13678 13679 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R, 13680 bool MaxVFOnly) { 13681 if (VL.size() < 2) 13682 return false; 13683 13684 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = " 13685 << VL.size() << ".\n"); 13686 13687 // Check that all of the parts are instructions of the same type, 13688 // we permit an alternate opcode via InstructionsState. 13689 InstructionsState S = getSameOpcode(VL, *TLI); 13690 if (!S.getOpcode()) 13691 return false; 13692 13693 Instruction *I0 = cast<Instruction>(S.OpValue); 13694 // Make sure invalid types (including vector type) are rejected before 13695 // determining vectorization factor for scalar instructions. 13696 for (Value *V : VL) { 13697 Type *Ty = V->getType(); 13698 if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) { 13699 // NOTE: the following will give user internal llvm type name, which may 13700 // not be useful. 
13701 R.getORE()->emit([&]() { 13702 std::string TypeStr; 13703 llvm::raw_string_ostream rso(TypeStr); 13704 Ty->print(rso); 13705 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0) 13706 << "Cannot SLP vectorize list: type " 13707 << rso.str() + " is unsupported by vectorizer"; 13708 }); 13709 return false; 13710 } 13711 } 13712 13713 unsigned Sz = R.getVectorElementSize(I0); 13714 unsigned MinVF = R.getMinVF(Sz); 13715 unsigned MaxVF = std::max<unsigned>(llvm::bit_floor(VL.size()), MinVF); 13716 MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF); 13717 if (MaxVF < 2) { 13718 R.getORE()->emit([&]() { 13719 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0) 13720 << "Cannot SLP vectorize list: vectorization factor " 13721 << "less than 2 is not supported"; 13722 }); 13723 return false; 13724 } 13725 13726 bool Changed = false; 13727 bool CandidateFound = false; 13728 InstructionCost MinCost = SLPCostThreshold.getValue(); 13729 Type *ScalarTy = VL[0]->getType(); 13730 if (auto *IE = dyn_cast<InsertElementInst>(VL[0])) 13731 ScalarTy = IE->getOperand(1)->getType(); 13732 13733 unsigned NextInst = 0, MaxInst = VL.size(); 13734 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) { 13735 // No actual vectorization should happen, if number of parts is the same as 13736 // provided vectorization factor (i.e. the scalar type is used for vector 13737 // code during codegen). 13738 auto *VecTy = FixedVectorType::get(ScalarTy, VF); 13739 if (TTI->getNumberOfParts(VecTy) == VF) 13740 continue; 13741 for (unsigned I = NextInst; I < MaxInst; ++I) { 13742 unsigned ActualVF = std::min(MaxInst - I, VF); 13743 13744 if (!isPowerOf2_32(ActualVF)) 13745 continue; 13746 13747 if (MaxVFOnly && ActualVF < MaxVF) 13748 break; 13749 if ((VF > MinVF && ActualVF <= VF / 2) || (VF == MinVF && ActualVF < 2)) 13750 break; 13751 13752 ArrayRef<Value *> Ops = VL.slice(I, ActualVF); 13753 // Check that a previous iteration of this loop did not delete the Value. 13754 if (llvm::any_of(Ops, [&R](Value *V) { 13755 auto *I = dyn_cast<Instruction>(V); 13756 return I && R.isDeleted(I); 13757 })) 13758 continue; 13759 13760 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << ActualVF << " operations " 13761 << "\n"); 13762 13763 R.buildTree(Ops); 13764 if (R.isTreeTinyAndNotFullyVectorizable()) 13765 continue; 13766 R.reorderTopToBottom(); 13767 R.reorderBottomToTop( 13768 /*IgnoreReorder=*/!isa<InsertElementInst>(Ops.front()) && 13769 !R.doesRootHaveInTreeUses()); 13770 R.buildExternalUses(); 13771 13772 R.computeMinimumValueSizes(); 13773 InstructionCost Cost = R.getTreeCost(); 13774 CandidateFound = true; 13775 MinCost = std::min(MinCost, Cost); 13776 13777 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost 13778 << " for VF=" << ActualVF << "\n"); 13779 if (Cost < -SLPCostThreshold) { 13780 LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n"); 13781 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList", 13782 cast<Instruction>(Ops[0])) 13783 << "SLP vectorized with cost " << ore::NV("Cost", Cost) 13784 << " and with tree size " 13785 << ore::NV("TreeSize", R.getTreeSize())); 13786 13787 R.vectorizeTree(); 13788 // Move to the next bundle. 
13789 I += VF - 1; 13790 NextInst = I + 1; 13791 Changed = true; 13792 } 13793 } 13794 } 13795 13796 if (!Changed && CandidateFound) { 13797 R.getORE()->emit([&]() { 13798 return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0) 13799 << "List vectorization was possible but not beneficial with cost " 13800 << ore::NV("Cost", MinCost) << " >= " 13801 << ore::NV("Treshold", -SLPCostThreshold); 13802 }); 13803 } else if (!Changed) { 13804 R.getORE()->emit([&]() { 13805 return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0) 13806 << "Cannot SLP vectorize list: vectorization was impossible" 13807 << " with available vectorization factors"; 13808 }); 13809 } 13810 return Changed; 13811 } 13812 13813 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) { 13814 if (!I) 13815 return false; 13816 13817 if (!isa<BinaryOperator, CmpInst>(I) || isa<VectorType>(I->getType())) 13818 return false; 13819 13820 Value *P = I->getParent(); 13821 13822 // Vectorize in current basic block only. 13823 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0)); 13824 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1)); 13825 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P) 13826 return false; 13827 13828 // First collect all possible candidates 13829 SmallVector<std::pair<Value *, Value *>, 4> Candidates; 13830 Candidates.emplace_back(Op0, Op1); 13831 13832 auto *A = dyn_cast<BinaryOperator>(Op0); 13833 auto *B = dyn_cast<BinaryOperator>(Op1); 13834 // Try to skip B. 13835 if (A && B && B->hasOneUse()) { 13836 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0)); 13837 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1)); 13838 if (B0 && B0->getParent() == P) 13839 Candidates.emplace_back(A, B0); 13840 if (B1 && B1->getParent() == P) 13841 Candidates.emplace_back(A, B1); 13842 } 13843 // Try to skip A. 13844 if (B && A && A->hasOneUse()) { 13845 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0)); 13846 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1)); 13847 if (A0 && A0->getParent() == P) 13848 Candidates.emplace_back(A0, B); 13849 if (A1 && A1->getParent() == P) 13850 Candidates.emplace_back(A1, B); 13851 } 13852 13853 if (Candidates.size() == 1) 13854 return tryToVectorizeList({Op0, Op1}, R); 13855 13856 // We have multiple options. Try to pick the single best. 13857 std::optional<int> BestCandidate = R.findBestRootPair(Candidates); 13858 if (!BestCandidate) 13859 return false; 13860 return tryToVectorizeList( 13861 {Candidates[*BestCandidate].first, Candidates[*BestCandidate].second}, R); 13862 } 13863 13864 namespace { 13865 13866 /// Model horizontal reductions. 13867 /// 13868 /// A horizontal reduction is a tree of reduction instructions that has values 13869 /// that can be put into a vector as its leaves. For example: 13870 /// 13871 /// mul mul mul mul 13872 /// \ / \ / 13873 /// + + 13874 /// \ / 13875 /// + 13876 /// This tree has "mul" as its leaf values and "+" as its reduction 13877 /// instructions. A reduction can feed into a store or a binary operation 13878 /// feeding a phi. 13879 /// ... 13880 /// \ / 13881 /// + 13882 /// | 13883 /// phi += 13884 /// 13885 /// Or: 13886 /// ... 13887 /// \ / 13888 /// + 13889 /// | 13890 /// *p = 13891 /// 13892 class HorizontalReduction { 13893 using ReductionOpsType = SmallVector<Value *, 16>; 13894 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>; 13895 ReductionOpsListType ReductionOps; 13896 /// List of possibly reduced values. 
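  /// Illustrative example: for a tree like ((a*b) + (c*d)) + ((e*f) + (g*h)),
  /// the "+" nodes are the reduction operations and the "mul" results are the
  /// reduced values; values that should be vectorized together end up in the
  /// same inner vector.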
13897 SmallVector<SmallVector<Value *>> ReducedVals; 13898 /// Maps reduced value to the corresponding reduction operation. 13899 DenseMap<Value *, SmallVector<Instruction *>> ReducedValsToOps; 13900 // Use map vector to make stable output. 13901 MapVector<Instruction *, Value *> ExtraArgs; 13902 WeakTrackingVH ReductionRoot; 13903 /// The type of reduction operation. 13904 RecurKind RdxKind; 13905 /// Checks if the optimization of original scalar identity operations on 13906 /// matched horizontal reductions is enabled and allowed. 13907 bool IsSupportedHorRdxIdentityOp = false; 13908 13909 static bool isCmpSelMinMax(Instruction *I) { 13910 return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) && 13911 RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I)); 13912 } 13913 13914 // And/or are potentially poison-safe logical patterns like: 13915 // select x, y, false 13916 // select x, true, y 13917 static bool isBoolLogicOp(Instruction *I) { 13918 return isa<SelectInst>(I) && 13919 (match(I, m_LogicalAnd()) || match(I, m_LogicalOr())); 13920 } 13921 13922 /// Checks if instruction is associative and can be vectorized. 13923 static bool isVectorizable(RecurKind Kind, Instruction *I) { 13924 if (Kind == RecurKind::None) 13925 return false; 13926 13927 // Integer ops that map to select instructions or intrinsics are fine. 13928 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) || 13929 isBoolLogicOp(I)) 13930 return true; 13931 13932 if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) { 13933 // FP min/max are associative except for NaN and -0.0. We do not 13934 // have to rule out -0.0 here because the intrinsic semantics do not 13935 // specify a fixed result for it. 13936 return I->getFastMathFlags().noNaNs(); 13937 } 13938 13939 if (Kind == RecurKind::FMaximum || Kind == RecurKind::FMinimum) 13940 return true; 13941 13942 return I->isAssociative(); 13943 } 13944 13945 static Value *getRdxOperand(Instruction *I, unsigned Index) { 13946 // Poison-safe 'or' takes the form: select X, true, Y 13947 // To make that work with the normal operand processing, we skip the 13948 // true value operand. 13949 // TODO: Change the code and data structures to handle this without a hack. 13950 if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1) 13951 return I->getOperand(2); 13952 return I->getOperand(Index); 13953 } 13954 13955 /// Creates reduction operation with the current opcode. 
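  /// A sketch of the poison-safe form (illustrative): with UseSelect set, an
  /// i1 "or" reduction of X and Y is emitted as 'select i1 X, true, Y' instead
  /// of 'or i1 X, Y', so poison in Y is not propagated once X is known true.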
13956 static Value *createOp(IRBuilder<> &Builder, RecurKind Kind, Value *LHS, 13957 Value *RHS, const Twine &Name, bool UseSelect) { 13958 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind); 13959 bool IsConstant = isConstant(LHS) && isConstant(RHS); 13960 switch (Kind) { 13961 case RecurKind::Or: 13962 if (UseSelect && 13963 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 13964 return Builder.CreateSelect(LHS, Builder.getTrue(), RHS, Name); 13965 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 13966 Name); 13967 case RecurKind::And: 13968 if (UseSelect && 13969 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType())) 13970 return Builder.CreateSelect(LHS, RHS, Builder.getFalse(), Name); 13971 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 13972 Name); 13973 case RecurKind::Add: 13974 case RecurKind::Mul: 13975 case RecurKind::Xor: 13976 case RecurKind::FAdd: 13977 case RecurKind::FMul: 13978 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS, 13979 Name); 13980 case RecurKind::FMax: 13981 if (IsConstant) 13982 return ConstantFP::get(LHS->getType(), 13983 maxnum(cast<ConstantFP>(LHS)->getValueAPF(), 13984 cast<ConstantFP>(RHS)->getValueAPF())); 13985 return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS); 13986 case RecurKind::FMin: 13987 if (IsConstant) 13988 return ConstantFP::get(LHS->getType(), 13989 minnum(cast<ConstantFP>(LHS)->getValueAPF(), 13990 cast<ConstantFP>(RHS)->getValueAPF())); 13991 return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS); 13992 case RecurKind::FMaximum: 13993 if (IsConstant) 13994 return ConstantFP::get(LHS->getType(), 13995 maximum(cast<ConstantFP>(LHS)->getValueAPF(), 13996 cast<ConstantFP>(RHS)->getValueAPF())); 13997 return Builder.CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS); 13998 case RecurKind::FMinimum: 13999 if (IsConstant) 14000 return ConstantFP::get(LHS->getType(), 14001 minimum(cast<ConstantFP>(LHS)->getValueAPF(), 14002 cast<ConstantFP>(RHS)->getValueAPF())); 14003 return Builder.CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS); 14004 case RecurKind::SMax: 14005 if (IsConstant || UseSelect) { 14006 Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name); 14007 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14008 } 14009 return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS); 14010 case RecurKind::SMin: 14011 if (IsConstant || UseSelect) { 14012 Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name); 14013 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14014 } 14015 return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS); 14016 case RecurKind::UMax: 14017 if (IsConstant || UseSelect) { 14018 Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name); 14019 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14020 } 14021 return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS); 14022 case RecurKind::UMin: 14023 if (IsConstant || UseSelect) { 14024 Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name); 14025 return Builder.CreateSelect(Cmp, LHS, RHS, Name); 14026 } 14027 return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS); 14028 default: 14029 llvm_unreachable("Unknown reduction operation."); 14030 } 14031 } 14032 14033 /// Creates reduction operation with the current opcode with the IR flags 14034 /// from \p ReductionOps, dropping nuw/nsw flags. 
14035 static Value *createOp(IRBuilder<> &Builder, RecurKind RdxKind, Value *LHS, 14036 Value *RHS, const Twine &Name, 14037 const ReductionOpsListType &ReductionOps) { 14038 bool UseSelect = 14039 ReductionOps.size() == 2 || 14040 // Logical or/and. 14041 (ReductionOps.size() == 1 && any_of(ReductionOps.front(), [](Value *V) { 14042 return isa<SelectInst>(V); 14043 })); 14044 assert((!UseSelect || ReductionOps.size() != 2 || 14045 isa<SelectInst>(ReductionOps[1][0])) && 14046 "Expected cmp + select pairs for reduction"); 14047 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect); 14048 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) { 14049 if (auto *Sel = dyn_cast<SelectInst>(Op)) { 14050 propagateIRFlags(Sel->getCondition(), ReductionOps[0], nullptr, 14051 /*IncludeWrapFlags=*/false); 14052 propagateIRFlags(Op, ReductionOps[1], nullptr, 14053 /*IncludeWrapFlags=*/false); 14054 return Op; 14055 } 14056 } 14057 propagateIRFlags(Op, ReductionOps[0], nullptr, /*IncludeWrapFlags=*/false); 14058 return Op; 14059 } 14060 14061 public: 14062 static RecurKind getRdxKind(Value *V) { 14063 auto *I = dyn_cast<Instruction>(V); 14064 if (!I) 14065 return RecurKind::None; 14066 if (match(I, m_Add(m_Value(), m_Value()))) 14067 return RecurKind::Add; 14068 if (match(I, m_Mul(m_Value(), m_Value()))) 14069 return RecurKind::Mul; 14070 if (match(I, m_And(m_Value(), m_Value())) || 14071 match(I, m_LogicalAnd(m_Value(), m_Value()))) 14072 return RecurKind::And; 14073 if (match(I, m_Or(m_Value(), m_Value())) || 14074 match(I, m_LogicalOr(m_Value(), m_Value()))) 14075 return RecurKind::Or; 14076 if (match(I, m_Xor(m_Value(), m_Value()))) 14077 return RecurKind::Xor; 14078 if (match(I, m_FAdd(m_Value(), m_Value()))) 14079 return RecurKind::FAdd; 14080 if (match(I, m_FMul(m_Value(), m_Value()))) 14081 return RecurKind::FMul; 14082 14083 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value()))) 14084 return RecurKind::FMax; 14085 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value()))) 14086 return RecurKind::FMin; 14087 14088 if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(), m_Value()))) 14089 return RecurKind::FMaximum; 14090 if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(), m_Value()))) 14091 return RecurKind::FMinimum; 14092 // This matches either cmp+select or intrinsics. SLP is expected to handle 14093 // either form. 14094 // TODO: If we are canonicalizing to intrinsics, we can remove several 14095 // special-case paths that deal with selects. 14096 if (match(I, m_SMax(m_Value(), m_Value()))) 14097 return RecurKind::SMax; 14098 if (match(I, m_SMin(m_Value(), m_Value()))) 14099 return RecurKind::SMin; 14100 if (match(I, m_UMax(m_Value(), m_Value()))) 14101 return RecurKind::UMax; 14102 if (match(I, m_UMin(m_Value(), m_Value()))) 14103 return RecurKind::UMin; 14104 14105 if (auto *Select = dyn_cast<SelectInst>(I)) { 14106 // Try harder: look for min/max pattern based on instructions producing 14107 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2). 
14108 // During the intermediate stages of SLP, it's very common to have 14109 // pattern like this (since optimizeGatherSequence is run only once 14110 // at the end): 14111 // %1 = extractelement <2 x i32> %a, i32 0 14112 // %2 = extractelement <2 x i32> %a, i32 1 14113 // %cond = icmp sgt i32 %1, %2 14114 // %3 = extractelement <2 x i32> %a, i32 0 14115 // %4 = extractelement <2 x i32> %a, i32 1 14116 // %select = select i1 %cond, i32 %3, i32 %4 14117 CmpInst::Predicate Pred; 14118 Instruction *L1; 14119 Instruction *L2; 14120 14121 Value *LHS = Select->getTrueValue(); 14122 Value *RHS = Select->getFalseValue(); 14123 Value *Cond = Select->getCondition(); 14124 14125 // TODO: Support inverse predicates. 14126 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) { 14127 if (!isa<ExtractElementInst>(RHS) || 14128 !L2->isIdenticalTo(cast<Instruction>(RHS))) 14129 return RecurKind::None; 14130 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) { 14131 if (!isa<ExtractElementInst>(LHS) || 14132 !L1->isIdenticalTo(cast<Instruction>(LHS))) 14133 return RecurKind::None; 14134 } else { 14135 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS)) 14136 return RecurKind::None; 14137 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) || 14138 !L1->isIdenticalTo(cast<Instruction>(LHS)) || 14139 !L2->isIdenticalTo(cast<Instruction>(RHS))) 14140 return RecurKind::None; 14141 } 14142 14143 switch (Pred) { 14144 default: 14145 return RecurKind::None; 14146 case CmpInst::ICMP_SGT: 14147 case CmpInst::ICMP_SGE: 14148 return RecurKind::SMax; 14149 case CmpInst::ICMP_SLT: 14150 case CmpInst::ICMP_SLE: 14151 return RecurKind::SMin; 14152 case CmpInst::ICMP_UGT: 14153 case CmpInst::ICMP_UGE: 14154 return RecurKind::UMax; 14155 case CmpInst::ICMP_ULT: 14156 case CmpInst::ICMP_ULE: 14157 return RecurKind::UMin; 14158 } 14159 } 14160 return RecurKind::None; 14161 } 14162 14163 /// Get the index of the first operand. 14164 static unsigned getFirstOperandIndex(Instruction *I) { 14165 return isCmpSelMinMax(I) ? 1 : 0; 14166 } 14167 14168 private: 14169 /// Total number of operands in the reduction operation. 14170 static unsigned getNumberOfOperands(Instruction *I) { 14171 return isCmpSelMinMax(I) ? 3 : 2; 14172 } 14173 14174 /// Checks if the instruction is in basic block \p BB. 14175 /// For a cmp+sel min/max reduction check that both ops are in \p BB. 14176 static bool hasSameParent(Instruction *I, BasicBlock *BB) { 14177 if (isCmpSelMinMax(I) || isBoolLogicOp(I)) { 14178 auto *Sel = cast<SelectInst>(I); 14179 auto *Cmp = dyn_cast<Instruction>(Sel->getCondition()); 14180 return Sel->getParent() == BB && Cmp && Cmp->getParent() == BB; 14181 } 14182 return I->getParent() == BB; 14183 } 14184 14185 /// Expected number of uses for reduction operations/reduced values. 14186 static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) { 14187 if (IsCmpSelMinMax) { 14188 // SelectInst must be used twice while the condition op must have single 14189 // use only. 14190 if (auto *Sel = dyn_cast<SelectInst>(I)) 14191 return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse(); 14192 return I->hasNUses(2); 14193 } 14194 14195 // Arithmetic reduction operation must be used once only. 14196 return I->hasOneUse(); 14197 } 14198 14199 /// Initializes the list of reduction operations. 
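  /// For a cmp+select min/max reduction two operation lists are kept (list 0
  /// for the compares, list 1 for the selects); all other reductions use a
  /// single list. This mirrors addReductionOps() below.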
14200 void initReductionOps(Instruction *I) { 14201 if (isCmpSelMinMax(I)) 14202 ReductionOps.assign(2, ReductionOpsType()); 14203 else 14204 ReductionOps.assign(1, ReductionOpsType()); 14205 } 14206 14207 /// Add all reduction operations for the reduction instruction \p I. 14208 void addReductionOps(Instruction *I) { 14209 if (isCmpSelMinMax(I)) { 14210 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition()); 14211 ReductionOps[1].emplace_back(I); 14212 } else { 14213 ReductionOps[0].emplace_back(I); 14214 } 14215 } 14216 14217 static bool isGoodForReduction(ArrayRef<Value *> Data) { 14218 int Sz = Data.size(); 14219 auto *I = dyn_cast<Instruction>(Data.front()); 14220 return Sz > 1 || isConstant(Data.front()) || 14221 (I && !isa<LoadInst>(I) && isValidForAlternation(I->getOpcode())); 14222 } 14223 14224 public: 14225 HorizontalReduction() = default; 14226 14227 /// Try to find a reduction tree. 14228 bool matchAssociativeReduction(BoUpSLP &R, Instruction *Root, 14229 ScalarEvolution &SE, const DataLayout &DL, 14230 const TargetLibraryInfo &TLI) { 14231 RdxKind = HorizontalReduction::getRdxKind(Root); 14232 if (!isVectorizable(RdxKind, Root)) 14233 return false; 14234 14235 // Analyze "regular" integer/FP types for reductions - no target-specific 14236 // types or pointers. 14237 Type *Ty = Root->getType(); 14238 if (!isValidElementType(Ty) || Ty->isPointerTy()) 14239 return false; 14240 14241 // Though the ultimate reduction may have multiple uses, its condition must 14242 // have only single use. 14243 if (auto *Sel = dyn_cast<SelectInst>(Root)) 14244 if (!Sel->getCondition()->hasOneUse()) 14245 return false; 14246 14247 ReductionRoot = Root; 14248 14249 // Iterate through all the operands of the possible reduction tree and 14250 // gather all the reduced values, sorting them by their value id. 14251 BasicBlock *BB = Root->getParent(); 14252 bool IsCmpSelMinMax = isCmpSelMinMax(Root); 14253 SmallVector<Instruction *> Worklist(1, Root); 14254 // Checks if the operands of the \p TreeN instruction are also reduction 14255 // operations or should be treated as reduced values or an extra argument, 14256 // which is not part of the reduction. 14257 auto CheckOperands = [&](Instruction *TreeN, 14258 SmallVectorImpl<Value *> &ExtraArgs, 14259 SmallVectorImpl<Value *> &PossibleReducedVals, 14260 SmallVectorImpl<Instruction *> &ReductionOps) { 14261 for (int I = getFirstOperandIndex(TreeN), 14262 End = getNumberOfOperands(TreeN); 14263 I < End; ++I) { 14264 Value *EdgeVal = getRdxOperand(TreeN, I); 14265 ReducedValsToOps[EdgeVal].push_back(TreeN); 14266 auto *EdgeInst = dyn_cast<Instruction>(EdgeVal); 14267 // Edge has wrong parent - mark as an extra argument. 14268 if (EdgeInst && !isVectorLikeInstWithConstOps(EdgeInst) && 14269 !hasSameParent(EdgeInst, BB)) { 14270 ExtraArgs.push_back(EdgeVal); 14271 continue; 14272 } 14273 // If the edge is not an instruction, or it is different from the main 14274 // reduction opcode or has too many uses - possible reduced value. 14275 // Also, do not try to reduce const values, if the operation is not 14276 // foldable. 
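        // Illustrative example: in ((x + y) + z) the inner add (a single-use
        // add in the same block with the same reduction kind) is queued as
        // another reduction operation, while x, y and z become possible
        // reduced values.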
        if (!EdgeInst || getRdxKind(EdgeInst) != RdxKind ||
            IsCmpSelMinMax != isCmpSelMinMax(EdgeInst) ||
            !hasRequiredNumberOfUses(IsCmpSelMinMax, EdgeInst) ||
            !isVectorizable(RdxKind, EdgeInst) ||
            (R.isAnalyzedReductionRoot(EdgeInst) &&
             all_of(EdgeInst->operands(), Constant::classof))) {
          PossibleReducedVals.push_back(EdgeVal);
          continue;
        }
        ReductionOps.push_back(EdgeInst);
      }
    };
    // Try to regroup the reduced values so that reducing them becomes more
    // profitable. Values are grouped by their value ids, instructions by their
    // opcode and/or alternate opcode, with extra analysis for loads (grouped
    // by the distance between their pointers) and compares (grouped by
    // predicate).
    MapVector<size_t, MapVector<size_t, MapVector<Value *, unsigned>>>
        PossibleReducedVals;
    initReductionOps(Root);
    DenseMap<Value *, SmallVector<LoadInst *>> LoadsMap;
    SmallSet<size_t, 2> LoadKeyUsed;
    SmallPtrSet<Value *, 4> DoNotReverseVals;

    auto GenerateLoadsSubkey = [&](size_t Key, LoadInst *LI) {
      Value *Ptr = getUnderlyingObject(LI->getPointerOperand());
      if (LoadKeyUsed.contains(Key)) {
        auto LIt = LoadsMap.find(Ptr);
        if (LIt != LoadsMap.end()) {
          for (LoadInst *RLI : LIt->second) {
            if (getPointersDiff(RLI->getType(), RLI->getPointerOperand(),
                                LI->getType(), LI->getPointerOperand(), DL, SE,
                                /*StrictCheck=*/true))
              return hash_value(RLI->getPointerOperand());
          }
          for (LoadInst *RLI : LIt->second) {
            if (arePointersCompatible(RLI->getPointerOperand(),
                                      LI->getPointerOperand(), TLI)) {
              hash_code SubKey = hash_value(RLI->getPointerOperand());
              DoNotReverseVals.insert(RLI);
              return SubKey;
            }
          }
          if (LIt->second.size() > 2) {
            hash_code SubKey =
                hash_value(LIt->second.back()->getPointerOperand());
            DoNotReverseVals.insert(LIt->second.back());
            return SubKey;
          }
        }
      }
      LoadKeyUsed.insert(Key);
      LoadsMap.try_emplace(Ptr).first->second.push_back(LI);
      return hash_value(LI->getPointerOperand());
    };

    while (!Worklist.empty()) {
      Instruction *TreeN = Worklist.pop_back_val();
      SmallVector<Value *> Args;
      SmallVector<Value *> PossibleRedVals;
      SmallVector<Instruction *> PossibleReductionOps;
      CheckOperands(TreeN, Args, PossibleRedVals, PossibleReductionOps);
      // If there are too many extra args, mark the instruction itself as a
      // reduction value, not a reduction operation.
      if (Args.size() < 2) {
        addReductionOps(TreeN);
        // Add extra args.
        if (!Args.empty()) {
          assert(Args.size() == 1 && "Expected only single argument.");
          ExtraArgs[TreeN] = Args.front();
        }
        // Add reduction values. The values are sorted for better vectorization
        // results.
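        // Illustrative example: loads from the same underlying pointer hash to
        // the same {Key, SubKey} bucket (see GenerateLoadsSubkey above), so
        // they are later offered to the vectorizer as one candidate group.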
14350 for (Value *V : PossibleRedVals) { 14351 size_t Key, Idx; 14352 std::tie(Key, Idx) = generateKeySubkey(V, &TLI, GenerateLoadsSubkey, 14353 /*AllowAlternate=*/false); 14354 ++PossibleReducedVals[Key][Idx] 14355 .insert(std::make_pair(V, 0)) 14356 .first->second; 14357 } 14358 Worklist.append(PossibleReductionOps.rbegin(), 14359 PossibleReductionOps.rend()); 14360 } else { 14361 size_t Key, Idx; 14362 std::tie(Key, Idx) = generateKeySubkey(TreeN, &TLI, GenerateLoadsSubkey, 14363 /*AllowAlternate=*/false); 14364 ++PossibleReducedVals[Key][Idx] 14365 .insert(std::make_pair(TreeN, 0)) 14366 .first->second; 14367 } 14368 } 14369 auto PossibleReducedValsVect = PossibleReducedVals.takeVector(); 14370 // Sort values by the total number of values kinds to start the reduction 14371 // from the longest possible reduced values sequences. 14372 for (auto &PossibleReducedVals : PossibleReducedValsVect) { 14373 auto PossibleRedVals = PossibleReducedVals.second.takeVector(); 14374 SmallVector<SmallVector<Value *>> PossibleRedValsVect; 14375 for (auto It = PossibleRedVals.begin(), E = PossibleRedVals.end(); 14376 It != E; ++It) { 14377 PossibleRedValsVect.emplace_back(); 14378 auto RedValsVect = It->second.takeVector(); 14379 stable_sort(RedValsVect, llvm::less_second()); 14380 for (const std::pair<Value *, unsigned> &Data : RedValsVect) 14381 PossibleRedValsVect.back().append(Data.second, Data.first); 14382 } 14383 stable_sort(PossibleRedValsVect, [](const auto &P1, const auto &P2) { 14384 return P1.size() > P2.size(); 14385 }); 14386 int NewIdx = -1; 14387 for (ArrayRef<Value *> Data : PossibleRedValsVect) { 14388 if (isGoodForReduction(Data) || 14389 (isa<LoadInst>(Data.front()) && NewIdx >= 0 && 14390 isa<LoadInst>(ReducedVals[NewIdx].front()) && 14391 getUnderlyingObject( 14392 cast<LoadInst>(Data.front())->getPointerOperand()) == 14393 getUnderlyingObject(cast<LoadInst>(ReducedVals[NewIdx].front()) 14394 ->getPointerOperand()))) { 14395 if (NewIdx < 0) { 14396 NewIdx = ReducedVals.size(); 14397 ReducedVals.emplace_back(); 14398 } 14399 if (DoNotReverseVals.contains(Data.front())) 14400 ReducedVals[NewIdx].append(Data.begin(), Data.end()); 14401 else 14402 ReducedVals[NewIdx].append(Data.rbegin(), Data.rend()); 14403 } else { 14404 ReducedVals.emplace_back().append(Data.rbegin(), Data.rend()); 14405 } 14406 } 14407 } 14408 // Sort the reduced values by number of same/alternate opcode and/or pointer 14409 // operand. 14410 stable_sort(ReducedVals, [](ArrayRef<Value *> P1, ArrayRef<Value *> P2) { 14411 return P1.size() > P2.size(); 14412 }); 14413 return true; 14414 } 14415 14416 /// Attempt to vectorize the tree found by matchAssociativeReduction. 14417 Value *tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI, 14418 const TargetLibraryInfo &TLI) { 14419 constexpr int ReductionLimit = 4; 14420 constexpr unsigned RegMaxNumber = 4; 14421 constexpr unsigned RedValsMaxNumber = 128; 14422 // If there are a sufficient number of reduction values, reduce 14423 // to a nearby power-of-2. We can safely generate oversized 14424 // vectors and rely on the backend to split them to legal sizes. 
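    // For example (illustrative): 25 suitable reduced values give
    // bit_floor(25) = 16 as the initial reduction width; later attempts halve
    // it (16, 8, 4) until vectorization succeeds or the width drops below
    // ReductionLimit.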
14425 unsigned NumReducedVals = 14426 std::accumulate(ReducedVals.begin(), ReducedVals.end(), 0, 14427 [](unsigned Num, ArrayRef<Value *> Vals) -> unsigned { 14428 if (!isGoodForReduction(Vals)) 14429 return Num; 14430 return Num + Vals.size(); 14431 }); 14432 if (NumReducedVals < ReductionLimit && 14433 (!AllowHorRdxIdenityOptimization || 14434 all_of(ReducedVals, [](ArrayRef<Value *> RedV) { 14435 return RedV.size() < 2 || !allConstant(RedV) || !isSplat(RedV); 14436 }))) { 14437 for (ReductionOpsType &RdxOps : ReductionOps) 14438 for (Value *RdxOp : RdxOps) 14439 V.analyzedReductionRoot(cast<Instruction>(RdxOp)); 14440 return nullptr; 14441 } 14442 14443 IRBuilder<> Builder(cast<Instruction>(ReductionRoot)); 14444 14445 // Track the reduced values in case if they are replaced by extractelement 14446 // because of the vectorization. 14447 DenseMap<Value *, WeakTrackingVH> TrackedVals( 14448 ReducedVals.size() * ReducedVals.front().size() + ExtraArgs.size()); 14449 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues; 14450 SmallVector<std::pair<Value *, Value *>> ReplacedExternals; 14451 ExternallyUsedValues.reserve(ExtraArgs.size() + 1); 14452 // The same extra argument may be used several times, so log each attempt 14453 // to use it. 14454 for (const std::pair<Instruction *, Value *> &Pair : ExtraArgs) { 14455 assert(Pair.first && "DebugLoc must be set."); 14456 ExternallyUsedValues[Pair.second].push_back(Pair.first); 14457 TrackedVals.try_emplace(Pair.second, Pair.second); 14458 } 14459 14460 // The compare instruction of a min/max is the insertion point for new 14461 // instructions and may be replaced with a new compare instruction. 14462 auto &&GetCmpForMinMaxReduction = [](Instruction *RdxRootInst) { 14463 assert(isa<SelectInst>(RdxRootInst) && 14464 "Expected min/max reduction to have select root instruction"); 14465 Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition(); 14466 assert(isa<Instruction>(ScalarCond) && 14467 "Expected min/max reduction to have compare condition"); 14468 return cast<Instruction>(ScalarCond); 14469 }; 14470 14471 // Return new VectorizedTree, based on previous value. 14472 auto GetNewVectorizedTree = [&](Value *VectorizedTree, Value *Res) { 14473 if (VectorizedTree) { 14474 // Update the final value in the reduction. 14475 Builder.SetCurrentDebugLocation( 14476 cast<Instruction>(ReductionOps.front().front())->getDebugLoc()); 14477 if ((isa<PoisonValue>(VectorizedTree) && !isa<PoisonValue>(Res)) || 14478 (isGuaranteedNotToBePoison(Res) && 14479 !isGuaranteedNotToBePoison(VectorizedTree))) { 14480 auto It = ReducedValsToOps.find(Res); 14481 if (It != ReducedValsToOps.end() && 14482 any_of(It->getSecond(), 14483 [](Instruction *I) { return isBoolLogicOp(I); })) 14484 std::swap(VectorizedTree, Res); 14485 } 14486 14487 return createOp(Builder, RdxKind, VectorizedTree, Res, "op.rdx", 14488 ReductionOps); 14489 } 14490 // Initialize the final value in the reduction. 14491 return Res; 14492 }; 14493 bool AnyBoolLogicOp = 14494 any_of(ReductionOps.back(), [](Value *V) { 14495 return isBoolLogicOp(cast<Instruction>(V)); 14496 }); 14497 // The reduction root is used as the insertion point for new instructions, 14498 // so set it as externally used to prevent it from being deleted. 
    ExternallyUsedValues[ReductionRoot];
    SmallDenseSet<Value *> IgnoreList(ReductionOps.size() *
                                      ReductionOps.front().size());
    for (ReductionOpsType &RdxOps : ReductionOps)
      for (Value *RdxOp : RdxOps) {
        if (!RdxOp)
          continue;
        IgnoreList.insert(RdxOp);
      }
    // Intersect the fast-math-flags from all reduction operations.
    FastMathFlags RdxFMF;
    RdxFMF.set();
    for (Value *U : IgnoreList)
      if (auto *FPMO = dyn_cast<FPMathOperator>(U))
        RdxFMF &= FPMO->getFastMathFlags();
    bool IsCmpSelMinMax = isCmpSelMinMax(cast<Instruction>(ReductionRoot));

    // Need to track reduced vals, they may be changed during vectorization of
    // subvectors.
    for (ArrayRef<Value *> Candidates : ReducedVals)
      for (Value *V : Candidates)
        TrackedVals.try_emplace(V, V);

    DenseMap<Value *, unsigned> VectorizedVals(ReducedVals.size());
    // List of the values that were reduced in other trees as part of gather
    // nodes and thus require an extract if fully vectorized in other trees.
    SmallPtrSet<Value *, 4> RequiredExtract;
    Value *VectorizedTree = nullptr;
    bool CheckForReusedReductionOps = false;
    // Try to vectorize elements based on their type.
    for (unsigned I = 0, E = ReducedVals.size(); I < E; ++I) {
      ArrayRef<Value *> OrigReducedVals = ReducedVals[I];
      InstructionsState S = getSameOpcode(OrigReducedVals, TLI);
      SmallVector<Value *> Candidates;
      Candidates.reserve(2 * OrigReducedVals.size());
      DenseMap<Value *, Value *> TrackedToOrig(2 * OrigReducedVals.size());
      for (unsigned Cnt = 0, Sz = OrigReducedVals.size(); Cnt < Sz; ++Cnt) {
        Value *RdxVal = TrackedVals.find(OrigReducedVals[Cnt])->second;
        // Check if the reduction value was not overridden by the
        // extractelement instruction because of the vectorization and exclude
        // it if it is not compatible with other values.
        // Also check if the instruction was folded to constant/other value.
        auto *Inst = dyn_cast<Instruction>(RdxVal);
        if ((Inst && isVectorLikeInstWithConstOps(Inst) &&
             (!S.getOpcode() || !S.isOpcodeOrAlt(Inst))) ||
            (S.getOpcode() && !Inst))
          continue;
        Candidates.push_back(RdxVal);
        TrackedToOrig.try_emplace(RdxVal, OrigReducedVals[Cnt]);
      }
      bool ShuffledExtracts = false;
      // Try to handle shuffled extractelements.
      if (S.getOpcode() == Instruction::ExtractElement && !S.isAltShuffle() &&
          I + 1 < E) {
        InstructionsState NextS = getSameOpcode(ReducedVals[I + 1], TLI);
        if (NextS.getOpcode() == Instruction::ExtractElement &&
            !NextS.isAltShuffle()) {
          SmallVector<Value *> CommonCandidates(Candidates);
          for (Value *RV : ReducedVals[I + 1]) {
            Value *RdxVal = TrackedVals.find(RV)->second;
            // Check if the reduction value was not overridden by the
            // extractelement instruction because of the vectorization and
            // exclude it if it is not compatible with other values.
            if (auto *Inst = dyn_cast<Instruction>(RdxVal))
              if (!NextS.getOpcode() || !NextS.isOpcodeOrAlt(Inst))
                continue;
            CommonCandidates.push_back(RdxVal);
            TrackedToOrig.try_emplace(RdxVal, RV);
          }
          SmallVector<int> Mask;
          if (isFixedVectorShuffle(CommonCandidates, Mask)) {
            ++I;
            Candidates.swap(CommonCandidates);
            ShuffledExtracts = true;
          }
        }
      }

      // Emit code for constant values.
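      // Illustrative example: an all-constant bucket such as {3, 5, 7} is
      // folded by chaining scalar createOp calls (3+5, then +7), so no vector
      // code is emitted for it.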
14578 if (AllowHorRdxIdenityOptimization && Candidates.size() > 1 && 14579 allConstant(Candidates)) { 14580 Value *Res = Candidates.front(); 14581 ++VectorizedVals.try_emplace(Candidates.front(), 0).first->getSecond(); 14582 for (Value *VC : ArrayRef(Candidates).drop_front()) { 14583 Res = createOp(Builder, RdxKind, Res, VC, "const.rdx", ReductionOps); 14584 ++VectorizedVals.try_emplace(VC, 0).first->getSecond(); 14585 if (auto *ResI = dyn_cast<Instruction>(Res)) 14586 V.analyzedReductionRoot(ResI); 14587 } 14588 VectorizedTree = GetNewVectorizedTree(VectorizedTree, Res); 14589 continue; 14590 } 14591 14592 unsigned NumReducedVals = Candidates.size(); 14593 if (NumReducedVals < ReductionLimit && 14594 (NumReducedVals < 2 || !AllowHorRdxIdenityOptimization || 14595 !isSplat(Candidates))) 14596 continue; 14597 14598 // Check if we support repeated scalar values processing (optimization of 14599 // original scalar identity operations on matched horizontal reductions). 14600 IsSupportedHorRdxIdentityOp = 14601 AllowHorRdxIdenityOptimization && RdxKind != RecurKind::Mul && 14602 RdxKind != RecurKind::FMul && RdxKind != RecurKind::FMulAdd; 14603 // Gather same values. 14604 MapVector<Value *, unsigned> SameValuesCounter; 14605 if (IsSupportedHorRdxIdentityOp) 14606 for (Value *V : Candidates) 14607 ++SameValuesCounter.insert(std::make_pair(V, 0)).first->second; 14608 // Used to check if the reduced values used same number of times. In this 14609 // case the compiler may produce better code. E.g. if reduced values are 14610 // aabbccdd (8 x values), then the first node of the tree will have a node 14611 // for 4 x abcd + shuffle <4 x abcd>, <0, 0, 1, 1, 2, 2, 3, 3>. 14612 // Plus, the final reduction will be performed on <8 x aabbccdd>. 14613 // Instead compiler may build <4 x abcd> tree immediately, + reduction (4 14614 // x abcd) * 2. 14615 // Currently it only handles add/fadd/xor. and/or/min/max do not require 14616 // this analysis, other operations may require an extra estimation of 14617 // the profitability. 14618 bool SameScaleFactor = false; 14619 bool OptReusedScalars = IsSupportedHorRdxIdentityOp && 14620 SameValuesCounter.size() != Candidates.size(); 14621 if (OptReusedScalars) { 14622 SameScaleFactor = 14623 (RdxKind == RecurKind::Add || RdxKind == RecurKind::FAdd || 14624 RdxKind == RecurKind::Xor) && 14625 all_of(drop_begin(SameValuesCounter), 14626 [&SameValuesCounter](const std::pair<Value *, unsigned> &P) { 14627 return P.second == SameValuesCounter.front().second; 14628 }); 14629 Candidates.resize(SameValuesCounter.size()); 14630 transform(SameValuesCounter, Candidates.begin(), 14631 [](const auto &P) { return P.first; }); 14632 NumReducedVals = Candidates.size(); 14633 // Have a reduction of the same element. 
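        // Illustrative example: an add reduction of {a, a, a, a} collapses to
        // the single candidate 'a' with a repeat count of 4 and is handled by
        // emitScaleForReusedOps below instead of building a vector tree.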
        if (NumReducedVals == 1) {
          Value *OrigV = TrackedToOrig.find(Candidates.front())->second;
          unsigned Cnt = SameValuesCounter.lookup(OrigV);
          Value *RedVal =
              emitScaleForReusedOps(Candidates.front(), Builder, Cnt);
          VectorizedTree = GetNewVectorizedTree(VectorizedTree, RedVal);
          VectorizedVals.try_emplace(OrigV, Cnt);
          continue;
        }
      }

      unsigned MaxVecRegSize = V.getMaxVecRegSize();
      unsigned EltSize = V.getVectorElementSize(Candidates[0]);
      unsigned MaxElts =
          RegMaxNumber * llvm::bit_floor(MaxVecRegSize / EltSize);

      unsigned ReduxWidth = std::min<unsigned>(
          llvm::bit_floor(NumReducedVals), std::max(RedValsMaxNumber, MaxElts));
      unsigned Start = 0;
      unsigned Pos = Start;
      // Restarts the vectorization attempt with a lower vectorization factor.
      unsigned PrevReduxWidth = ReduxWidth;
      bool CheckForReusedReductionOpsLocal = false;
      auto &&AdjustReducedVals = [&Pos, &Start, &ReduxWidth, NumReducedVals,
                                  &CheckForReusedReductionOpsLocal,
                                  &PrevReduxWidth, &V,
                                  &IgnoreList](bool IgnoreVL = false) {
        bool IsAnyRedOpGathered = !IgnoreVL && V.isAnyGathered(IgnoreList);
        if (!CheckForReusedReductionOpsLocal && PrevReduxWidth == ReduxWidth) {
          // Check if any of the reduction ops are gathered. If so, it is worth
          // trying again with fewer reduction ops.
          CheckForReusedReductionOpsLocal |= IsAnyRedOpGathered;
        }
        ++Pos;
        if (Pos < NumReducedVals - ReduxWidth + 1)
          return IsAnyRedOpGathered;
        Pos = Start;
        ReduxWidth /= 2;
        return IsAnyRedOpGathered;
      };
      bool AnyVectorized = false;
      while (Pos < NumReducedVals - ReduxWidth + 1 &&
             ReduxWidth >= ReductionLimit) {
        // Dependency in the tree of the reduction ops - drop this attempt, try
        // again later.
        if (CheckForReusedReductionOpsLocal && PrevReduxWidth != ReduxWidth &&
            Start == 0) {
          CheckForReusedReductionOps = true;
          break;
        }
        PrevReduxWidth = ReduxWidth;
        ArrayRef<Value *> VL(std::next(Candidates.begin(), Pos), ReduxWidth);
        // Already being analyzed - skip.
        if (V.areAnalyzedReductionVals(VL)) {
          (void)AdjustReducedVals(/*IgnoreVL=*/true);
          continue;
        }
        // Early exit if any of the reduction values were deleted during
        // previous vectorization attempts.
        if (any_of(VL, [&V](Value *RedVal) {
              auto *RedValI = dyn_cast<Instruction>(RedVal);
              if (!RedValI)
                return false;
              return V.isDeleted(RedValI);
            }))
          break;
        V.buildTree(VL, IgnoreList);
        if (V.isTreeTinyAndNotFullyVectorizable(/*ForReduction=*/true)) {
          if (!AdjustReducedVals())
            V.analyzedReductionVals(VL);
          continue;
        }
        if (V.isLoadCombineReductionCandidate(RdxKind)) {
          if (!AdjustReducedVals())
            V.analyzedReductionVals(VL);
          continue;
        }
        V.reorderTopToBottom();
        // No need to reorder the root node at all.
        V.reorderBottomToTop(/*IgnoreReorder=*/true);
        // Keep other extracted reduction values, if they are used in the
        // vectorization trees.
14716 BoUpSLP::ExtraValueToDebugLocsMap LocalExternallyUsedValues( 14717 ExternallyUsedValues); 14718 for (unsigned Cnt = 0, Sz = ReducedVals.size(); Cnt < Sz; ++Cnt) { 14719 if (Cnt == I || (ShuffledExtracts && Cnt == I - 1)) 14720 continue; 14721 for (Value *V : ReducedVals[Cnt]) 14722 if (isa<Instruction>(V)) 14723 LocalExternallyUsedValues[TrackedVals[V]]; 14724 } 14725 if (!IsSupportedHorRdxIdentityOp) { 14726 // Number of uses of the candidates in the vector of values. 14727 assert(SameValuesCounter.empty() && 14728 "Reused values counter map is not empty"); 14729 for (unsigned Cnt = 0; Cnt < NumReducedVals; ++Cnt) { 14730 if (Cnt >= Pos && Cnt < Pos + ReduxWidth) 14731 continue; 14732 Value *V = Candidates[Cnt]; 14733 Value *OrigV = TrackedToOrig.find(V)->second; 14734 ++SameValuesCounter[OrigV]; 14735 } 14736 } 14737 SmallPtrSet<Value *, 4> VLScalars(VL.begin(), VL.end()); 14738 // Gather externally used values. 14739 SmallPtrSet<Value *, 4> Visited; 14740 for (unsigned Cnt = 0; Cnt < NumReducedVals; ++Cnt) { 14741 if (Cnt >= Pos && Cnt < Pos + ReduxWidth) 14742 continue; 14743 Value *RdxVal = Candidates[Cnt]; 14744 if (!Visited.insert(RdxVal).second) 14745 continue; 14746 // Check if the scalar was vectorized as part of the vectorization 14747 // tree but not the top node. 14748 if (!VLScalars.contains(RdxVal) && V.isVectorized(RdxVal)) { 14749 LocalExternallyUsedValues[RdxVal]; 14750 continue; 14751 } 14752 Value *OrigV = TrackedToOrig.find(RdxVal)->second; 14753 unsigned NumOps = 14754 VectorizedVals.lookup(RdxVal) + SameValuesCounter[OrigV]; 14755 if (NumOps != ReducedValsToOps.find(OrigV)->second.size()) 14756 LocalExternallyUsedValues[RdxVal]; 14757 } 14758 // Do not need the list of reused scalars in regular mode anymore. 14759 if (!IsSupportedHorRdxIdentityOp) 14760 SameValuesCounter.clear(); 14761 for (Value *RdxVal : VL) 14762 if (RequiredExtract.contains(RdxVal)) 14763 LocalExternallyUsedValues[RdxVal]; 14764 // Update LocalExternallyUsedValues for the scalar, replaced by 14765 // extractelement instructions. 14766 for (const std::pair<Value *, Value *> &Pair : ReplacedExternals) { 14767 auto *It = ExternallyUsedValues.find(Pair.first); 14768 if (It == ExternallyUsedValues.end()) 14769 continue; 14770 LocalExternallyUsedValues[Pair.second].append(It->second); 14771 } 14772 V.buildExternalUses(LocalExternallyUsedValues); 14773 14774 V.computeMinimumValueSizes(); 14775 14776 // Estimate cost. 14777 InstructionCost TreeCost = V.getTreeCost(VL); 14778 InstructionCost ReductionCost = 14779 getReductionCost(TTI, VL, IsCmpSelMinMax, ReduxWidth, RdxFMF); 14780 InstructionCost Cost = TreeCost + ReductionCost; 14781 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost 14782 << " for reduction\n"); 14783 if (!Cost.isValid()) 14784 return nullptr; 14785 if (Cost >= -SLPCostThreshold) { 14786 V.getORE()->emit([&]() { 14787 return OptimizationRemarkMissed( 14788 SV_NAME, "HorSLPNotBeneficial", 14789 ReducedValsToOps.find(VL[0])->second.front()) 14790 << "Vectorizing horizontal reduction is possible " 14791 << "but not beneficial with cost " << ore::NV("Cost", Cost) 14792 << " and threshold " 14793 << ore::NV("Threshold", -SLPCostThreshold); 14794 }); 14795 if (!AdjustReducedVals()) 14796 V.analyzedReductionVals(VL); 14797 continue; 14798 } 14799 14800 LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" 14801 << Cost << ". 
(HorRdx)\n"); 14802 V.getORE()->emit([&]() { 14803 return OptimizationRemark( 14804 SV_NAME, "VectorizedHorizontalReduction", 14805 ReducedValsToOps.find(VL[0])->second.front()) 14806 << "Vectorized horizontal reduction with cost " 14807 << ore::NV("Cost", Cost) << " and with tree size " 14808 << ore::NV("TreeSize", V.getTreeSize()); 14809 }); 14810 14811 Builder.setFastMathFlags(RdxFMF); 14812 14813 // Emit a reduction. If the root is a select (min/max idiom), the insert 14814 // point is the compare condition of that select. 14815 Instruction *RdxRootInst = cast<Instruction>(ReductionRoot); 14816 Instruction *InsertPt = RdxRootInst; 14817 if (IsCmpSelMinMax) 14818 InsertPt = GetCmpForMinMaxReduction(RdxRootInst); 14819 14820 // Vectorize a tree. 14821 Value *VectorizedRoot = V.vectorizeTree(LocalExternallyUsedValues, 14822 ReplacedExternals, InsertPt); 14823 14824 Builder.SetInsertPoint(InsertPt); 14825 14826 // To prevent poison from leaking across what used to be sequential, 14827 // safe, scalar boolean logic operations, the reduction operand must be 14828 // frozen. 14829 if ((isBoolLogicOp(RdxRootInst) || 14830 (AnyBoolLogicOp && VL.size() != TrackedVals.size())) && 14831 !isGuaranteedNotToBePoison(VectorizedRoot)) 14832 VectorizedRoot = Builder.CreateFreeze(VectorizedRoot); 14833 14834 // Emit code to correctly handle reused reduced values, if required. 14835 if (OptReusedScalars && !SameScaleFactor) { 14836 VectorizedRoot = 14837 emitReusedOps(VectorizedRoot, Builder, V.getRootNodeScalars(), 14838 SameValuesCounter, TrackedToOrig); 14839 } 14840 14841 Value *ReducedSubTree = 14842 emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI); 14843 if (ReducedSubTree->getType() != VL.front()->getType()) { 14844 ReducedSubTree = Builder.CreateIntCast( 14845 ReducedSubTree, VL.front()->getType(), any_of(VL, [&](Value *R) { 14846 KnownBits Known = computeKnownBits( 14847 R, cast<Instruction>(ReductionOps.front().front()) 14848 ->getModule() 14849 ->getDataLayout()); 14850 return !Known.isNonNegative(); 14851 })); 14852 } 14853 14854 // Improved analysis for add/fadd/xor reductions with same scale factor 14855 // for all operands of reductions. We can emit scalar ops for them 14856 // instead. 14857 if (OptReusedScalars && SameScaleFactor) 14858 ReducedSubTree = emitScaleForReusedOps( 14859 ReducedSubTree, Builder, SameValuesCounter.front().second); 14860 14861 VectorizedTree = GetNewVectorizedTree(VectorizedTree, ReducedSubTree); 14862 // Count vectorized reduced values to exclude them from final reduction. 
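        // E.g., in the regular (non-reused-scalar) mode, if %x occurs three
        // times among the reduction operands but only two occurrences were
        // covered by vectorized slices, VectorizedVals[%x] ends up as 2 and
        // the FinalGen step below emits one extra scalar "op.rdx" operation
        // for the remaining occurrence.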
14863 for (Value *RdxVal : VL) { 14864 Value *OrigV = TrackedToOrig.find(RdxVal)->second; 14865 if (IsSupportedHorRdxIdentityOp) { 14866 VectorizedVals.try_emplace(OrigV, SameValuesCounter[RdxVal]); 14867 continue; 14868 } 14869 ++VectorizedVals.try_emplace(OrigV, 0).first->getSecond(); 14870 if (!V.isVectorized(RdxVal)) 14871 RequiredExtract.insert(RdxVal); 14872 } 14873 Pos += ReduxWidth; 14874 Start = Pos; 14875 ReduxWidth = llvm::bit_floor(NumReducedVals - Pos); 14876 AnyVectorized = true; 14877 } 14878 if (OptReusedScalars && !AnyVectorized) { 14879 for (const std::pair<Value *, unsigned> &P : SameValuesCounter) { 14880 Value *RedVal = emitScaleForReusedOps(P.first, Builder, P.second); 14881 VectorizedTree = GetNewVectorizedTree(VectorizedTree, RedVal); 14882 Value *OrigV = TrackedToOrig.find(P.first)->second; 14883 VectorizedVals.try_emplace(OrigV, P.second); 14884 } 14885 continue; 14886 } 14887 } 14888 if (VectorizedTree) { 14889 // Reorder operands of bool logical op in the natural order to avoid 14890 // possible problem with poison propagation. If not possible to reorder 14891 // (both operands are originally RHS), emit an extra freeze instruction 14892 // for the LHS operand. 14893 // I.e., if we have original code like this: 14894 // RedOp1 = select i1 ?, i1 LHS, i1 false 14895 // RedOp2 = select i1 RHS, i1 ?, i1 false 14896 14897 // Then, we swap LHS/RHS to create a new op that matches the poison 14898 // semantics of the original code. 14899 14900 // If we have original code like this and both values could be poison: 14901 // RedOp1 = select i1 ?, i1 LHS, i1 false 14902 // RedOp2 = select i1 ?, i1 RHS, i1 false 14903 14904 // Then, we must freeze LHS in the new op. 14905 auto FixBoolLogicalOps = [&, VectorizedTree](Value *&LHS, Value *&RHS, 14906 Instruction *RedOp1, 14907 Instruction *RedOp2, 14908 bool InitStep) { 14909 if (!AnyBoolLogicOp) 14910 return; 14911 if (isBoolLogicOp(RedOp1) && 14912 ((!InitStep && LHS == VectorizedTree) || 14913 getRdxOperand(RedOp1, 0) == LHS || isGuaranteedNotToBePoison(LHS))) 14914 return; 14915 if (isBoolLogicOp(RedOp2) && ((!InitStep && RHS == VectorizedTree) || 14916 getRdxOperand(RedOp2, 0) == RHS || 14917 isGuaranteedNotToBePoison(RHS))) { 14918 std::swap(LHS, RHS); 14919 return; 14920 } 14921 if (LHS != VectorizedTree) 14922 LHS = Builder.CreateFreeze(LHS); 14923 }; 14924 // Finish the reduction. 14925 // Need to add extra arguments and not vectorized possible reduction 14926 // values. 14927 // Try to avoid dependencies between the scalar remainders after 14928 // reductions. 14929 auto FinalGen = 14930 [&](ArrayRef<std::pair<Instruction *, Value *>> InstVals, 14931 bool InitStep) { 14932 unsigned Sz = InstVals.size(); 14933 SmallVector<std::pair<Instruction *, Value *>> ExtraReds(Sz / 2 + 14934 Sz % 2); 14935 for (unsigned I = 0, E = (Sz / 2) * 2; I < E; I += 2) { 14936 Instruction *RedOp = InstVals[I + 1].first; 14937 Builder.SetCurrentDebugLocation(RedOp->getDebugLoc()); 14938 Value *RdxVal1 = InstVals[I].second; 14939 Value *StableRdxVal1 = RdxVal1; 14940 auto It1 = TrackedVals.find(RdxVal1); 14941 if (It1 != TrackedVals.end()) 14942 StableRdxVal1 = It1->second; 14943 Value *RdxVal2 = InstVals[I + 1].second; 14944 Value *StableRdxVal2 = RdxVal2; 14945 auto It2 = TrackedVals.find(RdxVal2); 14946 if (It2 != TrackedVals.end()) 14947 StableRdxVal2 = It2->second; 14948 // To prevent poison from leaking across what used to be 14949 // sequential, safe, scalar boolean logic operations, the 14950 // reduction operand must be frozen. 
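                  // E.g., "%r = select i1 %a, i1 %b, i1 false" never exposes
                  // poison in %b when %a is false, whereas the reassociated
                  // "and i1 %a, %b" would; freezing the operand preserves the
                  // original short-circuit poison semantics.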
14951 FixBoolLogicalOps(StableRdxVal1, StableRdxVal2, InstVals[I].first, 14952 RedOp, InitStep); 14953 Value *ExtraRed = createOp(Builder, RdxKind, StableRdxVal1, 14954 StableRdxVal2, "op.rdx", ReductionOps); 14955 ExtraReds[I / 2] = std::make_pair(InstVals[I].first, ExtraRed); 14956 } 14957 if (Sz % 2 == 1) 14958 ExtraReds[Sz / 2] = InstVals.back(); 14959 return ExtraReds; 14960 }; 14961 SmallVector<std::pair<Instruction *, Value *>> ExtraReductions; 14962 ExtraReductions.emplace_back(cast<Instruction>(ReductionRoot), 14963 VectorizedTree); 14964 SmallPtrSet<Value *, 8> Visited; 14965 for (ArrayRef<Value *> Candidates : ReducedVals) { 14966 for (Value *RdxVal : Candidates) { 14967 if (!Visited.insert(RdxVal).second) 14968 continue; 14969 unsigned NumOps = VectorizedVals.lookup(RdxVal); 14970 for (Instruction *RedOp : 14971 ArrayRef(ReducedValsToOps.find(RdxVal)->second) 14972 .drop_back(NumOps)) 14973 ExtraReductions.emplace_back(RedOp, RdxVal); 14974 } 14975 } 14976 for (auto &Pair : ExternallyUsedValues) { 14977 // Add each externally used value to the final reduction. 14978 for (auto *I : Pair.second) 14979 ExtraReductions.emplace_back(I, Pair.first); 14980 } 14981 // Iterate through all not-vectorized reduction values/extra arguments. 14982 bool InitStep = true; 14983 while (ExtraReductions.size() > 1) { 14984 VectorizedTree = ExtraReductions.front().second; 14985 SmallVector<std::pair<Instruction *, Value *>> NewReds = 14986 FinalGen(ExtraReductions, InitStep); 14987 ExtraReductions.swap(NewReds); 14988 InitStep = false; 14989 } 14990 VectorizedTree = ExtraReductions.front().second; 14991 14992 ReductionRoot->replaceAllUsesWith(VectorizedTree); 14993 14994 // The original scalar reduction is expected to have no remaining 14995 // uses outside the reduction tree itself. Assert that we got this 14996 // correct, replace internal uses with undef, and mark for eventual 14997 // deletion. 14998 #ifndef NDEBUG 14999 SmallSet<Value *, 4> IgnoreSet; 15000 for (ArrayRef<Value *> RdxOps : ReductionOps) 15001 IgnoreSet.insert(RdxOps.begin(), RdxOps.end()); 15002 #endif 15003 for (ArrayRef<Value *> RdxOps : ReductionOps) { 15004 for (Value *Ignore : RdxOps) { 15005 if (!Ignore) 15006 continue; 15007 #ifndef NDEBUG 15008 for (auto *U : Ignore->users()) { 15009 assert(IgnoreSet.count(U) && 15010 "All users must be either in the reduction ops list."); 15011 } 15012 #endif 15013 if (!Ignore->use_empty()) { 15014 Value *Undef = UndefValue::get(Ignore->getType()); 15015 Ignore->replaceAllUsesWith(Undef); 15016 } 15017 V.eraseInstruction(cast<Instruction>(Ignore)); 15018 } 15019 } 15020 } else if (!CheckForReusedReductionOps) { 15021 for (ReductionOpsType &RdxOps : ReductionOps) 15022 for (Value *RdxOp : RdxOps) 15023 V.analyzedReductionRoot(cast<Instruction>(RdxOp)); 15024 } 15025 return VectorizedTree; 15026 } 15027 15028 private: 15029 /// Calculate the cost of a reduction. 15030 InstructionCost getReductionCost(TargetTransformInfo *TTI, 15031 ArrayRef<Value *> ReducedVals, 15032 bool IsCmpSelMinMax, unsigned ReduxWidth, 15033 FastMathFlags FMF) { 15034 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 15035 Type *ScalarTy = ReducedVals.front()->getType(); 15036 FixedVectorType *VectorTy = FixedVectorType::get(ScalarTy, ReduxWidth); 15037 InstructionCost VectorCost = 0, ScalarCost; 15038 // If all of the reduced values are constant, the vector cost is 0, since 15039 // the reduction value can be calculated at the compile time. 
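    // E.g., reduce.add(<4 x i32> <i32 1, i32 2, i32 3, i32 4>) folds to 10 at
    // compile time, so VectorCost stays 0 and the returned
    // VectorCost - ScalarCost is non-positive, strongly favoring the
    // vectorized form.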
15040 bool AllConsts = allConstant(ReducedVals); 15041 auto EvaluateScalarCost = [&](function_ref<InstructionCost()> GenCostFn) { 15042 InstructionCost Cost = 0; 15043 // Scalar cost is repeated for N-1 elements. 15044 int Cnt = ReducedVals.size(); 15045 for (Value *RdxVal : ReducedVals) { 15046 if (Cnt == 1) 15047 break; 15048 --Cnt; 15049 if (RdxVal->hasNUsesOrMore(IsCmpSelMinMax ? 3 : 2)) { 15050 Cost += GenCostFn(); 15051 continue; 15052 } 15053 InstructionCost ScalarCost = 0; 15054 for (User *U : RdxVal->users()) { 15055 auto *RdxOp = cast<Instruction>(U); 15056 if (hasRequiredNumberOfUses(IsCmpSelMinMax, RdxOp)) { 15057 ScalarCost += TTI->getInstructionCost(RdxOp, CostKind); 15058 continue; 15059 } 15060 ScalarCost = InstructionCost::getInvalid(); 15061 break; 15062 } 15063 if (ScalarCost.isValid()) 15064 Cost += ScalarCost; 15065 else 15066 Cost += GenCostFn(); 15067 } 15068 return Cost; 15069 }; 15070 switch (RdxKind) { 15071 case RecurKind::Add: 15072 case RecurKind::Mul: 15073 case RecurKind::Or: 15074 case RecurKind::And: 15075 case RecurKind::Xor: 15076 case RecurKind::FAdd: 15077 case RecurKind::FMul: { 15078 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind); 15079 if (!AllConsts) 15080 VectorCost = 15081 TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF, CostKind); 15082 ScalarCost = EvaluateScalarCost([&]() { 15083 return TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy, CostKind); 15084 }); 15085 break; 15086 } 15087 case RecurKind::FMax: 15088 case RecurKind::FMin: 15089 case RecurKind::FMaximum: 15090 case RecurKind::FMinimum: 15091 case RecurKind::SMax: 15092 case RecurKind::SMin: 15093 case RecurKind::UMax: 15094 case RecurKind::UMin: { 15095 Intrinsic::ID Id = getMinMaxReductionIntrinsicOp(RdxKind); 15096 if (!AllConsts) 15097 VectorCost = TTI->getMinMaxReductionCost(Id, VectorTy, FMF, CostKind); 15098 ScalarCost = EvaluateScalarCost([&]() { 15099 IntrinsicCostAttributes ICA(Id, ScalarTy, {ScalarTy, ScalarTy}, FMF); 15100 return TTI->getIntrinsicInstrCost(ICA, CostKind); 15101 }); 15102 break; 15103 } 15104 default: 15105 llvm_unreachable("Expected arithmetic or min/max reduction operation"); 15106 } 15107 15108 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost 15109 << " for reduction of " << shortBundleName(ReducedVals) 15110 << " (It is a splitting reduction)\n"); 15111 return VectorCost - ScalarCost; 15112 } 15113 15114 /// Emit a horizontal reduction of the vectorized value. 15115 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder, 15116 unsigned ReduxWidth, const TargetTransformInfo *TTI) { 15117 assert(VectorizedValue && "Need to have a vectorized tree node"); 15118 assert(isPowerOf2_32(ReduxWidth) && 15119 "We only handle power-of-two reductions for now"); 15120 assert(RdxKind != RecurKind::FMulAdd && 15121 "A call to the llvm.fmuladd intrinsic is not handled yet"); 15122 15123 ++NumVectorInstructions; 15124 return createSimpleTargetReduction(Builder, VectorizedValue, RdxKind); 15125 } 15126 15127 /// Emits optimized code for unique scalar value reused \p Cnt times. 
15128 Value *emitScaleForReusedOps(Value *VectorizedValue, IRBuilderBase &Builder, 15129 unsigned Cnt) { 15130 assert(IsSupportedHorRdxIdentityOp && 15131 "The optimization of matched scalar identity horizontal reductions " 15132 "must be supported."); 15133 switch (RdxKind) { 15134 case RecurKind::Add: { 15135 // res = mul vv, n 15136 Value *Scale = ConstantInt::get(VectorizedValue->getType(), Cnt); 15137 LLVM_DEBUG(dbgs() << "SLP: Add (to-mul) " << Cnt << "of " 15138 << VectorizedValue << ". (HorRdx)\n"); 15139 return Builder.CreateMul(VectorizedValue, Scale); 15140 } 15141 case RecurKind::Xor: { 15142 // res = n % 2 ? 0 : vv 15143 LLVM_DEBUG(dbgs() << "SLP: Xor " << Cnt << "of " << VectorizedValue 15144 << ". (HorRdx)\n"); 15145 if (Cnt % 2 == 0) 15146 return Constant::getNullValue(VectorizedValue->getType()); 15147 return VectorizedValue; 15148 } 15149 case RecurKind::FAdd: { 15150 // res = fmul v, n 15151 Value *Scale = ConstantFP::get(VectorizedValue->getType(), Cnt); 15152 LLVM_DEBUG(dbgs() << "SLP: FAdd (to-fmul) " << Cnt << "of " 15153 << VectorizedValue << ". (HorRdx)\n"); 15154 return Builder.CreateFMul(VectorizedValue, Scale); 15155 } 15156 case RecurKind::And: 15157 case RecurKind::Or: 15158 case RecurKind::SMax: 15159 case RecurKind::SMin: 15160 case RecurKind::UMax: 15161 case RecurKind::UMin: 15162 case RecurKind::FMax: 15163 case RecurKind::FMin: 15164 case RecurKind::FMaximum: 15165 case RecurKind::FMinimum: 15166 // res = vv 15167 return VectorizedValue; 15168 case RecurKind::Mul: 15169 case RecurKind::FMul: 15170 case RecurKind::FMulAdd: 15171 case RecurKind::IAnyOf: 15172 case RecurKind::FAnyOf: 15173 case RecurKind::None: 15174 llvm_unreachable("Unexpected reduction kind for repeated scalar."); 15175 } 15176 return nullptr; 15177 } 15178 15179 /// Emits actual operation for the scalar identity values, found during 15180 /// horizontal reduction analysis. 15181 Value *emitReusedOps(Value *VectorizedValue, IRBuilderBase &Builder, 15182 ArrayRef<Value *> VL, 15183 const MapVector<Value *, unsigned> &SameValuesCounter, 15184 const DenseMap<Value *, Value *> &TrackedToOrig) { 15185 assert(IsSupportedHorRdxIdentityOp && 15186 "The optimization of matched scalar identity horizontal reductions " 15187 "must be supported."); 15188 switch (RdxKind) { 15189 case RecurKind::Add: { 15190 // root = mul prev_root, <1, 1, n, 1> 15191 SmallVector<Constant *> Vals; 15192 for (Value *V : VL) { 15193 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.find(V)->second); 15194 Vals.push_back(ConstantInt::get(V->getType(), Cnt, /*IsSigned=*/false)); 15195 } 15196 auto *Scale = ConstantVector::get(Vals); 15197 LLVM_DEBUG(dbgs() << "SLP: Add (to-mul) " << Scale << "of " 15198 << VectorizedValue << ". (HorRdx)\n"); 15199 return Builder.CreateMul(VectorizedValue, Scale); 15200 } 15201 case RecurKind::And: 15202 case RecurKind::Or: 15203 // No need for multiple or/and(s). 15204 LLVM_DEBUG(dbgs() << "SLP: And/or of same " << VectorizedValue 15205 << ". (HorRdx)\n"); 15206 return VectorizedValue; 15207 case RecurKind::SMax: 15208 case RecurKind::SMin: 15209 case RecurKind::UMax: 15210 case RecurKind::UMin: 15211 case RecurKind::FMax: 15212 case RecurKind::FMin: 15213 case RecurKind::FMaximum: 15214 case RecurKind::FMinimum: 15215 // No need for multiple min/max(s) of the same value. 15216 LLVM_DEBUG(dbgs() << "SLP: Max/min of same " << VectorizedValue 15217 << ". 
(HorRdx)\n"); 15218 return VectorizedValue; 15219 case RecurKind::Xor: { 15220 // Replace values with even number of repeats with 0, since 15221 // x xor x = 0. 15222 // root = shuffle prev_root, zeroinitalizer, <0, 1, 2, vf, 4, vf, 5, 6, 15223 // 7>, if elements 4th and 6th elements have even number of repeats. 15224 SmallVector<int> Mask( 15225 cast<FixedVectorType>(VectorizedValue->getType())->getNumElements(), 15226 PoisonMaskElem); 15227 std::iota(Mask.begin(), Mask.end(), 0); 15228 bool NeedShuffle = false; 15229 for (unsigned I = 0, VF = VL.size(); I < VF; ++I) { 15230 Value *V = VL[I]; 15231 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.find(V)->second); 15232 if (Cnt % 2 == 0) { 15233 Mask[I] = VF; 15234 NeedShuffle = true; 15235 } 15236 } 15237 LLVM_DEBUG(dbgs() << "SLP: Xor <"; for (int I 15238 : Mask) dbgs() 15239 << I << " "; 15240 dbgs() << "> of " << VectorizedValue << ". (HorRdx)\n"); 15241 if (NeedShuffle) 15242 VectorizedValue = Builder.CreateShuffleVector( 15243 VectorizedValue, 15244 ConstantVector::getNullValue(VectorizedValue->getType()), Mask); 15245 return VectorizedValue; 15246 } 15247 case RecurKind::FAdd: { 15248 // root = fmul prev_root, <1.0, 1.0, n.0, 1.0> 15249 SmallVector<Constant *> Vals; 15250 for (Value *V : VL) { 15251 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.find(V)->second); 15252 Vals.push_back(ConstantFP::get(V->getType(), Cnt)); 15253 } 15254 auto *Scale = ConstantVector::get(Vals); 15255 return Builder.CreateFMul(VectorizedValue, Scale); 15256 } 15257 case RecurKind::Mul: 15258 case RecurKind::FMul: 15259 case RecurKind::FMulAdd: 15260 case RecurKind::IAnyOf: 15261 case RecurKind::FAnyOf: 15262 case RecurKind::None: 15263 llvm_unreachable("Unexpected reduction kind for reused scalars."); 15264 } 15265 return nullptr; 15266 } 15267 }; 15268 } // end anonymous namespace 15269 15270 static std::optional<unsigned> getAggregateSize(Instruction *InsertInst) { 15271 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst)) 15272 return cast<FixedVectorType>(IE->getType())->getNumElements(); 15273 15274 unsigned AggregateSize = 1; 15275 auto *IV = cast<InsertValueInst>(InsertInst); 15276 Type *CurrentType = IV->getType(); 15277 do { 15278 if (auto *ST = dyn_cast<StructType>(CurrentType)) { 15279 for (auto *Elt : ST->elements()) 15280 if (Elt != ST->getElementType(0)) // check homogeneity 15281 return std::nullopt; 15282 AggregateSize *= ST->getNumElements(); 15283 CurrentType = ST->getElementType(0); 15284 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) { 15285 AggregateSize *= AT->getNumElements(); 15286 CurrentType = AT->getElementType(); 15287 } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) { 15288 AggregateSize *= VT->getNumElements(); 15289 return AggregateSize; 15290 } else if (CurrentType->isSingleValueType()) { 15291 return AggregateSize; 15292 } else { 15293 return std::nullopt; 15294 } 15295 } while (true); 15296 } 15297 15298 static void findBuildAggregate_rec(Instruction *LastInsertInst, 15299 TargetTransformInfo *TTI, 15300 SmallVectorImpl<Value *> &BuildVectorOpds, 15301 SmallVectorImpl<Value *> &InsertElts, 15302 unsigned OperandOffset) { 15303 do { 15304 Value *InsertedOperand = LastInsertInst->getOperand(1); 15305 std::optional<unsigned> OperandIndex = 15306 getInsertIndex(LastInsertInst, OperandOffset); 15307 if (!OperandIndex) 15308 return; 15309 if (isa<InsertElementInst, InsertValueInst>(InsertedOperand)) { 15310 findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI, 15311 
BuildVectorOpds, InsertElts, *OperandIndex); 15312 15313 } else { 15314 BuildVectorOpds[*OperandIndex] = InsertedOperand; 15315 InsertElts[*OperandIndex] = LastInsertInst; 15316 } 15317 LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0)); 15318 } while (LastInsertInst != nullptr && 15319 isa<InsertValueInst, InsertElementInst>(LastInsertInst) && 15320 LastInsertInst->hasOneUse()); 15321 } 15322 15323 /// Recognize construction of vectors like 15324 /// %ra = insertelement <4 x float> poison, float %s0, i32 0 15325 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1 15326 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2 15327 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3 15328 /// starting from the last insertelement or insertvalue instruction. 15329 /// 15330 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>}, 15331 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on. 15332 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples. 15333 /// 15334 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type. 15335 /// 15336 /// \return true if it matches. 15337 static bool findBuildAggregate(Instruction *LastInsertInst, 15338 TargetTransformInfo *TTI, 15339 SmallVectorImpl<Value *> &BuildVectorOpds, 15340 SmallVectorImpl<Value *> &InsertElts) { 15341 15342 assert((isa<InsertElementInst>(LastInsertInst) || 15343 isa<InsertValueInst>(LastInsertInst)) && 15344 "Expected insertelement or insertvalue instruction!"); 15345 15346 assert((BuildVectorOpds.empty() && InsertElts.empty()) && 15347 "Expected empty result vectors!"); 15348 15349 std::optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst); 15350 if (!AggregateSize) 15351 return false; 15352 BuildVectorOpds.resize(*AggregateSize); 15353 InsertElts.resize(*AggregateSize); 15354 15355 findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 0); 15356 llvm::erase(BuildVectorOpds, nullptr); 15357 llvm::erase(InsertElts, nullptr); 15358 if (BuildVectorOpds.size() >= 2) 15359 return true; 15360 15361 return false; 15362 } 15363 15364 /// Try and get a reduction instruction from a phi node. 15365 /// 15366 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions 15367 /// if they come from either \p ParentBB or a containing loop latch. 15368 /// 15369 /// \returns A candidate reduction value if possible, or \code nullptr \endcode 15370 /// if not possible. 15371 static Instruction *getReductionInstr(const DominatorTree *DT, PHINode *P, 15372 BasicBlock *ParentBB, LoopInfo *LI) { 15373 // There are situations where the reduction value is not dominated by the 15374 // reduction phi. Vectorizing such cases has been reported to cause 15375 // miscompiles. See PR25787. 15376 auto DominatedReduxValue = [&](Value *R) { 15377 return isa<Instruction>(R) && 15378 DT->dominates(P->getParent(), cast<Instruction>(R)->getParent()); 15379 }; 15380 15381 Instruction *Rdx = nullptr; 15382 15383 // Return the incoming value if it comes from the same BB as the phi node. 15384 if (P->getIncomingBlock(0) == ParentBB) { 15385 Rdx = dyn_cast<Instruction>(P->getIncomingValue(0)); 15386 } else if (P->getIncomingBlock(1) == ParentBB) { 15387 Rdx = dyn_cast<Instruction>(P->getIncomingValue(1)); 15388 } 15389 15390 if (Rdx && DominatedReduxValue(Rdx)) 15391 return Rdx; 15392 15393 // Otherwise, check whether we have a loop latch to look at. 
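  // Illustrative IR (names are made up): the reduction update arrives from
  // the loop latch rather than from ParentBB itself:
  //   header:
  //     %sum = phi i32 [ 0, %entry ], [ %sum.next, %latch ]
  //     ...
  //   latch:
  //     %sum.next = add i32 %sum, %val
  //     br i1 %cond, label %header, label %exit
  // With ParentBB == %header, the latch check below returns %sum.next.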
15394 Loop *BBL = LI->getLoopFor(ParentBB); 15395 if (!BBL) 15396 return nullptr; 15397 BasicBlock *BBLatch = BBL->getLoopLatch(); 15398 if (!BBLatch) 15399 return nullptr; 15400 15401 // There is a loop latch, return the incoming value if it comes from 15402 // that. This reduction pattern occasionally turns up. 15403 if (P->getIncomingBlock(0) == BBLatch) { 15404 Rdx = dyn_cast<Instruction>(P->getIncomingValue(0)); 15405 } else if (P->getIncomingBlock(1) == BBLatch) { 15406 Rdx = dyn_cast<Instruction>(P->getIncomingValue(1)); 15407 } 15408 15409 if (Rdx && DominatedReduxValue(Rdx)) 15410 return Rdx; 15411 15412 return nullptr; 15413 } 15414 15415 static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) { 15416 if (match(I, m_BinOp(m_Value(V0), m_Value(V1)))) 15417 return true; 15418 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1)))) 15419 return true; 15420 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1)))) 15421 return true; 15422 if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(V0), m_Value(V1)))) 15423 return true; 15424 if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(V0), m_Value(V1)))) 15425 return true; 15426 if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1)))) 15427 return true; 15428 if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1)))) 15429 return true; 15430 if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1)))) 15431 return true; 15432 if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1)))) 15433 return true; 15434 return false; 15435 } 15436 15437 /// We could have an initial reduction that is not an add. 15438 /// r *= v1 + v2 + v3 + v4 15439 /// In such a case start looking for a tree rooted in the first '+'. 15440 /// \Returns the new root if found, which may be nullptr if not an instruction. 15441 static Instruction *tryGetSecondaryReductionRoot(PHINode *Phi, 15442 Instruction *Root) { 15443 assert((isa<BinaryOperator>(Root) || isa<SelectInst>(Root) || 15444 isa<IntrinsicInst>(Root)) && 15445 "Expected binop, select, or intrinsic for reduction matching"); 15446 Value *LHS = 15447 Root->getOperand(HorizontalReduction::getFirstOperandIndex(Root)); 15448 Value *RHS = 15449 Root->getOperand(HorizontalReduction::getFirstOperandIndex(Root) + 1); 15450 if (LHS == Phi) 15451 return dyn_cast<Instruction>(RHS); 15452 if (RHS == Phi) 15453 return dyn_cast<Instruction>(LHS); 15454 return nullptr; 15455 } 15456 15457 /// \p Returns the first operand of \p I that does not match \p Phi. If 15458 /// operand is not an instruction it returns nullptr. 15459 static Instruction *getNonPhiOperand(Instruction *I, PHINode *Phi) { 15460 Value *Op0 = nullptr; 15461 Value *Op1 = nullptr; 15462 if (!matchRdxBop(I, Op0, Op1)) 15463 return nullptr; 15464 return dyn_cast<Instruction>(Op0 == Phi ? Op1 : Op0); 15465 } 15466 15467 /// \Returns true if \p I is a candidate instruction for reduction vectorization. 
15468 static bool isReductionCandidate(Instruction *I) { 15469 bool IsSelect = match(I, m_Select(m_Value(), m_Value(), m_Value())); 15470 Value *B0 = nullptr, *B1 = nullptr; 15471 bool IsBinop = matchRdxBop(I, B0, B1); 15472 return IsBinop || IsSelect; 15473 } 15474 15475 bool SLPVectorizerPass::vectorizeHorReduction( 15476 PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R, TargetTransformInfo *TTI, 15477 SmallVectorImpl<WeakTrackingVH> &PostponedInsts) { 15478 if (!ShouldVectorizeHor) 15479 return false; 15480 bool TryOperandsAsNewSeeds = P && isa<BinaryOperator>(Root); 15481 15482 if (Root->getParent() != BB || isa<PHINode>(Root)) 15483 return false; 15484 15485 // If we can find a secondary reduction root, use that instead. 15486 auto SelectRoot = [&]() { 15487 if (TryOperandsAsNewSeeds && isReductionCandidate(Root) && 15488 HorizontalReduction::getRdxKind(Root) != RecurKind::None) 15489 if (Instruction *NewRoot = tryGetSecondaryReductionRoot(P, Root)) 15490 return NewRoot; 15491 return Root; 15492 }; 15493 15494 // Start analysis starting from Root instruction. If horizontal reduction is 15495 // found, try to vectorize it. If it is not a horizontal reduction or 15496 // vectorization is not possible or not effective, and currently analyzed 15497 // instruction is a binary operation, try to vectorize the operands, using 15498 // pre-order DFS traversal order. If the operands were not vectorized, repeat 15499 // the same procedure considering each operand as a possible root of the 15500 // horizontal reduction. 15501 // Interrupt the process if the Root instruction itself was vectorized or all 15502 // sub-trees not higher that RecursionMaxDepth were analyzed/vectorized. 15503 // If a horizintal reduction was not matched or vectorized we collect 15504 // instructions for possible later attempts for vectorization. 15505 std::queue<std::pair<Instruction *, unsigned>> Stack; 15506 Stack.emplace(SelectRoot(), 0); 15507 SmallPtrSet<Value *, 8> VisitedInstrs; 15508 bool Res = false; 15509 auto &&TryToReduce = [this, TTI, &R](Instruction *Inst) -> Value * { 15510 if (R.isAnalyzedReductionRoot(Inst)) 15511 return nullptr; 15512 if (!isReductionCandidate(Inst)) 15513 return nullptr; 15514 HorizontalReduction HorRdx; 15515 if (!HorRdx.matchAssociativeReduction(R, Inst, *SE, *DL, *TLI)) 15516 return nullptr; 15517 return HorRdx.tryToReduce(R, TTI, *TLI); 15518 }; 15519 auto TryAppendToPostponedInsts = [&](Instruction *FutureSeed) { 15520 if (TryOperandsAsNewSeeds && FutureSeed == Root) { 15521 FutureSeed = getNonPhiOperand(Root, P); 15522 if (!FutureSeed) 15523 return false; 15524 } 15525 // Do not collect CmpInst or InsertElementInst/InsertValueInst as their 15526 // analysis is done separately. 15527 if (!isa<CmpInst, InsertElementInst, InsertValueInst>(FutureSeed)) 15528 PostponedInsts.push_back(FutureSeed); 15529 return true; 15530 }; 15531 15532 while (!Stack.empty()) { 15533 Instruction *Inst; 15534 unsigned Level; 15535 std::tie(Inst, Level) = Stack.front(); 15536 Stack.pop(); 15537 // Do not try to analyze instruction that has already been vectorized. 15538 // This may happen when we vectorize instruction operands on a previous 15539 // iteration while stack was populated before that happened. 15540 if (R.isDeleted(Inst)) 15541 continue; 15542 if (Value *VectorizedV = TryToReduce(Inst)) { 15543 Res = true; 15544 if (auto *I = dyn_cast<Instruction>(VectorizedV)) { 15545 // Try to find another reduction. 
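        // The scalar result of the just-vectorized reduction may itself feed
        // an enclosing reduction, e.g. r = (a + b + c + d) + (e + f + g + h),
        // so it is pushed back onto the worklist at the same depth.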
15546 Stack.emplace(I, Level); 15547 continue; 15548 } 15549 } else { 15550 // We could not vectorize `Inst` so try to use it as a future seed. 15551 if (!TryAppendToPostponedInsts(Inst)) { 15552 assert(Stack.empty() && "Expected empty stack"); 15553 break; 15554 } 15555 } 15556 15557 // Try to vectorize operands. 15558 // Continue analysis for the instruction from the same basic block only to 15559 // save compile time. 15560 if (++Level < RecursionMaxDepth) 15561 for (auto *Op : Inst->operand_values()) 15562 if (VisitedInstrs.insert(Op).second) 15563 if (auto *I = dyn_cast<Instruction>(Op)) 15564 // Do not try to vectorize CmpInst operands, this is done 15565 // separately. 15566 if (!isa<PHINode, CmpInst, InsertElementInst, InsertValueInst>(I) && 15567 !R.isDeleted(I) && I->getParent() == BB) 15568 Stack.emplace(I, Level); 15569 } 15570 return Res; 15571 } 15572 15573 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Instruction *Root, 15574 BasicBlock *BB, BoUpSLP &R, 15575 TargetTransformInfo *TTI) { 15576 SmallVector<WeakTrackingVH> PostponedInsts; 15577 bool Res = vectorizeHorReduction(P, Root, BB, R, TTI, PostponedInsts); 15578 Res |= tryToVectorize(PostponedInsts, R); 15579 return Res; 15580 } 15581 15582 bool SLPVectorizerPass::tryToVectorize(ArrayRef<WeakTrackingVH> Insts, 15583 BoUpSLP &R) { 15584 bool Res = false; 15585 for (Value *V : Insts) 15586 if (auto *Inst = dyn_cast<Instruction>(V); Inst && !R.isDeleted(Inst)) 15587 Res |= tryToVectorize(Inst, R); 15588 return Res; 15589 } 15590 15591 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI, 15592 BasicBlock *BB, BoUpSLP &R) { 15593 if (!R.canMapToVector(IVI->getType())) 15594 return false; 15595 15596 SmallVector<Value *, 16> BuildVectorOpds; 15597 SmallVector<Value *, 16> BuildVectorInsts; 15598 if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts)) 15599 return false; 15600 15601 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n"); 15602 // Aggregate value is unlikely to be processed in vector register. 15603 return tryToVectorizeList(BuildVectorOpds, R); 15604 } 15605 15606 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI, 15607 BasicBlock *BB, BoUpSLP &R) { 15608 SmallVector<Value *, 16> BuildVectorInsts; 15609 SmallVector<Value *, 16> BuildVectorOpds; 15610 SmallVector<int> Mask; 15611 if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts) || 15612 (llvm::all_of( 15613 BuildVectorOpds, 15614 [](Value *V) { return isa<ExtractElementInst, UndefValue>(V); }) && 15615 isFixedVectorShuffle(BuildVectorOpds, Mask))) 15616 return false; 15617 15618 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n"); 15619 return tryToVectorizeList(BuildVectorInsts, R); 15620 } 15621 15622 template <typename T> 15623 static bool tryToVectorizeSequence( 15624 SmallVectorImpl<T *> &Incoming, function_ref<bool(T *, T *)> Comparator, 15625 function_ref<bool(T *, T *)> AreCompatible, 15626 function_ref<bool(ArrayRef<T *>, bool)> TryToVectorizeHelper, 15627 bool MaxVFOnly, BoUpSLP &R) { 15628 bool Changed = false; 15629 // Sort by type, parent, operands. 15630 stable_sort(Incoming, Comparator); 15631 15632 // Try to vectorize elements base on their type. 15633 SmallVector<T *> Candidates; 15634 for (auto *IncIt = Incoming.begin(), *E = Incoming.end(); IncIt != E;) { 15635 // Look for the next elements with the same type, parent and operand 15636 // kinds. 
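    // Since Incoming was stable_sort'ed with Comparator above, elements that
    // AreCompatible considers equivalent are expected to be adjacent, so the
    // scan below just extends SameTypeIt to form the bundle
    // [IncIt, SameTypeIt).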
15637 auto *SameTypeIt = IncIt; 15638 while (SameTypeIt != E && AreCompatible(*SameTypeIt, *IncIt)) 15639 ++SameTypeIt; 15640 15641 // Try to vectorize them. 15642 unsigned NumElts = (SameTypeIt - IncIt); 15643 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at nodes (" 15644 << NumElts << ")\n"); 15645 // The vectorization is a 3-state attempt: 15646 // 1. Try to vectorize instructions with the same/alternate opcodes with the 15647 // size of maximal register at first. 15648 // 2. Try to vectorize remaining instructions with the same type, if 15649 // possible. This may result in the better vectorization results rather than 15650 // if we try just to vectorize instructions with the same/alternate opcodes. 15651 // 3. Final attempt to try to vectorize all instructions with the 15652 // same/alternate ops only, this may result in some extra final 15653 // vectorization. 15654 if (NumElts > 1 && 15655 TryToVectorizeHelper(ArrayRef(IncIt, NumElts), MaxVFOnly)) { 15656 // Success start over because instructions might have been changed. 15657 Changed = true; 15658 } else { 15659 /// \Returns the minimum number of elements that we will attempt to 15660 /// vectorize. 15661 auto GetMinNumElements = [&R](Value *V) { 15662 unsigned EltSize = R.getVectorElementSize(V); 15663 return std::max(2U, R.getMaxVecRegSize() / EltSize); 15664 }; 15665 if (NumElts < GetMinNumElements(*IncIt) && 15666 (Candidates.empty() || 15667 Candidates.front()->getType() == (*IncIt)->getType())) { 15668 Candidates.append(IncIt, std::next(IncIt, NumElts)); 15669 } 15670 } 15671 // Final attempt to vectorize instructions with the same types. 15672 if (Candidates.size() > 1 && 15673 (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType())) { 15674 if (TryToVectorizeHelper(Candidates, /*MaxVFOnly=*/false)) { 15675 // Success start over because instructions might have been changed. 15676 Changed = true; 15677 } else if (MaxVFOnly) { 15678 // Try to vectorize using small vectors. 15679 for (auto *It = Candidates.begin(), *End = Candidates.end(); 15680 It != End;) { 15681 auto *SameTypeIt = It; 15682 while (SameTypeIt != End && AreCompatible(*SameTypeIt, *It)) 15683 ++SameTypeIt; 15684 unsigned NumElts = (SameTypeIt - It); 15685 if (NumElts > 1 && TryToVectorizeHelper(ArrayRef(It, NumElts), 15686 /*MaxVFOnly=*/false)) 15687 Changed = true; 15688 It = SameTypeIt; 15689 } 15690 } 15691 Candidates.clear(); 15692 } 15693 15694 // Start over at the next instruction of a different type (or the end). 15695 IncIt = SameTypeIt; 15696 } 15697 return Changed; 15698 } 15699 15700 /// Compare two cmp instructions. If IsCompatibility is true, function returns 15701 /// true if 2 cmps have same/swapped predicates and mos compatible corresponding 15702 /// operands. If IsCompatibility is false, function implements strict weak 15703 /// ordering relation between two cmp instructions, returning true if the first 15704 /// instruction is "less" than the second, i.e. its predicate is less than the 15705 /// predicate of the second or the operands IDs are less than the operands IDs 15706 /// of the second cmp instruction. 
15707 template <bool IsCompatibility> 15708 static bool compareCmp(Value *V, Value *V2, TargetLibraryInfo &TLI, 15709 const DominatorTree &DT) { 15710 assert(isValidElementType(V->getType()) && 15711 isValidElementType(V2->getType()) && 15712 "Expected valid element types only."); 15713 if (V == V2) 15714 return IsCompatibility; 15715 auto *CI1 = cast<CmpInst>(V); 15716 auto *CI2 = cast<CmpInst>(V2); 15717 if (CI1->getOperand(0)->getType()->getTypeID() < 15718 CI2->getOperand(0)->getType()->getTypeID()) 15719 return !IsCompatibility; 15720 if (CI1->getOperand(0)->getType()->getTypeID() > 15721 CI2->getOperand(0)->getType()->getTypeID()) 15722 return false; 15723 CmpInst::Predicate Pred1 = CI1->getPredicate(); 15724 CmpInst::Predicate Pred2 = CI2->getPredicate(); 15725 CmpInst::Predicate SwapPred1 = CmpInst::getSwappedPredicate(Pred1); 15726 CmpInst::Predicate SwapPred2 = CmpInst::getSwappedPredicate(Pred2); 15727 CmpInst::Predicate BasePred1 = std::min(Pred1, SwapPred1); 15728 CmpInst::Predicate BasePred2 = std::min(Pred2, SwapPred2); 15729 if (BasePred1 < BasePred2) 15730 return !IsCompatibility; 15731 if (BasePred1 > BasePred2) 15732 return false; 15733 // Compare operands. 15734 bool CI1Preds = Pred1 == BasePred1; 15735 bool CI2Preds = Pred2 == BasePred1; 15736 for (int I = 0, E = CI1->getNumOperands(); I < E; ++I) { 15737 auto *Op1 = CI1->getOperand(CI1Preds ? I : E - I - 1); 15738 auto *Op2 = CI2->getOperand(CI2Preds ? I : E - I - 1); 15739 if (Op1 == Op2) 15740 continue; 15741 if (Op1->getValueID() < Op2->getValueID()) 15742 return !IsCompatibility; 15743 if (Op1->getValueID() > Op2->getValueID()) 15744 return false; 15745 if (auto *I1 = dyn_cast<Instruction>(Op1)) 15746 if (auto *I2 = dyn_cast<Instruction>(Op2)) { 15747 if (IsCompatibility) { 15748 if (I1->getParent() != I2->getParent()) 15749 return false; 15750 } else { 15751 // Try to compare nodes with same parent. 15752 DomTreeNodeBase<BasicBlock> *NodeI1 = DT.getNode(I1->getParent()); 15753 DomTreeNodeBase<BasicBlock> *NodeI2 = DT.getNode(I2->getParent()); 15754 if (!NodeI1) 15755 return NodeI2 != nullptr; 15756 if (!NodeI2) 15757 return false; 15758 assert((NodeI1 == NodeI2) == 15759 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) && 15760 "Different nodes should have different DFS numbers"); 15761 if (NodeI1 != NodeI2) 15762 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn(); 15763 } 15764 InstructionsState S = getSameOpcode({I1, I2}, TLI); 15765 if (S.getOpcode() && (IsCompatibility || !S.isAltShuffle())) 15766 continue; 15767 if (IsCompatibility) 15768 return false; 15769 if (I1->getOpcode() != I2->getOpcode()) 15770 return I1->getOpcode() < I2->getOpcode(); 15771 } 15772 } 15773 return IsCompatibility; 15774 } 15775 15776 template <typename ItT> 15777 bool SLPVectorizerPass::vectorizeCmpInsts(iterator_range<ItT> CmpInsts, 15778 BasicBlock *BB, BoUpSLP &R) { 15779 bool Changed = false; 15780 // Try to find reductions first. 15781 for (CmpInst *I : CmpInsts) { 15782 if (R.isDeleted(I)) 15783 continue; 15784 for (Value *Op : I->operands()) 15785 if (auto *RootOp = dyn_cast<Instruction>(Op)) 15786 Changed |= vectorizeRootInstruction(nullptr, RootOp, BB, R, TTI); 15787 } 15788 // Try to vectorize operands as vector bundles. 15789 for (CmpInst *I : CmpInsts) { 15790 if (R.isDeleted(I)) 15791 continue; 15792 Changed |= tryToVectorize(I, R); 15793 } 15794 // Try to vectorize list of compares. 15795 // Sort by type, compare predicate, etc. 
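  // compareCmp<false> supplies the strict weak ordering used for sorting and
  // compareCmp<true> the matching compatibility test; both canonicalize
  // swapped predicates, so e.g. "icmp sgt i32 %a, %b" and
  // "icmp slt i32 %b, %a" land in the same group of candidates.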
15796 auto CompareSorter = [&](Value *V, Value *V2) { 15797 if (V == V2) 15798 return false; 15799 return compareCmp<false>(V, V2, *TLI, *DT); 15800 }; 15801 15802 auto AreCompatibleCompares = [&](Value *V1, Value *V2) { 15803 if (V1 == V2) 15804 return true; 15805 return compareCmp<true>(V1, V2, *TLI, *DT); 15806 }; 15807 15808 SmallVector<Value *> Vals; 15809 for (Instruction *V : CmpInsts) 15810 if (!R.isDeleted(V) && isValidElementType(V->getType())) 15811 Vals.push_back(V); 15812 if (Vals.size() <= 1) 15813 return Changed; 15814 Changed |= tryToVectorizeSequence<Value>( 15815 Vals, CompareSorter, AreCompatibleCompares, 15816 [this, &R](ArrayRef<Value *> Candidates, bool MaxVFOnly) { 15817 // Exclude possible reductions from other blocks. 15818 bool ArePossiblyReducedInOtherBlock = any_of(Candidates, [](Value *V) { 15819 return any_of(V->users(), [V](User *U) { 15820 auto *Select = dyn_cast<SelectInst>(U); 15821 return Select && 15822 Select->getParent() != cast<Instruction>(V)->getParent(); 15823 }); 15824 }); 15825 if (ArePossiblyReducedInOtherBlock) 15826 return false; 15827 return tryToVectorizeList(Candidates, R, MaxVFOnly); 15828 }, 15829 /*MaxVFOnly=*/true, R); 15830 return Changed; 15831 } 15832 15833 bool SLPVectorizerPass::vectorizeInserts(InstSetVector &Instructions, 15834 BasicBlock *BB, BoUpSLP &R) { 15835 assert(all_of(Instructions, 15836 [](auto *I) { 15837 return isa<InsertElementInst, InsertValueInst>(I); 15838 }) && 15839 "This function only accepts Insert instructions"); 15840 bool OpsChanged = false; 15841 SmallVector<WeakTrackingVH> PostponedInsts; 15842 // pass1 - try to vectorize reductions only 15843 for (auto *I : reverse(Instructions)) { 15844 if (R.isDeleted(I)) 15845 continue; 15846 OpsChanged |= vectorizeHorReduction(nullptr, I, BB, R, TTI, PostponedInsts); 15847 } 15848 // pass2 - try to match and vectorize a buildvector sequence. 15849 for (auto *I : reverse(Instructions)) { 15850 if (R.isDeleted(I) || isa<CmpInst>(I)) 15851 continue; 15852 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) { 15853 OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R); 15854 } else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) { 15855 OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R); 15856 } 15857 } 15858 // Now try to vectorize postponed instructions. 15859 OpsChanged |= tryToVectorize(PostponedInsts, R); 15860 15861 Instructions.clear(); 15862 return OpsChanged; 15863 } 15864 15865 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { 15866 bool Changed = false; 15867 SmallVector<Value *, 4> Incoming; 15868 SmallPtrSet<Value *, 16> VisitedInstrs; 15869 // Maps phi nodes to the non-phi nodes found in the use tree for each phi 15870 // node. Allows better to identify the chains that can be vectorized in the 15871 // better way. 15872 DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes; 15873 auto PHICompare = [this, &PHIToOpcodes](Value *V1, Value *V2) { 15874 assert(isValidElementType(V1->getType()) && 15875 isValidElementType(V2->getType()) && 15876 "Expected vectorizable types only."); 15877 // It is fine to compare type IDs here, since we expect only vectorizable 15878 // types, like ints, floats and pointers, we don't care about other type. 
15879 if (V1->getType()->getTypeID() < V2->getType()->getTypeID()) 15880 return true; 15881 if (V1->getType()->getTypeID() > V2->getType()->getTypeID()) 15882 return false; 15883 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; 15884 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; 15885 if (Opcodes1.size() < Opcodes2.size()) 15886 return true; 15887 if (Opcodes1.size() > Opcodes2.size()) 15888 return false; 15889 for (int I = 0, E = Opcodes1.size(); I < E; ++I) { 15890 // Undefs are compatible with any other value. 15891 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) { 15892 if (isa<Instruction>(Opcodes1[I])) 15893 return true; 15894 if (isa<Instruction>(Opcodes2[I])) 15895 return false; 15896 if (isa<Constant>(Opcodes1[I]) && !isa<UndefValue>(Opcodes1[I])) 15897 return true; 15898 if (isa<Constant>(Opcodes2[I]) && !isa<UndefValue>(Opcodes2[I])) 15899 return false; 15900 if (isa<UndefValue>(Opcodes1[I]) && isa<UndefValue>(Opcodes2[I])) 15901 continue; 15902 return isa<UndefValue>(Opcodes2[I]); 15903 } 15904 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I])) 15905 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) { 15906 DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent()); 15907 DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent()); 15908 if (!NodeI1) 15909 return NodeI2 != nullptr; 15910 if (!NodeI2) 15911 return false; 15912 assert((NodeI1 == NodeI2) == 15913 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) && 15914 "Different nodes should have different DFS numbers"); 15915 if (NodeI1 != NodeI2) 15916 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn(); 15917 InstructionsState S = getSameOpcode({I1, I2}, *TLI); 15918 if (S.getOpcode() && !S.isAltShuffle()) 15919 continue; 15920 return I1->getOpcode() < I2->getOpcode(); 15921 } 15922 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) 15923 return Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID(); 15924 if (isa<Instruction>(Opcodes1[I])) 15925 return true; 15926 if (isa<Instruction>(Opcodes2[I])) 15927 return false; 15928 if (isa<Constant>(Opcodes1[I])) 15929 return true; 15930 if (isa<Constant>(Opcodes2[I])) 15931 return false; 15932 if (Opcodes1[I]->getValueID() < Opcodes2[I]->getValueID()) 15933 return true; 15934 if (Opcodes1[I]->getValueID() > Opcodes2[I]->getValueID()) 15935 return false; 15936 } 15937 return false; 15938 }; 15939 auto AreCompatiblePHIs = [&PHIToOpcodes, this](Value *V1, Value *V2) { 15940 if (V1 == V2) 15941 return true; 15942 if (V1->getType() != V2->getType()) 15943 return false; 15944 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1]; 15945 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2]; 15946 if (Opcodes1.size() != Opcodes2.size()) 15947 return false; 15948 for (int I = 0, E = Opcodes1.size(); I < E; ++I) { 15949 // Undefs are compatible with any other value. 
15950 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I])) 15951 continue; 15952 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I])) 15953 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) { 15954 if (I1->getParent() != I2->getParent()) 15955 return false; 15956 InstructionsState S = getSameOpcode({I1, I2}, *TLI); 15957 if (S.getOpcode()) 15958 continue; 15959 return false; 15960 } 15961 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I])) 15962 continue; 15963 if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID()) 15964 return false; 15965 } 15966 return true; 15967 }; 15968 15969 bool HaveVectorizedPhiNodes = false; 15970 do { 15971 // Collect the incoming values from the PHIs. 15972 Incoming.clear(); 15973 for (Instruction &I : *BB) { 15974 PHINode *P = dyn_cast<PHINode>(&I); 15975 if (!P) 15976 break; 15977 15978 // No need to analyze deleted, vectorized and non-vectorizable 15979 // instructions. 15980 if (!VisitedInstrs.count(P) && !R.isDeleted(P) && 15981 isValidElementType(P->getType())) 15982 Incoming.push_back(P); 15983 } 15984 15985 if (Incoming.size() <= 1) 15986 break; 15987 15988 // Find the corresponding non-phi nodes for better matching when trying to 15989 // build the tree. 15990 for (Value *V : Incoming) { 15991 SmallVectorImpl<Value *> &Opcodes = 15992 PHIToOpcodes.try_emplace(V).first->getSecond(); 15993 if (!Opcodes.empty()) 15994 continue; 15995 SmallVector<Value *, 4> Nodes(1, V); 15996 SmallPtrSet<Value *, 4> Visited; 15997 while (!Nodes.empty()) { 15998 auto *PHI = cast<PHINode>(Nodes.pop_back_val()); 15999 if (!Visited.insert(PHI).second) 16000 continue; 16001 for (Value *V : PHI->incoming_values()) { 16002 if (auto *PHI1 = dyn_cast<PHINode>((V))) { 16003 Nodes.push_back(PHI1); 16004 continue; 16005 } 16006 Opcodes.emplace_back(V); 16007 } 16008 } 16009 } 16010 16011 HaveVectorizedPhiNodes = tryToVectorizeSequence<Value>( 16012 Incoming, PHICompare, AreCompatiblePHIs, 16013 [this, &R](ArrayRef<Value *> Candidates, bool MaxVFOnly) { 16014 return tryToVectorizeList(Candidates, R, MaxVFOnly); 16015 }, 16016 /*MaxVFOnly=*/true, R); 16017 Changed |= HaveVectorizedPhiNodes; 16018 VisitedInstrs.insert(Incoming.begin(), Incoming.end()); 16019 } while (HaveVectorizedPhiNodes); 16020 16021 VisitedInstrs.clear(); 16022 16023 InstSetVector PostProcessInserts; 16024 SmallSetVector<CmpInst *, 8> PostProcessCmps; 16025 // Vectorizes Inserts in `PostProcessInserts` and if `VecctorizeCmps` is true 16026 // also vectorizes `PostProcessCmps`. 16027 auto VectorizeInsertsAndCmps = [&](bool VectorizeCmps) { 16028 bool Changed = vectorizeInserts(PostProcessInserts, BB, R); 16029 if (VectorizeCmps) { 16030 Changed |= vectorizeCmpInsts(reverse(PostProcessCmps), BB, R); 16031 PostProcessCmps.clear(); 16032 } 16033 PostProcessInserts.clear(); 16034 return Changed; 16035 }; 16036 // Returns true if `I` is in `PostProcessInserts` or `PostProcessCmps`. 16037 auto IsInPostProcessInstrs = [&](Instruction *I) { 16038 if (auto *Cmp = dyn_cast<CmpInst>(I)) 16039 return PostProcessCmps.contains(Cmp); 16040 return isa<InsertElementInst, InsertValueInst>(I) && 16041 PostProcessInserts.contains(I); 16042 }; 16043 // Returns true if `I` is an instruction without users, like terminator, or 16044 // function call with ignored return value, store. Ignore unused instructions 16045 // (basing on instruction type, except for CallInst and InvokeInst). 
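  // E.g., a store or a branch has void type and trivially qualifies, and an
  // unused "%v = call i32 @f()" qualifies via the CallInst/InvokeInst
  // exception, while a dead non-call instruction such as an unused add does
  // not make an interesting root here.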
16046 auto HasNoUsers = [](Instruction *I) { 16047 return I->use_empty() && 16048 (I->getType()->isVoidTy() || isa<CallInst, InvokeInst>(I)); 16049 }; 16050 for (BasicBlock::iterator It = BB->begin(), E = BB->end(); It != E; ++It) { 16051 // Skip instructions with scalable type. The num of elements is unknown at 16052 // compile-time for scalable type. 16053 if (isa<ScalableVectorType>(It->getType())) 16054 continue; 16055 16056 // Skip instructions marked for the deletion. 16057 if (R.isDeleted(&*It)) 16058 continue; 16059 // We may go through BB multiple times so skip the one we have checked. 16060 if (!VisitedInstrs.insert(&*It).second) { 16061 if (HasNoUsers(&*It) && 16062 VectorizeInsertsAndCmps(/*VectorizeCmps=*/It->isTerminator())) { 16063 // We would like to start over since some instructions are deleted 16064 // and the iterator may become invalid value. 16065 Changed = true; 16066 It = BB->begin(); 16067 E = BB->end(); 16068 } 16069 continue; 16070 } 16071 16072 if (isa<DbgInfoIntrinsic>(It)) 16073 continue; 16074 16075 // Try to vectorize reductions that use PHINodes. 16076 if (PHINode *P = dyn_cast<PHINode>(It)) { 16077 // Check that the PHI is a reduction PHI. 16078 if (P->getNumIncomingValues() == 2) { 16079 // Try to match and vectorize a horizontal reduction. 16080 Instruction *Root = getReductionInstr(DT, P, BB, LI); 16081 if (Root && vectorizeRootInstruction(P, Root, BB, R, TTI)) { 16082 Changed = true; 16083 It = BB->begin(); 16084 E = BB->end(); 16085 continue; 16086 } 16087 } 16088 // Try to vectorize the incoming values of the PHI, to catch reductions 16089 // that feed into PHIs. 16090 for (unsigned I = 0, E = P->getNumIncomingValues(); I != E; I++) { 16091 // Skip if the incoming block is the current BB for now. Also, bypass 16092 // unreachable IR for efficiency and to avoid crashing. 16093 // TODO: Collect the skipped incoming values and try to vectorize them 16094 // after processing BB. 16095 if (BB == P->getIncomingBlock(I) || 16096 !DT->isReachableFromEntry(P->getIncomingBlock(I))) 16097 continue; 16098 16099 // Postponed instructions should not be vectorized here, delay their 16100 // vectorization. 16101 if (auto *PI = dyn_cast<Instruction>(P->getIncomingValue(I)); 16102 PI && !IsInPostProcessInstrs(PI)) 16103 Changed |= vectorizeRootInstruction(nullptr, PI, 16104 P->getIncomingBlock(I), R, TTI); 16105 } 16106 continue; 16107 } 16108 16109 if (HasNoUsers(&*It)) { 16110 bool OpsChanged = false; 16111 auto *SI = dyn_cast<StoreInst>(It); 16112 bool TryToVectorizeRoot = ShouldStartVectorizeHorAtStore || !SI; 16113 if (SI) { 16114 auto *I = Stores.find(getUnderlyingObject(SI->getPointerOperand())); 16115 // Try to vectorize chain in store, if this is the only store to the 16116 // address in the block. 16117 // TODO: This is just a temporarily solution to save compile time. Need 16118 // to investigate if we can safely turn on slp-vectorize-hor-store 16119 // instead to allow lookup for reduction chains in all non-vectorized 16120 // stores (need to check side effects and compile time). 16121 TryToVectorizeRoot |= (I == Stores.end() || I->second.size() == 1) && 16122 SI->getValueOperand()->hasOneUse(); 16123 } 16124 if (TryToVectorizeRoot) { 16125 for (auto *V : It->operand_values()) { 16126 // Postponed instructions should not be vectorized here, delay their 16127 // vectorization. 16128 if (auto *VI = dyn_cast<Instruction>(V); 16129 VI && !IsInPostProcessInstrs(VI)) 16130 // Try to match and vectorize a horizontal reduction. 
16131 OpsChanged |= vectorizeRootInstruction(nullptr, VI, BB, R, TTI); 16132 } 16133 } 16134 // Start vectorization of post-process list of instructions from the 16135 // top-tree instructions to try to vectorize as many instructions as 16136 // possible. 16137 OpsChanged |= 16138 VectorizeInsertsAndCmps(/*VectorizeCmps=*/It->isTerminator()); 16139 if (OpsChanged) { 16140 // We would like to start over since some instructions are deleted 16141 // and the iterator may become invalid value. 16142 Changed = true; 16143 It = BB->begin(); 16144 E = BB->end(); 16145 continue; 16146 } 16147 } 16148 16149 if (isa<InsertElementInst, InsertValueInst>(It)) 16150 PostProcessInserts.insert(&*It); 16151 else if (isa<CmpInst>(It)) 16152 PostProcessCmps.insert(cast<CmpInst>(&*It)); 16153 } 16154 16155 return Changed; 16156 } 16157 16158 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) { 16159 auto Changed = false; 16160 for (auto &Entry : GEPs) { 16161 // If the getelementptr list has fewer than two elements, there's nothing 16162 // to do. 16163 if (Entry.second.size() < 2) 16164 continue; 16165 16166 LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length " 16167 << Entry.second.size() << ".\n"); 16168 16169 // Process the GEP list in chunks suitable for the target's supported 16170 // vector size. If a vector register can't hold 1 element, we are done. We 16171 // are trying to vectorize the index computations, so the maximum number of 16172 // elements is based on the size of the index expression, rather than the 16173 // size of the GEP itself (the target's pointer size). 16174 unsigned MaxVecRegSize = R.getMaxVecRegSize(); 16175 unsigned EltSize = R.getVectorElementSize(*Entry.second[0]->idx_begin()); 16176 if (MaxVecRegSize < EltSize) 16177 continue; 16178 16179 unsigned MaxElts = MaxVecRegSize / EltSize; 16180 for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) { 16181 auto Len = std::min<unsigned>(BE - BI, MaxElts); 16182 ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len); 16183 16184 // Initialize a set a candidate getelementptrs. Note that we use a 16185 // SetVector here to preserve program order. If the index computations 16186 // are vectorizable and begin with loads, we want to minimize the chance 16187 // of having to reorder them later. 16188 SetVector<Value *> Candidates(GEPList.begin(), GEPList.end()); 16189 16190 // Some of the candidates may have already been vectorized after we 16191 // initially collected them. If so, they are marked as deleted, so remove 16192 // them from the set of candidates. 16193 Candidates.remove_if( 16194 [&R](Value *I) { return R.isDeleted(cast<Instruction>(I)); }); 16195 16196 // Remove from the set of candidates all pairs of getelementptrs with 16197 // constant differences. Such getelementptrs are likely not good 16198 // candidates for vectorization in a bottom-up phase since one can be 16199 // computed from the other. We also ensure all candidate getelementptr 16200 // indices are unique. 
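      // Illustrative IR (names are made up):
      //   %g0 = getelementptr inbounds i32, ptr %base, i64 %i
      //   %g1 = getelementptr inbounds i32, ptr %base, i64 %i.plus.4
      // If %i.plus.4 is %i + 4, the two GEP SCEVs differ by a constant and
      // both candidates are dropped; a candidate that reuses the same index
      // value as an earlier one is dropped too, keeping the indices unique.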
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1;
           ++I) {
        auto *GEPI = GEPList[I];
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = GEPList[J];
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPI);
            Candidates.remove(GEPJ);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPJ);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;

      // Add the single, non-constant index of each candidate to the bundle. We
      // ensured the indices met these constraints when we originally collected
      // the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 && !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try and vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}

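// Editorial overview (sketch, not part of the original source): the routine
// below takes the stores collected per underlying base object, sorts them so
// that stores with compatible value operands become adjacent, groups maximal
// compatible runs, and hands each run to vectorizeStores. For instance,
// stores whose stored values are same-opcode instructions from the same block
// end up in one run, while stores of constants are grouped separately.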
bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Sort by type, base pointer and value operand. Value operands must be
  // compatible (have the same opcode, same parent), otherwise it is
  // definitely not profitable to try to vectorize them.
  auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) {
    if (V->getValueOperand()->getType()->getTypeID() <
        V2->getValueOperand()->getType()->getTypeID())
      return true;
    if (V->getValueOperand()->getType()->getTypeID() >
        V2->getValueOperand()->getType()->getTypeID())
      return false;
    if (V->getPointerOperandType()->getTypeID() <
        V2->getPointerOperandType()->getTypeID())
      return true;
    if (V->getPointerOperandType()->getTypeID() >
        V2->getPointerOperandType()->getTypeID())
      return false;
    // UndefValues are compatible with all other values.
    if (isa<UndefValue>(V->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return false;
    if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        DomTreeNodeBase<llvm::BasicBlock> *NodeI1 =
            DT->getNode(I1->getParent());
        DomTreeNodeBase<llvm::BasicBlock> *NodeI2 =
            DT->getNode(I2->getParent());
        assert(NodeI1 && "Should only process reachable instructions");
        assert(NodeI2 && "Should only process reachable instructions");
        assert((NodeI1 == NodeI2) ==
                   (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
               "Different nodes should have different DFS numbers");
        if (NodeI1 != NodeI2)
          return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
        InstructionsState S = getSameOpcode({I1, I2}, *TLI);
        if (S.getOpcode())
          return false;
        return I1->getOpcode() < I2->getOpcode();
      }
    if (isa<Constant>(V->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return false;
    return V->getValueOperand()->getValueID() <
           V2->getValueOperand()->getValueID();
  };

  auto &&AreCompatibleStores = [this](StoreInst *V1, StoreInst *V2) {
    if (V1 == V2)
      return true;
    if (V1->getValueOperand()->getType() != V2->getValueOperand()->getType())
      return false;
    if (V1->getPointerOperandType() != V2->getPointerOperandType())
      return false;
    // Undefs are compatible with any other value.
    if (isa<UndefValue>(V1->getValueOperand()) ||
        isa<UndefValue>(V2->getValueOperand()))
      return true;
    if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
      if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
        if (I1->getParent() != I2->getParent())
          return false;
        InstructionsState S = getSameOpcode({I1, I2}, *TLI);
        return S.getOpcode() > 0;
      }
    if (isa<Constant>(V1->getValueOperand()) &&
        isa<Constant>(V2->getValueOperand()))
      return true;
    return V1->getValueOperand()->getValueID() ==
           V2->getValueOperand()->getValueID();
  };

  // Attempt to sort and vectorize each of the store-groups.
  for (auto &Pair : Stores) {
    if (Pair.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                      << Pair.second.size() << ".\n");

    if (!isValidElementType(Pair.second.front()->getValueOperand()->getType()))
      continue;

    // Reverse the stores to perform a bottom-to-top analysis. This matters
    // when the same address is stored to several times; in that case we need
    // to follow the store order (reversed, to respect the memory
    // dependencies).
    SmallVector<StoreInst *> ReversedStores(Pair.second.rbegin(),
                                            Pair.second.rend());
    Changed |= tryToVectorizeSequence<StoreInst>(
        ReversedStores, StoreSorter, AreCompatibleStores,
        [this, &R](ArrayRef<StoreInst *> Candidates, bool) {
          return vectorizeStores(Candidates, R);
        },
        /*MaxVFOnly=*/false, R);
  }
  return Changed;
}