//===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"

#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// Return true if all of the intrinsic's arguments and return type are scalars
/// for the scalar form of the intrinsic, and vectors for the vector form of the
/// intrinsic (except operands that are marked as always being scalar by
/// hasVectorIntrinsicScalarOpd).
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::abs:   // Begin integer bit-manipulation.
  case Intrinsic::bswap:
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
  case Intrinsic::sqrt: // Begin floating-point.
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::pow:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::powi:
  case Intrinsic::canonicalize:
    return true;
  default:
    return false;
  }
}

/// Identifies if the vector form of the intrinsic has a scalar operand.
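/// For example, the exponent operand (index 1) of llvm.powi stays scalar in
/// the vector form, so hasVectorIntrinsicScalarOpd(Intrinsic::powi, 1) returns
/// true.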
bool llvm::hasVectorIntrinsicScalarOpd(Intrinsic::ID ID,
                                       unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::abs:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
    return (ScalarOpdIdx == 2);
  default:
    return false;
  }
}

bool llvm::hasVectorIntrinsicOverloadedScalarOpd(Intrinsic::ID ID,
                                                 unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  default:
    return false;
  }
}

/// Returns intrinsic ID for call.
/// For the input call instruction it finds the mapping intrinsic and returns
/// its ID; if no mapping is found, it returns not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::experimental_noalias_scope_decl ||
      ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
    return ID;
  return Intrinsic::not_intrinsic;
}

/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
  const DataLayout &DL = Gep->getModule()->getDataLayout();
  unsigned LastOperand = Gep->getNumOperands() - 1;
  TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());

  // Walk backwards and try to peel off zeros.
  while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
    // Find the type we're currently indexing into.
    gep_type_iterator GEPTI = gep_type_begin(Gep);
    std::advance(GEPTI, LastOperand - 2);

    // If it's a type with the same allocation size as the result of the GEP we
    // can peel off the zero index.
    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
      break;
    --LastOperand;
  }

  return LastOperand;
}

/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}

/// If a value has only one user that is a CastInst, return it.
Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
  Value *UniqueCast = nullptr;
  for (User *U : Ptr->users()) {
    CastInst *CI = dyn_cast<CastInst>(U);
    if (CI && CI->getType() == Ty) {
      if (!UniqueCast)
        UniqueCast = CI;
      else
        return nullptr;
    }
  }
  return UniqueCast;
}

/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually the index at
  // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
  // the pointer, otherwise we are analyzing the index.
  Value *OrigPtr = Ptr;

  // The size of the pointer access.
  int64_t PtrAccessSize = 1;

  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
      V = C->getOperand();

  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
  if (!S)
    return nullptr;

  V = S->getStepRecurrence(*SE);
  if (!V)
    return nullptr;

  // Strip off the size of access multiplication if we are still analyzing the
  // pointer.
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

      // Huge step value - give up.
      if (APStepVal.getBitWidth() > 64)
        return nullptr;

      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Strip off casts.
  Type *StripedOffRecurrenceCast = nullptr;
  if (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V)) {
    StripedOffRecurrenceCast = C->getType();
    V = C->getOperand();
  }

  // Look for the loop invariant symbolic value.
  const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
  if (!U)
    return nullptr;

  Value *Stride = U->getValue();
  if (!Lp->isLoopInvariant(Stride))
    return nullptr;

  // If we have stripped off the recurrence cast we have to make sure that we
  // return the value that is used in this loop so that we can replace it later.
  if (StripedOffRecurrenceCast)
    Stride = getUniqueCastUse(Stride, Lp, StripedOffRecurrenceCast);

  return Stride;
}

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  // For fixed-length vector, return undef for out of range access.
  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
    unsigned Width = FVTy->getNumElements();
    if (EltNo >= Width)
      return UndefValue::get(FVTy->getElementType());
  }

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Guard against infinite loop on malformed, unreachable IR.
    if (III == III->getOperand(0))
      return nullptr;

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  // Restrict the following transformation to fixed-length vector.
  if (SVI && isa<FixedVectorType>(SVI->getType())) {
    unsigned LHSWidth =
        cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return UndefValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val; Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // If the vector is a splat then we can trivially find the scalar element.
  if (isa<ScalableVectorType>(VTy))
    if (Value *Splat = getSplatValue(V))
      if (EltNo < VTy->getElementCount().getKnownMinValue())
        return Splat;

  // Otherwise, we don't know.
  return nullptr;
}

int llvm::getSplatIndex(ArrayRef<int> Mask) {
  int SplatIndex = -1;
  for (int M : Mask) {
    // Ignore invalid (undefined) mask elements.
    if (M < 0)
      continue;

    // There can be only 1 non-negative mask element value if this is a splat.
    if (SplatIndex != -1 && SplatIndex != M)
      return -1;

    // Initialize the splat index to the 1st non-negative mask element.
    SplatIndex = M;
  }
  assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
  return SplatIndex;
}

/// Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcasts a scalar at element 0.
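/// For example, given
///   %ins   = insertelement <4 x i32> poison, i32 %x, i32 0
///   %splat = shufflevector <4 x i32> %ins, <4 x i32> poison,
///                          <4 x i32> zeroinitializer
/// this returns the broadcast scalar %x.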
Value *llvm::getSplatValue(const Value *V) {
  if (isa<VectorType>(V->getType()))
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue();

  // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
  Value *Splat;
  if (match(V,
            m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
                      m_Value(), m_ZeroMask())))
    return Splat;

  return nullptr;
}

bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

  if (isa<VectorType>(V->getType())) {
    if (isa<UndefValue>(V))
      return true;
    // FIXME: We can allow undefs, but if Index was specified, we may want to
    // check that the constant is defined at that index.
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue() != nullptr;
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
    // FIXME: We can safely allow undefs here. If Index was specified, we will
    // check that the mask elt is defined at the required index.
    if (!is_splat(Shuf->getShuffleMask()))
      return false;

    // Match any index.
    if (Index == -1)
      return true;

    // Match a specific element. The mask should be defined at and match the
    // specified index.
    return Shuf->getMaskValue(Index) == Index;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxAnalysisRecursionDepth)
    return false;

  // If both operands of a binop are splats, the result is a splat.
  Value *X, *Y, *Z;
  if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);

  // If all operands of a select are splats, the result is a splat.
  if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
           isSplatValue(Z, Index, Depth);

  // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).

  return false;
}

void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                 SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return;
  }

  ScaledMask.clear();
  for (int MaskElt : Mask) {
    if (MaskElt >= 0) {
      assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
             "Overflowed 32-bits");
    }
    for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
      ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
  }
}

bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return true;
  }

  // We must map the original elements down evenly to a type with fewer
  // elements.
  int NumElts = Mask.size();
  if (NumElts % Scale != 0)
    return false;

  ScaledMask.clear();
  ScaledMask.reserve(NumElts / Scale);

  // Step through the input mask by splitting into Scale-sized slices.
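  // For example, with Scale == 2 the mask <0, 1, 6, 7> widens to <0, 3>, while
  // <0, 2, 4, 6> has no consecutive slices and the function returns false.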
  do {
    ArrayRef<int> MaskSlice = Mask.take_front(Scale);
    assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice.");

    // The first element of the slice determines how we evaluate this slice.
    int SliceFront = MaskSlice.front();
    if (SliceFront < 0) {
      // Negative values (undef or other "sentinel" values) must be equal
      // across the entire slice.
      if (!is_splat(MaskSlice))
        return false;
      ScaledMask.push_back(SliceFront);
    } else {
      // A positive mask element must be cleanly divisible.
      if (SliceFront % Scale != 0)
        return false;
      // Elements of the slice must be consecutive.
      for (int i = 1; i < Scale; ++i)
        if (MaskSlice[i] != SliceFront + i)
          return false;
      ScaledMask.push_back(SliceFront / Scale);
    }
    Mask = Mask.drop_front(Scale);
  } while (!Mask.empty());

  assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask");

  // All elements of the original mask can be scaled down to map to the
  // elements of a mask with wider elements.
  return true;
}

void llvm::processShuffleMasks(
    ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
    unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
    function_ref<void(ArrayRef<int>, unsigned)> SingleInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction) {
  SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs);
  // Try to perform better estimation of the permutation.
  // 1. Split the source/destination vectors into real registers.
  // 2. Do the mask analysis to identify which real registers are
  //    permuted.
  int Sz = Mask.size();
  unsigned SzDest = Sz / NumOfDestRegs;
  unsigned SzSrc = Sz / NumOfSrcRegs;
  for (unsigned I = 0; I < NumOfDestRegs; ++I) {
    auto &RegMasks = Res[I];
    RegMasks.assign(NumOfSrcRegs, {});
    // Record, for each element of this dest register, which source register it
    // comes from.
    for (unsigned K = 0; K < SzDest; ++K) {
      int Idx = I * SzDest + K;
      if (Idx == Sz)
        break;
      if (Mask[Idx] >= Sz || Mask[Idx] == UndefMaskElem)
        continue;
      int SrcRegIdx = Mask[Idx] / SzSrc;
      // Add a cost of PermuteTwoSrc for each new source register permute,
      // if we have more than one source register.
      if (RegMasks[SrcRegIdx].empty())
        RegMasks[SrcRegIdx].assign(SzDest, UndefMaskElem);
      RegMasks[SrcRegIdx][K] = Mask[Idx] % SzSrc;
    }
  }
  // Process split mask.
  for (unsigned I = 0; I < NumOfUsedRegs; ++I) {
    auto &Dest = Res[I];
    int NumSrcRegs =
        count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
    switch (NumSrcRegs) {
    case 0:
      // No input vectors were used!
      NoInputAction();
      break;
    case 1: {
      // Find the only mask with at least a single undef mask element.
      auto *It =
          find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
      unsigned SrcReg = std::distance(Dest.begin(), It);
      SingleInputAction(*It, SrcReg);
      break;
    }
    default: {
      // The first mask is a permutation of a single register. Since we have >2
      // input registers to shuffle, we merge the masks for the first 2
      // registers and generate a shuffle of 2 registers rather than the
      // reordering of the first register and then a shuffle with the second
      // register. Next, generate the shuffles of the resulting register + the
      // remaining registers from the list.
      auto &&CombineMasks = [](MutableArrayRef<int> FirstMask,
                               ArrayRef<int> SecondMask) {
        for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) {
          if (SecondMask[Idx] != UndefMaskElem) {
            assert(FirstMask[Idx] == UndefMaskElem &&
                   "Expected undefined mask element.");
            FirstMask[Idx] = SecondMask[Idx] + VF;
          }
        }
      };
      auto &&NormalizeMask = [](MutableArrayRef<int> Mask) {
        for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
          if (Mask[Idx] != UndefMaskElem)
            Mask[Idx] = Idx;
        }
      };
      int SecondIdx;
      do {
        int FirstIdx = -1;
        SecondIdx = -1;
        MutableArrayRef<int> FirstMask, SecondMask;
        for (unsigned I = 0; I < NumOfDestRegs; ++I) {
          SmallVectorImpl<int> &RegMask = Dest[I];
          if (RegMask.empty())
            continue;

          if (FirstIdx == SecondIdx) {
            FirstIdx = I;
            FirstMask = RegMask;
            continue;
          }
          SecondIdx = I;
          SecondMask = RegMask;
          CombineMasks(FirstMask, SecondMask);
          ManyInputsAction(FirstMask, FirstIdx, SecondIdx);
          NormalizeMask(FirstMask);
          RegMask.clear();
          SecondMask = FirstMask;
          SecondIdx = FirstIdx;
        }
        if (FirstIdx != SecondIdx && SecondIdx >= 0) {
          CombineMasks(SecondMask, FirstMask);
          ManyInputsAction(SecondMask, SecondIdx, FirstIdx);
          Dest[FirstIdx].clear();
          NormalizeMask(SecondMask);
        }
      } while (SecondIdx >= 0);
      break;
    }
    }
  }
}

MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64-bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }
  // Early exit.
  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (Visited.count(Val))
      continue;
    Visited.insert(Val);

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
    uint64_t LeaderDemandedBits = 0;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      LeaderDemandedBits |= DBits[M];

    uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) -
                     llvm::countLeadingZeros(LeaderDemandedBits);
    // Round up to a power of 2
    if (!isPowerOf2_64((uint64_t)MinBW))
      MinBW = NextPowerOf2(MinBW);

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    // If we are required to shrink a PHI, abandon this entire equivalence
    // class.
    bool Abort = false;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) {
        Abort = true;
        break;
      }
    if (Abort)
      continue;

    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) {
      if (!isa<Instruction>(M))
        continue;
      Type *Ty = M->getType();
      if (Roots.count(M))
        Ty = cast<Instruction>(M)->getOperand(0)->getType();
      if (MinBW < Ty->getScalarSizeInBits())
        MinBWs[cast<Instruction>(M)] = MinBW;
    }
  }

  return MinBWs;
}

/// Add all access groups in @p AccGroups to @p List.
template <typename ListT>
static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
  // Interpret an access group as a list containing itself.
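  // A single access group is a distinct MDNode with no operands; a list of
  // access groups is an MDNode whose operands are themselves access groups.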
  if (AccGroups->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
    List.insert(AccGroups);
    return;
  }

  for (auto &AccGroupListOp : AccGroups->operands()) {
    auto *Item = cast<MDNode>(AccGroupListOp.get());
    assert(isValidAsAccessGroup(Item) && "List item must be an access group");
    List.insert(Item);
  }
}

MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
  if (!AccGroups1)
    return AccGroups2;
  if (!AccGroups2)
    return AccGroups1;
  if (AccGroups1 == AccGroups2)
    return AccGroups1;

  SmallSetVector<Metadata *, 4> Union;
  addToAccessGroupList(Union, AccGroups1);
  addToAccessGroupList(Union, AccGroups2);

  if (Union.size() == 0)
    return nullptr;
  if (Union.size() == 1)
    return cast<MDNode>(Union.front());

  LLVMContext &Ctx = AccGroups1->getContext();
  return MDNode::get(Ctx, Union.getArrayRef());
}

MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
                                    const Instruction *Inst2) {
  bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
  bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();

  if (!MayAccessMem1 && !MayAccessMem2)
    return nullptr;
  if (!MayAccessMem1)
    return Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MayAccessMem2)
    return Inst1->getMetadata(LLVMContext::MD_access_group);

  MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
  MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MD1 || !MD2)
    return nullptr;
  if (MD1 == MD2)
    return MD1;

  // Use set for scalable 'contains' check.
  SmallPtrSet<Metadata *, 4> AccGroupSet2;
  addToAccessGroupList(AccGroupSet2, MD2);

  SmallVector<Metadata *, 4> Intersection;
  if (MD1->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
    if (AccGroupSet2.count(MD1))
      Intersection.push_back(MD1);
  } else {
    for (const MDOperand &Node : MD1->operands()) {
      auto *Item = cast<MDNode>(Node.get());
      assert(isValidAsAccessGroup(Item) && "List item must be an access group");
      if (AccGroupSet2.count(Item))
        Intersection.push_back(Item);
    }
  }

  if (Intersection.size() == 0)
    return nullptr;
  if (Intersection.size() == 1)
    return cast<MDNode>(Intersection.front());

  LLVMContext &Ctx = Inst1->getContext();
  return MDNode::get(Ctx, Intersection);
}

/// \returns \p I after propagating metadata from \p VL.
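/// The metadata kinds handled are tbaa, alias.scope, noalias, fpmath,
/// nontemporal, invariant.load and access groups; each is combined
/// conservatively across all instructions in \p VL.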
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
  if (VL.empty())
    return Inst;
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                    LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
                    LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
                    LLVMContext::MD_access_group}) {
    MDNode *MD = I0->getMetadata(Kind);

    for (int J = 1, E = VL.size(); MD && J != E; ++J) {
      const Instruction *IJ = cast<Instruction>(VL[J]);
      MDNode *IMD = IJ->getMetadata(Kind);
      switch (Kind) {
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_invariant_load:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_access_group:
        MD = intersectAccessGroups(Inst, IJ);
        break;
      default:
        llvm_unreachable("unhandled metadata");
      }
    }

    Inst->setMetadata(Kind, MD);
  }

  return Inst;
}

Constant *
llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
                           const InterleaveGroup<Instruction> &Group) {
  // All 1's means mask is not needed.
  if (Group.getNumMembers() == Group.getFactor())
    return nullptr;

  // TODO: support reversed access.
  assert(!Group.isReverse() && "Reversed group not supported.");

  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < Group.getFactor(); ++j) {
      unsigned HasMember = Group.getMember(j) ? 1 : 0;
      Mask.push_back(Builder.getInt1(HasMember));
    }

  return ConstantVector::get(Mask);
}

llvm::SmallVector<int, 16>
llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) {
  SmallVector<int, 16> MaskVec;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < ReplicationFactor; j++)
      MaskVec.push_back(i);

  return MaskVec;
}

llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF,
                                                      unsigned NumVecs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVecs; j++)
      Mask.push_back(j * VF + i);

  return Mask;
}

llvm::SmallVector<int, 16>
llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Start + i * Stride);

  return Mask;
}

llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start,
                                                      unsigned NumInts,
                                                      unsigned NumUndefs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < NumInts; i++)
    Mask.push_back(Start + i);

  for (unsigned i = 0; i < NumUndefs; i++)
    Mask.push_back(-1);

  return Mask;
}

llvm::SmallVector<int, 16> llvm::createUnaryMask(ArrayRef<int> Mask,
                                                 unsigned NumElts) {
  // Avoid casts in the loop and make sure we have a reasonable number.
  int NumEltsSigned = NumElts;
  assert(NumEltsSigned > 0 && "Expected smaller or non-zero element count");

  // If the mask chooses an element from operand 1, reduce it to choose from the
  // corresponding element of operand 0. Undef mask elements are unchanged.
  SmallVector<int, 16> UnaryMask;
  for (int MaskElt : Mask) {
    assert((MaskElt < NumEltsSigned * 2) && "Expected valid shuffle mask");
    int UnaryElt = MaskElt >= NumEltsSigned ? MaskElt - NumEltsSigned : MaskElt;
    UnaryMask.push_back(UnaryElt);
  }
  return UnaryMask;
}

/// A helper function for concatenating vectors. This function concatenates two
/// vectors having the same element type. If the second vector has fewer
/// elements than the first, it is padded with undefs.
static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements();
  unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Unexpected: the first vector has fewer elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    V2 = Builder.CreateShuffleVector(
        V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
  }

  return Builder.CreateShuffleVector(
      V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0));
}

Value *llvm::concatenateVectors(IRBuilderBase &Builder,
                                ArrayRef<Value *> Vecs) {
  unsigned NumVecs = Vecs.size();
  assert(NumVecs > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(Vecs.begin(), Vecs.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVecs - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVecs % 2 != 0)
      TmpList.push_back(ResList[NumVecs - 1]);

    ResList = TmpList;
    NumVecs = ResList.size();
  } while (NumVecs > 1);

  return ResList[0];
}

bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskIsAllOneOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

/// TODO: This is a lot like known bits, but for
/// vectors. Is there something we can common this with?
APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
  assert(isa<FixedVectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a fixed width vector of i1");

  const unsigned VWidth =
      cast<FixedVectorType>(Mask->getType())->getNumElements();
  APInt DemandedElts = APInt::getAllOnes(VWidth);
  if (auto *CV = dyn_cast<ConstantVector>(Mask))
    for (unsigned i = 0; i < VWidth; i++)
      if (CV->getAggregateElement(i)->isNullValue())
        DemandedElts.clearBit(i);
  return DemandedElts;
}

bool InterleavedAccessInfo::isStrided(int Stride) {
  unsigned Factor = std::abs(Stride);
  return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {
  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;
      Type *ElementTy = getLoadStoreType(&I);

      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be overly
      // conservative. For full groups, wrapping should be ok since if we would
      // wrap around the address space we would do a memory access at nullptr
      // even without the transformation. The wrapping checks are therefore
      // deferred until after we've formed the interleaved groups.
      int64_t Stride = getPtrStride(PSE, ElementTy, Ptr, TheLoop, Strides,
                                    /*Assume=*/true, /*ShouldCheckWrap=*/false);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      uint64_t Size = DL.getTypeAllocSize(ElementTy);
      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
                                              getLoadStoreAlignment(&I));
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];      // (1)
//                                A[i] = b;      // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const ValueToValueMap &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup<Instruction> *Group = nullptr;
    if (isStrided(DesB.Stride) &&
        (!isPredicated(B->getParent()) ||
         EnablePredicatedInterleavedMemAccesses)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a;  // (1)
      //                    | A[i-1] = b;  // (2) |
      //                      A[i-3] = c;  // (3)
      //                      A[i]   = d;  // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup<Instruction> *StoreGroup = getInterleaveGroup(A);

          LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
                               "dependence between " << *A << " and " << *B
                            << '\n');

          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive with
      // mayWriteToMemory() in the case of atomic loads. We shouldn't see those
      // here; canVectorizeMemory() should have returned false - except for the
      // case we asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory objects of A and B don't belong to the same
      // address space.
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // All members of a predicated interleave-group must have the same
      // predicate, and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();
      BasicBlock *BlockB = B->getParent();
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Alignment)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << " into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  } // Iteration over B accesses.

  auto InvalidateGroupIfMemberMayWrap = [&](InterleaveGroup<Instruction> *Group,
                                            int Index,
                                            std::string FirstOrLast) -> bool {
    Instruction *Member = Group->getMember(Index);
    assert(Member && "Group member does not exist");
    Value *MemberPtr = getLoadStorePointerOperand(Member);
    Type *AccessTy = getLoadStoreType(Member);
    if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides,
                     /*Assume=*/false, /*ShouldCheckWrap=*/true))
      return false;
    LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                      << FirstOrLast
                      << " group member potentially pointer-wrapping.\n");
    releaseGroup(Group);
    return true;
  };

  // Remove interleaved groups with gaps whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer doesn't
  // wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. When we'll change to use Assume=true
  // we'll only need at most one runtime check per interleaved group.
  for (auto *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and group member Factor - 1; If the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
      continue;
    if (Group->getMember(Group->getFactor() - 1))
      InvalidateGroupIfMemberMayWrap(Group, Group->getFactor() - 1,
                                     std::string("last"));
    else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }

  for (auto *Group : StoreGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide store would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Interleave-store-group with gaps is implemented using masked wide store.
    // Remove interleaved store groups with gaps if
    // masked-interleaved-accesses are not enabled by the target.
    if (!EnablePredicatedInterleavedMemAccesses) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
      continue;
    }

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and the last group member. Case 3 (scalar epilog) is not relevant for
    // stores with gaps, which are implemented with masked-store (rather than
    // speculative access, as in loads).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
      continue;
    for (int Index = Group->getFactor() - 1; Index > 0; Index--)
      if (Group->getMember(Index)) {
        InvalidateGroupIfMemberMayWrap(Group, Index, std::string("last"));
        break;
      }
  }
}

void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
  // If no group had triggered the requirement to create an epilogue loop,
  // there is nothing to do.
  if (!requiresScalarEpilogue())
    return;

  bool ReleasedGroup = false;
  // Release groups requiring scalar epilogues. Note that this also removes
  // them from InterleaveGroups.
  for (auto *Group : make_early_inc_range(InterleaveGroups)) {
    if (!Group->requiresScalarEpilogue())
      continue;
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate candidate interleaved group due to gaps that "
           "require a scalar epilogue (not allowed under optsize) and cannot "
           "be masked (not enabled).\n");
    releaseGroup(Group);
    ReleasedGroup = true;
  }
  assert(ReleasedGroup && "At least one group must be invalidated, as a "
                          "scalar epilogue was required");
  (void)ReleasedGroup;
  RequiresScalarEpilogue = false;
}

template <typename InstT>
void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
  llvm_unreachable("addMetadata can only be used for Instruction");
}

namespace llvm {
template <>
void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
  SmallVector<Value *, 4> VL;
  std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
                 [](std::pair<int, Instruction *> p) { return p.second; });
  propagateMetadata(NewInst, VL);
}
}

std::string VFABI::mangleTLIVectorName(StringRef VectorName,
                                       StringRef ScalarName, unsigned numArgs,
                                       ElementCount VF) {
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << "_ZGV" << VFABI::_LLVM_ << "N";
  if (VF.isScalable())
    Out << 'x';
  else
    Out << VF.getFixedValue();
  for (unsigned I = 0; I < numArgs; ++I)
    Out << "v";
  Out << "_" << ScalarName << "(" << VectorName << ")";
  return std::string(Out.str());
}

void VFABI::getVectorVariantNames(
    const CallInst &CI, SmallVectorImpl<std::string> &VariantMappings) {
  const StringRef S = CI.getFnAttr(VFABI::MappingsAttrName).getValueAsString();
  if (S.empty())
    return;

  SmallVector<StringRef, 8> ListAttr;
  S.split(ListAttr, ",");

  for (auto &S : SetVector<StringRef>(ListAttr.begin(), ListAttr.end())) {
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << S << "'\n");
    Optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, *(CI.getModule()));
    assert(Info.hasValue() && "Invalid name for a VFABI variant.");
    assert(CI.getModule()->getFunction(Info.getValue().VectorName) &&
           "Vector function is missing.");
#endif
    VariantMappings.push_back(std::string(S));
  }
}

bool VFShape::hasValidParameterList() const {
  for (unsigned Pos = 0, NumParams = Parameters.size(); Pos < NumParams;
       ++Pos) {
    assert(Parameters[Pos].ParamPos == Pos && "Broken parameter list.");

    switch (Parameters[Pos].ParamKind) {
    default: // Nothing to check.
      break;
    case VFParamKind::OMP_Linear:
    case VFParamKind::OMP_LinearRef:
    case VFParamKind::OMP_LinearVal:
    case VFParamKind::OMP_LinearUVal:
      // Compile time linear steps must be non-zero.
      if (Parameters[Pos].LinearStepOrPos == 0)
        return false;
      break;
    case VFParamKind::OMP_LinearPos:
    case VFParamKind::OMP_LinearRefPos:
    case VFParamKind::OMP_LinearValPos:
    case VFParamKind::OMP_LinearUValPos:
      // The runtime linear step must be referring to some other
      // parameter in the signature.
      if (Parameters[Pos].LinearStepOrPos >= int(NumParams))
        return false;
      // The linear step parameter must be marked as uniform.
      if (Parameters[Parameters[Pos].LinearStepOrPos].ParamKind !=
          VFParamKind::OMP_Uniform)
        return false;
      // The linear step parameter can't point at itself.
      if (Parameters[Pos].LinearStepOrPos == int(Pos))
        return false;
      break;
    case VFParamKind::GlobalPredicate:
      // The global predicate must be unique. It can be placed anywhere in the
      // signature.
      for (unsigned NextPos = Pos + 1; NextPos < NumParams; ++NextPos)
        if (Parameters[NextPos].ParamKind == VFParamKind::GlobalPredicate)
          return false;
      break;
    }
  }
  return true;
}