1 //===----------- VectorUtils.cpp - Vectorizer utility functions -----------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file defines vectorizer utilities. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "llvm/Analysis/VectorUtils.h" 14 #include "llvm/ADT/EquivalenceClasses.h" 15 #include "llvm/Analysis/DemandedBits.h" 16 #include "llvm/Analysis/LoopInfo.h" 17 #include "llvm/Analysis/LoopIterator.h" 18 #include "llvm/Analysis/ScalarEvolution.h" 19 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 20 #include "llvm/Analysis/TargetTransformInfo.h" 21 #include "llvm/Analysis/ValueTracking.h" 22 #include "llvm/IR/Constants.h" 23 #include "llvm/IR/GetElementPtrTypeIterator.h" 24 #include "llvm/IR/IRBuilder.h" 25 #include "llvm/IR/PatternMatch.h" 26 #include "llvm/IR/Value.h" 27 #include "llvm/Support/CommandLine.h" 28 29 #define DEBUG_TYPE "vectorutils" 30 31 using namespace llvm; 32 using namespace llvm::PatternMatch; 33 34 /// Maximum factor for an interleaved memory access. 35 static cl::opt<unsigned> MaxInterleaveGroupFactor( 36 "max-interleave-group-factor", cl::Hidden, 37 cl::desc("Maximum factor for an interleaved access group (default = 8)"), 38 cl::init(8)); 39 40 /// Return true if all of the intrinsic's arguments and return type are scalars 41 /// for the scalar form of the intrinsic, and vectors for the vector form of the 42 /// intrinsic (except operands that are marked as always being scalar by 43 /// hasVectorInstrinsicScalarOpd). 44 bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) { 45 switch (ID) { 46 case Intrinsic::bswap: // Begin integer bit-manipulation. 47 case Intrinsic::bitreverse: 48 case Intrinsic::ctpop: 49 case Intrinsic::ctlz: 50 case Intrinsic::cttz: 51 case Intrinsic::fshl: 52 case Intrinsic::fshr: 53 case Intrinsic::sadd_sat: 54 case Intrinsic::ssub_sat: 55 case Intrinsic::uadd_sat: 56 case Intrinsic::usub_sat: 57 case Intrinsic::smul_fix: 58 case Intrinsic::smul_fix_sat: 59 case Intrinsic::umul_fix: 60 case Intrinsic::umul_fix_sat: 61 case Intrinsic::sqrt: // Begin floating-point. 62 case Intrinsic::sin: 63 case Intrinsic::cos: 64 case Intrinsic::exp: 65 case Intrinsic::exp2: 66 case Intrinsic::log: 67 case Intrinsic::log10: 68 case Intrinsic::log2: 69 case Intrinsic::fabs: 70 case Intrinsic::minnum: 71 case Intrinsic::maxnum: 72 case Intrinsic::minimum: 73 case Intrinsic::maximum: 74 case Intrinsic::copysign: 75 case Intrinsic::floor: 76 case Intrinsic::ceil: 77 case Intrinsic::trunc: 78 case Intrinsic::rint: 79 case Intrinsic::nearbyint: 80 case Intrinsic::round: 81 case Intrinsic::pow: 82 case Intrinsic::fma: 83 case Intrinsic::fmuladd: 84 case Intrinsic::powi: 85 case Intrinsic::canonicalize: 86 return true; 87 default: 88 return false; 89 } 90 } 91 92 /// Identifies if the vector form of the intrinsic has a scalar operand. 93 bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID, 94 unsigned ScalarOpdIdx) { 95 switch (ID) { 96 case Intrinsic::ctlz: 97 case Intrinsic::cttz: 98 case Intrinsic::powi: 99 return (ScalarOpdIdx == 1); 100 case Intrinsic::smul_fix: 101 case Intrinsic::smul_fix_sat: 102 case Intrinsic::umul_fix: 103 case Intrinsic::umul_fix_sat: 104 return (ScalarOpdIdx == 2); 105 default: 106 return false; 107 } 108 } 109 110 /// Returns intrinsic ID for call. 111 /// For the input call instruction it finds mapping intrinsic and returns 112 /// its ID, in case it does not found it return not_intrinsic. 113 Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI, 114 const TargetLibraryInfo *TLI) { 115 Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI); 116 if (ID == Intrinsic::not_intrinsic) 117 return Intrinsic::not_intrinsic; 118 119 if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start || 120 ID == Intrinsic::lifetime_end || ID == Intrinsic::assume || 121 ID == Intrinsic::sideeffect) 122 return ID; 123 return Intrinsic::not_intrinsic; 124 } 125 126 /// Find the operand of the GEP that should be checked for consecutive 127 /// stores. This ignores trailing indices that have no effect on the final 128 /// pointer. 129 unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) { 130 const DataLayout &DL = Gep->getModule()->getDataLayout(); 131 unsigned LastOperand = Gep->getNumOperands() - 1; 132 unsigned GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType()); 133 134 // Walk backwards and try to peel off zeros. 135 while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) { 136 // Find the type we're currently indexing into. 137 gep_type_iterator GEPTI = gep_type_begin(Gep); 138 std::advance(GEPTI, LastOperand - 2); 139 140 // If it's a type with the same allocation size as the result of the GEP we 141 // can peel off the zero index. 142 if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize) 143 break; 144 --LastOperand; 145 } 146 147 return LastOperand; 148 } 149 150 /// If the argument is a GEP, then returns the operand identified by 151 /// getGEPInductionOperand. However, if there is some other non-loop-invariant 152 /// operand, it returns that instead. 153 Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) { 154 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr); 155 if (!GEP) 156 return Ptr; 157 158 unsigned InductionOperand = getGEPInductionOperand(GEP); 159 160 // Check that all of the gep indices are uniform except for our induction 161 // operand. 162 for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) 163 if (i != InductionOperand && 164 !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp)) 165 return Ptr; 166 return GEP->getOperand(InductionOperand); 167 } 168 169 /// If a value has only one user that is a CastInst, return it. 170 Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) { 171 Value *UniqueCast = nullptr; 172 for (User *U : Ptr->users()) { 173 CastInst *CI = dyn_cast<CastInst>(U); 174 if (CI && CI->getType() == Ty) { 175 if (!UniqueCast) 176 UniqueCast = CI; 177 else 178 return nullptr; 179 } 180 } 181 return UniqueCast; 182 } 183 184 /// Get the stride of a pointer access in a loop. Looks for symbolic 185 /// strides "a[i*stride]". Returns the symbolic stride, or null otherwise. 186 Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) { 187 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType()); 188 if (!PtrTy || PtrTy->isAggregateType()) 189 return nullptr; 190 191 // Try to remove a gep instruction to make the pointer (actually index at this 192 // point) easier analyzable. If OrigPtr is equal to Ptr we are analyzing the 193 // pointer, otherwise, we are analyzing the index. 194 Value *OrigPtr = Ptr; 195 196 // The size of the pointer access. 197 int64_t PtrAccessSize = 1; 198 199 Ptr = stripGetElementPtr(Ptr, SE, Lp); 200 const SCEV *V = SE->getSCEV(Ptr); 201 202 if (Ptr != OrigPtr) 203 // Strip off casts. 204 while (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V)) 205 V = C->getOperand(); 206 207 const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V); 208 if (!S) 209 return nullptr; 210 211 V = S->getStepRecurrence(*SE); 212 if (!V) 213 return nullptr; 214 215 // Strip off the size of access multiplication if we are still analyzing the 216 // pointer. 217 if (OrigPtr == Ptr) { 218 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) { 219 if (M->getOperand(0)->getSCEVType() != scConstant) 220 return nullptr; 221 222 const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt(); 223 224 // Huge step value - give up. 225 if (APStepVal.getBitWidth() > 64) 226 return nullptr; 227 228 int64_t StepVal = APStepVal.getSExtValue(); 229 if (PtrAccessSize != StepVal) 230 return nullptr; 231 V = M->getOperand(1); 232 } 233 } 234 235 // Strip off casts. 236 Type *StripedOffRecurrenceCast = nullptr; 237 if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V)) { 238 StripedOffRecurrenceCast = C->getType(); 239 V = C->getOperand(); 240 } 241 242 // Look for the loop invariant symbolic value. 243 const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V); 244 if (!U) 245 return nullptr; 246 247 Value *Stride = U->getValue(); 248 if (!Lp->isLoopInvariant(Stride)) 249 return nullptr; 250 251 // If we have stripped off the recurrence cast we have to make sure that we 252 // return the value that is used in this loop so that we can replace it later. 253 if (StripedOffRecurrenceCast) 254 Stride = getUniqueCastUse(Stride, Lp, StripedOffRecurrenceCast); 255 256 return Stride; 257 } 258 259 /// Given a vector and an element number, see if the scalar value is 260 /// already around as a register, for example if it were inserted then extracted 261 /// from the vector. 262 Value *llvm::findScalarElement(Value *V, unsigned EltNo) { 263 assert(V->getType()->isVectorTy() && "Not looking at a vector?"); 264 VectorType *VTy = cast<VectorType>(V->getType()); 265 // For fixed-length vector, return undef for out of range access. 266 if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) { 267 unsigned Width = FVTy->getNumElements(); 268 if (EltNo >= Width) 269 return UndefValue::get(FVTy->getElementType()); 270 } 271 272 if (Constant *C = dyn_cast<Constant>(V)) 273 return C->getAggregateElement(EltNo); 274 275 if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) { 276 // If this is an insert to a variable element, we don't know what it is. 277 if (!isa<ConstantInt>(III->getOperand(2))) 278 return nullptr; 279 unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue(); 280 281 // If this is an insert to the element we are looking for, return the 282 // inserted value. 283 if (EltNo == IIElt) 284 return III->getOperand(1); 285 286 // Otherwise, the insertelement doesn't modify the value, recurse on its 287 // vector input. 288 return findScalarElement(III->getOperand(0), EltNo); 289 } 290 291 ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V); 292 // Restrict the following transformation to fixed-length vector. 293 if (SVI && isa<FixedVectorType>(SVI->getType())) { 294 unsigned LHSWidth = 295 cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements(); 296 int InEl = SVI->getMaskValue(EltNo); 297 if (InEl < 0) 298 return UndefValue::get(VTy->getElementType()); 299 if (InEl < (int)LHSWidth) 300 return findScalarElement(SVI->getOperand(0), InEl); 301 return findScalarElement(SVI->getOperand(1), InEl - LHSWidth); 302 } 303 304 // Extract a value from a vector add operation with a constant zero. 305 // TODO: Use getBinOpIdentity() to generalize this. 306 Value *Val; Constant *C; 307 if (match(V, m_Add(m_Value(Val), m_Constant(C)))) 308 if (Constant *Elt = C->getAggregateElement(EltNo)) 309 if (Elt->isNullValue()) 310 return findScalarElement(Val, EltNo); 311 312 // Otherwise, we don't know. 313 return nullptr; 314 } 315 316 int llvm::getSplatIndex(ArrayRef<int> Mask) { 317 int SplatIndex = -1; 318 for (int M : Mask) { 319 // Ignore invalid (undefined) mask elements. 320 if (M < 0) 321 continue; 322 323 // There can be only 1 non-negative mask element value if this is a splat. 324 if (SplatIndex != -1 && SplatIndex != M) 325 return -1; 326 327 // Initialize the splat index to the 1st non-negative mask element. 328 SplatIndex = M; 329 } 330 assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?"); 331 return SplatIndex; 332 } 333 334 /// Get splat value if the input is a splat vector or return nullptr. 335 /// This function is not fully general. It checks only 2 cases: 336 /// the input value is (1) a splat constant vector or (2) a sequence 337 /// of instructions that broadcasts a scalar at element 0. 338 const llvm::Value *llvm::getSplatValue(const Value *V) { 339 if (isa<VectorType>(V->getType())) 340 if (auto *C = dyn_cast<Constant>(V)) 341 return C->getSplatValue(); 342 343 // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...> 344 Value *Splat; 345 if (match(V, m_ShuffleVector( 346 m_InsertElement(m_Value(), m_Value(Splat), m_ZeroInt()), 347 m_Value(), m_ZeroMask()))) 348 return Splat; 349 350 return nullptr; 351 } 352 353 // This setting is based on its counterpart in value tracking, but it could be 354 // adjusted if needed. 355 const unsigned MaxDepth = 6; 356 357 bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) { 358 assert(Depth <= MaxDepth && "Limit Search Depth"); 359 360 if (isa<VectorType>(V->getType())) { 361 if (isa<UndefValue>(V)) 362 return true; 363 // FIXME: We can allow undefs, but if Index was specified, we may want to 364 // check that the constant is defined at that index. 365 if (auto *C = dyn_cast<Constant>(V)) 366 return C->getSplatValue() != nullptr; 367 } 368 369 if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) { 370 // FIXME: We can safely allow undefs here. If Index was specified, we will 371 // check that the mask elt is defined at the required index. 372 if (!is_splat(Shuf->getShuffleMask())) 373 return false; 374 375 // Match any index. 376 if (Index == -1) 377 return true; 378 379 // Match a specific element. The mask should be defined at and match the 380 // specified index. 381 return Shuf->getMaskValue(Index) == Index; 382 } 383 384 // The remaining tests are all recursive, so bail out if we hit the limit. 385 if (Depth++ == MaxDepth) 386 return false; 387 388 // If both operands of a binop are splats, the result is a splat. 389 Value *X, *Y, *Z; 390 if (match(V, m_BinOp(m_Value(X), m_Value(Y)))) 391 return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth); 392 393 // If all operands of a select are splats, the result is a splat. 394 if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z)))) 395 return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) && 396 isSplatValue(Z, Index, Depth); 397 398 // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops). 399 400 return false; 401 } 402 403 void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask, 404 SmallVectorImpl<int> &ScaledMask) { 405 assert(Scale > 0 && "Unexpected scaling factor"); 406 407 // Fast-path: if no scaling, then it is just a copy. 408 if (Scale == 1) { 409 ScaledMask.assign(Mask.begin(), Mask.end()); 410 return; 411 } 412 413 ScaledMask.clear(); 414 for (int MaskElt : Mask) { 415 if (MaskElt >= 0) { 416 assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= 417 std::numeric_limits<int32_t>::max() && 418 "Overflowed 32-bits"); 419 } 420 for (int SliceElt = 0; SliceElt != Scale; ++SliceElt) 421 ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt); 422 } 423 } 424 425 bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask, 426 SmallVectorImpl<int> &ScaledMask) { 427 assert(Scale > 0 && "Unexpected scaling factor"); 428 429 // Fast-path: if no scaling, then it is just a copy. 430 if (Scale == 1) { 431 ScaledMask.assign(Mask.begin(), Mask.end()); 432 return true; 433 } 434 435 // We must map the original elements down evenly to a type with less elements. 436 int NumElts = Mask.size(); 437 if (NumElts % Scale != 0) 438 return false; 439 440 ScaledMask.clear(); 441 ScaledMask.reserve(NumElts / Scale); 442 443 // Step through the input mask by splitting into Scale-sized slices. 444 do { 445 ArrayRef<int> MaskSlice = Mask.take_front(Scale); 446 assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice."); 447 448 // The first element of the slice determines how we evaluate this slice. 449 int SliceFront = MaskSlice.front(); 450 if (SliceFront < 0) { 451 // Negative values (undef or other "sentinel" values) must be equal across 452 // the entire slice. 453 if (!is_splat(MaskSlice)) 454 return false; 455 ScaledMask.push_back(SliceFront); 456 } else { 457 // A positive mask element must be cleanly divisible. 458 if (SliceFront % Scale != 0) 459 return false; 460 // Elements of the slice must be consecutive. 461 for (int i = 1; i < Scale; ++i) 462 if (MaskSlice[i] != SliceFront + i) 463 return false; 464 ScaledMask.push_back(SliceFront / Scale); 465 } 466 Mask = Mask.drop_front(Scale); 467 } while (!Mask.empty()); 468 469 assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask"); 470 471 // All elements of the original mask can be scaled down to map to the elements 472 // of a mask with wider elements. 473 return true; 474 } 475 476 MapVector<Instruction *, uint64_t> 477 llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB, 478 const TargetTransformInfo *TTI) { 479 480 // DemandedBits will give us every value's live-out bits. But we want 481 // to ensure no extra casts would need to be inserted, so every DAG 482 // of connected values must have the same minimum bitwidth. 483 EquivalenceClasses<Value *> ECs; 484 SmallVector<Value *, 16> Worklist; 485 SmallPtrSet<Value *, 4> Roots; 486 SmallPtrSet<Value *, 16> Visited; 487 DenseMap<Value *, uint64_t> DBits; 488 SmallPtrSet<Instruction *, 4> InstructionSet; 489 MapVector<Instruction *, uint64_t> MinBWs; 490 491 // Determine the roots. We work bottom-up, from truncs or icmps. 492 bool SeenExtFromIllegalType = false; 493 for (auto *BB : Blocks) 494 for (auto &I : *BB) { 495 InstructionSet.insert(&I); 496 497 if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) && 498 !TTI->isTypeLegal(I.getOperand(0)->getType())) 499 SeenExtFromIllegalType = true; 500 501 // Only deal with non-vector integers up to 64-bits wide. 502 if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) && 503 !I.getType()->isVectorTy() && 504 I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) { 505 // Don't make work for ourselves. If we know the loaded type is legal, 506 // don't add it to the worklist. 507 if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType())) 508 continue; 509 510 Worklist.push_back(&I); 511 Roots.insert(&I); 512 } 513 } 514 // Early exit. 515 if (Worklist.empty() || (TTI && !SeenExtFromIllegalType)) 516 return MinBWs; 517 518 // Now proceed breadth-first, unioning values together. 519 while (!Worklist.empty()) { 520 Value *Val = Worklist.pop_back_val(); 521 Value *Leader = ECs.getOrInsertLeaderValue(Val); 522 523 if (Visited.count(Val)) 524 continue; 525 Visited.insert(Val); 526 527 // Non-instructions terminate a chain successfully. 528 if (!isa<Instruction>(Val)) 529 continue; 530 Instruction *I = cast<Instruction>(Val); 531 532 // If we encounter a type that is larger than 64 bits, we can't represent 533 // it so bail out. 534 if (DB.getDemandedBits(I).getBitWidth() > 64) 535 return MapVector<Instruction *, uint64_t>(); 536 537 uint64_t V = DB.getDemandedBits(I).getZExtValue(); 538 DBits[Leader] |= V; 539 DBits[I] = V; 540 541 // Casts, loads and instructions outside of our range terminate a chain 542 // successfully. 543 if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) || 544 !InstructionSet.count(I)) 545 continue; 546 547 // Unsafe casts terminate a chain unsuccessfully. We can't do anything 548 // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to 549 // transform anything that relies on them. 550 if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) || 551 !I->getType()->isIntegerTy()) { 552 DBits[Leader] |= ~0ULL; 553 continue; 554 } 555 556 // We don't modify the types of PHIs. Reductions will already have been 557 // truncated if possible, and inductions' sizes will have been chosen by 558 // indvars. 559 if (isa<PHINode>(I)) 560 continue; 561 562 if (DBits[Leader] == ~0ULL) 563 // All bits demanded, no point continuing. 564 continue; 565 566 for (Value *O : cast<User>(I)->operands()) { 567 ECs.unionSets(Leader, O); 568 Worklist.push_back(O); 569 } 570 } 571 572 // Now we've discovered all values, walk them to see if there are 573 // any users we didn't see. If there are, we can't optimize that 574 // chain. 575 for (auto &I : DBits) 576 for (auto *U : I.first->users()) 577 if (U->getType()->isIntegerTy() && DBits.count(U) == 0) 578 DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL; 579 580 for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) { 581 uint64_t LeaderDemandedBits = 0; 582 for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI) 583 LeaderDemandedBits |= DBits[*MI]; 584 585 uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) - 586 llvm::countLeadingZeros(LeaderDemandedBits); 587 // Round up to a power of 2 588 if (!isPowerOf2_64((uint64_t)MinBW)) 589 MinBW = NextPowerOf2(MinBW); 590 591 // We don't modify the types of PHIs. Reductions will already have been 592 // truncated if possible, and inductions' sizes will have been chosen by 593 // indvars. 594 // If we are required to shrink a PHI, abandon this entire equivalence class. 595 bool Abort = false; 596 for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI) 597 if (isa<PHINode>(*MI) && MinBW < (*MI)->getType()->getScalarSizeInBits()) { 598 Abort = true; 599 break; 600 } 601 if (Abort) 602 continue; 603 604 for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI) { 605 if (!isa<Instruction>(*MI)) 606 continue; 607 Type *Ty = (*MI)->getType(); 608 if (Roots.count(*MI)) 609 Ty = cast<Instruction>(*MI)->getOperand(0)->getType(); 610 if (MinBW < Ty->getScalarSizeInBits()) 611 MinBWs[cast<Instruction>(*MI)] = MinBW; 612 } 613 } 614 615 return MinBWs; 616 } 617 618 /// Add all access groups in @p AccGroups to @p List. 619 template <typename ListT> 620 static void addToAccessGroupList(ListT &List, MDNode *AccGroups) { 621 // Interpret an access group as a list containing itself. 622 if (AccGroups->getNumOperands() == 0) { 623 assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group"); 624 List.insert(AccGroups); 625 return; 626 } 627 628 for (auto &AccGroupListOp : AccGroups->operands()) { 629 auto *Item = cast<MDNode>(AccGroupListOp.get()); 630 assert(isValidAsAccessGroup(Item) && "List item must be an access group"); 631 List.insert(Item); 632 } 633 } 634 635 MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) { 636 if (!AccGroups1) 637 return AccGroups2; 638 if (!AccGroups2) 639 return AccGroups1; 640 if (AccGroups1 == AccGroups2) 641 return AccGroups1; 642 643 SmallSetVector<Metadata *, 4> Union; 644 addToAccessGroupList(Union, AccGroups1); 645 addToAccessGroupList(Union, AccGroups2); 646 647 if (Union.size() == 0) 648 return nullptr; 649 if (Union.size() == 1) 650 return cast<MDNode>(Union.front()); 651 652 LLVMContext &Ctx = AccGroups1->getContext(); 653 return MDNode::get(Ctx, Union.getArrayRef()); 654 } 655 656 MDNode *llvm::intersectAccessGroups(const Instruction *Inst1, 657 const Instruction *Inst2) { 658 bool MayAccessMem1 = Inst1->mayReadOrWriteMemory(); 659 bool MayAccessMem2 = Inst2->mayReadOrWriteMemory(); 660 661 if (!MayAccessMem1 && !MayAccessMem2) 662 return nullptr; 663 if (!MayAccessMem1) 664 return Inst2->getMetadata(LLVMContext::MD_access_group); 665 if (!MayAccessMem2) 666 return Inst1->getMetadata(LLVMContext::MD_access_group); 667 668 MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group); 669 MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group); 670 if (!MD1 || !MD2) 671 return nullptr; 672 if (MD1 == MD2) 673 return MD1; 674 675 // Use set for scalable 'contains' check. 676 SmallPtrSet<Metadata *, 4> AccGroupSet2; 677 addToAccessGroupList(AccGroupSet2, MD2); 678 679 SmallVector<Metadata *, 4> Intersection; 680 if (MD1->getNumOperands() == 0) { 681 assert(isValidAsAccessGroup(MD1) && "Node must be an access group"); 682 if (AccGroupSet2.count(MD1)) 683 Intersection.push_back(MD1); 684 } else { 685 for (const MDOperand &Node : MD1->operands()) { 686 auto *Item = cast<MDNode>(Node.get()); 687 assert(isValidAsAccessGroup(Item) && "List item must be an access group"); 688 if (AccGroupSet2.count(Item)) 689 Intersection.push_back(Item); 690 } 691 } 692 693 if (Intersection.size() == 0) 694 return nullptr; 695 if (Intersection.size() == 1) 696 return cast<MDNode>(Intersection.front()); 697 698 LLVMContext &Ctx = Inst1->getContext(); 699 return MDNode::get(Ctx, Intersection); 700 } 701 702 /// \returns \p I after propagating metadata from \p VL. 703 Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) { 704 Instruction *I0 = cast<Instruction>(VL[0]); 705 SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata; 706 I0->getAllMetadataOtherThanDebugLoc(Metadata); 707 708 for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, 709 LLVMContext::MD_noalias, LLVMContext::MD_fpmath, 710 LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load, 711 LLVMContext::MD_access_group}) { 712 MDNode *MD = I0->getMetadata(Kind); 713 714 for (int J = 1, E = VL.size(); MD && J != E; ++J) { 715 const Instruction *IJ = cast<Instruction>(VL[J]); 716 MDNode *IMD = IJ->getMetadata(Kind); 717 switch (Kind) { 718 case LLVMContext::MD_tbaa: 719 MD = MDNode::getMostGenericTBAA(MD, IMD); 720 break; 721 case LLVMContext::MD_alias_scope: 722 MD = MDNode::getMostGenericAliasScope(MD, IMD); 723 break; 724 case LLVMContext::MD_fpmath: 725 MD = MDNode::getMostGenericFPMath(MD, IMD); 726 break; 727 case LLVMContext::MD_noalias: 728 case LLVMContext::MD_nontemporal: 729 case LLVMContext::MD_invariant_load: 730 MD = MDNode::intersect(MD, IMD); 731 break; 732 case LLVMContext::MD_access_group: 733 MD = intersectAccessGroups(Inst, IJ); 734 break; 735 default: 736 llvm_unreachable("unhandled metadata"); 737 } 738 } 739 740 Inst->setMetadata(Kind, MD); 741 } 742 743 return Inst; 744 } 745 746 Constant * 747 llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF, 748 const InterleaveGroup<Instruction> &Group) { 749 // All 1's means mask is not needed. 750 if (Group.getNumMembers() == Group.getFactor()) 751 return nullptr; 752 753 // TODO: support reversed access. 754 assert(!Group.isReverse() && "Reversed group not supported."); 755 756 SmallVector<Constant *, 16> Mask; 757 for (unsigned i = 0; i < VF; i++) 758 for (unsigned j = 0; j < Group.getFactor(); ++j) { 759 unsigned HasMember = Group.getMember(j) ? 1 : 0; 760 Mask.push_back(Builder.getInt1(HasMember)); 761 } 762 763 return ConstantVector::get(Mask); 764 } 765 766 llvm::SmallVector<int, 16> 767 llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) { 768 SmallVector<int, 16> MaskVec; 769 for (unsigned i = 0; i < VF; i++) 770 for (unsigned j = 0; j < ReplicationFactor; j++) 771 MaskVec.push_back(i); 772 773 return MaskVec; 774 } 775 776 llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF, 777 unsigned NumVecs) { 778 SmallVector<int, 16> Mask; 779 for (unsigned i = 0; i < VF; i++) 780 for (unsigned j = 0; j < NumVecs; j++) 781 Mask.push_back(j * VF + i); 782 783 return Mask; 784 } 785 786 llvm::SmallVector<int, 16> 787 llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) { 788 SmallVector<int, 16> Mask; 789 for (unsigned i = 0; i < VF; i++) 790 Mask.push_back(Start + i * Stride); 791 792 return Mask; 793 } 794 795 llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start, 796 unsigned NumInts, 797 unsigned NumUndefs) { 798 SmallVector<int, 16> Mask; 799 for (unsigned i = 0; i < NumInts; i++) 800 Mask.push_back(Start + i); 801 802 for (unsigned i = 0; i < NumUndefs; i++) 803 Mask.push_back(-1); 804 805 return Mask; 806 } 807 808 /// A helper function for concatenating vectors. This function concatenates two 809 /// vectors having the same element type. If the second vector has fewer 810 /// elements than the first, it is padded with undefs. 811 static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1, 812 Value *V2) { 813 VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType()); 814 VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType()); 815 assert(VecTy1 && VecTy2 && 816 VecTy1->getScalarType() == VecTy2->getScalarType() && 817 "Expect two vectors with the same element type"); 818 819 unsigned NumElts1 = VecTy1->getNumElements(); 820 unsigned NumElts2 = VecTy2->getNumElements(); 821 assert(NumElts1 >= NumElts2 && "Unexpect the first vector has less elements"); 822 823 if (NumElts1 > NumElts2) { 824 // Extend with UNDEFs. 825 V2 = Builder.CreateShuffleVector( 826 V2, UndefValue::get(VecTy2), 827 createSequentialMask(0, NumElts2, NumElts1 - NumElts2)); 828 } 829 830 return Builder.CreateShuffleVector( 831 V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0)); 832 } 833 834 Value *llvm::concatenateVectors(IRBuilderBase &Builder, 835 ArrayRef<Value *> Vecs) { 836 unsigned NumVecs = Vecs.size(); 837 assert(NumVecs > 1 && "Should be at least two vectors"); 838 839 SmallVector<Value *, 8> ResList; 840 ResList.append(Vecs.begin(), Vecs.end()); 841 do { 842 SmallVector<Value *, 8> TmpList; 843 for (unsigned i = 0; i < NumVecs - 1; i += 2) { 844 Value *V0 = ResList[i], *V1 = ResList[i + 1]; 845 assert((V0->getType() == V1->getType() || i == NumVecs - 2) && 846 "Only the last vector may have a different type"); 847 848 TmpList.push_back(concatenateTwoVectors(Builder, V0, V1)); 849 } 850 851 // Push the last vector if the total number of vectors is odd. 852 if (NumVecs % 2 != 0) 853 TmpList.push_back(ResList[NumVecs - 1]); 854 855 ResList = TmpList; 856 NumVecs = ResList.size(); 857 } while (NumVecs > 1); 858 859 return ResList[0]; 860 } 861 862 bool llvm::maskIsAllZeroOrUndef(Value *Mask) { 863 auto *ConstMask = dyn_cast<Constant>(Mask); 864 if (!ConstMask) 865 return false; 866 if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask)) 867 return true; 868 for (unsigned I = 0, 869 E = cast<VectorType>(ConstMask->getType())->getNumElements(); 870 I != E; ++I) { 871 if (auto *MaskElt = ConstMask->getAggregateElement(I)) 872 if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt)) 873 continue; 874 return false; 875 } 876 return true; 877 } 878 879 880 bool llvm::maskIsAllOneOrUndef(Value *Mask) { 881 auto *ConstMask = dyn_cast<Constant>(Mask); 882 if (!ConstMask) 883 return false; 884 if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask)) 885 return true; 886 for (unsigned I = 0, 887 E = cast<VectorType>(ConstMask->getType())->getNumElements(); 888 I != E; ++I) { 889 if (auto *MaskElt = ConstMask->getAggregateElement(I)) 890 if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt)) 891 continue; 892 return false; 893 } 894 return true; 895 } 896 897 /// TODO: This is a lot like known bits, but for 898 /// vectors. Is there something we can common this with? 899 APInt llvm::possiblyDemandedEltsInMask(Value *Mask) { 900 901 const unsigned VWidth = cast<VectorType>(Mask->getType())->getNumElements(); 902 APInt DemandedElts = APInt::getAllOnesValue(VWidth); 903 if (auto *CV = dyn_cast<ConstantVector>(Mask)) 904 for (unsigned i = 0; i < VWidth; i++) 905 if (CV->getAggregateElement(i)->isNullValue()) 906 DemandedElts.clearBit(i); 907 return DemandedElts; 908 } 909 910 bool InterleavedAccessInfo::isStrided(int Stride) { 911 unsigned Factor = std::abs(Stride); 912 return Factor >= 2 && Factor <= MaxInterleaveGroupFactor; 913 } 914 915 void InterleavedAccessInfo::collectConstStrideAccesses( 916 MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo, 917 const ValueToValueMap &Strides) { 918 auto &DL = TheLoop->getHeader()->getModule()->getDataLayout(); 919 920 // Since it's desired that the load/store instructions be maintained in 921 // "program order" for the interleaved access analysis, we have to visit the 922 // blocks in the loop in reverse postorder (i.e., in a topological order). 923 // Such an ordering will ensure that any load/store that may be executed 924 // before a second load/store will precede the second load/store in 925 // AccessStrideInfo. 926 LoopBlocksDFS DFS(TheLoop); 927 DFS.perform(LI); 928 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) 929 for (auto &I : *BB) { 930 auto *LI = dyn_cast<LoadInst>(&I); 931 auto *SI = dyn_cast<StoreInst>(&I); 932 if (!LI && !SI) 933 continue; 934 935 Value *Ptr = getLoadStorePointerOperand(&I); 936 // We don't check wrapping here because we don't know yet if Ptr will be 937 // part of a full group or a group with gaps. Checking wrapping for all 938 // pointers (even those that end up in groups with no gaps) will be overly 939 // conservative. For full groups, wrapping should be ok since if we would 940 // wrap around the address space we would do a memory access at nullptr 941 // even without the transformation. The wrapping checks are therefore 942 // deferred until after we've formed the interleaved groups. 943 int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides, 944 /*Assume=*/true, /*ShouldCheckWrap=*/false); 945 946 const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); 947 PointerType *PtrTy = cast<PointerType>(Ptr->getType()); 948 uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType()); 949 AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, 950 getLoadStoreAlignment(&I)); 951 } 952 } 953 954 // Analyze interleaved accesses and collect them into interleaved load and 955 // store groups. 956 // 957 // When generating code for an interleaved load group, we effectively hoist all 958 // loads in the group to the location of the first load in program order. When 959 // generating code for an interleaved store group, we sink all stores to the 960 // location of the last store. This code motion can change the order of load 961 // and store instructions and may break dependences. 962 // 963 // The code generation strategy mentioned above ensures that we won't violate 964 // any write-after-read (WAR) dependences. 965 // 966 // E.g., for the WAR dependence: a = A[i]; // (1) 967 // A[i] = b; // (2) 968 // 969 // The store group of (2) is always inserted at or below (2), and the load 970 // group of (1) is always inserted at or above (1). Thus, the instructions will 971 // never be reordered. All other dependences are checked to ensure the 972 // correctness of the instruction reordering. 973 // 974 // The algorithm visits all memory accesses in the loop in bottom-up program 975 // order. Program order is established by traversing the blocks in the loop in 976 // reverse postorder when collecting the accesses. 977 // 978 // We visit the memory accesses in bottom-up order because it can simplify the 979 // construction of store groups in the presence of write-after-write (WAW) 980 // dependences. 981 // 982 // E.g., for the WAW dependence: A[i] = a; // (1) 983 // A[i] = b; // (2) 984 // A[i + 1] = c; // (3) 985 // 986 // We will first create a store group with (3) and (2). (1) can't be added to 987 // this group because it and (2) are dependent. However, (1) can be grouped 988 // with other accesses that may precede it in program order. Note that a 989 // bottom-up order does not imply that WAW dependences should not be checked. 990 void InterleavedAccessInfo::analyzeInterleaving( 991 bool EnablePredicatedInterleavedMemAccesses) { 992 LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n"); 993 const ValueToValueMap &Strides = LAI->getSymbolicStrides(); 994 995 // Holds all accesses with a constant stride. 996 MapVector<Instruction *, StrideDescriptor> AccessStrideInfo; 997 collectConstStrideAccesses(AccessStrideInfo, Strides); 998 999 if (AccessStrideInfo.empty()) 1000 return; 1001 1002 // Collect the dependences in the loop. 1003 collectDependences(); 1004 1005 // Holds all interleaved store groups temporarily. 1006 SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups; 1007 // Holds all interleaved load groups temporarily. 1008 SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups; 1009 1010 // Search in bottom-up program order for pairs of accesses (A and B) that can 1011 // form interleaved load or store groups. In the algorithm below, access A 1012 // precedes access B in program order. We initialize a group for B in the 1013 // outer loop of the algorithm, and then in the inner loop, we attempt to 1014 // insert each A into B's group if: 1015 // 1016 // 1. A and B have the same stride, 1017 // 2. A and B have the same memory object size, and 1018 // 3. A belongs in B's group according to its distance from B. 1019 // 1020 // Special care is taken to ensure group formation will not break any 1021 // dependences. 1022 for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend(); 1023 BI != E; ++BI) { 1024 Instruction *B = BI->first; 1025 StrideDescriptor DesB = BI->second; 1026 1027 // Initialize a group for B if it has an allowable stride. Even if we don't 1028 // create a group for B, we continue with the bottom-up algorithm to ensure 1029 // we don't break any of B's dependences. 1030 InterleaveGroup<Instruction> *Group = nullptr; 1031 if (isStrided(DesB.Stride) && 1032 (!isPredicated(B->getParent()) || EnablePredicatedInterleavedMemAccesses)) { 1033 Group = getInterleaveGroup(B); 1034 if (!Group) { 1035 LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B 1036 << '\n'); 1037 Group = createInterleaveGroup(B, DesB.Stride, DesB.Alignment); 1038 } 1039 if (B->mayWriteToMemory()) 1040 StoreGroups.insert(Group); 1041 else 1042 LoadGroups.insert(Group); 1043 } 1044 1045 for (auto AI = std::next(BI); AI != E; ++AI) { 1046 Instruction *A = AI->first; 1047 StrideDescriptor DesA = AI->second; 1048 1049 // Our code motion strategy implies that we can't have dependences 1050 // between accesses in an interleaved group and other accesses located 1051 // between the first and last member of the group. Note that this also 1052 // means that a group can't have more than one member at a given offset. 1053 // The accesses in a group can have dependences with other accesses, but 1054 // we must ensure we don't extend the boundaries of the group such that 1055 // we encompass those dependent accesses. 1056 // 1057 // For example, assume we have the sequence of accesses shown below in a 1058 // stride-2 loop: 1059 // 1060 // (1, 2) is a group | A[i] = a; // (1) 1061 // | A[i-1] = b; // (2) | 1062 // A[i-3] = c; // (3) 1063 // A[i] = d; // (4) | (2, 4) is not a group 1064 // 1065 // Because accesses (2) and (3) are dependent, we can group (2) with (1) 1066 // but not with (4). If we did, the dependent access (3) would be within 1067 // the boundaries of the (2, 4) group. 1068 if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) { 1069 // If a dependence exists and A is already in a group, we know that A 1070 // must be a store since A precedes B and WAR dependences are allowed. 1071 // Thus, A would be sunk below B. We release A's group to prevent this 1072 // illegal code motion. A will then be free to form another group with 1073 // instructions that precede it. 1074 if (isInterleaved(A)) { 1075 InterleaveGroup<Instruction> *StoreGroup = getInterleaveGroup(A); 1076 1077 LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to " 1078 "dependence between " << *A << " and "<< *B << '\n'); 1079 1080 StoreGroups.remove(StoreGroup); 1081 releaseGroup(StoreGroup); 1082 } 1083 1084 // If a dependence exists and A is not already in a group (or it was 1085 // and we just released it), B might be hoisted above A (if B is a 1086 // load) or another store might be sunk below A (if B is a store). In 1087 // either case, we can't add additional instructions to B's group. B 1088 // will only form a group with instructions that it precedes. 1089 break; 1090 } 1091 1092 // At this point, we've checked for illegal code motion. If either A or B 1093 // isn't strided, there's nothing left to do. 1094 if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride)) 1095 continue; 1096 1097 // Ignore A if it's already in a group or isn't the same kind of memory 1098 // operation as B. 1099 // Note that mayReadFromMemory() isn't mutually exclusive to 1100 // mayWriteToMemory in the case of atomic loads. We shouldn't see those 1101 // here, canVectorizeMemory() should have returned false - except for the 1102 // case we asked for optimization remarks. 1103 if (isInterleaved(A) || 1104 (A->mayReadFromMemory() != B->mayReadFromMemory()) || 1105 (A->mayWriteToMemory() != B->mayWriteToMemory())) 1106 continue; 1107 1108 // Check rules 1 and 2. Ignore A if its stride or size is different from 1109 // that of B. 1110 if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size) 1111 continue; 1112 1113 // Ignore A if the memory object of A and B don't belong to the same 1114 // address space 1115 if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B)) 1116 continue; 1117 1118 // Calculate the distance from A to B. 1119 const SCEVConstant *DistToB = dyn_cast<SCEVConstant>( 1120 PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev)); 1121 if (!DistToB) 1122 continue; 1123 int64_t DistanceToB = DistToB->getAPInt().getSExtValue(); 1124 1125 // Check rule 3. Ignore A if its distance to B is not a multiple of the 1126 // size. 1127 if (DistanceToB % static_cast<int64_t>(DesB.Size)) 1128 continue; 1129 1130 // All members of a predicated interleave-group must have the same predicate, 1131 // and currently must reside in the same BB. 1132 BasicBlock *BlockA = A->getParent(); 1133 BasicBlock *BlockB = B->getParent(); 1134 if ((isPredicated(BlockA) || isPredicated(BlockB)) && 1135 (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB)) 1136 continue; 1137 1138 // The index of A is the index of B plus A's distance to B in multiples 1139 // of the size. 1140 int IndexA = 1141 Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size); 1142 1143 // Try to insert A into B's group. 1144 if (Group->insertMember(A, IndexA, DesA.Alignment)) { 1145 LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n' 1146 << " into the interleave group with" << *B 1147 << '\n'); 1148 InterleaveGroupMap[A] = Group; 1149 1150 // Set the first load in program order as the insert position. 1151 if (A->mayReadFromMemory()) 1152 Group->setInsertPos(A); 1153 } 1154 } // Iteration over A accesses. 1155 } // Iteration over B accesses. 1156 1157 // Remove interleaved store groups with gaps. 1158 for (auto *Group : StoreGroups) 1159 if (Group->getNumMembers() != Group->getFactor()) { 1160 LLVM_DEBUG( 1161 dbgs() << "LV: Invalidate candidate interleaved store group due " 1162 "to gaps.\n"); 1163 releaseGroup(Group); 1164 } 1165 // Remove interleaved groups with gaps (currently only loads) whose memory 1166 // accesses may wrap around. We have to revisit the getPtrStride analysis, 1167 // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does 1168 // not check wrapping (see documentation there). 1169 // FORNOW we use Assume=false; 1170 // TODO: Change to Assume=true but making sure we don't exceed the threshold 1171 // of runtime SCEV assumptions checks (thereby potentially failing to 1172 // vectorize altogether). 1173 // Additional optional optimizations: 1174 // TODO: If we are peeling the loop and we know that the first pointer doesn't 1175 // wrap then we can deduce that all pointers in the group don't wrap. 1176 // This means that we can forcefully peel the loop in order to only have to 1177 // check the first pointer for no-wrap. When we'll change to use Assume=true 1178 // we'll only need at most one runtime check per interleaved group. 1179 for (auto *Group : LoadGroups) { 1180 // Case 1: A full group. Can Skip the checks; For full groups, if the wide 1181 // load would wrap around the address space we would do a memory access at 1182 // nullptr even without the transformation. 1183 if (Group->getNumMembers() == Group->getFactor()) 1184 continue; 1185 1186 // Case 2: If first and last members of the group don't wrap this implies 1187 // that all the pointers in the group don't wrap. 1188 // So we check only group member 0 (which is always guaranteed to exist), 1189 // and group member Factor - 1; If the latter doesn't exist we rely on 1190 // peeling (if it is a non-reversed accsess -- see Case 3). 1191 Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0)); 1192 if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false, 1193 /*ShouldCheckWrap=*/true)) { 1194 LLVM_DEBUG( 1195 dbgs() << "LV: Invalidate candidate interleaved group due to " 1196 "first group member potentially pointer-wrapping.\n"); 1197 releaseGroup(Group); 1198 continue; 1199 } 1200 Instruction *LastMember = Group->getMember(Group->getFactor() - 1); 1201 if (LastMember) { 1202 Value *LastMemberPtr = getLoadStorePointerOperand(LastMember); 1203 if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false, 1204 /*ShouldCheckWrap=*/true)) { 1205 LLVM_DEBUG( 1206 dbgs() << "LV: Invalidate candidate interleaved group due to " 1207 "last group member potentially pointer-wrapping.\n"); 1208 releaseGroup(Group); 1209 } 1210 } else { 1211 // Case 3: A non-reversed interleaved load group with gaps: We need 1212 // to execute at least one scalar epilogue iteration. This will ensure 1213 // we don't speculatively access memory out-of-bounds. We only need 1214 // to look for a member at index factor - 1, since every group must have 1215 // a member at index zero. 1216 if (Group->isReverse()) { 1217 LLVM_DEBUG( 1218 dbgs() << "LV: Invalidate candidate interleaved group due to " 1219 "a reverse access with gaps.\n"); 1220 releaseGroup(Group); 1221 continue; 1222 } 1223 LLVM_DEBUG( 1224 dbgs() << "LV: Interleaved group requires epilogue iteration.\n"); 1225 RequiresScalarEpilogue = true; 1226 } 1227 } 1228 } 1229 1230 void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() { 1231 // If no group had triggered the requirement to create an epilogue loop, 1232 // there is nothing to do. 1233 if (!requiresScalarEpilogue()) 1234 return; 1235 1236 bool ReleasedGroup = false; 1237 // Release groups requiring scalar epilogues. Note that this also removes them 1238 // from InterleaveGroups. 1239 for (auto *Group : make_early_inc_range(InterleaveGroups)) { 1240 if (!Group->requiresScalarEpilogue()) 1241 continue; 1242 LLVM_DEBUG( 1243 dbgs() 1244 << "LV: Invalidate candidate interleaved group due to gaps that " 1245 "require a scalar epilogue (not allowed under optsize) and cannot " 1246 "be masked (not enabled). \n"); 1247 releaseGroup(Group); 1248 ReleasedGroup = true; 1249 } 1250 assert(ReleasedGroup && "At least one group must be invalidated, as a " 1251 "scalar epilogue was required"); 1252 (void)ReleasedGroup; 1253 RequiresScalarEpilogue = false; 1254 } 1255 1256 template <typename InstT> 1257 void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const { 1258 llvm_unreachable("addMetadata can only be used for Instruction"); 1259 } 1260 1261 namespace llvm { 1262 template <> 1263 void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const { 1264 SmallVector<Value *, 4> VL; 1265 std::transform(Members.begin(), Members.end(), std::back_inserter(VL), 1266 [](std::pair<int, Instruction *> p) { return p.second; }); 1267 propagateMetadata(NewInst, VL); 1268 } 1269 } 1270 1271 std::string VFABI::mangleTLIVectorName(StringRef VectorName, 1272 StringRef ScalarName, unsigned numArgs, 1273 unsigned VF) { 1274 SmallString<256> Buffer; 1275 llvm::raw_svector_ostream Out(Buffer); 1276 Out << "_ZGV" << VFABI::_LLVM_ << "N" << VF; 1277 for (unsigned I = 0; I < numArgs; ++I) 1278 Out << "v"; 1279 Out << "_" << ScalarName << "(" << VectorName << ")"; 1280 return std::string(Out.str()); 1281 } 1282 1283 void VFABI::getVectorVariantNames( 1284 const CallInst &CI, SmallVectorImpl<std::string> &VariantMappings) { 1285 const StringRef S = 1286 CI.getAttribute(AttributeList::FunctionIndex, VFABI::MappingsAttrName) 1287 .getValueAsString(); 1288 if (S.empty()) 1289 return; 1290 1291 SmallVector<StringRef, 8> ListAttr; 1292 S.split(ListAttr, ","); 1293 1294 for (auto &S : SetVector<StringRef>(ListAttr.begin(), ListAttr.end())) { 1295 #ifndef NDEBUG 1296 LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << S << "'\n"); 1297 Optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, *(CI.getModule())); 1298 assert(Info.hasValue() && "Invalid name for a VFABI variant."); 1299 assert(CI.getModule()->getFunction(Info.getValue().VectorName) && 1300 "Vector function is missing."); 1301 #endif 1302 VariantMappings.push_back(std::string(S)); 1303 } 1304 } 1305 1306 bool VFShape::hasValidParameterList() const { 1307 for (unsigned Pos = 0, NumParams = Parameters.size(); Pos < NumParams; 1308 ++Pos) { 1309 assert(Parameters[Pos].ParamPos == Pos && "Broken parameter list."); 1310 1311 switch (Parameters[Pos].ParamKind) { 1312 default: // Nothing to check. 1313 break; 1314 case VFParamKind::OMP_Linear: 1315 case VFParamKind::OMP_LinearRef: 1316 case VFParamKind::OMP_LinearVal: 1317 case VFParamKind::OMP_LinearUVal: 1318 // Compile time linear steps must be non-zero. 1319 if (Parameters[Pos].LinearStepOrPos == 0) 1320 return false; 1321 break; 1322 case VFParamKind::OMP_LinearPos: 1323 case VFParamKind::OMP_LinearRefPos: 1324 case VFParamKind::OMP_LinearValPos: 1325 case VFParamKind::OMP_LinearUValPos: 1326 // The runtime linear step must be referring to some other 1327 // parameters in the signature. 1328 if (Parameters[Pos].LinearStepOrPos >= int(NumParams)) 1329 return false; 1330 // The linear step parameter must be marked as uniform. 1331 if (Parameters[Parameters[Pos].LinearStepOrPos].ParamKind != 1332 VFParamKind::OMP_Uniform) 1333 return false; 1334 // The linear step parameter can't point at itself. 1335 if (Parameters[Pos].LinearStepOrPos == int(Pos)) 1336 return false; 1337 break; 1338 case VFParamKind::GlobalPredicate: 1339 // The global predicate must be the unique. Can be placed anywhere in the 1340 // signature. 1341 for (unsigned NextPos = Pos + 1; NextPos < NumParams; ++NextPos) 1342 if (Parameters[NextPos].ParamKind == VFParamKind::GlobalPredicate) 1343 return false; 1344 break; 1345 } 1346 } 1347 return true; 1348 } 1349