//===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// Identify if the intrinsic is trivially vectorizable.
/// This method returns true if the intrinsic's argument types are all
/// scalars for the scalar form of the intrinsic and all vectors for
/// the vector form of the intrinsic.
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::sqrt:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::bswap:
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::pow:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::powi:
  case Intrinsic::canonicalize:
    return true;
  default:
    return false;
  }
}

/// Identifies if the intrinsic has a scalar operand. It checks for
/// ctlz, cttz and powi special intrinsics whose argument is scalar.
bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
                                        unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  default:
    return false;
  }
}

/// Returns the intrinsic ID for a call.
/// For the given call instruction it finds the corresponding intrinsic and
/// returns its ID; if no such intrinsic is found, it returns not_intrinsic.
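/// For instance (illustrative only), a call to a library function such as
/// sinf that the target library info maps to the sin intrinsic would
/// typically yield Intrinsic::sin here, while a call to an arbitrary,
/// unrecognized external function yields not_intrinsic.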
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::sideeffect)
    return ID;
  return Intrinsic::not_intrinsic;
}

/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
  const DataLayout &DL = Gep->getModule()->getDataLayout();
  unsigned LastOperand = Gep->getNumOperands() - 1;
  unsigned GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());

  // Walk backwards and try to peel off zeros.
  while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
    // Find the type we're currently indexing into.
    gep_type_iterator GEPTI = gep_type_begin(Gep);
    std::advance(GEPTI, LastOperand - 2);

    // If it's a type with the same allocation size as the result of the GEP we
    // can peel off the zero index.
    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
      break;
    --LastOperand;
  }

  return LastOperand;
}

/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}

/// If a value has only one user that is a CastInst, return it.
Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
  Value *UniqueCast = nullptr;
  for (User *U : Ptr->users()) {
    CastInst *CI = dyn_cast<CastInst>(U);
    if (CI && CI->getType() == Ty) {
      if (!UniqueCast)
        UniqueCast = CI;
      else
        return nullptr;
    }
  }
  return UniqueCast;
}

/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually the index
  // at this point) easier to analyze. If OrigPtr is equal to Ptr we are
  // analyzing the pointer, otherwise we are analyzing the index.
  Value *OrigPtr = Ptr;

  // The size of the pointer access.
  int64_t PtrAccessSize = 1;

  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V))
      V = C->getOperand();

  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
  if (!S)
    return nullptr;

  V = S->getStepRecurrence(*SE);
  if (!V)
    return nullptr;

  // Strip off the size of access multiplication if we are still analyzing the
  // pointer.
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

      // Huge step value - give up.
      if (APStepVal.getBitWidth() > 64)
        return nullptr;

      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Strip off casts.
  Type *StripedOffRecurrenceCast = nullptr;
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V)) {
    StripedOffRecurrenceCast = C->getType();
    V = C->getOperand();
  }

  // Look for the loop invariant symbolic value.
  const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
  if (!U)
    return nullptr;

  Value *Stride = U->getValue();
  if (!Lp->isLoopInvariant(Stride))
    return nullptr;

  // If we have stripped off the recurrence cast we have to make sure that we
  // return the value that is used in this loop so that we can replace it
  // later.
  if (StripedOffRecurrenceCast)
    Stride = getUniqueCastUse(Stride, Lp, StripedOffRecurrenceCast);

  return Stride;
}

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then
/// extracted from the vector.
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  unsigned Width = VTy->getNumElements();
  if (EltNo >= Width)  // Out of range access.
    return UndefValue::get(VTy->getElementType());

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
    unsigned LHSWidth = SVI->getOperand(0)->getType()->getVectorNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return UndefValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val; Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // Otherwise, we don't know.
  return nullptr;
}

/// Get the splat value if the input is a splat vector, or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcasts a single value into a vector.
const llvm::Value *llvm::getSplatValue(const Value *V) {

  if (auto *C = dyn_cast<Constant>(V))
    if (isa<VectorType>(V->getType()))
      return C->getSplatValue();

  auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V);
  if (!ShuffleInst)
    return nullptr;
  // All-zero (or undef) shuffle mask elements.
  for (int MaskElt : ShuffleInst->getShuffleMask())
    if (MaskElt != 0 && MaskElt != -1)
      return nullptr;
  // The first shuffle source is 'insertelement' with index 0.
  auto *InsertEltInst =
      dyn_cast<InsertElementInst>(ShuffleInst->getOperand(0));
  if (!InsertEltInst || !isa<ConstantInt>(InsertEltInst->getOperand(2)) ||
      !cast<ConstantInt>(InsertEltInst->getOperand(2))->isZero())
    return nullptr;

  return InsertEltInst->getOperand(1);
}

MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64 bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }
  // Early exit.
  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (Visited.count(Val))
      continue;
    Visited.insert(Val);

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
    uint64_t LeaderDemandedBits = 0;
    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
      LeaderDemandedBits |= DBits[*MI];

    uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) -
                     llvm::countLeadingZeros(LeaderDemandedBits);
    // Round up to a power of 2.
    if (!isPowerOf2_64((uint64_t)MinBW))
      MinBW = NextPowerOf2(MinBW);

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    // If we are required to shrink a PHI, abandon this entire equivalence
    // class.
    bool Abort = false;
    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
      if (isa<PHINode>(*MI) &&
          MinBW < (*MI)->getType()->getScalarSizeInBits()) {
        Abort = true;
        break;
      }
    if (Abort)
      continue;

    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI) {
      if (!isa<Instruction>(*MI))
        continue;
      Type *Ty = (*MI)->getType();
      if (Roots.count(*MI))
        Ty = cast<Instruction>(*MI)->getOperand(0)->getType();
      if (MinBW < Ty->getScalarSizeInBits())
        MinBWs[cast<Instruction>(*MI)] = MinBW;
    }
  }

  return MinBWs;
}

/// \returns \p Inst after propagating metadata from \p VL.
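/// Metadata is combined conservatively: !tbaa, !alias.scope and !fpmath are
/// merged with their "most generic" forms, while !noalias, !nontemporal and
/// !invariant.load are intersected and therefore survive only when every
/// instruction in \p VL carries them.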
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                    LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
                    LLVMContext::MD_nontemporal,
                    LLVMContext::MD_invariant_load}) {
    MDNode *MD = I0->getMetadata(Kind);

    for (int J = 1, E = VL.size(); MD && J != E; ++J) {
      const Instruction *IJ = cast<Instruction>(VL[J]);
      MDNode *IMD = IJ->getMetadata(Kind);
      switch (Kind) {
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_invariant_load:
        MD = MDNode::intersect(MD, IMD);
        break;
      default:
        llvm_unreachable("unhandled metadata");
      }
    }

    Inst->setMetadata(Kind, MD);
  }

  return Inst;
}

Constant *llvm::createBitMaskForGaps(IRBuilder<> &Builder, unsigned VF,
                                     const InterleaveGroup &Group) {
  // All 1's means mask is not needed.
  if (Group.getNumMembers() == Group.getFactor())
    return nullptr;

  // TODO: support reversed access.
  assert(!Group.isReverse() && "Reversed group not supported.");

  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < Group.getFactor(); ++j) {
      unsigned HasMember = Group.getMember(j) ? 1 : 0;
      Mask.push_back(Builder.getInt1(HasMember));
    }

  return ConstantVector::get(Mask);
}

Constant *llvm::createReplicatedMask(IRBuilder<> &Builder,
                                     unsigned ReplicationFactor, unsigned VF) {
  SmallVector<Constant *, 16> MaskVec;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < ReplicationFactor; j++)
      MaskVec.push_back(Builder.getInt32(i));

  return ConstantVector::get(MaskVec);
}

Constant *llvm::createInterleaveMask(IRBuilder<> &Builder, unsigned VF,
                                     unsigned NumVecs) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVecs; j++)
      Mask.push_back(Builder.getInt32(j * VF + i));

  return ConstantVector::get(Mask);
}

Constant *llvm::createStrideMask(IRBuilder<> &Builder, unsigned Start,
                                 unsigned Stride, unsigned VF) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Builder.getInt32(Start + i * Stride));

  return ConstantVector::get(Mask);
}

Constant *llvm::createSequentialMask(IRBuilder<> &Builder, unsigned Start,
                                     unsigned NumInts, unsigned NumUndefs) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < NumInts; i++)
    Mask.push_back(Builder.getInt32(Start + i));

  Constant *Undef = UndefValue::get(Builder.getInt32Ty());
  for (unsigned i = 0; i < NumUndefs; i++)
    Mask.push_back(Undef);

  return ConstantVector::get(Mask);
}

/// A helper function for concatenating vectors. This function concatenates two
/// vectors having the same element type. If the second vector has fewer
/// elements than the first, it is padded with undefs.
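///
/// For example, concatenating a <4 x i32> V1 with a <2 x i32> V2 first widens
/// V2 to <4 x i32> with the shuffle mask <0, 1, undef, undef>, and then emits
/// a shuffle of V1 and the widened V2 with the mask <0, 1, 2, 3, 4, 5>,
/// yielding a <6 x i32> result.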
static Value *concatenateTwoVectors(IRBuilder<> &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = VecTy1->getNumElements();
  unsigned NumElts2 = VecTy2->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Unexpected: the first vector has fewer elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    Constant *ExtMask =
        createSequentialMask(Builder, 0, NumElts2, NumElts1 - NumElts2);
    V2 = Builder.CreateShuffleVector(V2, UndefValue::get(VecTy2), ExtMask);
  }

  Constant *Mask = createSequentialMask(Builder, 0, NumElts1 + NumElts2, 0);
  return Builder.CreateShuffleVector(V1, V2, Mask);
}

Value *llvm::concatenateVectors(IRBuilder<> &Builder, ArrayRef<Value *> Vecs) {
  unsigned NumVecs = Vecs.size();
  assert(NumVecs > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(Vecs.begin(), Vecs.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVecs - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVecs % 2 != 0)
      TmpList.push_back(ResList[NumVecs - 1]);

    ResList = TmpList;
    NumVecs = ResList.size();
  } while (NumVecs > 1);

  return ResList[0];
}

bool InterleavedAccessInfo::isStrided(int Stride) {
  unsigned Factor = std::abs(Stride);
  return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {
  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      auto *LI = dyn_cast<LoadInst>(&I);
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!LI && !SI)
        continue;

      Value *Ptr = getLoadStorePointerOperand(&I);
      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) would be
      // overly conservative. For full groups, wrapping should be ok since if
      // we would wrap around the address space we would do a memory access at
      // nullptr even without the transformation. The wrapping checks are
      // therefore deferred until after we've formed the interleaved groups.
      int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
                                    /*Assume=*/true, /*ShouldCheckWrap=*/false);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());

      // An alignment of 0 means target ABI alignment.
      unsigned Align = getLoadStoreAlignment(&I);
      if (!Align)
        Align = DL.getABITypeAlignment(PtrTy->getElementType());

      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, Align);
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];  // (1)
//                                A[i] = b;  // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const ValueToValueMap &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order.
  // We initialize a group for B in the outer loop of the algorithm, and then
  // in the inner loop, we attempt to insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup *Group = nullptr;
    if (isStrided(DesB.Stride) &&
        (!isPredicated(B->getParent()) ||
         EnablePredicatedInterleavedMemAccesses)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Align);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a; // (1)
      //                    | A[i-1] = b; // (2) |
      //                      A[i-3] = c; // (3)
      //                      A[i]   = d; // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup *StoreGroup = getInterleaveGroup(A);
          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive with
      // mayWriteToMemory() in the case of atomic loads. We shouldn't see those
      // here; canVectorizeMemory() should have returned false - except for the
      // case when we asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory objects of A and B don't belong to the same
      // address space.
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // All members of a predicated interleave-group must have the same
      // predicate, and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();
      BasicBlock *BlockB = B->getParent();
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Align)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << "    into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (InterleaveGroup *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor()) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
    }
  // Remove interleaved groups with gaps (currently only loads) whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer
  // doesn't wrap, then we can deduce that all pointers in the group don't
  // wrap. This means that we can forcefully peel the loop in order to only
  // have to check the first pointer for no-wrap.
  // Once we change to Assume=true, we'll only need at most one runtime check
  // per interleaved group.
  for (InterleaveGroup *Group : LoadGroups) {
    // Case 1: A full group. Can skip the checks; for full groups, if the wide
    // load would wrap around the address space we would do a memory access at
    // nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If the first and last members of the group don't wrap, this
    // implies that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and group member Factor - 1; if the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
    if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                      /*ShouldCheckWrap=*/true)) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved group due to "
                    "first group member potentially pointer-wrapping.\n");
      releaseGroup(Group);
      continue;
    }
    Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
    if (LastMember) {
      Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
      if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                        /*ShouldCheckWrap=*/true)) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "last group member potentially pointer-wrapping.\n");
        releaseGroup(Group);
      }
    } else {
      // Case 3: A non-reversed interleaved load group with gaps: we need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }
}

void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
  // If no group had triggered the requirement to create an epilogue loop,
  // there is nothing to do.
  if (!requiresScalarEpilogue())
    return;

  // Avoid releasing a Group twice.
  SmallPtrSet<InterleaveGroup *, 4> DelSet;
  for (auto &I : InterleaveGroupMap) {
    InterleaveGroup *Group = I.second;
    if (Group->requiresScalarEpilogue())
      DelSet.insert(Group);
  }
  for (auto *Ptr : DelSet) {
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate candidate interleaved group due to gaps that "
           "require a scalar epilogue (not allowed under optsize) and cannot "
           "be masked (not enabled).\n");
    releaseGroup(Ptr);
  }

  RequiresScalarEpilogue = false;
}