//===- LoadStoreVectorizer.cpp - GPU Load & Store Vectorizer --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass merges loads/stores to/from sequential memory addresses into vector
// loads/stores. Although there's nothing GPU-specific in here, this pass is
// motivated by the microarchitectural quirks of nVidia and AMD GPUs.
//
// (For simplicity below we talk about loads only, but everything also applies
// to stores.)
//
// This pass is intended to be run late in the pipeline, after other
// vectorization opportunities have been exploited. So the assumption here is
// that immediately following our new vector load we'll need to extract out the
// individual elements of the load, so we can operate on them individually.
//
// On CPUs this transformation is usually not beneficial, because extracting the
// elements of a vector register is expensive on most architectures. It's
// usually better just to load each element individually into its own scalar
// register.
//
// However, nVidia and AMD GPUs don't have proper vector registers. Instead, a
// "vector load" loads directly into a series of scalar registers. In effect,
// extracting the elements of the vector is free. It's therefore always
// beneficial to vectorize a sequence of loads on these architectures.
//
// Vectorizing (perhaps a better name might be "coalescing") loads can have
// large performance impacts on GPU kernels, and opportunities for vectorizing
// are common in GPU code. This pass tries very hard to find such
// opportunities; its runtime is quadratic in the number of loads in a BB.
//
// Some CPU architectures, such as ARM, have instructions that load into
// multiple scalar registers, similar to a GPU vectorized load. In theory ARM
// could use this pass (with some modifications), but currently it implements
// its own pass to do something similar to what we do here.
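//
// As a rough illustration (not taken from any particular test case), four
// adjacent i32 loads such as:
//
//   %p1 = getelementptr inbounds i32, i32* %p, i64 1
//   %p2 = getelementptr inbounds i32, i32* %p, i64 2
//   %p3 = getelementptr inbounds i32, i32* %p, i64 3
//   %x0 = load i32, i32* %p,  align 16
//   %x1 = load i32, i32* %p1, align 4
//   %x2 = load i32, i32* %p2, align 8
//   %x3 = load i32, i32* %p3, align 4
//
// would typically be rewritten into a single vector load plus extracts:
//
//   %vp  = bitcast i32* %p to <4 x i32>*
//   %vec = load <4 x i32>, <4 x i32>* %vp, align 16
//   %x0  = extractelement <4 x i32> %vec, i32 0
//   %x1  = extractelement <4 x i32> %vec, i32 1
//   %x2  = extractelement <4 x i32> %vec, i32 2
//   %x3  = extractelement <4 x i32> %vec, i32 3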

#include "llvm/Transforms/Vectorize/LoadStoreVectorizer.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "load-store-vectorizer"

STATISTIC(NumVectorInstructions, "Number of vector accesses generated");
STATISTIC(NumScalarsVectorized, "Number of scalar accesses vectorized");

// FIXME: Assuming stack alignment of 4 is always good enough
static const unsigned StackAdjustedAlignment = 4;

namespace {

/// ChainID is an arbitrary token that is allowed to be different only for the
/// accesses that are guaranteed to be considered non-consecutive by
/// Vectorizer::isConsecutiveAccess. It's used for grouping instructions
/// together and reducing the number of instructions the main search operates on
/// at a time, i.e. this is to reduce compile time and nothing else as the main
/// search has O(n^2) time complexity. The underlying type of ChainID should not
/// be relied upon.
using ChainID = const Value *;
using InstrList = SmallVector<Instruction *, 8>;
using InstrListMap = MapVector<ChainID, InstrList>;

class Vectorizer {
  Function &F;
  AliasAnalysis &AA;
  DominatorTree &DT;
  ScalarEvolution &SE;
  TargetTransformInfo &TTI;
  const DataLayout &DL;
  IRBuilder<> Builder;

public:
  Vectorizer(Function &F, AliasAnalysis &AA, DominatorTree &DT,
             ScalarEvolution &SE, TargetTransformInfo &TTI)
      : F(F), AA(AA), DT(DT), SE(SE), TTI(TTI),
        DL(F.getParent()->getDataLayout()), Builder(SE.getContext()) {}

  bool run();

private:
  unsigned getPointerAddressSpace(Value *I);

  /// TODO: Remove this function once transition to Align is over.
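  /// (Returns the value of getAlign(LI) below as a plain integer.)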
  unsigned getAlignment(LoadInst *LI) const { return getAlign(LI).value(); }

  Align getAlign(LoadInst *LI) const {
    return DL.getValueOrABITypeAlignment(LI->getAlign(), LI->getType());
  }

  /// TODO: Remove this function once transition to Align is over.
  unsigned getAlignment(StoreInst *SI) const { return getAlign(SI).value(); }

  Align getAlign(StoreInst *SI) const {
    return DL.getValueOrABITypeAlignment(SI->getAlign(),
                                         SI->getValueOperand()->getType());
  }

  static const unsigned MaxDepth = 3;

  bool isConsecutiveAccess(Value *A, Value *B);
  bool areConsecutivePointers(Value *PtrA, Value *PtrB, APInt PtrDelta,
                              unsigned Depth = 0) const;
  bool lookThroughComplexAddresses(Value *PtrA, Value *PtrB, APInt PtrDelta,
                                   unsigned Depth) const;
  bool lookThroughSelects(Value *PtrA, Value *PtrB, const APInt &PtrDelta,
                          unsigned Depth) const;

  /// After vectorization, reorder the instructions that I depends on
  /// (the instructions defining its operands), to ensure they dominate I.
  void reorder(Instruction *I);

  /// Returns the first and the last instructions in Chain.
  std::pair<BasicBlock::iterator, BasicBlock::iterator>
  getBoundaryInstrs(ArrayRef<Instruction *> Chain);

  /// Erases the original instructions after vectorizing.
  void eraseInstructions(ArrayRef<Instruction *> Chain);

  /// "Legalize" the vector type that would be produced by combining \p
  /// ElementSizeBits elements in \p Chain. Break into two pieces such that the
  /// total size of each piece is 1, 2 or a multiple of 4 bytes. \p Chain is
  /// expected to have more than 4 elements.
  std::pair<ArrayRef<Instruction *>, ArrayRef<Instruction *>>
  splitOddVectorElts(ArrayRef<Instruction *> Chain, unsigned ElementSizeBits);

  /// Finds the largest prefix of Chain that's vectorizable, checking for
  /// intervening instructions which may affect the memory accessed by the
  /// instructions within Chain.
  ///
  /// The elements of \p Chain must be all loads or all stores and must be in
  /// address order.
  ArrayRef<Instruction *> getVectorizablePrefix(ArrayRef<Instruction *> Chain);

  /// Collects load and store instructions to vectorize.
  std::pair<InstrListMap, InstrListMap> collectInstructions(BasicBlock *BB);

  /// Processes the collected instructions, the \p Map. The values of \p Map
  /// should be all loads or all stores.
  bool vectorizeChains(InstrListMap &Map);

  /// Finds the load/stores to consecutive memory addresses and vectorizes them.
  bool vectorizeInstructions(ArrayRef<Instruction *> Instrs);

  /// Vectorizes the load instructions in Chain.
  bool
  vectorizeLoadChain(ArrayRef<Instruction *> Chain,
                     SmallPtrSet<Instruction *, 16> *InstructionsProcessed);

  /// Vectorizes the store instructions in Chain.
  bool
  vectorizeStoreChain(ArrayRef<Instruction *> Chain,
                      SmallPtrSet<Instruction *, 16> *InstructionsProcessed);

  /// Check if this load/store access is misaligned,
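  /// i.e. whether the target either disallows the access or reports it as
  /// slow at the given size and alignment
  /// (see TTI::allowsMisalignedMemoryAccesses).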
  bool accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
                          unsigned Alignment);
};

class LoadStoreVectorizerLegacyPass : public FunctionPass {
public:
  static char ID;

  LoadStoreVectorizerLegacyPass() : FunctionPass(ID) {
    initializeLoadStoreVectorizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override {
    return "GPU Load and Store Vectorizer";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace

char LoadStoreVectorizerLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(LoadStoreVectorizerLegacyPass, DEBUG_TYPE,
                      "Vectorize load and store instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoadStoreVectorizerLegacyPass, DEBUG_TYPE,
                    "Vectorize load and store instructions", false, false)

Pass *llvm::createLoadStoreVectorizerPass() {
  return new LoadStoreVectorizerLegacyPass();
}

bool LoadStoreVectorizerLegacyPass::runOnFunction(Function &F) {
  // Don't vectorize when the attribute NoImplicitFloat is used.
  if (skipFunction(F) || F.hasFnAttribute(Attribute::NoImplicitFloat))
    return false;

  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  TargetTransformInfo &TTI =
      getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  Vectorizer V(F, AA, DT, SE, TTI);
  return V.run();
}

PreservedAnalyses LoadStoreVectorizerPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  // Don't vectorize when the attribute NoImplicitFloat is used.
  if (F.hasFnAttribute(Attribute::NoImplicitFloat))
    return PreservedAnalyses::all();

  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  ScalarEvolution &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);

  Vectorizer V(F, AA, DT, SE, TTI);
  bool Changed = V.run();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return Changed ? PA : PreservedAnalyses::all();
}

// The real propagateMetadata expects a SmallVector<Value*>, but we deal in
// vectors of Instructions.
static void propagateMetadata(Instruction *I, ArrayRef<Instruction *> IL) {
  SmallVector<Value *, 8> VL(IL.begin(), IL.end());
  propagateMetadata(I, VL);
}

// Vectorizer Implementation
bool Vectorizer::run() {
  bool Changed = false;

  // Scan the blocks in the function in post order.
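  // Each block is processed independently; the pass never vectorizes across
  // basic-block boundaries (see reorder() and collectInstructions()).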
  for (BasicBlock *BB : post_order(&F)) {
    InstrListMap LoadRefs, StoreRefs;
    std::tie(LoadRefs, StoreRefs) = collectInstructions(BB);
    Changed |= vectorizeChains(LoadRefs);
    Changed |= vectorizeChains(StoreRefs);
  }

  return Changed;
}

unsigned Vectorizer::getPointerAddressSpace(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

// FIXME: Merge with llvm::isConsecutiveAccess
bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) {
  Value *PtrA = getLoadStorePointerOperand(A);
  Value *PtrB = getLoadStorePointerOperand(B);
  unsigned ASA = getPointerAddressSpace(A);
  unsigned ASB = getPointerAddressSpace(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers of the same size type.
  Type *PtrATy = PtrA->getType()->getPointerElementType();
  Type *PtrBTy = PtrB->getType()->getPointerElementType();
  if (PtrA == PtrB ||
      PtrATy->isVectorTy() != PtrBTy->isVectorTy() ||
      DL.getTypeStoreSize(PtrATy) != DL.getTypeStoreSize(PtrBTy) ||
      DL.getTypeStoreSize(PtrATy->getScalarType()) !=
          DL.getTypeStoreSize(PtrBTy->getScalarType()))
    return false;

  unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
  APInt Size(PtrBitWidth, DL.getTypeStoreSize(PtrATy));

  return areConsecutivePointers(PtrA, PtrB, Size);
}

bool Vectorizer::areConsecutivePointers(Value *PtrA, Value *PtrB,
                                        APInt PtrDelta, unsigned Depth) const {
  unsigned PtrBitWidth = DL.getPointerTypeSizeInBits(PtrA->getType());
  APInt OffsetA(PtrBitWidth, 0);
  APInt OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  unsigned NewPtrBitWidth = DL.getTypeStoreSizeInBits(PtrA->getType());

  if (NewPtrBitWidth != DL.getTypeStoreSizeInBits(PtrB->getType()))
    return false;

  // In case we have to shrink the pointer,
  // stripAndAccumulateInBoundsConstantOffsets should properly handle possible
  // overflow, and the value should fit into the smallest data type used in the
  // cast/gep chain.
  assert(OffsetA.getMinSignedBits() <= NewPtrBitWidth &&
         OffsetB.getMinSignedBits() <= NewPtrBitWidth);

  OffsetA = OffsetA.sextOrTrunc(NewPtrBitWidth);
  OffsetB = OffsetB.sextOrTrunc(NewPtrBitWidth);
  PtrDelta = PtrDelta.sextOrTrunc(NewPtrBitWidth);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == PtrDelta;

  // Compute the base pointer delta needed to make the final delta equal to the
  // requested pointer delta.
  APInt BaseDelta = PtrDelta - OffsetDelta;

  // Compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
  const SCEV *C = SE.getConstant(BaseDelta);
  const SCEV *X = SE.getAddExpr(PtrSCEVA, C);
  if (X == PtrSCEVB)
    return true;

  // The above check will not catch the cases where one of the pointers is
  // factorized but the other one is not, such as (C + (S * (A + B))) vs
  // (AS + BS).
  // Get the minus SCEV. That will allow re-combining the expressions and
  // getting the simplified difference.
  const SCEV *Dist = SE.getMinusSCEV(PtrSCEVB, PtrSCEVA);
  if (C == Dist)
    return true;

  // Sometimes even this doesn't work, because SCEV can't always see through
  // patterns that look like (gep (ext (add (shl X, C1), C2))). Try checking
  // things the hard way.
  return lookThroughComplexAddresses(PtrA, PtrB, BaseDelta, Depth);
}

bool Vectorizer::lookThroughComplexAddresses(Value *PtrA, Value *PtrB,
                                             APInt PtrDelta,
                                             unsigned Depth) const {
  auto *GEPA = dyn_cast<GetElementPtrInst>(PtrA);
  auto *GEPB = dyn_cast<GetElementPtrInst>(PtrB);
  if (!GEPA || !GEPB)
    return lookThroughSelects(PtrA, PtrB, PtrDelta, Depth);

  // Look through GEPs after checking they're the same except for the last
  // index.
  if (GEPA->getNumOperands() != GEPB->getNumOperands() ||
      GEPA->getPointerOperand() != GEPB->getPointerOperand())
    return false;
  gep_type_iterator GTIA = gep_type_begin(GEPA);
  gep_type_iterator GTIB = gep_type_begin(GEPB);
  for (unsigned I = 0, E = GEPA->getNumIndices() - 1; I < E; ++I) {
    if (GTIA.getOperand() != GTIB.getOperand())
      return false;
    ++GTIA;
    ++GTIB;
  }

  Instruction *OpA = dyn_cast<Instruction>(GTIA.getOperand());
  Instruction *OpB = dyn_cast<Instruction>(GTIB.getOperand());
  if (!OpA || !OpB || OpA->getOpcode() != OpB->getOpcode() ||
      OpA->getType() != OpB->getType())
    return false;

  if (PtrDelta.isNegative()) {
    if (PtrDelta.isMinSignedValue())
      return false;
    PtrDelta.negate();
    std::swap(OpA, OpB);
  }
  uint64_t Stride = DL.getTypeAllocSize(GTIA.getIndexedType());
  if (PtrDelta.urem(Stride) != 0)
    return false;
  unsigned IdxBitWidth = OpA->getType()->getScalarSizeInBits();
  APInt IdxDiff = PtrDelta.udiv(Stride).zextOrSelf(IdxBitWidth);

  // Only look through a ZExt/SExt.
  if (!isa<SExtInst>(OpA) && !isa<ZExtInst>(OpA))
    return false;

  bool Signed = isa<SExtInst>(OpA);

  // At this point A could be a function parameter, i.e. not an instruction.
  Value *ValA = OpA->getOperand(0);
  OpB = dyn_cast<Instruction>(OpB->getOperand(0));
  if (!OpB || ValA->getType() != OpB->getType())
    return false;

  // Now we need to prove that adding IdxDiff to ValA won't overflow.
  bool Safe = false;
  // First attempt: if OpB is an add with NSW/NUW, and OpB is IdxDiff added to
  // ValA, we're okay.
  if (OpB->getOpcode() == Instruction::Add &&
      isa<ConstantInt>(OpB->getOperand(1)) &&
      IdxDiff.sle(cast<ConstantInt>(OpB->getOperand(1))->getSExtValue())) {
    if (Signed)
      Safe = cast<BinaryOperator>(OpB)->hasNoSignedWrap();
    else
      Safe = cast<BinaryOperator>(OpB)->hasNoUnsignedWrap();
  }

  unsigned BitWidth = ValA->getType()->getScalarSizeInBits();

  // Second attempt:
  // If all set bits of IdxDiff or any higher order bit other than the sign bit
  // are known to be zero in ValA, we can add Diff to it while guaranteeing no
  // overflow of any sort.
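  // (For a concrete, hypothetical example: if IdxDiff is 1 and the low bit of
  // ValA is known to be zero, then ValA + 1 merely sets that bit and cannot
  // carry into any higher bit, so no overflow is possible.)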
  if (!Safe) {
    OpA = dyn_cast<Instruction>(ValA);
    if (!OpA)
      return false;
    KnownBits Known(BitWidth);
    computeKnownBits(OpA, Known, DL, 0, nullptr, OpA, &DT);
    APInt BitsAllowedToBeSet = Known.Zero.zext(IdxDiff.getBitWidth());
    if (Signed)
      BitsAllowedToBeSet.clearBit(BitWidth - 1);
    if (BitsAllowedToBeSet.ult(IdxDiff))
      return false;
  }

  const SCEV *OffsetSCEVA = SE.getSCEV(ValA);
  const SCEV *OffsetSCEVB = SE.getSCEV(OpB);
  const SCEV *C = SE.getConstant(IdxDiff.trunc(BitWidth));
  const SCEV *X = SE.getAddExpr(OffsetSCEVA, C);
  return X == OffsetSCEVB;
}

bool Vectorizer::lookThroughSelects(Value *PtrA, Value *PtrB,
                                    const APInt &PtrDelta,
                                    unsigned Depth) const {
  if (Depth++ == MaxDepth)
    return false;

  if (auto *SelectA = dyn_cast<SelectInst>(PtrA)) {
    if (auto *SelectB = dyn_cast<SelectInst>(PtrB)) {
      return SelectA->getCondition() == SelectB->getCondition() &&
             areConsecutivePointers(SelectA->getTrueValue(),
                                    SelectB->getTrueValue(), PtrDelta, Depth) &&
             areConsecutivePointers(SelectA->getFalseValue(),
                                    SelectB->getFalseValue(), PtrDelta, Depth);
    }
  }
  return false;
}

void Vectorizer::reorder(Instruction *I) {
  SmallPtrSet<Instruction *, 16> InstructionsToMove;
  SmallVector<Instruction *, 16> Worklist;

  Worklist.push_back(I);
  while (!Worklist.empty()) {
    Instruction *IW = Worklist.pop_back_val();
    int NumOperands = IW->getNumOperands();
    for (int i = 0; i < NumOperands; i++) {
      Instruction *IM = dyn_cast<Instruction>(IW->getOperand(i));
      if (!IM || IM->getOpcode() == Instruction::PHI)
        continue;

      // If IM is in another BB, no need to move it, because this pass only
      // vectorizes instructions within one BB.
      if (IM->getParent() != I->getParent())
        continue;

      if (!IM->comesBefore(I)) {
        InstructionsToMove.insert(IM);
        Worklist.push_back(IM);
      }
    }
  }

  // All instructions to move should follow I. Start from I, not from begin().
  for (auto BBI = I->getIterator(), E = I->getParent()->end(); BBI != E;
       ++BBI) {
    if (!InstructionsToMove.count(&*BBI))
      continue;
    Instruction *IM = &*BBI;
    --BBI;
    IM->removeFromParent();
    IM->insertBefore(I);
  }
}

std::pair<BasicBlock::iterator, BasicBlock::iterator>
Vectorizer::getBoundaryInstrs(ArrayRef<Instruction *> Chain) {
  Instruction *C0 = Chain[0];
  BasicBlock::iterator FirstInstr = C0->getIterator();
  BasicBlock::iterator LastInstr = C0->getIterator();

  BasicBlock *BB = C0->getParent();
  unsigned NumFound = 0;
  for (Instruction &I : *BB) {
    if (!is_contained(Chain, &I))
      continue;

    ++NumFound;
    if (NumFound == 1) {
      FirstInstr = I.getIterator();
    }
    if (NumFound == Chain.size()) {
      LastInstr = I.getIterator();
      break;
    }
  }

  // Range is [first, last).
  return std::make_pair(FirstInstr, ++LastInstr);
}

void Vectorizer::eraseInstructions(ArrayRef<Instruction *> Chain) {
  SmallVector<Instruction *, 16> Instrs;
  for (Instruction *I : Chain) {
    Value *PtrOperand = getLoadStorePointerOperand(I);
    assert(PtrOperand && "Instruction must have a pointer operand.");
    Instrs.push_back(I);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(PtrOperand))
      Instrs.push_back(GEP);
  }

  // Erase instructions.
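  // Only instructions that have actually become dead are removed; a GEP that
  // still has other users is left in place.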
  for (Instruction *I : Instrs)
    if (I->use_empty())
      I->eraseFromParent();
}

std::pair<ArrayRef<Instruction *>, ArrayRef<Instruction *>>
Vectorizer::splitOddVectorElts(ArrayRef<Instruction *> Chain,
                               unsigned ElementSizeBits) {
  unsigned ElementSizeBytes = ElementSizeBits / 8;
  unsigned SizeBytes = ElementSizeBytes * Chain.size();
  unsigned NumLeft = (SizeBytes - (SizeBytes % 4)) / ElementSizeBytes;
  if (NumLeft == Chain.size()) {
    if ((NumLeft & 1) == 0)
      NumLeft /= 2; // Split even in half
    else
      --NumLeft;    // Split off last element
  } else if (NumLeft == 0)
    NumLeft = 1;
  return std::make_pair(Chain.slice(0, NumLeft), Chain.slice(NumLeft));
}

ArrayRef<Instruction *>
Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) {
  // These are in BB order, unlike Chain, which is in address order.
  SmallVector<Instruction *, 16> MemoryInstrs;
  SmallVector<Instruction *, 16> ChainInstrs;

  bool IsLoadChain = isa<LoadInst>(Chain[0]);
  LLVM_DEBUG({
    for (Instruction *I : Chain) {
      if (IsLoadChain)
        assert(isa<LoadInst>(I) &&
               "All elements of Chain must be loads, or all must be stores.");
      else
        assert(isa<StoreInst>(I) &&
               "All elements of Chain must be loads, or all must be stores.");
    }
  });

  for (Instruction &I : make_range(getBoundaryInstrs(Chain))) {
    if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
      if (!is_contained(Chain, &I))
        MemoryInstrs.push_back(&I);
      else
        ChainInstrs.push_back(&I);
    } else if (isa<IntrinsicInst>(&I) &&
               cast<IntrinsicInst>(&I)->getIntrinsicID() ==
                   Intrinsic::sideeffect) {
      // Ignore llvm.sideeffect calls.
    } else if (IsLoadChain && (I.mayWriteToMemory() || I.mayThrow())) {
      LLVM_DEBUG(dbgs() << "LSV: Found may-write/throw operation: " << I
                        << '\n');
      break;
    } else if (!IsLoadChain && (I.mayReadOrWriteMemory() || I.mayThrow())) {
      LLVM_DEBUG(dbgs() << "LSV: Found may-read/write/throw operation: " << I
                        << '\n');
      break;
    }
  }

  // Loop until we find an instruction in ChainInstrs that we can't vectorize.
  unsigned ChainInstrIdx = 0;
  Instruction *BarrierMemoryInstr = nullptr;

  for (unsigned E = ChainInstrs.size(); ChainInstrIdx < E; ++ChainInstrIdx) {
    Instruction *ChainInstr = ChainInstrs[ChainInstrIdx];

    // If a barrier memory instruction was found, chain instructions that follow
    // will not be added to the valid prefix.
    if (BarrierMemoryInstr && BarrierMemoryInstr->comesBefore(ChainInstr))
      break;

    // Check (in BB order) if any instruction prevents ChainInstr from being
    // vectorized. Find and store the first such "conflicting" instruction.
    for (Instruction *MemInstr : MemoryInstrs) {
      // If a barrier memory instruction was found, do not check past it.
      if (BarrierMemoryInstr && BarrierMemoryInstr->comesBefore(MemInstr))
        break;

      auto *MemLoad = dyn_cast<LoadInst>(MemInstr);
      auto *ChainLoad = dyn_cast<LoadInst>(ChainInstr);
      if (MemLoad && ChainLoad)
        continue;

      // We can ignore the alias if we have a load/store pair and the load is
      // known to be invariant. The load cannot be clobbered by the store.
      auto IsInvariantLoad = [](const LoadInst *LI) -> bool {
        return LI->hasMetadata(LLVMContext::MD_invariant_load);
      };

      // We can ignore the alias as long as the load comes before the store,
      // because that means we won't be moving the load past the store to
      // vectorize it (the vectorized load is inserted at the location of the
      // first load in the chain).
      if (isa<StoreInst>(MemInstr) && ChainLoad &&
          (IsInvariantLoad(ChainLoad) || ChainLoad->comesBefore(MemInstr)))
        continue;

      // Same case, but in reverse.
      if (MemLoad && isa<StoreInst>(ChainInstr) &&
          (IsInvariantLoad(MemLoad) || MemLoad->comesBefore(ChainInstr)))
        continue;

      if (!AA.isNoAlias(MemoryLocation::get(MemInstr),
                        MemoryLocation::get(ChainInstr))) {
        LLVM_DEBUG({
          dbgs() << "LSV: Found alias:\n"
                    "  Aliasing instruction and pointer:\n"
                 << "  " << *MemInstr << '\n'
                 << "  " << *getLoadStorePointerOperand(MemInstr) << '\n'
                 << "  Aliased instruction and pointer:\n"
                 << "  " << *ChainInstr << '\n'
                 << "  " << *getLoadStorePointerOperand(ChainInstr) << '\n';
        });
        // Save this aliasing memory instruction as a barrier, but allow other
        // instructions that precede the barrier to be vectorized with this one.
        BarrierMemoryInstr = MemInstr;
        break;
      }
    }
    // Continue the search only for store chains, since vectorizing stores that
    // precede an aliasing load is valid. Conversely, vectorizing loads is valid
    // up to an aliasing store, but should not pull loads from further down in
    // the basic block.
    if (IsLoadChain && BarrierMemoryInstr) {
      // The BarrierMemoryInstr is a store that precedes ChainInstr.
      assert(BarrierMemoryInstr->comesBefore(ChainInstr));
      break;
    }
  }

  // Find the largest prefix of Chain whose elements are all in
  // ChainInstrs[0, ChainInstrIdx). This is the largest vectorizable prefix of
  // Chain. (Recall that Chain is in address order, but ChainInstrs is in BB
  // order.)
  SmallPtrSet<Instruction *, 8> VectorizableChainInstrs(
      ChainInstrs.begin(), ChainInstrs.begin() + ChainInstrIdx);
  unsigned ChainIdx = 0;
  for (unsigned ChainLen = Chain.size(); ChainIdx < ChainLen; ++ChainIdx) {
    if (!VectorizableChainInstrs.count(Chain[ChainIdx]))
      break;
  }
  return Chain.slice(0, ChainIdx);
}

static ChainID getChainID(const Value *Ptr, const DataLayout &DL) {
  const Value *ObjPtr = GetUnderlyingObject(Ptr, DL);
  if (const auto *Sel = dyn_cast<SelectInst>(ObjPtr)) {
    // The selects themselves are distinct instructions even if they share the
    // same condition and evaluate to consecutive pointers for the true and
    // false values of the condition. Therefore using the selects themselves
    // for grouping instructions would put consecutive accesses into different
    // lists; they would never even be checked for being consecutive, and so
    // would never be vectorized.
    return Sel->getCondition();
  }
  return ObjPtr;
}

std::pair<InstrListMap, InstrListMap>
Vectorizer::collectInstructions(BasicBlock *BB) {
  InstrListMap LoadRefs;
  InstrListMap StoreRefs;

  for (Instruction &I : *BB) {
    if (!I.mayReadOrWriteMemory())
      continue;

    if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
      if (!LI->isSimple())
        continue;

      // Skip if it's not legal.
      if (!TTI.isLegalToVectorizeLoad(LI))
        continue;

      Type *Ty = LI->getType();
      if (!VectorType::isValidElementType(Ty->getScalarType()))
        continue;

      // Skip weird non-byte sizes. They probably aren't worth the effort of
      // handling correctly.
      unsigned TySize = DL.getTypeSizeInBits(Ty);
      if ((TySize % 8) != 0)
        continue;

      // Skip vectors of pointers. The vectorizeLoadChain/vectorizeStoreChain
      // functions are currently using an integer type for the vectorized
      // load/store, and do not support casting between the integer type and a
      // vector of pointers (e.g. i64 to <2 x i16*>).
      if (Ty->isVectorTy() && Ty->isPtrOrPtrVectorTy())
        continue;

      Value *Ptr = LI->getPointerOperand();
      unsigned AS = Ptr->getType()->getPointerAddressSpace();
      unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);

      unsigned VF = VecRegSize / TySize;
      VectorType *VecTy = dyn_cast<VectorType>(Ty);

      // No point in looking at these if they're too big to vectorize.
      if (TySize > VecRegSize / 2 ||
          (VecTy && TTI.getLoadVectorFactor(VF, TySize, TySize / 8, VecTy) == 0))
        continue;

      // Make sure all the users of a vector are constant-index extracts.
      if (isa<VectorType>(Ty) && !llvm::all_of(LI->users(), [](const User *U) {
            const ExtractElementInst *EEI = dyn_cast<ExtractElementInst>(U);
            return EEI && isa<ConstantInt>(EEI->getOperand(1));
          }))
        continue;

      // Save the load locations.
      const ChainID ID = getChainID(Ptr, DL);
      LoadRefs[ID].push_back(LI);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {
      if (!SI->isSimple())
        continue;

      // Skip if it's not legal.
      if (!TTI.isLegalToVectorizeStore(SI))
        continue;

      Type *Ty = SI->getValueOperand()->getType();
      if (!VectorType::isValidElementType(Ty->getScalarType()))
        continue;

      // Skip vectors of pointers. The vectorizeLoadChain/vectorizeStoreChain
      // functions are currently using an integer type for the vectorized
      // load/store, and do not support casting between the integer type and a
      // vector of pointers (e.g. i64 to <2 x i16*>).
      if (Ty->isVectorTy() && Ty->isPtrOrPtrVectorTy())
        continue;

      // Skip weird non-byte sizes. They probably aren't worth the effort of
      // handling correctly.
      unsigned TySize = DL.getTypeSizeInBits(Ty);
      if ((TySize % 8) != 0)
        continue;

      Value *Ptr = SI->getPointerOperand();
      unsigned AS = Ptr->getType()->getPointerAddressSpace();
      unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);

      unsigned VF = VecRegSize / TySize;
      VectorType *VecTy = dyn_cast<VectorType>(Ty);

      // No point in looking at these if they're too big to vectorize.
      if (TySize > VecRegSize / 2 ||
          (VecTy && TTI.getStoreVectorFactor(VF, TySize, TySize / 8, VecTy) == 0))
        continue;

      if (isa<VectorType>(Ty) && !llvm::all_of(SI->users(), [](const User *U) {
            const ExtractElementInst *EEI = dyn_cast<ExtractElementInst>(U);
            return EEI && isa<ConstantInt>(EEI->getOperand(1));
          }))
        continue;

      // Save store location.
      const ChainID ID = getChainID(Ptr, DL);
      StoreRefs[ID].push_back(SI);
    }
  }

  return {LoadRefs, StoreRefs};
}

bool Vectorizer::vectorizeChains(InstrListMap &Map) {
  bool Changed = false;

  for (const std::pair<ChainID, InstrList> &Chain : Map) {
    unsigned Size = Chain.second.size();
    if (Size < 2)
      continue;

    LLVM_DEBUG(dbgs() << "LSV: Analyzing a chain of length " << Size << ".\n");

    // Process the loads/stores in chunks of 64.
    for (unsigned CI = 0, CE = Size; CI < CE; CI += 64) {
      unsigned Len = std::min<unsigned>(CE - CI, 64);
      ArrayRef<Instruction *> Chunk(&Chain.second[CI], Len);
      Changed |= vectorizeInstructions(Chunk);
    }
  }

  return Changed;
}

bool Vectorizer::vectorizeInstructions(ArrayRef<Instruction *> Instrs) {
  LLVM_DEBUG(dbgs() << "LSV: Vectorizing " << Instrs.size()
                    << " instructions.\n");
  SmallVector<int, 16> Heads, Tails;
  int ConsecutiveChain[64];

  // Do a quadratic search on all of the given loads/stores and find all of the
  // pairs of loads/stores that follow each other.
  for (int i = 0, e = Instrs.size(); i < e; ++i) {
    ConsecutiveChain[i] = -1;
    for (int j = e - 1; j >= 0; --j) {
      if (i == j)
        continue;

      if (isConsecutiveAccess(Instrs[i], Instrs[j])) {
        if (ConsecutiveChain[i] != -1) {
          int CurDistance = std::abs(ConsecutiveChain[i] - i);
          int NewDistance = std::abs(ConsecutiveChain[i] - j);
          if (j < i || NewDistance > CurDistance)
            continue; // Should not insert.
        }

        Tails.push_back(j);
        Heads.push_back(i);
        ConsecutiveChain[i] = j;
      }
    }
  }

  bool Changed = false;
  SmallPtrSet<Instruction *, 16> InstructionsProcessed;

  for (int Head : Heads) {
    if (InstructionsProcessed.count(Instrs[Head]))
      continue;
    bool LongerChainExists = false;
    for (unsigned TIt = 0; TIt < Tails.size(); TIt++)
      if (Head == Tails[TIt] &&
          !InstructionsProcessed.count(Instrs[Heads[TIt]])) {
        LongerChainExists = true;
        break;
      }
    if (LongerChainExists)
      continue;

    // We found an instr that starts a chain. Now follow the chain and try to
    // vectorize it.
    SmallVector<Instruction *, 16> Operands;
    int I = Head;
    while (I != -1 && (is_contained(Tails, I) || is_contained(Heads, I))) {
      if (InstructionsProcessed.count(Instrs[I]))
        break;

      Operands.push_back(Instrs[I]);
      I = ConsecutiveChain[I];
    }

    bool Vectorized = false;
    if (isa<LoadInst>(*Operands.begin()))
      Vectorized = vectorizeLoadChain(Operands, &InstructionsProcessed);
    else
      Vectorized = vectorizeStoreChain(Operands, &InstructionsProcessed);

    Changed |= Vectorized;
  }

  return Changed;
}

bool Vectorizer::vectorizeStoreChain(
    ArrayRef<Instruction *> Chain,
    SmallPtrSet<Instruction *, 16> *InstructionsProcessed) {
  StoreInst *S0 = cast<StoreInst>(Chain[0]);

  // If the vector has an int element, default to int for the whole store.
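  // Pointer-typed elements are likewise rebased onto an integer type of the
  // same width, since the combined value is assembled with insertelement and
  // bit/pointer casts below.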
  Type *StoreTy = nullptr;
  for (Instruction *I : Chain) {
    StoreTy = cast<StoreInst>(I)->getValueOperand()->getType();
    if (StoreTy->isIntOrIntVectorTy())
      break;

    if (StoreTy->isPtrOrPtrVectorTy()) {
      StoreTy = Type::getIntNTy(F.getParent()->getContext(),
                                DL.getTypeSizeInBits(StoreTy));
      break;
    }
  }
  assert(StoreTy && "Failed to find store type");

  unsigned Sz = DL.getTypeSizeInBits(StoreTy);
  unsigned AS = S0->getPointerAddressSpace();
  unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
  unsigned VF = VecRegSize / Sz;
  unsigned ChainSize = Chain.size();
  Align Alignment = getAlign(S0);

  if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }

  ArrayRef<Instruction *> NewChain = getVectorizablePrefix(Chain);
  if (NewChain.empty()) {
    // No vectorization possible.
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }
  if (NewChain.size() == 1) {
    // Failed after the first instruction. Discard it and try the smaller chain.
    InstructionsProcessed->insert(NewChain.front());
    return false;
  }

  // Update Chain to the valid vectorizable subchain.
  Chain = NewChain;
  ChainSize = Chain.size();

  // Check if it's legal to vectorize this chain. If not, split the chain and
  // try again.
  unsigned EltSzInBytes = Sz / 8;
  unsigned SzInBytes = EltSzInBytes * ChainSize;

  VectorType *VecTy;
  VectorType *VecStoreTy = dyn_cast<VectorType>(StoreTy);
  if (VecStoreTy)
    VecTy = VectorType::get(StoreTy->getScalarType(),
                            Chain.size() * VecStoreTy->getNumElements());
  else
    VecTy = VectorType::get(StoreTy, Chain.size());

  // If it's more than the max vector size or the target has a better
  // vector factor, break it into two pieces.
  unsigned TargetVF = TTI.getStoreVectorFactor(VF, Sz, SzInBytes, VecTy);
  if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) {
    LLVM_DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor."
                         " Creating two separate arrays.\n");
    return vectorizeStoreChain(Chain.slice(0, TargetVF),
                               InstructionsProcessed) |
           vectorizeStoreChain(Chain.slice(TargetVF), InstructionsProcessed);
  }

  LLVM_DEBUG({
    dbgs() << "LSV: Stores to vectorize:\n";
    for (Instruction *I : Chain)
      dbgs() << "  " << *I << "\n";
  });

  // We won't try again to vectorize the elements of the chain, regardless of
  // whether we succeed below.
  InstructionsProcessed->insert(Chain.begin(), Chain.end());

  // If the store is going to be misaligned, don't vectorize it.
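  // (For stack objects, i.e. the alloca address space, we may instead be able
  // to raise the known alignment via getOrEnforceKnownAlignment below; any
  // other address space is simply split into smaller chains.)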
  if (accessIsMisaligned(SzInBytes, AS, Alignment.value())) {
    if (S0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
      auto Chains = splitOddVectorElts(Chain, Sz);
      return vectorizeStoreChain(Chains.first, InstructionsProcessed) |
             vectorizeStoreChain(Chains.second, InstructionsProcessed);
    }

    unsigned NewAlign = getOrEnforceKnownAlignment(S0->getPointerOperand(),
                                                   StackAdjustedAlignment,
                                                   DL, S0, nullptr, &DT);
    if (NewAlign >= Alignment.value())
      Alignment = Align(NewAlign);
    else
      return false;
  }

  if (!TTI.isLegalToVectorizeStoreChain(SzInBytes, Alignment.value(), AS)) {
    auto Chains = splitOddVectorElts(Chain, Sz);
    return vectorizeStoreChain(Chains.first, InstructionsProcessed) |
           vectorizeStoreChain(Chains.second, InstructionsProcessed);
  }

  BasicBlock::iterator First, Last;
  std::tie(First, Last) = getBoundaryInstrs(Chain);
  Builder.SetInsertPoint(&*Last);

  Value *Vec = UndefValue::get(VecTy);

  if (VecStoreTy) {
    unsigned VecWidth = VecStoreTy->getNumElements();
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      StoreInst *Store = cast<StoreInst>(Chain[I]);
      for (unsigned J = 0, NE = VecStoreTy->getNumElements(); J != NE; ++J) {
        unsigned NewIdx = J + I * VecWidth;
        Value *Extract = Builder.CreateExtractElement(Store->getValueOperand(),
                                                      Builder.getInt32(J));
        if (Extract->getType() != StoreTy->getScalarType())
          Extract = Builder.CreateBitCast(Extract, StoreTy->getScalarType());

        Value *Insert =
            Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(NewIdx));
        Vec = Insert;
      }
    }
  } else {
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      StoreInst *Store = cast<StoreInst>(Chain[I]);
      Value *Extract = Store->getValueOperand();
      if (Extract->getType() != StoreTy->getScalarType())
        Extract =
            Builder.CreateBitOrPointerCast(Extract, StoreTy->getScalarType());

      Value *Insert =
          Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(I));
      Vec = Insert;
    }
  }

  StoreInst *SI = Builder.CreateAlignedStore(
      Vec,
      Builder.CreateBitCast(S0->getPointerOperand(), VecTy->getPointerTo(AS)),
      Alignment);
  propagateMetadata(SI, Chain);

  eraseInstructions(Chain);
  ++NumVectorInstructions;
  NumScalarsVectorized += Chain.size();
  return true;
}

bool Vectorizer::vectorizeLoadChain(
    ArrayRef<Instruction *> Chain,
    SmallPtrSet<Instruction *, 16> *InstructionsProcessed) {
  LoadInst *L0 = cast<LoadInst>(Chain[0]);

  // If the vector has an int element, default to int for the whole load.
  Type *LoadTy = nullptr;
  for (const auto &V : Chain) {
    LoadTy = cast<LoadInst>(V)->getType();
    if (LoadTy->isIntOrIntVectorTy())
      break;

    if (LoadTy->isPtrOrPtrVectorTy()) {
      LoadTy = Type::getIntNTy(F.getParent()->getContext(),
                               DL.getTypeSizeInBits(LoadTy));
      break;
    }
  }
  assert(LoadTy && "Can't determine LoadInst type from chain");

  unsigned Sz = DL.getTypeSizeInBits(LoadTy);
  unsigned AS = L0->getPointerAddressSpace();
  unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
  unsigned VF = VecRegSize / Sz;
  unsigned ChainSize = Chain.size();
  unsigned Alignment = getAlignment(L0);

  if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }

  ArrayRef<Instruction *> NewChain = getVectorizablePrefix(Chain);
  if (NewChain.empty()) {
    // No vectorization possible.
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }
  if (NewChain.size() == 1) {
    // Failed after the first instruction. Discard it and try the smaller chain.
    InstructionsProcessed->insert(NewChain.front());
    return false;
  }

  // Update Chain to the valid vectorizable subchain.
  Chain = NewChain;
  ChainSize = Chain.size();

  // Check if it's legal to vectorize this chain. If not, split the chain and
  // try again.
  unsigned EltSzInBytes = Sz / 8;
  unsigned SzInBytes = EltSzInBytes * ChainSize;
  VectorType *VecTy;
  VectorType *VecLoadTy = dyn_cast<VectorType>(LoadTy);
  if (VecLoadTy)
    VecTy = VectorType::get(LoadTy->getScalarType(),
                            Chain.size() * VecLoadTy->getNumElements());
  else
    VecTy = VectorType::get(LoadTy, Chain.size());

  // If it's more than the max vector size or the target has a better
  // vector factor, break it into two pieces.
  unsigned TargetVF = TTI.getLoadVectorFactor(VF, Sz, SzInBytes, VecTy);
  if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) {
    LLVM_DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor."
                         " Creating two separate arrays.\n");
    return vectorizeLoadChain(Chain.slice(0, TargetVF), InstructionsProcessed) |
           vectorizeLoadChain(Chain.slice(TargetVF), InstructionsProcessed);
  }

  // We won't try again to vectorize the elements of the chain, regardless of
  // whether we succeed below.
  InstructionsProcessed->insert(Chain.begin(), Chain.end());

  // If the load is going to be misaligned, don't vectorize it.
  if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
    if (L0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
      auto Chains = splitOddVectorElts(Chain, Sz);
      return vectorizeLoadChain(Chains.first, InstructionsProcessed) |
             vectorizeLoadChain(Chains.second, InstructionsProcessed);
    }

    unsigned NewAlign = getOrEnforceKnownAlignment(
        L0->getPointerOperand(), StackAdjustedAlignment, DL, L0, nullptr, &DT);
    if (NewAlign >= Alignment)
      Alignment = NewAlign;
    else
      return false;
  }

  if (!TTI.isLegalToVectorizeLoadChain(SzInBytes, Alignment, AS)) {
    auto Chains = splitOddVectorElts(Chain, Sz);
    return vectorizeLoadChain(Chains.first, InstructionsProcessed) |
           vectorizeLoadChain(Chains.second, InstructionsProcessed);
  }

  LLVM_DEBUG({
    dbgs() << "LSV: Loads to vectorize:\n";
    for (Instruction *I : Chain)
      I->dump();
  });

  // getVectorizablePrefix already computed getBoundaryInstrs. The value of
  // Last may have changed since then, but the value of First won't have. If it
  // matters, we could compute getBoundaryInstrs only once and reuse it here.
  BasicBlock::iterator First, Last;
  std::tie(First, Last) = getBoundaryInstrs(Chain);
  Builder.SetInsertPoint(&*First);

  Value *Bitcast =
      Builder.CreateBitCast(L0->getPointerOperand(), VecTy->getPointerTo(AS));
  LoadInst *LI =
      Builder.CreateAlignedLoad(VecTy, Bitcast, MaybeAlign(Alignment));
  propagateMetadata(LI, Chain);

  if (VecLoadTy) {
    SmallVector<Instruction *, 16> InstrsToErase;

    unsigned VecWidth = VecLoadTy->getNumElements();
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      for (auto Use : Chain[I]->users()) {
        // All users of vector loads are ExtractElement instructions with
        // constant indices, otherwise we would have bailed before now.
        Instruction *UI = cast<Instruction>(Use);
        unsigned Idx = cast<ConstantInt>(UI->getOperand(1))->getZExtValue();
        unsigned NewIdx = Idx + I * VecWidth;
        Value *V = Builder.CreateExtractElement(LI, Builder.getInt32(NewIdx),
                                                UI->getName());
        if (V->getType() != UI->getType())
          V = Builder.CreateBitCast(V, UI->getType());

        // Replace the old instruction.
        UI->replaceAllUsesWith(V);
        InstrsToErase.push_back(UI);
      }
    }

    // Bitcast might not be an Instruction, if the value being loaded is a
    // constant. In that case, no need to reorder anything.
    if (Instruction *BitcastInst = dyn_cast<Instruction>(Bitcast))
      reorder(BitcastInst);

    for (auto I : InstrsToErase)
      I->eraseFromParent();
  } else {
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      Value *CV = Chain[I];
      Value *V =
          Builder.CreateExtractElement(LI, Builder.getInt32(I), CV->getName());
      if (V->getType() != CV->getType()) {
        V = Builder.CreateBitOrPointerCast(V, CV->getType());
      }

      // Replace the old instruction.
      CV->replaceAllUsesWith(V);
    }

    if (Instruction *BitcastInst = dyn_cast<Instruction>(Bitcast))
      reorder(BitcastInst);
  }

  eraseInstructions(Chain);

  ++NumVectorInstructions;
  NumScalarsVectorized += Chain.size();
  return true;
}

bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
                                    unsigned Alignment) {
  if (Alignment % SzInBytes == 0)
    return false;

  bool Fast = false;
  bool Allows = TTI.allowsMisalignedMemoryAccesses(F.getParent()->getContext(),
                                                   SzInBytes * 8, AddressSpace,
                                                   Alignment, &Fast);
  LLVM_DEBUG(dbgs() << "LSV: Target said misaligned is allowed? " << Allows
                    << " and fast? " << Fast << "\n";);
  return !Allows || !Fast;
}