//===- LoadStoreVectorizer.cpp - GPU Load & Store Vectorizer --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass merges loads/stores to/from sequential memory addresses into vector
// loads/stores. Although there's nothing GPU-specific in here, this pass is
// motivated by the microarchitectural quirks of nVidia and AMD GPUs.
//
// (For simplicity below we talk about loads only, but everything also applies
// to stores.)
//
// This pass is intended to be run late in the pipeline, after other
// vectorization opportunities have been exploited. So the assumption here is
// that immediately following our new vector load we'll need to extract out the
// individual elements of the load, so we can operate on them individually.
//
// On CPUs this transformation is usually not beneficial, because extracting the
// elements of a vector register is expensive on most architectures. It's
// usually better just to load each element individually into its own scalar
// register.
//
// However, nVidia and AMD GPUs don't have proper vector registers. Instead, a
// "vector load" loads directly into a series of scalar registers. In effect,
// extracting the elements of the vector is free. It's therefore always
// beneficial to vectorize a sequence of loads on these architectures.
//
// Vectorizing (perhaps a better name might be "coalescing") loads can have
// large performance impacts on GPU kernels, and opportunities for vectorizing
// are common in GPU code. This pass tries very hard to find such
// opportunities; its runtime is quadratic in the number of loads in a BB.
//
// Some CPU architectures, such as ARM, have instructions that load into
// multiple scalar registers, similar to a GPU vectorized load. In theory ARM
// could use this pass (with some modifications), but currently it implements
// its own pass to do something similar to what we do here.
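//
// As a purely illustrative sketch (hypothetical IR, not taken from any test),
// the transformation turns something like
//
//   %l0 = load i32, i32* %p
//   %p1 = getelementptr inbounds i32, i32* %p, i64 1
//   %l1 = load i32, i32* %p1
//
// into roughly
//
//   %vp = bitcast i32* %p to <2 x i32>*
//   %v  = load <2 x i32>, <2 x i32>* %vp
//   %l0 = extractelement <2 x i32> %v, i32 0
//   %l1 = extractelement <2 x i32> %v, i32 1
//
// with the scalar loads erased once their uses have been rewritten.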

#include "llvm/Transforms/Vectorize/LoadStoreVectorizer.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "load-store-vectorizer"

STATISTIC(NumVectorInstructions, "Number of vector accesses generated");
STATISTIC(NumScalarsVectorized, "Number of scalar accesses vectorized");

// FIXME: Assuming stack alignment of 4 is always good enough
static const unsigned StackAdjustedAlignment = 4;

namespace {

/// ChainID is an arbitrary token that is allowed to be different only for the
/// accesses that are guaranteed to be considered non-consecutive by
/// Vectorizer::isConsecutiveAccess. It's used for grouping instructions
/// together and reducing the number of instructions the main search operates on
/// at a time, i.e. this is to reduce compile time and nothing else as the main
/// search has O(n^2) time complexity. The underlying type of ChainID should not
/// be relied upon.
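///
/// For illustration (hypothetical IR): loads whose pointers are GEPs off the
/// same underlying object (say, the same alloca or argument) all receive that
/// object as their ChainID and therefore land in one list, where they can be
/// tested for consecutiveness. A load whose pointer is a select is keyed on
/// the select's condition instead, so the true/false arms of two selects with
/// the same condition can still be recognized as consecutive (see getChainID
/// below).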
using ChainID = const Value *;
using InstrList = SmallVector<Instruction *, 8>;
using InstrListMap = MapVector<ChainID, InstrList>;

class Vectorizer {
  Function &F;
  AliasAnalysis &AA;
  AssumptionCache &AC;
  DominatorTree &DT;
  ScalarEvolution &SE;
  TargetTransformInfo &TTI;
  const DataLayout &DL;
  IRBuilder<> Builder;

public:
  Vectorizer(Function &F, AliasAnalysis &AA, AssumptionCache &AC,
             DominatorTree &DT, ScalarEvolution &SE, TargetTransformInfo &TTI)
      : F(F), AA(AA), AC(AC), DT(DT), SE(SE), TTI(TTI),
        DL(F.getParent()->getDataLayout()), Builder(SE.getContext()) {}

  bool run();

private:
  unsigned getPointerAddressSpace(Value *I);

  static const unsigned MaxDepth = 3;

  bool isConsecutiveAccess(Value *A, Value *B);
  bool areConsecutivePointers(Value *PtrA, Value *PtrB, APInt PtrDelta,
                              unsigned Depth = 0) const;
  bool lookThroughComplexAddresses(Value *PtrA, Value *PtrB, APInt PtrDelta,
                                   unsigned Depth) const;
  bool lookThroughSelects(Value *PtrA, Value *PtrB, const APInt &PtrDelta,
                          unsigned Depth) const;

  /// After vectorization, reorder the instructions that I depends on
  /// (the instructions defining its operands), to ensure they dominate I.
  void reorder(Instruction *I);

  /// Returns the first and the last instructions in Chain.
  std::pair<BasicBlock::iterator, BasicBlock::iterator>
  getBoundaryInstrs(ArrayRef<Instruction *> Chain);

  /// Erases the original instructions after vectorizing.
  void eraseInstructions(ArrayRef<Instruction *> Chain);

  /// "Legalize" the vector type that would be produced by combining \p
  /// ElementSizeBits elements in \p Chain. Break into two pieces such that the
  /// total size of each piece is 1, 2 or a multiple of 4 bytes. \p Chain is
  /// expected to have more than 4 elements.
  std::pair<ArrayRef<Instruction *>, ArrayRef<Instruction *>>
  splitOddVectorElts(ArrayRef<Instruction *> Chain, unsigned ElementSizeBits);

  /// Finds the largest prefix of Chain that's vectorizable, checking for
  /// intervening instructions which may affect the memory accessed by the
  /// instructions within Chain.
  ///
  /// The elements of \p Chain must be all loads or all stores and must be in
  /// address order.
  ArrayRef<Instruction *> getVectorizablePrefix(ArrayRef<Instruction *> Chain);

  /// Collects load and store instructions to vectorize.
  std::pair<InstrListMap, InstrListMap> collectInstructions(BasicBlock *BB);

  /// Processes the collected instructions in \p Map. The values of \p Map
  /// must be all loads or all stores.
  bool vectorizeChains(InstrListMap &Map);

  /// Finds the loads/stores to consecutive memory addresses and vectorizes
  /// them.
  bool vectorizeInstructions(ArrayRef<Instruction *> Instrs);

  /// Vectorizes the load instructions in Chain.
  bool
  vectorizeLoadChain(ArrayRef<Instruction *> Chain,
                     SmallPtrSet<Instruction *, 16> *InstructionsProcessed);

  /// Vectorizes the store instructions in Chain.
  bool
  vectorizeStoreChain(ArrayRef<Instruction *> Chain,
                      SmallPtrSet<Instruction *, 16> *InstructionsProcessed);

  /// Checks whether this load/store access is misaligned.
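  /// For example, a 16-byte access that is only known to be 4-byte aligned is
  /// treated as misaligned unless TTI reports that such an access is both
  /// allowed and fast.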
  bool accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
                          Align Alignment);
};

class LoadStoreVectorizerLegacyPass : public FunctionPass {
public:
  static char ID;

  LoadStoreVectorizerLegacyPass() : FunctionPass(ID) {
    initializeLoadStoreVectorizerLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override {
    return "GPU Load and Store Vectorizer";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace

char LoadStoreVectorizerLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(LoadStoreVectorizerLegacyPass, DEBUG_TYPE,
                      "Vectorize load and store instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoadStoreVectorizerLegacyPass, DEBUG_TYPE,
                    "Vectorize load and store instructions", false, false)

Pass *llvm::createLoadStoreVectorizerPass() {
  return new LoadStoreVectorizerLegacyPass();
}

bool LoadStoreVectorizerLegacyPass::runOnFunction(Function &F) {
  // Don't vectorize when the attribute NoImplicitFloat is used.
  if (skipFunction(F) || F.hasFnAttribute(Attribute::NoImplicitFloat))
    return false;

  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  TargetTransformInfo &TTI =
      getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  AssumptionCache &AC =
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);

  Vectorizer V(F, AA, AC, DT, SE, TTI);
  return V.run();
}

PreservedAnalyses LoadStoreVectorizerPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  // Don't vectorize when the attribute NoImplicitFloat is used.
  if (F.hasFnAttribute(Attribute::NoImplicitFloat))
    return PreservedAnalyses::all();

  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  ScalarEvolution &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);
  AssumptionCache &AC = AM.getResult<AssumptionAnalysis>(F);

  Vectorizer V(F, AA, AC, DT, SE, TTI);
  bool Changed = V.run();
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return Changed ? PA : PreservedAnalyses::all();
}

// The real propagateMetadata expects a SmallVector<Value*>, but we deal in
// vectors of Instructions.
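// (Conceptually this lets the rest of the pass write propagateMetadata(NewI,
// Chain) and have metadata that can be legally preserved across the whole
// chain, e.g. !tbaa or !alias.scope, merged onto the new vector access; see
// llvm::propagateMetadata in VectorUtils for the exact rules.)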
static void propagateMetadata(Instruction *I, ArrayRef<Instruction *> IL) {
  SmallVector<Value *, 8> VL(IL.begin(), IL.end());
  propagateMetadata(I, VL);
}

// Vectorizer Implementation
bool Vectorizer::run() {
  bool Changed = false;

  // Scan the blocks in the function in post order.
  for (BasicBlock *BB : post_order(&F)) {
    InstrListMap LoadRefs, StoreRefs;
    std::tie(LoadRefs, StoreRefs) = collectInstructions(BB);
    Changed |= vectorizeChains(LoadRefs);
    Changed |= vectorizeChains(StoreRefs);
  }

  return Changed;
}

unsigned Vectorizer::getPointerAddressSpace(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

// FIXME: Merge with llvm::isConsecutiveAccess
bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) {
  Value *PtrA = getLoadStorePointerOperand(A);
  Value *PtrB = getLoadStorePointerOperand(B);
  unsigned ASA = getPointerAddressSpace(A);
  unsigned ASB = getPointerAddressSpace(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers of the same size type.
  Type *PtrATy = getLoadStoreType(A);
  Type *PtrBTy = getLoadStoreType(B);
  if (PtrA == PtrB ||
      PtrATy->isVectorTy() != PtrBTy->isVectorTy() ||
      DL.getTypeStoreSize(PtrATy) != DL.getTypeStoreSize(PtrBTy) ||
      DL.getTypeStoreSize(PtrATy->getScalarType()) !=
          DL.getTypeStoreSize(PtrBTy->getScalarType()))
    return false;

  unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
  APInt Size(PtrBitWidth, DL.getTypeStoreSize(PtrATy));

  return areConsecutivePointers(PtrA, PtrB, Size);
}

bool Vectorizer::areConsecutivePointers(Value *PtrA, Value *PtrB,
                                        APInt PtrDelta, unsigned Depth) const {
  unsigned PtrBitWidth = DL.getPointerTypeSizeInBits(PtrA->getType());
  APInt OffsetA(PtrBitWidth, 0);
  APInt OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  unsigned NewPtrBitWidth = DL.getTypeStoreSizeInBits(PtrA->getType());

  if (NewPtrBitWidth != DL.getTypeStoreSizeInBits(PtrB->getType()))
    return false;

  // In case we have to shrink the pointer,
  // stripAndAccumulateInBoundsConstantOffsets should properly handle a
  // possible overflow and the value should fit into the smallest data type
  // used in the cast/gep chain.
  assert(OffsetA.getMinSignedBits() <= NewPtrBitWidth &&
         OffsetB.getMinSignedBits() <= NewPtrBitWidth);

  OffsetA = OffsetA.sextOrTrunc(NewPtrBitWidth);
  OffsetB = OffsetB.sextOrTrunc(NewPtrBitWidth);
  PtrDelta = PtrDelta.sextOrTrunc(NewPtrBitWidth);

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == PtrDelta;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the pointer delta requested.
  APInt BaseDelta = PtrDelta - OffsetDelta;

  // Compute the distance with SCEV between the base pointers.
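  // For illustration (hypothetical IR, with BaseDelta == 4):
  //   %a = getelementptr inbounds i8, i8* %base, i64 %i
  //   %j = add nsw i64 %i, 4
  //   %b = getelementptr inbounds i8, i8* %base, i64 %j
  // SCEV expresses both pointers in terms of %base and %i, so the check below
  // sees SCEV(%a) + 4 == SCEV(%b).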
  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
  const SCEV *C = SE.getConstant(BaseDelta);
  const SCEV *X = SE.getAddExpr(PtrSCEVA, C);
  if (X == PtrSCEVB)
    return true;

  // The above check will not catch the cases where one of the pointers is
  // factorized but the other one is not, such as (C + (S * (A + B))) vs
  // (AS + BS). Get the minus SCEV. That will allow re-combining the
  // expressions and getting the simplified difference.
  const SCEV *Dist = SE.getMinusSCEV(PtrSCEVB, PtrSCEVA);
  if (C == Dist)
    return true;

  // Sometimes even this doesn't work, because SCEV can't always see through
  // patterns that look like (gep (ext (add (shl X, C1), C2))). Try checking
  // things the hard way.
  return lookThroughComplexAddresses(PtrA, PtrB, BaseDelta, Depth);
}

static bool checkNoWrapFlags(Instruction *I, bool Signed) {
  BinaryOperator *BinOpI = cast<BinaryOperator>(I);
  return (Signed && BinOpI->hasNoSignedWrap()) ||
         (!Signed && BinOpI->hasNoUnsignedWrap());
}

static bool checkIfSafeAddSequence(const APInt &IdxDiff, Instruction *AddOpA,
                                   unsigned MatchingOpIdxA, Instruction *AddOpB,
                                   unsigned MatchingOpIdxB, bool Signed) {
  // If both OpA and OpB are adds with NSW/NUW and with one of the operands
  // being the same, we can guarantee that the transformation is safe if we can
  // prove that OpA won't overflow when IdxDiff is added to the other operand
  // of OpA.
  // For example:
  //  %tmp7 = add nsw i32 %tmp2, %v0
  //  %tmp8 = sext i32 %tmp7 to i64
  //  ...
  //  %tmp11 = add nsw i32 %v0, 1
  //  %tmp12 = add nsw i32 %tmp2, %tmp11
  //  %tmp13 = sext i32 %tmp12 to i64
  //
  // Both %tmp7 and %tmp12 have the nsw flag and their first operand
  // is %tmp2. It's guaranteed that adding 1 to %tmp7 won't overflow
  // because %tmp11 adds 1 to %v0 and both %tmp11 and %tmp12 have the
  // nsw flag.
  assert(AddOpA->getOpcode() == Instruction::Add &&
         AddOpB->getOpcode() == Instruction::Add &&
         checkNoWrapFlags(AddOpA, Signed) && checkNoWrapFlags(AddOpB, Signed));
  if (AddOpA->getOperand(MatchingOpIdxA) ==
      AddOpB->getOperand(MatchingOpIdxB)) {
    Value *OtherOperandA = AddOpA->getOperand(MatchingOpIdxA == 1 ? 0 : 1);
    Value *OtherOperandB = AddOpB->getOperand(MatchingOpIdxB == 1 ? 0 : 1);
    Instruction *OtherInstrA = dyn_cast<Instruction>(OtherOperandA);
    Instruction *OtherInstrB = dyn_cast<Instruction>(OtherOperandB);
    // Match `x +nsw/nuw y` and `x +nsw/nuw (y +nsw/nuw IdxDiff)`.
    if (OtherInstrB && OtherInstrB->getOpcode() == Instruction::Add &&
        checkNoWrapFlags(OtherInstrB, Signed) &&
        isa<ConstantInt>(OtherInstrB->getOperand(1))) {
      int64_t CstVal =
          cast<ConstantInt>(OtherInstrB->getOperand(1))->getSExtValue();
      if (OtherInstrB->getOperand(0) == OtherOperandA &&
          IdxDiff.getSExtValue() == CstVal)
        return true;
    }
    // Match `x +nsw/nuw (y +nsw/nuw -IdxDiff)` and `x +nsw/nuw y`.
    if (OtherInstrA && OtherInstrA->getOpcode() == Instruction::Add &&
        checkNoWrapFlags(OtherInstrA, Signed) &&
        isa<ConstantInt>(OtherInstrA->getOperand(1))) {
      int64_t CstVal =
          cast<ConstantInt>(OtherInstrA->getOperand(1))->getSExtValue();
      if (OtherInstrA->getOperand(0) == OtherOperandB &&
          IdxDiff.getSExtValue() == -CstVal)
        return true;
    }
    // Match `x +nsw/nuw (y +nsw/nuw c)` and
    // `x +nsw/nuw (y +nsw/nuw (c + IdxDiff))`.
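    // For illustration (hypothetical IR, assuming IdxDiff == 1):
    //   %a   = add nsw i32 %y, 4
    //   %opA = add nsw i32 %x, %a
    //   %b   = add nsw i32 %y, 5
    //   %opB = add nsw i32 %x, %b
    // Here CstValA == 4, CstValB == 5 and CstValB - CstValA == IdxDiff.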
    if (OtherInstrA && OtherInstrB &&
        OtherInstrA->getOpcode() == Instruction::Add &&
        OtherInstrB->getOpcode() == Instruction::Add &&
        checkNoWrapFlags(OtherInstrA, Signed) &&
        checkNoWrapFlags(OtherInstrB, Signed) &&
        isa<ConstantInt>(OtherInstrA->getOperand(1)) &&
        isa<ConstantInt>(OtherInstrB->getOperand(1))) {
      int64_t CstValA =
          cast<ConstantInt>(OtherInstrA->getOperand(1))->getSExtValue();
      int64_t CstValB =
          cast<ConstantInt>(OtherInstrB->getOperand(1))->getSExtValue();
      if (OtherInstrA->getOperand(0) == OtherInstrB->getOperand(0) &&
          IdxDiff.getSExtValue() == (CstValB - CstValA))
        return true;
    }
  }
  return false;
}

bool Vectorizer::lookThroughComplexAddresses(Value *PtrA, Value *PtrB,
                                             APInt PtrDelta,
                                             unsigned Depth) const {
  auto *GEPA = dyn_cast<GetElementPtrInst>(PtrA);
  auto *GEPB = dyn_cast<GetElementPtrInst>(PtrB);
  if (!GEPA || !GEPB)
    return lookThroughSelects(PtrA, PtrB, PtrDelta, Depth);

  // Look through GEPs after checking they're the same except for the last
  // index.
  if (GEPA->getNumOperands() != GEPB->getNumOperands() ||
      GEPA->getPointerOperand() != GEPB->getPointerOperand())
    return false;
  gep_type_iterator GTIA = gep_type_begin(GEPA);
  gep_type_iterator GTIB = gep_type_begin(GEPB);
  for (unsigned I = 0, E = GEPA->getNumIndices() - 1; I < E; ++I) {
    if (GTIA.getOperand() != GTIB.getOperand())
      return false;
    ++GTIA;
    ++GTIB;
  }

  Instruction *OpA = dyn_cast<Instruction>(GTIA.getOperand());
  Instruction *OpB = dyn_cast<Instruction>(GTIB.getOperand());
  if (!OpA || !OpB || OpA->getOpcode() != OpB->getOpcode() ||
      OpA->getType() != OpB->getType())
    return false;

  if (PtrDelta.isNegative()) {
    if (PtrDelta.isMinSignedValue())
      return false;
    PtrDelta.negate();
    std::swap(OpA, OpB);
  }
  uint64_t Stride = DL.getTypeAllocSize(GTIA.getIndexedType());
  if (PtrDelta.urem(Stride) != 0)
    return false;
  unsigned IdxBitWidth = OpA->getType()->getScalarSizeInBits();
  APInt IdxDiff = PtrDelta.udiv(Stride).zextOrSelf(IdxBitWidth);

  // Only look through a ZExt/SExt.
  if (!isa<SExtInst>(OpA) && !isa<ZExtInst>(OpA))
    return false;

  bool Signed = isa<SExtInst>(OpA);

  // At this point A could be a function parameter, i.e. not an instruction
  Value *ValA = OpA->getOperand(0);
  OpB = dyn_cast<Instruction>(OpB->getOperand(0));
  if (!OpB || ValA->getType() != OpB->getType())
    return false;

  // Now we need to prove that adding IdxDiff to ValA won't overflow.
  bool Safe = false;

  // First attempt: if OpB is an add with NSW/NUW, and OpB is IdxDiff added to
  // ValA, we're okay.
  if (OpB->getOpcode() == Instruction::Add &&
      isa<ConstantInt>(OpB->getOperand(1)) &&
      IdxDiff.sle(cast<ConstantInt>(OpB->getOperand(1))->getSExtValue()) &&
      checkNoWrapFlags(OpB, Signed))
    Safe = true;

  // Second attempt: check if we have eligible add NSW/NUW instruction
  // sequences.
  OpA = dyn_cast<Instruction>(ValA);
  if (!Safe && OpA && OpA->getOpcode() == Instruction::Add &&
      OpB->getOpcode() == Instruction::Add && checkNoWrapFlags(OpA, Signed) &&
      checkNoWrapFlags(OpB, Signed)) {
    // In the checks below a matching operand in OpA and OpB is
    // an operand which is the same in those two instructions.
    // Below we account for possible orders of the operands of
    // these add instructions.
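    // (I.e. all four combinations of operand positions, (0,0), (0,1), (1,0)
    // and (1,1), are tried below.)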
    for (unsigned MatchingOpIdxA : {0, 1})
      for (unsigned MatchingOpIdxB : {0, 1})
        if (!Safe)
          Safe = checkIfSafeAddSequence(IdxDiff, OpA, MatchingOpIdxA, OpB,
                                        MatchingOpIdxB, Signed);
  }

  unsigned BitWidth = ValA->getType()->getScalarSizeInBits();

  // Third attempt:
  // If all set bits of IdxDiff or any higher order bit other than the sign bit
  // are known to be zero in ValA, we can add IdxDiff to it while guaranteeing
  // no overflow of any sort.
  if (!Safe) {
    KnownBits Known(BitWidth);
    computeKnownBits(ValA, Known, DL, 0, &AC, OpB, &DT);
    APInt BitsAllowedToBeSet = Known.Zero.zext(IdxDiff.getBitWidth());
    if (Signed)
      BitsAllowedToBeSet.clearBit(BitWidth - 1);
    if (BitsAllowedToBeSet.ult(IdxDiff))
      return false;
  }

  const SCEV *OffsetSCEVA = SE.getSCEV(ValA);
  const SCEV *OffsetSCEVB = SE.getSCEV(OpB);
  const SCEV *C = SE.getConstant(IdxDiff.trunc(BitWidth));
  const SCEV *X = SE.getAddExpr(OffsetSCEVA, C);
  return X == OffsetSCEVB;
}

bool Vectorizer::lookThroughSelects(Value *PtrA, Value *PtrB,
                                    const APInt &PtrDelta,
                                    unsigned Depth) const {
  if (Depth++ == MaxDepth)
    return false;

  if (auto *SelectA = dyn_cast<SelectInst>(PtrA)) {
    if (auto *SelectB = dyn_cast<SelectInst>(PtrB)) {
      return SelectA->getCondition() == SelectB->getCondition() &&
             areConsecutivePointers(SelectA->getTrueValue(),
                                    SelectB->getTrueValue(), PtrDelta, Depth) &&
             areConsecutivePointers(SelectA->getFalseValue(),
                                    SelectB->getFalseValue(), PtrDelta, Depth);
    }
  }
  return false;
}

void Vectorizer::reorder(Instruction *I) {
  SmallPtrSet<Instruction *, 16> InstructionsToMove;
  SmallVector<Instruction *, 16> Worklist;

  Worklist.push_back(I);
  while (!Worklist.empty()) {
    Instruction *IW = Worklist.pop_back_val();
    int NumOperands = IW->getNumOperands();
    for (int i = 0; i < NumOperands; i++) {
      Instruction *IM = dyn_cast<Instruction>(IW->getOperand(i));
      if (!IM || IM->getOpcode() == Instruction::PHI)
        continue;

      // If IM is in another BB, no need to move it, because this pass only
      // vectorizes instructions within one BB.
      if (IM->getParent() != I->getParent())
        continue;

      if (!IM->comesBefore(I)) {
        InstructionsToMove.insert(IM);
        Worklist.push_back(IM);
      }
    }
  }

  // All instructions to move should follow I. Start from I, not from begin().
  for (auto BBI = I->getIterator(), E = I->getParent()->end(); BBI != E;
       ++BBI) {
    if (!InstructionsToMove.count(&*BBI))
      continue;
    Instruction *IM = &*BBI;
    --BBI;
    IM->removeFromParent();
    IM->insertBefore(I);
  }
}

std::pair<BasicBlock::iterator, BasicBlock::iterator>
Vectorizer::getBoundaryInstrs(ArrayRef<Instruction *> Chain) {
  Instruction *C0 = Chain[0];
  BasicBlock::iterator FirstInstr = C0->getIterator();
  BasicBlock::iterator LastInstr = C0->getIterator();

  BasicBlock *BB = C0->getParent();
  unsigned NumFound = 0;
  for (Instruction &I : *BB) {
    if (!is_contained(Chain, &I))
      continue;

    ++NumFound;
    if (NumFound == 1) {
      FirstInstr = I.getIterator();
    }
    if (NumFound == Chain.size()) {
      LastInstr = I.getIterator();
      break;
    }
  }

  // Range is [first, last).
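  // (Incrementing LastInstr below turns the inclusive last chain instruction
  // into the exclusive end of the returned half-open range.)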
  return std::make_pair(FirstInstr, ++LastInstr);
}

void Vectorizer::eraseInstructions(ArrayRef<Instruction *> Chain) {
  SmallVector<Instruction *, 16> Instrs;
  for (Instruction *I : Chain) {
    Value *PtrOperand = getLoadStorePointerOperand(I);
    assert(PtrOperand && "Instruction must have a pointer operand.");
    Instrs.push_back(I);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(PtrOperand))
      Instrs.push_back(GEP);
  }

  // Erase instructions.
  for (Instruction *I : Instrs)
    if (I->use_empty())
      I->eraseFromParent();
}

std::pair<ArrayRef<Instruction *>, ArrayRef<Instruction *>>
Vectorizer::splitOddVectorElts(ArrayRef<Instruction *> Chain,
                               unsigned ElementSizeBits) {
  unsigned ElementSizeBytes = ElementSizeBits / 8;
  unsigned SizeBytes = ElementSizeBytes * Chain.size();
  unsigned NumLeft = (SizeBytes - (SizeBytes % 4)) / ElementSizeBytes;
  if (NumLeft == Chain.size()) {
    if ((NumLeft & 1) == 0)
      NumLeft /= 2; // Split even in half
    else
      --NumLeft;    // Split off last element
  } else if (NumLeft == 0)
    NumLeft = 1;
  return std::make_pair(Chain.slice(0, NumLeft), Chain.slice(NumLeft));
}

ArrayRef<Instruction *>
Vectorizer::getVectorizablePrefix(ArrayRef<Instruction *> Chain) {
  // These are in BB order, unlike Chain, which is in address order.
  SmallVector<Instruction *, 16> MemoryInstrs;
  SmallVector<Instruction *, 16> ChainInstrs;

  bool IsLoadChain = isa<LoadInst>(Chain[0]);
  LLVM_DEBUG({
    for (Instruction *I : Chain) {
      if (IsLoadChain)
        assert(isa<LoadInst>(I) &&
               "All elements of Chain must be loads, or all must be stores.");
      else
        assert(isa<StoreInst>(I) &&
               "All elements of Chain must be loads, or all must be stores.");
    }
  });

  for (Instruction &I : make_range(getBoundaryInstrs(Chain))) {
    if ((isa<LoadInst>(I) || isa<StoreInst>(I)) && is_contained(Chain, &I)) {
      ChainInstrs.push_back(&I);
      continue;
    }
    if (!isGuaranteedToTransferExecutionToSuccessor(&I)) {
      LLVM_DEBUG(dbgs() << "LSV: Found instruction may not transfer execution: "
                        << I << '\n');
      break;
    }
    if (I.mayReadOrWriteMemory())
      MemoryInstrs.push_back(&I);
  }

  // Loop until we find an instruction in ChainInstrs that we can't vectorize.
  unsigned ChainInstrIdx = 0;
  Instruction *BarrierMemoryInstr = nullptr;

  for (unsigned E = ChainInstrs.size(); ChainInstrIdx < E; ++ChainInstrIdx) {
    Instruction *ChainInstr = ChainInstrs[ChainInstrIdx];

    // If a barrier memory instruction was found, chain instructions that
    // follow will not be added to the valid prefix.
    if (BarrierMemoryInstr && BarrierMemoryInstr->comesBefore(ChainInstr))
      break;

    // Check (in BB order) if any instruction prevents ChainInstr from being
    // vectorized. Find and store the first such "conflicting" instruction.
    for (Instruction *MemInstr : MemoryInstrs) {
      // If a barrier memory instruction was found, do not check past it.
      if (BarrierMemoryInstr && BarrierMemoryInstr->comesBefore(MemInstr))
        break;

      auto *MemLoad = dyn_cast<LoadInst>(MemInstr);
      auto *ChainLoad = dyn_cast<LoadInst>(ChainInstr);
      if (MemLoad && ChainLoad)
        continue;

      // We can ignore the alias if we have a load/store pair and the load
      // is known to be invariant. The load cannot be clobbered by the store.
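      // (Roughly, !invariant.load asserts that the location holds the same
      // value at every point where it is dereferenceable, so no store in this
      // block can change what the load observes.)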
      auto IsInvariantLoad = [](const LoadInst *LI) -> bool {
        return LI->hasMetadata(LLVMContext::MD_invariant_load);
      };

      if (IsLoadChain) {
        // We can ignore the alias as long as the load comes before the store,
        // because that means we won't be moving the load past the store to
        // vectorize it (the vectorized load is inserted at the location of the
        // first load in the chain).
        if (ChainInstr->comesBefore(MemInstr) ||
            (ChainLoad && IsInvariantLoad(ChainLoad)))
          continue;
      } else {
        // Same case, but in reverse.
        if (MemInstr->comesBefore(ChainInstr) ||
            (MemLoad && IsInvariantLoad(MemLoad)))
          continue;
      }

      ModRefInfo MR =
          AA.getModRefInfo(MemInstr, MemoryLocation::get(ChainInstr));
      if (IsLoadChain ? isModSet(MR) : isModOrRefSet(MR)) {
        LLVM_DEBUG({
          dbgs() << "LSV: Found alias:\n"
                    "  Aliasing instruction:\n"
                 << "  " << *MemInstr << '\n'
                 << "  Aliased instruction and pointer:\n"
                 << "  " << *ChainInstr << '\n'
                 << "  " << *getLoadStorePointerOperand(ChainInstr) << '\n';
        });
        // Save this aliasing memory instruction as a barrier, but allow other
        // instructions that precede the barrier to be vectorized with this
        // one.
        BarrierMemoryInstr = MemInstr;
        break;
      }
    }
    // Continue the search only for store chains, since vectorizing stores that
    // precede an aliasing load is valid. Conversely, vectorizing loads is
    // valid up to an aliasing store, but should not pull loads from further
    // down in the basic block.
    if (IsLoadChain && BarrierMemoryInstr) {
      // The BarrierMemoryInstr is a store that precedes ChainInstr.
      assert(BarrierMemoryInstr->comesBefore(ChainInstr));
      break;
    }
  }

  // Find the largest prefix of Chain whose elements are all in
  // ChainInstrs[0, ChainInstrIdx). This is the largest vectorizable prefix of
  // Chain. (Recall that Chain is in address order, but ChainInstrs is in BB
  // order.)
  SmallPtrSet<Instruction *, 8> VectorizableChainInstrs(
      ChainInstrs.begin(), ChainInstrs.begin() + ChainInstrIdx);
  unsigned ChainIdx = 0;
  for (unsigned ChainLen = Chain.size(); ChainIdx < ChainLen; ++ChainIdx) {
    if (!VectorizableChainInstrs.count(Chain[ChainIdx]))
      break;
  }
  return Chain.slice(0, ChainIdx);
}

static ChainID getChainID(const Value *Ptr) {
  const Value *ObjPtr = getUnderlyingObject(Ptr);
  if (const auto *Sel = dyn_cast<SelectInst>(ObjPtr)) {
    // The selects themselves are distinct instructions even if they share the
    // same condition and evaluate to consecutive pointers for the true and
    // false values of the condition. Therefore using the selects themselves
    // for grouping instructions would put consecutive accesses into different
    // lists and they wouldn't even be checked for being consecutive, and so
    // wouldn't be vectorized.
    return Sel->getCondition();
  }
  return ObjPtr;
}

std::pair<InstrListMap, InstrListMap>
Vectorizer::collectInstructions(BasicBlock *BB) {
  InstrListMap LoadRefs;
  InstrListMap StoreRefs;

  for (Instruction &I : *BB) {
    if (!I.mayReadOrWriteMemory())
      continue;

    if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
      if (!LI->isSimple())
        continue;

      // Skip if it's not legal.
      if (!TTI.isLegalToVectorizeLoad(LI))
        continue;

      Type *Ty = LI->getType();
      if (!VectorType::isValidElementType(Ty->getScalarType()))
        continue;

      // Skip weird non-byte sizes. They probably aren't worth the effort of
      // handling correctly.
      unsigned TySize = DL.getTypeSizeInBits(Ty);
      if ((TySize % 8) != 0)
        continue;

      // Skip vectors of pointers. The vectorizeLoadChain/vectorizeStoreChain
      // functions are currently using an integer type for the vectorized
      // load/store, and do not support casting between the integer type and a
      // vector of pointers (e.g. i64 to <2 x i16*>).
      if (Ty->isVectorTy() && Ty->isPtrOrPtrVectorTy())
        continue;

      Value *Ptr = LI->getPointerOperand();
      unsigned AS = Ptr->getType()->getPointerAddressSpace();
      unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);

      unsigned VF = VecRegSize / TySize;
      VectorType *VecTy = dyn_cast<VectorType>(Ty);

      // No point in looking at these if they're too big to vectorize.
      if (TySize > VecRegSize / 2 ||
          (VecTy && TTI.getLoadVectorFactor(VF, TySize, TySize / 8, VecTy) == 0))
        continue;

      // Save the load locations.
      const ChainID ID = getChainID(Ptr);
      LoadRefs[ID].push_back(LI);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {
      if (!SI->isSimple())
        continue;

      // Skip if it's not legal.
      if (!TTI.isLegalToVectorizeStore(SI))
        continue;

      Type *Ty = SI->getValueOperand()->getType();
      if (!VectorType::isValidElementType(Ty->getScalarType()))
        continue;

      // Skip vectors of pointers. The vectorizeLoadChain/vectorizeStoreChain
      // functions are currently using an integer type for the vectorized
      // load/store, and do not support casting between the integer type and a
      // vector of pointers (e.g. i64 to <2 x i16*>).
      if (Ty->isVectorTy() && Ty->isPtrOrPtrVectorTy())
        continue;

      // Skip weird non-byte sizes. They probably aren't worth the effort of
      // handling correctly.
      unsigned TySize = DL.getTypeSizeInBits(Ty);
      if ((TySize % 8) != 0)
        continue;

      Value *Ptr = SI->getPointerOperand();
      unsigned AS = Ptr->getType()->getPointerAddressSpace();
      unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);

      unsigned VF = VecRegSize / TySize;
      VectorType *VecTy = dyn_cast<VectorType>(Ty);

      // No point in looking at these if they're too big to vectorize.
      if (TySize > VecRegSize / 2 ||
          (VecTy && TTI.getStoreVectorFactor(VF, TySize, TySize / 8, VecTy) == 0))
        continue;

      // Save store location.
      const ChainID ID = getChainID(Ptr);
      StoreRefs[ID].push_back(SI);
    }
  }

  return {LoadRefs, StoreRefs};
}

bool Vectorizer::vectorizeChains(InstrListMap &Map) {
  bool Changed = false;

  for (const std::pair<ChainID, InstrList> &Chain : Map) {
    unsigned Size = Chain.second.size();
    if (Size < 2)
      continue;

    LLVM_DEBUG(dbgs() << "LSV: Analyzing a chain of length " << Size << ".\n");

    // Process the instructions in chunks of 64.
    for (unsigned CI = 0, CE = Size; CI < CE; CI += 64) {
      unsigned Len = std::min<unsigned>(CE - CI, 64);
      ArrayRef<Instruction *> Chunk(&Chain.second[CI], Len);
      Changed |= vectorizeInstructions(Chunk);
    }
  }

  return Changed;
}

bool Vectorizer::vectorizeInstructions(ArrayRef<Instruction *> Instrs) {
  LLVM_DEBUG(dbgs() << "LSV: Vectorizing " << Instrs.size()
                    << " instructions.\n");
  SmallVector<int, 16> Heads, Tails;
  int ConsecutiveChain[64];

  // Do a quadratic search on all of the given loads/stores and find all of the
  // pairs of loads/stores that follow each other.
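  // For illustration (hypothetical): if Instrs[0], Instrs[1] and Instrs[2]
  // access addresses x, x+4 and x+8 with 4-byte accesses, the search leaves
  // ConsecutiveChain as {1, 2, -1}; Heads/Tails then describe the candidate
  // chain 0 -> 1 -> 2 that is handed to vectorizeLoadChain or
  // vectorizeStoreChain below.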
  for (int i = 0, e = Instrs.size(); i < e; ++i) {
    ConsecutiveChain[i] = -1;
    for (int j = e - 1; j >= 0; --j) {
      if (i == j)
        continue;

      if (isConsecutiveAccess(Instrs[i], Instrs[j])) {
        if (ConsecutiveChain[i] != -1) {
          int CurDistance = std::abs(ConsecutiveChain[i] - i);
          int NewDistance = std::abs(ConsecutiveChain[i] - j);
          if (j < i || NewDistance > CurDistance)
            continue; // Should not insert.
        }

        Tails.push_back(j);
        Heads.push_back(i);
        ConsecutiveChain[i] = j;
      }
    }
  }

  bool Changed = false;
  SmallPtrSet<Instruction *, 16> InstructionsProcessed;

  for (int Head : Heads) {
    if (InstructionsProcessed.count(Instrs[Head]))
      continue;
    bool LongerChainExists = false;
    for (unsigned TIt = 0; TIt < Tails.size(); TIt++)
      if (Head == Tails[TIt] &&
          !InstructionsProcessed.count(Instrs[Heads[TIt]])) {
        LongerChainExists = true;
        break;
      }
    if (LongerChainExists)
      continue;

    // We found an instr that starts a chain. Now follow the chain and try to
    // vectorize it.
    SmallVector<Instruction *, 16> Operands;
    int I = Head;
    while (I != -1 && (is_contained(Tails, I) || is_contained(Heads, I))) {
      if (InstructionsProcessed.count(Instrs[I]))
        break;

      Operands.push_back(Instrs[I]);
      I = ConsecutiveChain[I];
    }

    bool Vectorized = false;
    if (isa<LoadInst>(*Operands.begin()))
      Vectorized = vectorizeLoadChain(Operands, &InstructionsProcessed);
    else
      Vectorized = vectorizeStoreChain(Operands, &InstructionsProcessed);

    Changed |= Vectorized;
  }

  return Changed;
}

bool Vectorizer::vectorizeStoreChain(
    ArrayRef<Instruction *> Chain,
    SmallPtrSet<Instruction *, 16> *InstructionsProcessed) {
  StoreInst *S0 = cast<StoreInst>(Chain[0]);

  // If the vector has an int element, default to int for the whole store.
  Type *StoreTy = nullptr;
  for (Instruction *I : Chain) {
    StoreTy = cast<StoreInst>(I)->getValueOperand()->getType();
    if (StoreTy->isIntOrIntVectorTy())
      break;

    if (StoreTy->isPtrOrPtrVectorTy()) {
      StoreTy = Type::getIntNTy(F.getParent()->getContext(),
                                DL.getTypeSizeInBits(StoreTy));
      break;
    }
  }
  assert(StoreTy && "Failed to find store type");

  unsigned Sz = DL.getTypeSizeInBits(StoreTy);
  unsigned AS = S0->getPointerAddressSpace();
  unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
  unsigned VF = VecRegSize / Sz;
  unsigned ChainSize = Chain.size();
  Align Alignment = S0->getAlign();

  if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }

  ArrayRef<Instruction *> NewChain = getVectorizablePrefix(Chain);
  if (NewChain.empty()) {
    // No vectorization possible.
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }
  if (NewChain.size() == 1) {
    // Failed after the first instruction. Discard it and try the smaller
    // chain.
    InstructionsProcessed->insert(NewChain.front());
    return false;
  }

  // Update Chain to the valid vectorizable subchain.
  Chain = NewChain;
  ChainSize = Chain.size();

  // Check if it's legal to vectorize this chain. If not, split the chain and
  // try again.
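  // (As a hypothetical example of the splitting: a chain of seven one-byte
  // stores is 7 bytes, which splitOddVectorElts would break into a 4-byte
  // piece and a 3-byte piece.)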
  unsigned EltSzInBytes = Sz / 8;
  unsigned SzInBytes = EltSzInBytes * ChainSize;

  FixedVectorType *VecTy;
  auto *VecStoreTy = dyn_cast<FixedVectorType>(StoreTy);
  if (VecStoreTy)
    VecTy = FixedVectorType::get(StoreTy->getScalarType(),
                                 Chain.size() * VecStoreTy->getNumElements());
  else
    VecTy = FixedVectorType::get(StoreTy, Chain.size());

  // If it's more than the max vector size or the target has a better
  // vector factor, break it into two pieces.
  unsigned TargetVF = TTI.getStoreVectorFactor(VF, Sz, SzInBytes, VecTy);
  if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) {
    LLVM_DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor."
                         " Creating two separate arrays.\n");
    bool Vectorized = false;
    Vectorized |=
        vectorizeStoreChain(Chain.slice(0, TargetVF), InstructionsProcessed);
    Vectorized |=
        vectorizeStoreChain(Chain.slice(TargetVF), InstructionsProcessed);
    return Vectorized;
  }

  LLVM_DEBUG({
    dbgs() << "LSV: Stores to vectorize:\n";
    for (Instruction *I : Chain)
      dbgs() << "  " << *I << "\n";
  });

  // We won't try again to vectorize the elements of the chain, regardless of
  // whether we succeed below.
  InstructionsProcessed->insert(Chain.begin(), Chain.end());

  // If the store is going to be misaligned, don't vectorize it.
  if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
    if (S0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
      auto Chains = splitOddVectorElts(Chain, Sz);
      bool Vectorized = false;
      Vectorized |= vectorizeStoreChain(Chains.first, InstructionsProcessed);
      Vectorized |= vectorizeStoreChain(Chains.second, InstructionsProcessed);
      return Vectorized;
    }

    Align NewAlign = getOrEnforceKnownAlignment(S0->getPointerOperand(),
                                                Align(StackAdjustedAlignment),
                                                DL, S0, nullptr, &DT);
    if (NewAlign >= Alignment)
      Alignment = NewAlign;
    else
      return false;
  }

  if (!TTI.isLegalToVectorizeStoreChain(SzInBytes, Alignment, AS)) {
    auto Chains = splitOddVectorElts(Chain, Sz);
    bool Vectorized = false;
    Vectorized |= vectorizeStoreChain(Chains.first, InstructionsProcessed);
    Vectorized |= vectorizeStoreChain(Chains.second, InstructionsProcessed);
    return Vectorized;
  }

  BasicBlock::iterator First, Last;
  std::tie(First, Last) = getBoundaryInstrs(Chain);
  Builder.SetInsertPoint(&*Last);

  Value *Vec = PoisonValue::get(VecTy);

  if (VecStoreTy) {
    unsigned VecWidth = VecStoreTy->getNumElements();
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      StoreInst *Store = cast<StoreInst>(Chain[I]);
      for (unsigned J = 0, NE = VecStoreTy->getNumElements(); J != NE; ++J) {
        unsigned NewIdx = J + I * VecWidth;
        Value *Extract = Builder.CreateExtractElement(Store->getValueOperand(),
                                                      Builder.getInt32(J));
        if (Extract->getType() != StoreTy->getScalarType())
          Extract = Builder.CreateBitCast(Extract, StoreTy->getScalarType());

        Value *Insert =
            Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(NewIdx));
        Vec = Insert;
      }
    }
  } else {
    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
      StoreInst *Store = cast<StoreInst>(Chain[I]);
      Value *Extract = Store->getValueOperand();
      if (Extract->getType() != StoreTy->getScalarType())
        Extract =
            Builder.CreateBitOrPointerCast(Extract, StoreTy->getScalarType());

      Value *Insert =
          Builder.CreateInsertElement(Vec, Extract, Builder.getInt32(I));
      Vec = Insert;
    }
  }

  StoreInst *SI = Builder.CreateAlignedStore(
      Vec,
      Builder.CreateBitCast(S0->getPointerOperand(), VecTy->getPointerTo(AS)),
      Alignment);
  propagateMetadata(SI, Chain);

  eraseInstructions(Chain);
  ++NumVectorInstructions;
  NumScalarsVectorized += Chain.size();
  return true;
}

bool Vectorizer::vectorizeLoadChain(
    ArrayRef<Instruction *> Chain,
    SmallPtrSet<Instruction *, 16> *InstructionsProcessed) {
  LoadInst *L0 = cast<LoadInst>(Chain[0]);

  // If the vector has an int element, default to int for the whole load.
  Type *LoadTy = nullptr;
  for (const auto &V : Chain) {
    LoadTy = cast<LoadInst>(V)->getType();
    if (LoadTy->isIntOrIntVectorTy())
      break;

    if (LoadTy->isPtrOrPtrVectorTy()) {
      LoadTy = Type::getIntNTy(F.getParent()->getContext(),
                               DL.getTypeSizeInBits(LoadTy));
      break;
    }
  }
  assert(LoadTy && "Can't determine LoadInst type from chain");

  unsigned Sz = DL.getTypeSizeInBits(LoadTy);
  unsigned AS = L0->getPointerAddressSpace();
  unsigned VecRegSize = TTI.getLoadStoreVecRegBitWidth(AS);
  unsigned VF = VecRegSize / Sz;
  unsigned ChainSize = Chain.size();
  Align Alignment = L0->getAlign();

  if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2) {
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }

  ArrayRef<Instruction *> NewChain = getVectorizablePrefix(Chain);
  if (NewChain.empty()) {
    // No vectorization possible.
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return false;
  }
  if (NewChain.size() == 1) {
    // Failed after the first instruction. Discard it and try the smaller
    // chain.
    InstructionsProcessed->insert(NewChain.front());
    return false;
  }

  // Update Chain to the valid vectorizable subchain.
  Chain = NewChain;
  ChainSize = Chain.size();

  // Check if it's legal to vectorize this chain. If not, split the chain and
  // try again.
  unsigned EltSzInBytes = Sz / 8;
  unsigned SzInBytes = EltSzInBytes * ChainSize;
  VectorType *VecTy;
  auto *VecLoadTy = dyn_cast<FixedVectorType>(LoadTy);
  if (VecLoadTy)
    VecTy = FixedVectorType::get(LoadTy->getScalarType(),
                                 Chain.size() * VecLoadTy->getNumElements());
  else
    VecTy = FixedVectorType::get(LoadTy, Chain.size());

  // If it's more than the max vector size or the target has a better
  // vector factor, break it into two pieces.
  unsigned TargetVF = TTI.getLoadVectorFactor(VF, Sz, SzInBytes, VecTy);
  if (ChainSize > VF || (VF != TargetVF && TargetVF < ChainSize)) {
    LLVM_DEBUG(dbgs() << "LSV: Chain doesn't match with the vector factor."
                         " Creating two separate arrays.\n");
    bool Vectorized = false;
    Vectorized |=
        vectorizeLoadChain(Chain.slice(0, TargetVF), InstructionsProcessed);
    Vectorized |=
        vectorizeLoadChain(Chain.slice(TargetVF), InstructionsProcessed);
    return Vectorized;
  }

  // We won't try again to vectorize the elements of the chain, regardless of
  // whether we succeed below.
  InstructionsProcessed->insert(Chain.begin(), Chain.end());

  // If the load is going to be misaligned, don't vectorize it.
  if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
    if (L0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
      auto Chains = splitOddVectorElts(Chain, Sz);
      bool Vectorized = false;
      Vectorized |= vectorizeLoadChain(Chains.first, InstructionsProcessed);
      Vectorized |= vectorizeLoadChain(Chains.second, InstructionsProcessed);
      return Vectorized;
    }

    Align NewAlign = getOrEnforceKnownAlignment(L0->getPointerOperand(),
                                                Align(StackAdjustedAlignment),
                                                DL, L0, nullptr, &DT);
    if (NewAlign >= Alignment)
      Alignment = NewAlign;
    else
      return false;
  }

  if (!TTI.isLegalToVectorizeLoadChain(SzInBytes, Alignment, AS)) {
    auto Chains = splitOddVectorElts(Chain, Sz);
    bool Vectorized = false;
    Vectorized |= vectorizeLoadChain(Chains.first, InstructionsProcessed);
    Vectorized |= vectorizeLoadChain(Chains.second, InstructionsProcessed);
    return Vectorized;
  }

  LLVM_DEBUG({
    dbgs() << "LSV: Loads to vectorize:\n";
    for (Instruction *I : Chain)
      I->dump();
  });

  // getVectorizablePrefix already computed getBoundaryInstrs. The value of
  // Last may have changed since then, but the value of First won't have. If it
  // matters, we could compute getBoundaryInstrs only once and reuse it here.
  BasicBlock::iterator First, Last;
  std::tie(First, Last) = getBoundaryInstrs(Chain);
  Builder.SetInsertPoint(&*First);

  Value *Bitcast =
      Builder.CreateBitCast(L0->getPointerOperand(), VecTy->getPointerTo(AS));
  LoadInst *LI =
      Builder.CreateAlignedLoad(VecTy, Bitcast, MaybeAlign(Alignment));
  propagateMetadata(LI, Chain);

  for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
    Value *CV = Chain[I];
    Value *V;
    if (VecLoadTy) {
      // Extract a subvector using shufflevector.
      unsigned VecWidth = VecLoadTy->getNumElements();
      auto Mask =
          llvm::to_vector<8>(llvm::seq<int>(I * VecWidth, (I + 1) * VecWidth));
      V = Builder.CreateShuffleVector(LI, Mask, CV->getName());
    } else {
      V = Builder.CreateExtractElement(LI, Builder.getInt32(I), CV->getName());
    }

    if (V->getType() != CV->getType()) {
      V = Builder.CreateBitOrPointerCast(V, CV->getType());
    }

    // Replace the old instruction.
    CV->replaceAllUsesWith(V);
  }

  // Bitcast might not be an Instruction if the pointer operand is a constant
  // (e.g. a global); in that case there is nothing to reorder.
  if (Instruction *BitcastInst = dyn_cast<Instruction>(Bitcast))
    reorder(BitcastInst);

  eraseInstructions(Chain);

  ++NumVectorInstructions;
  NumScalarsVectorized += Chain.size();
  return true;
}

bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
                                    Align Alignment) {
  if (Alignment.value() % SzInBytes == 0)
    return false;

  bool Fast = false;
  bool Allows = TTI.allowsMisalignedMemoryAccesses(F.getParent()->getContext(),
                                                   SzInBytes * 8, AddressSpace,
                                                   Alignment, &Fast);
  LLVM_DEBUG(dbgs() << "LSV: Target said misaligned is allowed? " << Allows
                    << " and fast? " << Fast << "\n";);
  return !Allows || !Fast;
}
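
// A usage sketch (not part of this file): with the legacy pass manager the
// pass is created via createLoadStoreVectorizerPass() above, and with the new
// pass manager a target pipeline can simply add it as
//
//   FPM.addPass(LoadStoreVectorizerPass());
//
// where FPM is a FunctionPassManager.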