1 //===- LoopIdiomRecognize.cpp - Loop idiom recognition --------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass implements an idiom recognizer that transforms simple loops into a 10 // non-loop form. In cases that this kicks in, it can be a significant 11 // performance win. 12 // 13 // If compiling for code size we avoid idiom recognition if the resulting 14 // code could be larger than the code for the original loop. One way this could 15 // happen is if the loop is not removable after idiom recognition due to the 16 // presence of non-idiom instructions. The initial implementation of the 17 // heuristics applies to idioms in multi-block loops. 18 // 19 //===----------------------------------------------------------------------===// 20 // 21 // TODO List: 22 // 23 // Future loop memory idioms to recognize: 24 // memcmp, memmove, strlen, etc. 25 // Future floating point idioms to recognize in -ffast-math mode: 26 // fpowi 27 // Future integer operation idioms to recognize: 28 // ctpop 29 // 30 // Beware that isel's default lowering for ctpop is highly inefficient for 31 // i64 and larger types when i64 is legal and the value has few bits set. It 32 // would be good to enhance isel to emit a loop for ctpop in this case. 33 // 34 // This could recognize common matrix multiplies and dot product idioms and 35 // replace them with calls to BLAS (if linked in??). 36 // 37 //===----------------------------------------------------------------------===// 38 39 #include "llvm/Transforms/Scalar/LoopIdiomRecognize.h" 40 #include "llvm/ADT/APInt.h" 41 #include "llvm/ADT/ArrayRef.h" 42 #include "llvm/ADT/DenseMap.h" 43 #include "llvm/ADT/MapVector.h" 44 #include "llvm/ADT/SetVector.h" 45 #include "llvm/ADT/SmallPtrSet.h" 46 #include "llvm/ADT/SmallVector.h" 47 #include "llvm/ADT/Statistic.h" 48 #include "llvm/ADT/StringRef.h" 49 #include "llvm/Analysis/AliasAnalysis.h" 50 #include "llvm/Analysis/LoopAccessAnalysis.h" 51 #include "llvm/Analysis/LoopInfo.h" 52 #include "llvm/Analysis/LoopPass.h" 53 #include "llvm/Analysis/MemoryLocation.h" 54 #include "llvm/Analysis/MemorySSA.h" 55 #include "llvm/Analysis/MemorySSAUpdater.h" 56 #include "llvm/Analysis/MustExecute.h" 57 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 58 #include "llvm/Analysis/ScalarEvolution.h" 59 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 60 #include "llvm/Analysis/TargetLibraryInfo.h" 61 #include "llvm/Analysis/TargetTransformInfo.h" 62 #include "llvm/Analysis/ValueTracking.h" 63 #include "llvm/IR/Attributes.h" 64 #include "llvm/IR/BasicBlock.h" 65 #include "llvm/IR/Constant.h" 66 #include "llvm/IR/Constants.h" 67 #include "llvm/IR/DataLayout.h" 68 #include "llvm/IR/DebugLoc.h" 69 #include "llvm/IR/DerivedTypes.h" 70 #include "llvm/IR/Dominators.h" 71 #include "llvm/IR/GlobalValue.h" 72 #include "llvm/IR/GlobalVariable.h" 73 #include "llvm/IR/IRBuilder.h" 74 #include "llvm/IR/InstrTypes.h" 75 #include "llvm/IR/Instruction.h" 76 #include "llvm/IR/Instructions.h" 77 #include "llvm/IR/IntrinsicInst.h" 78 #include "llvm/IR/Intrinsics.h" 79 #include "llvm/IR/LLVMContext.h" 80 #include "llvm/IR/Module.h" 81 #include "llvm/IR/PassManager.h" 82 #include "llvm/IR/Type.h" 83 #include "llvm/IR/User.h" 84 #include "llvm/IR/Value.h" 85 #include "llvm/IR/ValueHandle.h" 86 #include "llvm/InitializePasses.h" 87 #include "llvm/Pass.h" 88 #include "llvm/Support/Casting.h" 89 #include "llvm/Support/CommandLine.h" 90 #include "llvm/Support/Debug.h" 91 #include "llvm/Support/raw_ostream.h" 92 #include "llvm/Transforms/Scalar.h" 93 #include "llvm/Transforms/Utils/BuildLibCalls.h" 94 #include "llvm/Transforms/Utils/Local.h" 95 #include "llvm/Transforms/Utils/LoopUtils.h" 96 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h" 97 #include <algorithm> 98 #include <cassert> 99 #include <cstdint> 100 #include <utility> 101 #include <vector> 102 103 using namespace llvm; 104 105 #define DEBUG_TYPE "loop-idiom" 106 107 STATISTIC(NumMemSet, "Number of memset's formed from loop stores"); 108 STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores"); 109 110 static cl::opt<bool> UseLIRCodeSizeHeurs( 111 "use-lir-code-size-heurs", 112 cl::desc("Use loop idiom recognition code size heuristics when compiling" 113 "with -Os/-Oz"), 114 cl::init(true), cl::Hidden); 115 116 namespace { 117 118 class LoopIdiomRecognize { 119 Loop *CurLoop = nullptr; 120 AliasAnalysis *AA; 121 DominatorTree *DT; 122 LoopInfo *LI; 123 ScalarEvolution *SE; 124 TargetLibraryInfo *TLI; 125 const TargetTransformInfo *TTI; 126 const DataLayout *DL; 127 OptimizationRemarkEmitter &ORE; 128 bool ApplyCodeSizeHeuristics; 129 std::unique_ptr<MemorySSAUpdater> MSSAU; 130 131 public: 132 explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT, 133 LoopInfo *LI, ScalarEvolution *SE, 134 TargetLibraryInfo *TLI, 135 const TargetTransformInfo *TTI, MemorySSA *MSSA, 136 const DataLayout *DL, 137 OptimizationRemarkEmitter &ORE) 138 : AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL), ORE(ORE) { 139 if (MSSA) 140 MSSAU = std::make_unique<MemorySSAUpdater>(MSSA); 141 } 142 143 bool runOnLoop(Loop *L); 144 145 private: 146 using StoreList = SmallVector<StoreInst *, 8>; 147 using StoreListMap = MapVector<Value *, StoreList>; 148 149 StoreListMap StoreRefsForMemset; 150 StoreListMap StoreRefsForMemsetPattern; 151 StoreList StoreRefsForMemcpy; 152 bool HasMemset; 153 bool HasMemsetPattern; 154 bool HasMemcpy; 155 156 /// Return code for isLegalStore() 157 enum LegalStoreKind { 158 None = 0, 159 Memset, 160 MemsetPattern, 161 Memcpy, 162 UnorderedAtomicMemcpy, 163 DontUse // Dummy retval never to be used. Allows catching errors in retval 164 // handling. 165 }; 166 167 /// \name Countable Loop Idiom Handling 168 /// @{ 169 170 bool runOnCountableLoop(); 171 bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount, 172 SmallVectorImpl<BasicBlock *> &ExitBlocks); 173 174 void collectStores(BasicBlock *BB); 175 LegalStoreKind isLegalStore(StoreInst *SI); 176 enum class ForMemset { No, Yes }; 177 bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount, 178 ForMemset For); 179 bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount); 180 181 bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize, 182 MaybeAlign StoreAlignment, Value *StoredVal, 183 Instruction *TheStore, 184 SmallPtrSetImpl<Instruction *> &Stores, 185 const SCEVAddRecExpr *Ev, const SCEV *BECount, 186 bool NegStride, bool IsLoopMemset = false); 187 bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount); 188 bool avoidLIRForMultiBlockLoop(bool IsMemset = false, 189 bool IsLoopMemset = false); 190 191 /// @} 192 /// \name Noncountable Loop Idiom Handling 193 /// @{ 194 195 bool runOnNoncountableLoop(); 196 197 bool recognizePopcount(); 198 void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst, 199 PHINode *CntPhi, Value *Var); 200 bool recognizeAndInsertFFS(); /// Find First Set: ctlz or cttz 201 void transformLoopToCountable(Intrinsic::ID IntrinID, BasicBlock *PreCondBB, 202 Instruction *CntInst, PHINode *CntPhi, 203 Value *Var, Instruction *DefX, 204 const DebugLoc &DL, bool ZeroCheck, 205 bool IsCntPhiUsedOutsideLoop); 206 207 /// @} 208 }; 209 210 class LoopIdiomRecognizeLegacyPass : public LoopPass { 211 public: 212 static char ID; 213 214 explicit LoopIdiomRecognizeLegacyPass() : LoopPass(ID) { 215 initializeLoopIdiomRecognizeLegacyPassPass( 216 *PassRegistry::getPassRegistry()); 217 } 218 219 bool runOnLoop(Loop *L, LPPassManager &LPM) override { 220 if (skipLoop(L)) 221 return false; 222 223 AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 224 DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 225 LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 226 ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 227 TargetLibraryInfo *TLI = 228 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI( 229 *L->getHeader()->getParent()); 230 const TargetTransformInfo *TTI = 231 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI( 232 *L->getHeader()->getParent()); 233 const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout(); 234 auto *MSSAAnalysis = getAnalysisIfAvailable<MemorySSAWrapperPass>(); 235 MemorySSA *MSSA = nullptr; 236 if (MSSAAnalysis) 237 MSSA = &MSSAAnalysis->getMSSA(); 238 239 // For the old PM, we can't use OptimizationRemarkEmitter as an analysis 240 // pass. Function analyses need to be preserved across loop transformations 241 // but ORE cannot be preserved (see comment before the pass definition). 242 OptimizationRemarkEmitter ORE(L->getHeader()->getParent()); 243 244 LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, MSSA, DL, ORE); 245 return LIR.runOnLoop(L); 246 } 247 248 /// This transformation requires natural loop information & requires that 249 /// loop preheaders be inserted into the CFG. 250 void getAnalysisUsage(AnalysisUsage &AU) const override { 251 AU.addRequired<TargetLibraryInfoWrapperPass>(); 252 AU.addRequired<TargetTransformInfoWrapperPass>(); 253 AU.addPreserved<MemorySSAWrapperPass>(); 254 getLoopAnalysisUsage(AU); 255 } 256 }; 257 258 } // end anonymous namespace 259 260 char LoopIdiomRecognizeLegacyPass::ID = 0; 261 262 PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM, 263 LoopStandardAnalysisResults &AR, 264 LPMUpdater &) { 265 const auto *DL = &L.getHeader()->getModule()->getDataLayout(); 266 267 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis 268 // pass. Function analyses need to be preserved across loop transformations 269 // but ORE cannot be preserved (see comment before the pass definition). 270 OptimizationRemarkEmitter ORE(L.getHeader()->getParent()); 271 272 LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI, 273 AR.MSSA, DL, ORE); 274 if (!LIR.runOnLoop(&L)) 275 return PreservedAnalyses::all(); 276 277 auto PA = getLoopPassPreservedAnalyses(); 278 if (AR.MSSA) 279 PA.preserve<MemorySSAAnalysis>(); 280 return PA; 281 } 282 283 INITIALIZE_PASS_BEGIN(LoopIdiomRecognizeLegacyPass, "loop-idiom", 284 "Recognize loop idioms", false, false) 285 INITIALIZE_PASS_DEPENDENCY(LoopPass) 286 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 287 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 288 INITIALIZE_PASS_END(LoopIdiomRecognizeLegacyPass, "loop-idiom", 289 "Recognize loop idioms", false, false) 290 291 Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognizeLegacyPass(); } 292 293 static void deleteDeadInstruction(Instruction *I) { 294 I->replaceAllUsesWith(UndefValue::get(I->getType())); 295 I->eraseFromParent(); 296 } 297 298 //===----------------------------------------------------------------------===// 299 // 300 // Implementation of LoopIdiomRecognize 301 // 302 //===----------------------------------------------------------------------===// 303 304 bool LoopIdiomRecognize::runOnLoop(Loop *L) { 305 CurLoop = L; 306 // If the loop could not be converted to canonical form, it must have an 307 // indirectbr in it, just give up. 308 if (!L->getLoopPreheader()) 309 return false; 310 311 // Disable loop idiom recognition if the function's name is a common idiom. 312 StringRef Name = L->getHeader()->getParent()->getName(); 313 if (Name == "memset" || Name == "memcpy") 314 return false; 315 316 // Determine if code size heuristics need to be applied. 317 ApplyCodeSizeHeuristics = 318 L->getHeader()->getParent()->hasOptSize() && UseLIRCodeSizeHeurs; 319 320 HasMemset = TLI->has(LibFunc_memset); 321 HasMemsetPattern = TLI->has(LibFunc_memset_pattern16); 322 HasMemcpy = TLI->has(LibFunc_memcpy); 323 324 if (HasMemset || HasMemsetPattern || HasMemcpy) 325 if (SE->hasLoopInvariantBackedgeTakenCount(L)) 326 return runOnCountableLoop(); 327 328 return runOnNoncountableLoop(); 329 } 330 331 bool LoopIdiomRecognize::runOnCountableLoop() { 332 const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop); 333 assert(!isa<SCEVCouldNotCompute>(BECount) && 334 "runOnCountableLoop() called on a loop without a predictable" 335 "backedge-taken count"); 336 337 // If this loop executes exactly one time, then it should be peeled, not 338 // optimized by this pass. 339 if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount)) 340 if (BECst->getAPInt() == 0) 341 return false; 342 343 SmallVector<BasicBlock *, 8> ExitBlocks; 344 CurLoop->getUniqueExitBlocks(ExitBlocks); 345 346 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F[" 347 << CurLoop->getHeader()->getParent()->getName() 348 << "] Countable Loop %" << CurLoop->getHeader()->getName() 349 << "\n"); 350 351 // The following transforms hoist stores/memsets into the loop pre-header. 352 // Give up if the loop has instructions that may throw. 353 SimpleLoopSafetyInfo SafetyInfo; 354 SafetyInfo.computeLoopSafetyInfo(CurLoop); 355 if (SafetyInfo.anyBlockMayThrow()) 356 return false; 357 358 bool MadeChange = false; 359 360 // Scan all the blocks in the loop that are not in subloops. 361 for (auto *BB : CurLoop->getBlocks()) { 362 // Ignore blocks in subloops. 363 if (LI->getLoopFor(BB) != CurLoop) 364 continue; 365 366 MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks); 367 } 368 return MadeChange; 369 } 370 371 static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) { 372 const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1)); 373 return ConstStride->getAPInt(); 374 } 375 376 /// getMemSetPatternValue - If a strided store of the specified value is safe to 377 /// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should 378 /// be passed in. Otherwise, return null. 379 /// 380 /// Note that we don't ever attempt to use memset_pattern8 or 4, because these 381 /// just replicate their input array and then pass on to memset_pattern16. 382 static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) { 383 // FIXME: This could check for UndefValue because it can be merged into any 384 // other valid pattern. 385 386 // If the value isn't a constant, we can't promote it to being in a constant 387 // array. We could theoretically do a store to an alloca or something, but 388 // that doesn't seem worthwhile. 389 Constant *C = dyn_cast<Constant>(V); 390 if (!C) 391 return nullptr; 392 393 // Only handle simple values that are a power of two bytes in size. 394 uint64_t Size = DL->getTypeSizeInBits(V->getType()); 395 if (Size == 0 || (Size & 7) || (Size & (Size - 1))) 396 return nullptr; 397 398 // Don't care enough about darwin/ppc to implement this. 399 if (DL->isBigEndian()) 400 return nullptr; 401 402 // Convert to size in bytes. 403 Size /= 8; 404 405 // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see 406 // if the top and bottom are the same (e.g. for vectors and large integers). 407 if (Size > 16) 408 return nullptr; 409 410 // If the constant is exactly 16 bytes, just use it. 411 if (Size == 16) 412 return C; 413 414 // Otherwise, we'll use an array of the constants. 415 unsigned ArraySize = 16 / Size; 416 ArrayType *AT = ArrayType::get(V->getType(), ArraySize); 417 return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C)); 418 } 419 420 LoopIdiomRecognize::LegalStoreKind 421 LoopIdiomRecognize::isLegalStore(StoreInst *SI) { 422 // Don't touch volatile stores. 423 if (SI->isVolatile()) 424 return LegalStoreKind::None; 425 // We only want simple or unordered-atomic stores. 426 if (!SI->isUnordered()) 427 return LegalStoreKind::None; 428 429 // Don't convert stores of non-integral pointer types to memsets (which stores 430 // integers). 431 if (DL->isNonIntegralPointerType(SI->getValueOperand()->getType())) 432 return LegalStoreKind::None; 433 434 // Avoid merging nontemporal stores. 435 if (SI->getMetadata(LLVMContext::MD_nontemporal)) 436 return LegalStoreKind::None; 437 438 Value *StoredVal = SI->getValueOperand(); 439 Value *StorePtr = SI->getPointerOperand(); 440 441 // Reject stores that are so large that they overflow an unsigned. 442 uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType()); 443 if ((SizeInBits & 7) || (SizeInBits >> 32) != 0) 444 return LegalStoreKind::None; 445 446 // See if the pointer expression is an AddRec like {base,+,1} on the current 447 // loop, which indicates a strided store. If we have something else, it's a 448 // random store we can't handle. 449 const SCEVAddRecExpr *StoreEv = 450 dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr)); 451 if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine()) 452 return LegalStoreKind::None; 453 454 // Check to see if we have a constant stride. 455 if (!isa<SCEVConstant>(StoreEv->getOperand(1))) 456 return LegalStoreKind::None; 457 458 // See if the store can be turned into a memset. 459 460 // If the stored value is a byte-wise value (like i32 -1), then it may be 461 // turned into a memset of i8 -1, assuming that all the consecutive bytes 462 // are stored. A store of i32 0x01020304 can never be turned into a memset, 463 // but it can be turned into memset_pattern if the target supports it. 464 Value *SplatValue = isBytewiseValue(StoredVal, *DL); 465 Constant *PatternValue = nullptr; 466 467 // Note: memset and memset_pattern on unordered-atomic is yet not supported 468 bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple(); 469 470 // If we're allowed to form a memset, and the stored value would be 471 // acceptable for memset, use it. 472 if (!UnorderedAtomic && HasMemset && SplatValue && 473 // Verify that the stored value is loop invariant. If not, we can't 474 // promote the memset. 475 CurLoop->isLoopInvariant(SplatValue)) { 476 // It looks like we can use SplatValue. 477 return LegalStoreKind::Memset; 478 } else if (!UnorderedAtomic && HasMemsetPattern && 479 // Don't create memset_pattern16s with address spaces. 480 StorePtr->getType()->getPointerAddressSpace() == 0 && 481 (PatternValue = getMemSetPatternValue(StoredVal, DL))) { 482 // It looks like we can use PatternValue! 483 return LegalStoreKind::MemsetPattern; 484 } 485 486 // Otherwise, see if the store can be turned into a memcpy. 487 if (HasMemcpy) { 488 // Check to see if the stride matches the size of the store. If so, then we 489 // know that every byte is touched in the loop. 490 APInt Stride = getStoreStride(StoreEv); 491 unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType()); 492 if (StoreSize != Stride && StoreSize != -Stride) 493 return LegalStoreKind::None; 494 495 // The store must be feeding a non-volatile load. 496 LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand()); 497 498 // Only allow non-volatile loads 499 if (!LI || LI->isVolatile()) 500 return LegalStoreKind::None; 501 // Only allow simple or unordered-atomic loads 502 if (!LI->isUnordered()) 503 return LegalStoreKind::None; 504 505 // See if the pointer expression is an AddRec like {base,+,1} on the current 506 // loop, which indicates a strided load. If we have something else, it's a 507 // random load we can't handle. 508 const SCEVAddRecExpr *LoadEv = 509 dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand())); 510 if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine()) 511 return LegalStoreKind::None; 512 513 // The store and load must share the same stride. 514 if (StoreEv->getOperand(1) != LoadEv->getOperand(1)) 515 return LegalStoreKind::None; 516 517 // Success. This store can be converted into a memcpy. 518 UnorderedAtomic = UnorderedAtomic || LI->isAtomic(); 519 return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy 520 : LegalStoreKind::Memcpy; 521 } 522 // This store can't be transformed into a memset/memcpy. 523 return LegalStoreKind::None; 524 } 525 526 void LoopIdiomRecognize::collectStores(BasicBlock *BB) { 527 StoreRefsForMemset.clear(); 528 StoreRefsForMemsetPattern.clear(); 529 StoreRefsForMemcpy.clear(); 530 for (Instruction &I : *BB) { 531 StoreInst *SI = dyn_cast<StoreInst>(&I); 532 if (!SI) 533 continue; 534 535 // Make sure this is a strided store with a constant stride. 536 switch (isLegalStore(SI)) { 537 case LegalStoreKind::None: 538 // Nothing to do 539 break; 540 case LegalStoreKind::Memset: { 541 // Find the base pointer. 542 Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL); 543 StoreRefsForMemset[Ptr].push_back(SI); 544 } break; 545 case LegalStoreKind::MemsetPattern: { 546 // Find the base pointer. 547 Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL); 548 StoreRefsForMemsetPattern[Ptr].push_back(SI); 549 } break; 550 case LegalStoreKind::Memcpy: 551 case LegalStoreKind::UnorderedAtomicMemcpy: 552 StoreRefsForMemcpy.push_back(SI); 553 break; 554 default: 555 assert(false && "unhandled return value"); 556 break; 557 } 558 } 559 } 560 561 /// runOnLoopBlock - Process the specified block, which lives in a counted loop 562 /// with the specified backedge count. This block is known to be in the current 563 /// loop and not in any subloops. 564 bool LoopIdiomRecognize::runOnLoopBlock( 565 BasicBlock *BB, const SCEV *BECount, 566 SmallVectorImpl<BasicBlock *> &ExitBlocks) { 567 // We can only promote stores in this block if they are unconditionally 568 // executed in the loop. For a block to be unconditionally executed, it has 569 // to dominate all the exit blocks of the loop. Verify this now. 570 for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) 571 if (!DT->dominates(BB, ExitBlocks[i])) 572 return false; 573 574 bool MadeChange = false; 575 // Look for store instructions, which may be optimized to memset/memcpy. 576 collectStores(BB); 577 578 // Look for a single store or sets of stores with a common base, which can be 579 // optimized into a memset (memset_pattern). The latter most commonly happens 580 // with structs and handunrolled loops. 581 for (auto &SL : StoreRefsForMemset) 582 MadeChange |= processLoopStores(SL.second, BECount, ForMemset::Yes); 583 584 for (auto &SL : StoreRefsForMemsetPattern) 585 MadeChange |= processLoopStores(SL.second, BECount, ForMemset::No); 586 587 // Optimize the store into a memcpy, if it feeds an similarly strided load. 588 for (auto &SI : StoreRefsForMemcpy) 589 MadeChange |= processLoopStoreOfLoopLoad(SI, BECount); 590 591 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 592 Instruction *Inst = &*I++; 593 // Look for memset instructions, which may be optimized to a larger memset. 594 if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) { 595 WeakTrackingVH InstPtr(&*I); 596 if (!processLoopMemSet(MSI, BECount)) 597 continue; 598 MadeChange = true; 599 600 // If processing the memset invalidated our iterator, start over from the 601 // top of the block. 602 if (!InstPtr) 603 I = BB->begin(); 604 continue; 605 } 606 } 607 608 return MadeChange; 609 } 610 611 /// See if this store(s) can be promoted to a memset. 612 bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL, 613 const SCEV *BECount, ForMemset For) { 614 // Try to find consecutive stores that can be transformed into memsets. 615 SetVector<StoreInst *> Heads, Tails; 616 SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain; 617 618 // Do a quadratic search on all of the given stores and find 619 // all of the pairs of stores that follow each other. 620 SmallVector<unsigned, 16> IndexQueue; 621 for (unsigned i = 0, e = SL.size(); i < e; ++i) { 622 assert(SL[i]->isSimple() && "Expected only non-volatile stores."); 623 624 Value *FirstStoredVal = SL[i]->getValueOperand(); 625 Value *FirstStorePtr = SL[i]->getPointerOperand(); 626 const SCEVAddRecExpr *FirstStoreEv = 627 cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr)); 628 APInt FirstStride = getStoreStride(FirstStoreEv); 629 unsigned FirstStoreSize = DL->getTypeStoreSize(SL[i]->getValueOperand()->getType()); 630 631 // See if we can optimize just this store in isolation. 632 if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) { 633 Heads.insert(SL[i]); 634 continue; 635 } 636 637 Value *FirstSplatValue = nullptr; 638 Constant *FirstPatternValue = nullptr; 639 640 if (For == ForMemset::Yes) 641 FirstSplatValue = isBytewiseValue(FirstStoredVal, *DL); 642 else 643 FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL); 644 645 assert((FirstSplatValue || FirstPatternValue) && 646 "Expected either splat value or pattern value."); 647 648 IndexQueue.clear(); 649 // If a store has multiple consecutive store candidates, search Stores 650 // array according to the sequence: from i+1 to e, then from i-1 to 0. 651 // This is because usually pairing with immediate succeeding or preceding 652 // candidate create the best chance to find memset opportunity. 653 unsigned j = 0; 654 for (j = i + 1; j < e; ++j) 655 IndexQueue.push_back(j); 656 for (j = i; j > 0; --j) 657 IndexQueue.push_back(j - 1); 658 659 for (auto &k : IndexQueue) { 660 assert(SL[k]->isSimple() && "Expected only non-volatile stores."); 661 Value *SecondStorePtr = SL[k]->getPointerOperand(); 662 const SCEVAddRecExpr *SecondStoreEv = 663 cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr)); 664 APInt SecondStride = getStoreStride(SecondStoreEv); 665 666 if (FirstStride != SecondStride) 667 continue; 668 669 Value *SecondStoredVal = SL[k]->getValueOperand(); 670 Value *SecondSplatValue = nullptr; 671 Constant *SecondPatternValue = nullptr; 672 673 if (For == ForMemset::Yes) 674 SecondSplatValue = isBytewiseValue(SecondStoredVal, *DL); 675 else 676 SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL); 677 678 assert((SecondSplatValue || SecondPatternValue) && 679 "Expected either splat value or pattern value."); 680 681 if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) { 682 if (For == ForMemset::Yes) { 683 if (isa<UndefValue>(FirstSplatValue)) 684 FirstSplatValue = SecondSplatValue; 685 if (FirstSplatValue != SecondSplatValue) 686 continue; 687 } else { 688 if (isa<UndefValue>(FirstPatternValue)) 689 FirstPatternValue = SecondPatternValue; 690 if (FirstPatternValue != SecondPatternValue) 691 continue; 692 } 693 Tails.insert(SL[k]); 694 Heads.insert(SL[i]); 695 ConsecutiveChain[SL[i]] = SL[k]; 696 break; 697 } 698 } 699 } 700 701 // We may run into multiple chains that merge into a single chain. We mark the 702 // stores that we transformed so that we don't visit the same store twice. 703 SmallPtrSet<Value *, 16> TransformedStores; 704 bool Changed = false; 705 706 // For stores that start but don't end a link in the chain: 707 for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end(); 708 it != e; ++it) { 709 if (Tails.count(*it)) 710 continue; 711 712 // We found a store instr that starts a chain. Now follow the chain and try 713 // to transform it. 714 SmallPtrSet<Instruction *, 8> AdjacentStores; 715 StoreInst *I = *it; 716 717 StoreInst *HeadStore = I; 718 unsigned StoreSize = 0; 719 720 // Collect the chain into a list. 721 while (Tails.count(I) || Heads.count(I)) { 722 if (TransformedStores.count(I)) 723 break; 724 AdjacentStores.insert(I); 725 726 StoreSize += DL->getTypeStoreSize(I->getValueOperand()->getType()); 727 // Move to the next value in the chain. 728 I = ConsecutiveChain[I]; 729 } 730 731 Value *StoredVal = HeadStore->getValueOperand(); 732 Value *StorePtr = HeadStore->getPointerOperand(); 733 const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr)); 734 APInt Stride = getStoreStride(StoreEv); 735 736 // Check to see if the stride matches the size of the stores. If so, then 737 // we know that every byte is touched in the loop. 738 if (StoreSize != Stride && StoreSize != -Stride) 739 continue; 740 741 bool NegStride = StoreSize == -Stride; 742 743 if (processLoopStridedStore(StorePtr, StoreSize, 744 MaybeAlign(HeadStore->getAlignment()), 745 StoredVal, HeadStore, AdjacentStores, StoreEv, 746 BECount, NegStride)) { 747 TransformedStores.insert(AdjacentStores.begin(), AdjacentStores.end()); 748 Changed = true; 749 } 750 } 751 752 return Changed; 753 } 754 755 /// processLoopMemSet - See if this memset can be promoted to a large memset. 756 bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI, 757 const SCEV *BECount) { 758 // We can only handle non-volatile memsets with a constant size. 759 if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength())) 760 return false; 761 762 // If we're not allowed to hack on memset, we fail. 763 if (!HasMemset) 764 return false; 765 766 Value *Pointer = MSI->getDest(); 767 768 // See if the pointer expression is an AddRec like {base,+,1} on the current 769 // loop, which indicates a strided store. If we have something else, it's a 770 // random store we can't handle. 771 const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer)); 772 if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine()) 773 return false; 774 775 // Reject memsets that are so large that they overflow an unsigned. 776 uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue(); 777 if ((SizeInBytes >> 32) != 0) 778 return false; 779 780 // Check to see if the stride matches the size of the memset. If so, then we 781 // know that every byte is touched in the loop. 782 const SCEVConstant *ConstStride = dyn_cast<SCEVConstant>(Ev->getOperand(1)); 783 if (!ConstStride) 784 return false; 785 786 APInt Stride = ConstStride->getAPInt(); 787 if (SizeInBytes != Stride && SizeInBytes != -Stride) 788 return false; 789 790 // Verify that the memset value is loop invariant. If not, we can't promote 791 // the memset. 792 Value *SplatValue = MSI->getValue(); 793 if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue)) 794 return false; 795 796 SmallPtrSet<Instruction *, 1> MSIs; 797 MSIs.insert(MSI); 798 bool NegStride = SizeInBytes == -Stride; 799 return processLoopStridedStore( 800 Pointer, (unsigned)SizeInBytes, MaybeAlign(MSI->getDestAlignment()), 801 SplatValue, MSI, MSIs, Ev, BECount, NegStride, /*IsLoopMemset=*/true); 802 } 803 804 /// mayLoopAccessLocation - Return true if the specified loop might access the 805 /// specified pointer location, which is a loop-strided access. The 'Access' 806 /// argument specifies what the verboten forms of access are (read or write). 807 static bool 808 mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L, 809 const SCEV *BECount, unsigned StoreSize, 810 AliasAnalysis &AA, 811 SmallPtrSetImpl<Instruction *> &IgnoredStores) { 812 // Get the location that may be stored across the loop. Since the access is 813 // strided positively through memory, we say that the modified location starts 814 // at the pointer and has infinite size. 815 LocationSize AccessSize = LocationSize::unknown(); 816 817 // If the loop iterates a fixed number of times, we can refine the access size 818 // to be exactly the size of the memset, which is (BECount+1)*StoreSize 819 if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount)) 820 AccessSize = LocationSize::precise((BECst->getValue()->getZExtValue() + 1) * 821 StoreSize); 822 823 // TODO: For this to be really effective, we have to dive into the pointer 824 // operand in the store. Store to &A[i] of 100 will always return may alias 825 // with store of &A[100], we need to StoreLoc to be "A" with size of 100, 826 // which will then no-alias a store to &A[100]. 827 MemoryLocation StoreLoc(Ptr, AccessSize); 828 829 for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E; 830 ++BI) 831 for (Instruction &I : **BI) 832 if (IgnoredStores.count(&I) == 0 && 833 isModOrRefSet( 834 intersectModRef(AA.getModRefInfo(&I, StoreLoc), Access))) 835 return true; 836 837 return false; 838 } 839 840 // If we have a negative stride, Start refers to the end of the memory location 841 // we're trying to memset. Therefore, we need to recompute the base pointer, 842 // which is just Start - BECount*Size. 843 static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount, 844 Type *IntPtr, unsigned StoreSize, 845 ScalarEvolution *SE) { 846 const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr); 847 if (StoreSize != 1) 848 Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize), 849 SCEV::FlagNUW); 850 return SE->getMinusSCEV(Start, Index); 851 } 852 853 /// Compute the number of bytes as a SCEV from the backedge taken count. 854 /// 855 /// This also maps the SCEV into the provided type and tries to handle the 856 /// computation in a way that will fold cleanly. 857 static const SCEV *getNumBytes(const SCEV *BECount, Type *IntPtr, 858 unsigned StoreSize, Loop *CurLoop, 859 const DataLayout *DL, ScalarEvolution *SE) { 860 const SCEV *NumBytesS; 861 // The # stored bytes is (BECount+1)*Size. Expand the trip count out to 862 // pointer size if it isn't already. 863 // 864 // If we're going to need to zero extend the BE count, check if we can add 865 // one to it prior to zero extending without overflow. Provided this is safe, 866 // it allows better simplification of the +1. 867 if (DL->getTypeSizeInBits(BECount->getType()) < 868 DL->getTypeSizeInBits(IntPtr) && 869 SE->isLoopEntryGuardedByCond( 870 CurLoop, ICmpInst::ICMP_NE, BECount, 871 SE->getNegativeSCEV(SE->getOne(BECount->getType())))) { 872 NumBytesS = SE->getZeroExtendExpr( 873 SE->getAddExpr(BECount, SE->getOne(BECount->getType()), SCEV::FlagNUW), 874 IntPtr); 875 } else { 876 NumBytesS = SE->getAddExpr(SE->getTruncateOrZeroExtend(BECount, IntPtr), 877 SE->getOne(IntPtr), SCEV::FlagNUW); 878 } 879 880 // And scale it based on the store size. 881 if (StoreSize != 1) { 882 NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize), 883 SCEV::FlagNUW); 884 } 885 return NumBytesS; 886 } 887 888 /// processLoopStridedStore - We see a strided store of some value. If we can 889 /// transform this into a memset or memset_pattern in the loop preheader, do so. 890 bool LoopIdiomRecognize::processLoopStridedStore( 891 Value *DestPtr, unsigned StoreSize, MaybeAlign StoreAlignment, 892 Value *StoredVal, Instruction *TheStore, 893 SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev, 894 const SCEV *BECount, bool NegStride, bool IsLoopMemset) { 895 Value *SplatValue = isBytewiseValue(StoredVal, *DL); 896 Constant *PatternValue = nullptr; 897 898 if (!SplatValue) 899 PatternValue = getMemSetPatternValue(StoredVal, DL); 900 901 assert((SplatValue || PatternValue) && 902 "Expected either splat value or pattern value."); 903 904 // The trip count of the loop and the base pointer of the addrec SCEV is 905 // guaranteed to be loop invariant, which means that it should dominate the 906 // header. This allows us to insert code for it in the preheader. 907 unsigned DestAS = DestPtr->getType()->getPointerAddressSpace(); 908 BasicBlock *Preheader = CurLoop->getLoopPreheader(); 909 IRBuilder<> Builder(Preheader->getTerminator()); 910 SCEVExpander Expander(*SE, *DL, "loop-idiom"); 911 912 Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS); 913 Type *IntIdxTy = DL->getIndexType(DestPtr->getType()); 914 915 const SCEV *Start = Ev->getStart(); 916 // Handle negative strided loops. 917 if (NegStride) 918 Start = getStartForNegStride(Start, BECount, IntIdxTy, StoreSize, SE); 919 920 // TODO: ideally we should still be able to generate memset if SCEV expander 921 // is taught to generate the dependencies at the latest point. 922 if (!isSafeToExpand(Start, *SE)) 923 return false; 924 925 // Okay, we have a strided store "p[i]" of a splattable value. We can turn 926 // this into a memset in the loop preheader now if we want. However, this 927 // would be unsafe to do if there is anything else in the loop that may read 928 // or write to the aliased location. Check for any overlap by generating the 929 // base pointer and checking the region. 930 Value *BasePtr = 931 Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator()); 932 if (mayLoopAccessLocation(BasePtr, ModRefInfo::ModRef, CurLoop, BECount, 933 StoreSize, *AA, Stores)) { 934 Expander.clear(); 935 // If we generated new code for the base pointer, clean up. 936 RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI); 937 return false; 938 } 939 940 if (avoidLIRForMultiBlockLoop(/*IsMemset=*/true, IsLoopMemset)) 941 return false; 942 943 // Okay, everything looks good, insert the memset. 944 945 const SCEV *NumBytesS = 946 getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE); 947 948 // TODO: ideally we should still be able to generate memset if SCEV expander 949 // is taught to generate the dependencies at the latest point. 950 if (!isSafeToExpand(NumBytesS, *SE)) 951 return false; 952 953 Value *NumBytes = 954 Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator()); 955 956 CallInst *NewCall; 957 if (SplatValue) { 958 NewCall = Builder.CreateMemSet(BasePtr, SplatValue, NumBytes, 959 MaybeAlign(StoreAlignment)); 960 } else { 961 // Everything is emitted in default address space 962 Type *Int8PtrTy = DestInt8PtrTy; 963 964 Module *M = TheStore->getModule(); 965 StringRef FuncName = "memset_pattern16"; 966 FunctionCallee MSP = M->getOrInsertFunction(FuncName, Builder.getVoidTy(), 967 Int8PtrTy, Int8PtrTy, IntIdxTy); 968 inferLibFuncAttributes(M, FuncName, *TLI); 969 970 // Otherwise we should form a memset_pattern16. PatternValue is known to be 971 // an constant array of 16-bytes. Plop the value into a mergable global. 972 GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true, 973 GlobalValue::PrivateLinkage, 974 PatternValue, ".memset_pattern"); 975 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these. 976 GV->setAlignment(Align(16)); 977 Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy); 978 NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes}); 979 } 980 NewCall->setDebugLoc(TheStore->getDebugLoc()); 981 982 if (MSSAU) { 983 MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB( 984 NewCall, nullptr, NewCall->getParent(), MemorySSA::BeforeTerminator); 985 MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true); 986 } 987 988 LLVM_DEBUG(dbgs() << " Formed memset: " << *NewCall << "\n" 989 << " from store to: " << *Ev << " at: " << *TheStore 990 << "\n"); 991 992 ORE.emit([&]() { 993 return OptimizationRemark(DEBUG_TYPE, "ProcessLoopStridedStore", 994 NewCall->getDebugLoc(), Preheader) 995 << "Transformed loop-strided store into a call to " 996 << ore::NV("NewFunction", NewCall->getCalledFunction()) 997 << "() function"; 998 }); 999 1000 // Okay, the memset has been formed. Zap the original store and anything that 1001 // feeds into it. 1002 for (auto *I : Stores) { 1003 if (MSSAU) 1004 MSSAU->removeMemoryAccess(I, true); 1005 deleteDeadInstruction(I); 1006 } 1007 if (MSSAU && VerifyMemorySSA) 1008 MSSAU->getMemorySSA()->verifyMemorySSA(); 1009 ++NumMemSet; 1010 return true; 1011 } 1012 1013 /// If the stored value is a strided load in the same loop with the same stride 1014 /// this may be transformable into a memcpy. This kicks in for stuff like 1015 /// for (i) A[i] = B[i]; 1016 bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI, 1017 const SCEV *BECount) { 1018 assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores."); 1019 1020 Value *StorePtr = SI->getPointerOperand(); 1021 const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr)); 1022 APInt Stride = getStoreStride(StoreEv); 1023 unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType()); 1024 bool NegStride = StoreSize == -Stride; 1025 1026 // The store must be feeding a non-volatile load. 1027 LoadInst *LI = cast<LoadInst>(SI->getValueOperand()); 1028 assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads."); 1029 1030 // See if the pointer expression is an AddRec like {base,+,1} on the current 1031 // loop, which indicates a strided load. If we have something else, it's a 1032 // random load we can't handle. 1033 const SCEVAddRecExpr *LoadEv = 1034 cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand())); 1035 1036 // The trip count of the loop and the base pointer of the addrec SCEV is 1037 // guaranteed to be loop invariant, which means that it should dominate the 1038 // header. This allows us to insert code for it in the preheader. 1039 BasicBlock *Preheader = CurLoop->getLoopPreheader(); 1040 IRBuilder<> Builder(Preheader->getTerminator()); 1041 SCEVExpander Expander(*SE, *DL, "loop-idiom"); 1042 1043 const SCEV *StrStart = StoreEv->getStart(); 1044 unsigned StrAS = SI->getPointerAddressSpace(); 1045 Type *IntIdxTy = Builder.getIntNTy(DL->getIndexSizeInBits(StrAS)); 1046 1047 // Handle negative strided loops. 1048 if (NegStride) 1049 StrStart = getStartForNegStride(StrStart, BECount, IntIdxTy, StoreSize, SE); 1050 1051 // Okay, we have a strided store "p[i]" of a loaded value. We can turn 1052 // this into a memcpy in the loop preheader now if we want. However, this 1053 // would be unsafe to do if there is anything else in the loop that may read 1054 // or write the memory region we're storing to. This includes the load that 1055 // feeds the stores. Check for an alias by generating the base address and 1056 // checking everything. 1057 Value *StoreBasePtr = Expander.expandCodeFor( 1058 StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator()); 1059 1060 SmallPtrSet<Instruction *, 1> Stores; 1061 Stores.insert(SI); 1062 if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount, 1063 StoreSize, *AA, Stores)) { 1064 Expander.clear(); 1065 // If we generated new code for the base pointer, clean up. 1066 RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI); 1067 return false; 1068 } 1069 1070 const SCEV *LdStart = LoadEv->getStart(); 1071 unsigned LdAS = LI->getPointerAddressSpace(); 1072 1073 // Handle negative strided loops. 1074 if (NegStride) 1075 LdStart = getStartForNegStride(LdStart, BECount, IntIdxTy, StoreSize, SE); 1076 1077 // For a memcpy, we have to make sure that the input array is not being 1078 // mutated by the loop. 1079 Value *LoadBasePtr = Expander.expandCodeFor( 1080 LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator()); 1081 1082 if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount, 1083 StoreSize, *AA, Stores)) { 1084 Expander.clear(); 1085 // If we generated new code for the base pointer, clean up. 1086 RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI); 1087 RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI); 1088 return false; 1089 } 1090 1091 if (avoidLIRForMultiBlockLoop()) 1092 return false; 1093 1094 // Okay, everything is safe, we can transform this! 1095 1096 const SCEV *NumBytesS = 1097 getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE); 1098 1099 Value *NumBytes = 1100 Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator()); 1101 1102 CallInst *NewCall = nullptr; 1103 // Check whether to generate an unordered atomic memcpy: 1104 // If the load or store are atomic, then they must necessarily be unordered 1105 // by previous checks. 1106 if (!SI->isAtomic() && !LI->isAtomic()) 1107 NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlign(), LoadBasePtr, 1108 LI->getAlign(), NumBytes); 1109 else { 1110 // We cannot allow unaligned ops for unordered load/store, so reject 1111 // anything where the alignment isn't at least the element size. 1112 const Align StoreAlign = SI->getAlign(); 1113 const Align LoadAlign = LI->getAlign(); 1114 if (StoreAlign < StoreSize || LoadAlign < StoreSize) 1115 return false; 1116 1117 // If the element.atomic memcpy is not lowered into explicit 1118 // loads/stores later, then it will be lowered into an element-size 1119 // specific lib call. If the lib call doesn't exist for our store size, then 1120 // we shouldn't generate the memcpy. 1121 if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize()) 1122 return false; 1123 1124 // Create the call. 1125 // Note that unordered atomic loads/stores are *required* by the spec to 1126 // have an alignment but non-atomic loads/stores may not. 1127 NewCall = Builder.CreateElementUnorderedAtomicMemCpy( 1128 StoreBasePtr, StoreAlign, LoadBasePtr, LoadAlign, NumBytes, 1129 StoreSize); 1130 } 1131 NewCall->setDebugLoc(SI->getDebugLoc()); 1132 1133 if (MSSAU) { 1134 MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB( 1135 NewCall, nullptr, NewCall->getParent(), MemorySSA::BeforeTerminator); 1136 MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true); 1137 } 1138 1139 LLVM_DEBUG(dbgs() << " Formed memcpy: " << *NewCall << "\n" 1140 << " from load ptr=" << *LoadEv << " at: " << *LI << "\n" 1141 << " from store ptr=" << *StoreEv << " at: " << *SI 1142 << "\n"); 1143 1144 ORE.emit([&]() { 1145 return OptimizationRemark(DEBUG_TYPE, "ProcessLoopStoreOfLoopLoad", 1146 NewCall->getDebugLoc(), Preheader) 1147 << "Formed a call to " 1148 << ore::NV("NewFunction", NewCall->getCalledFunction()) 1149 << "() function"; 1150 }); 1151 1152 // Okay, the memcpy has been formed. Zap the original store and anything that 1153 // feeds into it. 1154 if (MSSAU) 1155 MSSAU->removeMemoryAccess(SI, true); 1156 deleteDeadInstruction(SI); 1157 if (MSSAU && VerifyMemorySSA) 1158 MSSAU->getMemorySSA()->verifyMemorySSA(); 1159 ++NumMemCpy; 1160 return true; 1161 } 1162 1163 // When compiling for codesize we avoid idiom recognition for a multi-block loop 1164 // unless it is a loop_memset idiom or a memset/memcpy idiom in a nested loop. 1165 // 1166 bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset, 1167 bool IsLoopMemset) { 1168 if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) { 1169 if (!CurLoop->getParentLoop() && (!IsMemset || !IsLoopMemset)) { 1170 LLVM_DEBUG(dbgs() << " " << CurLoop->getHeader()->getParent()->getName() 1171 << " : LIR " << (IsMemset ? "Memset" : "Memcpy") 1172 << " avoided: multi-block top-level loop\n"); 1173 return true; 1174 } 1175 } 1176 1177 return false; 1178 } 1179 1180 bool LoopIdiomRecognize::runOnNoncountableLoop() { 1181 LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F[" 1182 << CurLoop->getHeader()->getParent()->getName() 1183 << "] Noncountable Loop %" 1184 << CurLoop->getHeader()->getName() << "\n"); 1185 1186 return recognizePopcount() || recognizeAndInsertFFS(); 1187 } 1188 1189 /// Check if the given conditional branch is based on the comparison between 1190 /// a variable and zero, and if the variable is non-zero or zero (JmpOnZero is 1191 /// true), the control yields to the loop entry. If the branch matches the 1192 /// behavior, the variable involved in the comparison is returned. This function 1193 /// will be called to see if the precondition and postcondition of the loop are 1194 /// in desirable form. 1195 static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry, 1196 bool JmpOnZero = false) { 1197 if (!BI || !BI->isConditional()) 1198 return nullptr; 1199 1200 ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition()); 1201 if (!Cond) 1202 return nullptr; 1203 1204 ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1)); 1205 if (!CmpZero || !CmpZero->isZero()) 1206 return nullptr; 1207 1208 BasicBlock *TrueSucc = BI->getSuccessor(0); 1209 BasicBlock *FalseSucc = BI->getSuccessor(1); 1210 if (JmpOnZero) 1211 std::swap(TrueSucc, FalseSucc); 1212 1213 ICmpInst::Predicate Pred = Cond->getPredicate(); 1214 if ((Pred == ICmpInst::ICMP_NE && TrueSucc == LoopEntry) || 1215 (Pred == ICmpInst::ICMP_EQ && FalseSucc == LoopEntry)) 1216 return Cond->getOperand(0); 1217 1218 return nullptr; 1219 } 1220 1221 // Check if the recurrence variable `VarX` is in the right form to create 1222 // the idiom. Returns the value coerced to a PHINode if so. 1223 static PHINode *getRecurrenceVar(Value *VarX, Instruction *DefX, 1224 BasicBlock *LoopEntry) { 1225 auto *PhiX = dyn_cast<PHINode>(VarX); 1226 if (PhiX && PhiX->getParent() == LoopEntry && 1227 (PhiX->getOperand(0) == DefX || PhiX->getOperand(1) == DefX)) 1228 return PhiX; 1229 return nullptr; 1230 } 1231 1232 /// Return true iff the idiom is detected in the loop. 1233 /// 1234 /// Additionally: 1235 /// 1) \p CntInst is set to the instruction counting the population bit. 1236 /// 2) \p CntPhi is set to the corresponding phi node. 1237 /// 3) \p Var is set to the value whose population bits are being counted. 1238 /// 1239 /// The core idiom we are trying to detect is: 1240 /// \code 1241 /// if (x0 != 0) 1242 /// goto loop-exit // the precondition of the loop 1243 /// cnt0 = init-val; 1244 /// do { 1245 /// x1 = phi (x0, x2); 1246 /// cnt1 = phi(cnt0, cnt2); 1247 /// 1248 /// cnt2 = cnt1 + 1; 1249 /// ... 1250 /// x2 = x1 & (x1 - 1); 1251 /// ... 1252 /// } while(x != 0); 1253 /// 1254 /// loop-exit: 1255 /// \endcode 1256 static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB, 1257 Instruction *&CntInst, PHINode *&CntPhi, 1258 Value *&Var) { 1259 // step 1: Check to see if the look-back branch match this pattern: 1260 // "if (a!=0) goto loop-entry". 1261 BasicBlock *LoopEntry; 1262 Instruction *DefX2, *CountInst; 1263 Value *VarX1, *VarX0; 1264 PHINode *PhiX, *CountPhi; 1265 1266 DefX2 = CountInst = nullptr; 1267 VarX1 = VarX0 = nullptr; 1268 PhiX = CountPhi = nullptr; 1269 LoopEntry = *(CurLoop->block_begin()); 1270 1271 // step 1: Check if the loop-back branch is in desirable form. 1272 { 1273 if (Value *T = matchCondition( 1274 dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry)) 1275 DefX2 = dyn_cast<Instruction>(T); 1276 else 1277 return false; 1278 } 1279 1280 // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)" 1281 { 1282 if (!DefX2 || DefX2->getOpcode() != Instruction::And) 1283 return false; 1284 1285 BinaryOperator *SubOneOp; 1286 1287 if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0)))) 1288 VarX1 = DefX2->getOperand(1); 1289 else { 1290 VarX1 = DefX2->getOperand(0); 1291 SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1)); 1292 } 1293 if (!SubOneOp || SubOneOp->getOperand(0) != VarX1) 1294 return false; 1295 1296 ConstantInt *Dec = dyn_cast<ConstantInt>(SubOneOp->getOperand(1)); 1297 if (!Dec || 1298 !((SubOneOp->getOpcode() == Instruction::Sub && Dec->isOne()) || 1299 (SubOneOp->getOpcode() == Instruction::Add && 1300 Dec->isMinusOne()))) { 1301 return false; 1302 } 1303 } 1304 1305 // step 3: Check the recurrence of variable X 1306 PhiX = getRecurrenceVar(VarX1, DefX2, LoopEntry); 1307 if (!PhiX) 1308 return false; 1309 1310 // step 4: Find the instruction which count the population: cnt2 = cnt1 + 1 1311 { 1312 CountInst = nullptr; 1313 for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(), 1314 IterE = LoopEntry->end(); 1315 Iter != IterE; Iter++) { 1316 Instruction *Inst = &*Iter; 1317 if (Inst->getOpcode() != Instruction::Add) 1318 continue; 1319 1320 ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1)); 1321 if (!Inc || !Inc->isOne()) 1322 continue; 1323 1324 PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry); 1325 if (!Phi) 1326 continue; 1327 1328 // Check if the result of the instruction is live of the loop. 1329 bool LiveOutLoop = false; 1330 for (User *U : Inst->users()) { 1331 if ((cast<Instruction>(U))->getParent() != LoopEntry) { 1332 LiveOutLoop = true; 1333 break; 1334 } 1335 } 1336 1337 if (LiveOutLoop) { 1338 CountInst = Inst; 1339 CountPhi = Phi; 1340 break; 1341 } 1342 } 1343 1344 if (!CountInst) 1345 return false; 1346 } 1347 1348 // step 5: check if the precondition is in this form: 1349 // "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;" 1350 { 1351 auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator()); 1352 Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader()); 1353 if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1)) 1354 return false; 1355 1356 CntInst = CountInst; 1357 CntPhi = CountPhi; 1358 Var = T; 1359 } 1360 1361 return true; 1362 } 1363 1364 /// Return true if the idiom is detected in the loop. 1365 /// 1366 /// Additionally: 1367 /// 1) \p CntInst is set to the instruction Counting Leading Zeros (CTLZ) 1368 /// or nullptr if there is no such. 1369 /// 2) \p CntPhi is set to the corresponding phi node 1370 /// or nullptr if there is no such. 1371 /// 3) \p Var is set to the value whose CTLZ could be used. 1372 /// 4) \p DefX is set to the instruction calculating Loop exit condition. 1373 /// 1374 /// The core idiom we are trying to detect is: 1375 /// \code 1376 /// if (x0 == 0) 1377 /// goto loop-exit // the precondition of the loop 1378 /// cnt0 = init-val; 1379 /// do { 1380 /// x = phi (x0, x.next); //PhiX 1381 /// cnt = phi(cnt0, cnt.next); 1382 /// 1383 /// cnt.next = cnt + 1; 1384 /// ... 1385 /// x.next = x >> 1; // DefX 1386 /// ... 1387 /// } while(x.next != 0); 1388 /// 1389 /// loop-exit: 1390 /// \endcode 1391 static bool detectShiftUntilZeroIdiom(Loop *CurLoop, const DataLayout &DL, 1392 Intrinsic::ID &IntrinID, Value *&InitX, 1393 Instruction *&CntInst, PHINode *&CntPhi, 1394 Instruction *&DefX) { 1395 BasicBlock *LoopEntry; 1396 Value *VarX = nullptr; 1397 1398 DefX = nullptr; 1399 CntInst = nullptr; 1400 CntPhi = nullptr; 1401 LoopEntry = *(CurLoop->block_begin()); 1402 1403 // step 1: Check if the loop-back branch is in desirable form. 1404 if (Value *T = matchCondition( 1405 dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry)) 1406 DefX = dyn_cast<Instruction>(T); 1407 else 1408 return false; 1409 1410 // step 2: detect instructions corresponding to "x.next = x >> 1 or x << 1" 1411 if (!DefX || !DefX->isShift()) 1412 return false; 1413 IntrinID = DefX->getOpcode() == Instruction::Shl ? Intrinsic::cttz : 1414 Intrinsic::ctlz; 1415 ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1)); 1416 if (!Shft || !Shft->isOne()) 1417 return false; 1418 VarX = DefX->getOperand(0); 1419 1420 // step 3: Check the recurrence of variable X 1421 PHINode *PhiX = getRecurrenceVar(VarX, DefX, LoopEntry); 1422 if (!PhiX) 1423 return false; 1424 1425 InitX = PhiX->getIncomingValueForBlock(CurLoop->getLoopPreheader()); 1426 1427 // Make sure the initial value can't be negative otherwise the ashr in the 1428 // loop might never reach zero which would make the loop infinite. 1429 if (DefX->getOpcode() == Instruction::AShr && !isKnownNonNegative(InitX, DL)) 1430 return false; 1431 1432 // step 4: Find the instruction which count the CTLZ: cnt.next = cnt + 1 1433 // TODO: We can skip the step. If loop trip count is known (CTLZ), 1434 // then all uses of "cnt.next" could be optimized to the trip count 1435 // plus "cnt0". Currently it is not optimized. 1436 // This step could be used to detect POPCNT instruction: 1437 // cnt.next = cnt + (x.next & 1) 1438 for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(), 1439 IterE = LoopEntry->end(); 1440 Iter != IterE; Iter++) { 1441 Instruction *Inst = &*Iter; 1442 if (Inst->getOpcode() != Instruction::Add) 1443 continue; 1444 1445 ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1)); 1446 if (!Inc || !Inc->isOne()) 1447 continue; 1448 1449 PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry); 1450 if (!Phi) 1451 continue; 1452 1453 CntInst = Inst; 1454 CntPhi = Phi; 1455 break; 1456 } 1457 if (!CntInst) 1458 return false; 1459 1460 return true; 1461 } 1462 1463 /// Recognize CTLZ or CTTZ idiom in a non-countable loop and convert the loop 1464 /// to countable (with CTLZ / CTTZ trip count). If CTLZ / CTTZ inserted as a new 1465 /// trip count returns true; otherwise, returns false. 1466 bool LoopIdiomRecognize::recognizeAndInsertFFS() { 1467 // Give up if the loop has multiple blocks or multiple backedges. 1468 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1) 1469 return false; 1470 1471 Intrinsic::ID IntrinID; 1472 Value *InitX; 1473 Instruction *DefX = nullptr; 1474 PHINode *CntPhi = nullptr; 1475 Instruction *CntInst = nullptr; 1476 // Help decide if transformation is profitable. For ShiftUntilZero idiom, 1477 // this is always 6. 1478 size_t IdiomCanonicalSize = 6; 1479 1480 if (!detectShiftUntilZeroIdiom(CurLoop, *DL, IntrinID, InitX, 1481 CntInst, CntPhi, DefX)) 1482 return false; 1483 1484 bool IsCntPhiUsedOutsideLoop = false; 1485 for (User *U : CntPhi->users()) 1486 if (!CurLoop->contains(cast<Instruction>(U))) { 1487 IsCntPhiUsedOutsideLoop = true; 1488 break; 1489 } 1490 bool IsCntInstUsedOutsideLoop = false; 1491 for (User *U : CntInst->users()) 1492 if (!CurLoop->contains(cast<Instruction>(U))) { 1493 IsCntInstUsedOutsideLoop = true; 1494 break; 1495 } 1496 // If both CntInst and CntPhi are used outside the loop the profitability 1497 // is questionable. 1498 if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop) 1499 return false; 1500 1501 // For some CPUs result of CTLZ(X) intrinsic is undefined 1502 // when X is 0. If we can not guarantee X != 0, we need to check this 1503 // when expand. 1504 bool ZeroCheck = false; 1505 // It is safe to assume Preheader exist as it was checked in 1506 // parent function RunOnLoop. 1507 BasicBlock *PH = CurLoop->getLoopPreheader(); 1508 1509 // If we are using the count instruction outside the loop, make sure we 1510 // have a zero check as a precondition. Without the check the loop would run 1511 // one iteration for before any check of the input value. This means 0 and 1 1512 // would have identical behavior in the original loop and thus 1513 if (!IsCntPhiUsedOutsideLoop) { 1514 auto *PreCondBB = PH->getSinglePredecessor(); 1515 if (!PreCondBB) 1516 return false; 1517 auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator()); 1518 if (!PreCondBI) 1519 return false; 1520 if (matchCondition(PreCondBI, PH) != InitX) 1521 return false; 1522 ZeroCheck = true; 1523 } 1524 1525 // Check if CTLZ / CTTZ intrinsic is profitable. Assume it is always 1526 // profitable if we delete the loop. 1527 1528 // the loop has only 6 instructions: 1529 // %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ] 1530 // %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ] 1531 // %shr = ashr %n.addr.0, 1 1532 // %tobool = icmp eq %shr, 0 1533 // %inc = add nsw %i.0, 1 1534 // br i1 %tobool 1535 1536 Value *Args[] = 1537 {InitX, ZeroCheck ? ConstantInt::getTrue(InitX->getContext()) 1538 : ConstantInt::getFalse(InitX->getContext())}; 1539 1540 // @llvm.dbg doesn't count as they have no semantic effect. 1541 auto InstWithoutDebugIt = CurLoop->getHeader()->instructionsWithoutDebug(); 1542 uint32_t HeaderSize = 1543 std::distance(InstWithoutDebugIt.begin(), InstWithoutDebugIt.end()); 1544 1545 IntrinsicCostAttributes Attrs(IntrinID, InitX->getType(), Args); 1546 int Cost = 1547 TTI->getIntrinsicInstrCost(Attrs, TargetTransformInfo::TCK_SizeAndLatency); 1548 if (HeaderSize != IdiomCanonicalSize && 1549 Cost > TargetTransformInfo::TCC_Basic) 1550 return false; 1551 1552 transformLoopToCountable(IntrinID, PH, CntInst, CntPhi, InitX, DefX, 1553 DefX->getDebugLoc(), ZeroCheck, 1554 IsCntPhiUsedOutsideLoop); 1555 return true; 1556 } 1557 1558 /// Recognizes a population count idiom in a non-countable loop. 1559 /// 1560 /// If detected, transforms the relevant code to issue the popcount intrinsic 1561 /// function call, and returns true; otherwise, returns false. 1562 bool LoopIdiomRecognize::recognizePopcount() { 1563 if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware) 1564 return false; 1565 1566 // Counting population are usually conducted by few arithmetic instructions. 1567 // Such instructions can be easily "absorbed" by vacant slots in a 1568 // non-compact loop. Therefore, recognizing popcount idiom only makes sense 1569 // in a compact loop. 1570 1571 // Give up if the loop has multiple blocks or multiple backedges. 1572 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1) 1573 return false; 1574 1575 BasicBlock *LoopBody = *(CurLoop->block_begin()); 1576 if (LoopBody->size() >= 20) { 1577 // The loop is too big, bail out. 1578 return false; 1579 } 1580 1581 // It should have a preheader containing nothing but an unconditional branch. 1582 BasicBlock *PH = CurLoop->getLoopPreheader(); 1583 if (!PH || &PH->front() != PH->getTerminator()) 1584 return false; 1585 auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator()); 1586 if (!EntryBI || EntryBI->isConditional()) 1587 return false; 1588 1589 // It should have a precondition block where the generated popcount intrinsic 1590 // function can be inserted. 1591 auto *PreCondBB = PH->getSinglePredecessor(); 1592 if (!PreCondBB) 1593 return false; 1594 auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator()); 1595 if (!PreCondBI || PreCondBI->isUnconditional()) 1596 return false; 1597 1598 Instruction *CntInst; 1599 PHINode *CntPhi; 1600 Value *Val; 1601 if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val)) 1602 return false; 1603 1604 transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val); 1605 return true; 1606 } 1607 1608 static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val, 1609 const DebugLoc &DL) { 1610 Value *Ops[] = {Val}; 1611 Type *Tys[] = {Val->getType()}; 1612 1613 Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent(); 1614 Function *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys); 1615 CallInst *CI = IRBuilder.CreateCall(Func, Ops); 1616 CI->setDebugLoc(DL); 1617 1618 return CI; 1619 } 1620 1621 static CallInst *createFFSIntrinsic(IRBuilder<> &IRBuilder, Value *Val, 1622 const DebugLoc &DL, bool ZeroCheck, 1623 Intrinsic::ID IID) { 1624 Value *Ops[] = {Val, ZeroCheck ? IRBuilder.getTrue() : IRBuilder.getFalse()}; 1625 Type *Tys[] = {Val->getType()}; 1626 1627 Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent(); 1628 Function *Func = Intrinsic::getDeclaration(M, IID, Tys); 1629 CallInst *CI = IRBuilder.CreateCall(Func, Ops); 1630 CI->setDebugLoc(DL); 1631 1632 return CI; 1633 } 1634 1635 /// Transform the following loop (Using CTLZ, CTTZ is similar): 1636 /// loop: 1637 /// CntPhi = PHI [Cnt0, CntInst] 1638 /// PhiX = PHI [InitX, DefX] 1639 /// CntInst = CntPhi + 1 1640 /// DefX = PhiX >> 1 1641 /// LOOP_BODY 1642 /// Br: loop if (DefX != 0) 1643 /// Use(CntPhi) or Use(CntInst) 1644 /// 1645 /// Into: 1646 /// If CntPhi used outside the loop: 1647 /// CountPrev = BitWidth(InitX) - CTLZ(InitX >> 1) 1648 /// Count = CountPrev + 1 1649 /// else 1650 /// Count = BitWidth(InitX) - CTLZ(InitX) 1651 /// loop: 1652 /// CntPhi = PHI [Cnt0, CntInst] 1653 /// PhiX = PHI [InitX, DefX] 1654 /// PhiCount = PHI [Count, Dec] 1655 /// CntInst = CntPhi + 1 1656 /// DefX = PhiX >> 1 1657 /// Dec = PhiCount - 1 1658 /// LOOP_BODY 1659 /// Br: loop if (Dec != 0) 1660 /// Use(CountPrev + Cnt0) // Use(CntPhi) 1661 /// or 1662 /// Use(Count + Cnt0) // Use(CntInst) 1663 /// 1664 /// If LOOP_BODY is empty the loop will be deleted. 1665 /// If CntInst and DefX are not used in LOOP_BODY they will be removed. 1666 void LoopIdiomRecognize::transformLoopToCountable( 1667 Intrinsic::ID IntrinID, BasicBlock *Preheader, Instruction *CntInst, 1668 PHINode *CntPhi, Value *InitX, Instruction *DefX, const DebugLoc &DL, 1669 bool ZeroCheck, bool IsCntPhiUsedOutsideLoop) { 1670 BranchInst *PreheaderBr = cast<BranchInst>(Preheader->getTerminator()); 1671 1672 // Step 1: Insert the CTLZ/CTTZ instruction at the end of the preheader block 1673 IRBuilder<> Builder(PreheaderBr); 1674 Builder.SetCurrentDebugLocation(DL); 1675 Value *FFS, *Count, *CountPrev, *NewCount, *InitXNext; 1676 1677 // Count = BitWidth - CTLZ(InitX); 1678 // If there are uses of CntPhi create: 1679 // CountPrev = BitWidth - CTLZ(InitX >> 1); 1680 if (IsCntPhiUsedOutsideLoop) { 1681 if (DefX->getOpcode() == Instruction::AShr) 1682 InitXNext = 1683 Builder.CreateAShr(InitX, ConstantInt::get(InitX->getType(), 1)); 1684 else if (DefX->getOpcode() == Instruction::LShr) 1685 InitXNext = 1686 Builder.CreateLShr(InitX, ConstantInt::get(InitX->getType(), 1)); 1687 else if (DefX->getOpcode() == Instruction::Shl) // cttz 1688 InitXNext = 1689 Builder.CreateShl(InitX, ConstantInt::get(InitX->getType(), 1)); 1690 else 1691 llvm_unreachable("Unexpected opcode!"); 1692 } else 1693 InitXNext = InitX; 1694 FFS = createFFSIntrinsic(Builder, InitXNext, DL, ZeroCheck, IntrinID); 1695 Count = Builder.CreateSub( 1696 ConstantInt::get(FFS->getType(), 1697 FFS->getType()->getIntegerBitWidth()), 1698 FFS); 1699 if (IsCntPhiUsedOutsideLoop) { 1700 CountPrev = Count; 1701 Count = Builder.CreateAdd( 1702 CountPrev, 1703 ConstantInt::get(CountPrev->getType(), 1)); 1704 } 1705 1706 NewCount = Builder.CreateZExtOrTrunc( 1707 IsCntPhiUsedOutsideLoop ? CountPrev : Count, 1708 cast<IntegerType>(CntInst->getType())); 1709 1710 // If the counter's initial value is not zero, insert Add Inst. 1711 Value *CntInitVal = CntPhi->getIncomingValueForBlock(Preheader); 1712 ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal); 1713 if (!InitConst || !InitConst->isZero()) 1714 NewCount = Builder.CreateAdd(NewCount, CntInitVal); 1715 1716 // Step 2: Insert new IV and loop condition: 1717 // loop: 1718 // ... 1719 // PhiCount = PHI [Count, Dec] 1720 // ... 1721 // Dec = PhiCount - 1 1722 // ... 1723 // Br: loop if (Dec != 0) 1724 BasicBlock *Body = *(CurLoop->block_begin()); 1725 auto *LbBr = cast<BranchInst>(Body->getTerminator()); 1726 ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition()); 1727 Type *Ty = Count->getType(); 1728 1729 PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front()); 1730 1731 Builder.SetInsertPoint(LbCond); 1732 Instruction *TcDec = cast<Instruction>( 1733 Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1), 1734 "tcdec", false, true)); 1735 1736 TcPhi->addIncoming(Count, Preheader); 1737 TcPhi->addIncoming(TcDec, Body); 1738 1739 CmpInst::Predicate Pred = 1740 (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ; 1741 LbCond->setPredicate(Pred); 1742 LbCond->setOperand(0, TcDec); 1743 LbCond->setOperand(1, ConstantInt::get(Ty, 0)); 1744 1745 // Step 3: All the references to the original counter outside 1746 // the loop are replaced with the NewCount 1747 if (IsCntPhiUsedOutsideLoop) 1748 CntPhi->replaceUsesOutsideBlock(NewCount, Body); 1749 else 1750 CntInst->replaceUsesOutsideBlock(NewCount, Body); 1751 1752 // step 4: Forget the "non-computable" trip-count SCEV associated with the 1753 // loop. The loop would otherwise not be deleted even if it becomes empty. 1754 SE->forgetLoop(CurLoop); 1755 } 1756 1757 void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB, 1758 Instruction *CntInst, 1759 PHINode *CntPhi, Value *Var) { 1760 BasicBlock *PreHead = CurLoop->getLoopPreheader(); 1761 auto *PreCondBr = cast<BranchInst>(PreCondBB->getTerminator()); 1762 const DebugLoc &DL = CntInst->getDebugLoc(); 1763 1764 // Assuming before transformation, the loop is following: 1765 // if (x) // the precondition 1766 // do { cnt++; x &= x - 1; } while(x); 1767 1768 // Step 1: Insert the ctpop instruction at the end of the precondition block 1769 IRBuilder<> Builder(PreCondBr); 1770 Value *PopCnt, *PopCntZext, *NewCount, *TripCnt; 1771 { 1772 PopCnt = createPopcntIntrinsic(Builder, Var, DL); 1773 NewCount = PopCntZext = 1774 Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType())); 1775 1776 if (NewCount != PopCnt) 1777 (cast<Instruction>(NewCount))->setDebugLoc(DL); 1778 1779 // TripCnt is exactly the number of iterations the loop has 1780 TripCnt = NewCount; 1781 1782 // If the population counter's initial value is not zero, insert Add Inst. 1783 Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead); 1784 ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal); 1785 if (!InitConst || !InitConst->isZero()) { 1786 NewCount = Builder.CreateAdd(NewCount, CntInitVal); 1787 (cast<Instruction>(NewCount))->setDebugLoc(DL); 1788 } 1789 } 1790 1791 // Step 2: Replace the precondition from "if (x == 0) goto loop-exit" to 1792 // "if (NewCount == 0) loop-exit". Without this change, the intrinsic 1793 // function would be partial dead code, and downstream passes will drag 1794 // it back from the precondition block to the preheader. 1795 { 1796 ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition()); 1797 1798 Value *Opnd0 = PopCntZext; 1799 Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0); 1800 if (PreCond->getOperand(0) != Var) 1801 std::swap(Opnd0, Opnd1); 1802 1803 ICmpInst *NewPreCond = cast<ICmpInst>( 1804 Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1)); 1805 PreCondBr->setCondition(NewPreCond); 1806 1807 RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI); 1808 } 1809 1810 // Step 3: Note that the population count is exactly the trip count of the 1811 // loop in question, which enable us to convert the loop from noncountable 1812 // loop into a countable one. The benefit is twofold: 1813 // 1814 // - If the loop only counts population, the entire loop becomes dead after 1815 // the transformation. It is a lot easier to prove a countable loop dead 1816 // than to prove a noncountable one. (In some C dialects, an infinite loop 1817 // isn't dead even if it computes nothing useful. In general, DCE needs 1818 // to prove a noncountable loop finite before safely delete it.) 1819 // 1820 // - If the loop also performs something else, it remains alive. 1821 // Since it is transformed to countable form, it can be aggressively 1822 // optimized by some optimizations which are in general not applicable 1823 // to a noncountable loop. 1824 // 1825 // After this step, this loop (conceptually) would look like following: 1826 // newcnt = __builtin_ctpop(x); 1827 // t = newcnt; 1828 // if (x) 1829 // do { cnt++; x &= x-1; t--) } while (t > 0); 1830 BasicBlock *Body = *(CurLoop->block_begin()); 1831 { 1832 auto *LbBr = cast<BranchInst>(Body->getTerminator()); 1833 ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition()); 1834 Type *Ty = TripCnt->getType(); 1835 1836 PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front()); 1837 1838 Builder.SetInsertPoint(LbCond); 1839 Instruction *TcDec = cast<Instruction>( 1840 Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1), 1841 "tcdec", false, true)); 1842 1843 TcPhi->addIncoming(TripCnt, PreHead); 1844 TcPhi->addIncoming(TcDec, Body); 1845 1846 CmpInst::Predicate Pred = 1847 (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE; 1848 LbCond->setPredicate(Pred); 1849 LbCond->setOperand(0, TcDec); 1850 LbCond->setOperand(1, ConstantInt::get(Ty, 0)); 1851 } 1852 1853 // Step 4: All the references to the original population counter outside 1854 // the loop are replaced with the NewCount -- the value returned from 1855 // __builtin_ctpop(). 1856 CntInst->replaceUsesOutsideBlock(NewCount, Body); 1857 1858 // step 5: Forget the "non-computable" trip-count SCEV associated with the 1859 // loop. The loop would otherwise not be deleted even if it becomes empty. 1860 SE->forgetLoop(CurLoop); 1861 } 1862