//===- LoopIdiomRecognize.cpp - Loop idiom recognition --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form. In cases where this kicks in, it can be a significant
// performance win.
//
// If compiling for code size we avoid idiom recognition if the resulting
// code could be larger than the code for the original loop. One way this could
// happen is if the loop is not removable after idiom recognition due to the
// presence of non-idiom instructions. The initial implementation of the
// heuristics applies to idioms in multi-block loops.
//
//===----------------------------------------------------------------------===//
//
// TODO List:
//
// Future loop memory idioms to recognize:
//   memcmp, memmove, strlen, etc.
// Future floating point idioms to recognize in -ffast-math mode:
//   fpowi
// Future integer operation idioms to recognize:
//   ctpop
//
// Beware that isel's default lowering for ctpop is highly inefficient for
// i64 and larger types when i64 is legal and the value has few bits set. It
// would be good to enhance isel to emit a loop for ctpop in this case.
//
// This could recognize common matrix multiplies and dot product idioms and
// replace them with calls to BLAS (if linked in??).
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopIdiomRecognize.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "loop-idiom"

STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");

static cl::opt<bool> UseLIRCodeSizeHeurs(
    "use-lir-code-size-heurs",
    cl::desc("Use loop idiom recognition code size heuristics when compiling "
             "with -Os/-Oz"),
    cl::init(true), cl::Hidden);

namespace {

class LoopIdiomRecognize {
  Loop *CurLoop = nullptr;
  AliasAnalysis *AA;
  DominatorTree *DT;
  LoopInfo *LI;
  ScalarEvolution *SE;
  TargetLibraryInfo *TLI;
  const TargetTransformInfo *TTI;
  const DataLayout *DL;
  OptimizationRemarkEmitter &ORE;
  bool ApplyCodeSizeHeuristics;

public:
  explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT,
                              LoopInfo *LI, ScalarEvolution *SE,
                              TargetLibraryInfo *TLI,
                              const TargetTransformInfo *TTI,
                              const DataLayout *DL,
                              OptimizationRemarkEmitter &ORE)
      : AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL), ORE(ORE) {}

  bool runOnLoop(Loop *L);

private:
  using StoreList = SmallVector<StoreInst *, 8>;
  using StoreListMap = MapVector<Value *, StoreList>;

  StoreListMap StoreRefsForMemset;
  StoreListMap StoreRefsForMemsetPattern;
  StoreList StoreRefsForMemcpy;
  bool HasMemset;
  bool HasMemsetPattern;
  bool HasMemcpy;

  /// Return code for isLegalStore()
  enum LegalStoreKind {
    None = 0,
    Memset,
    MemsetPattern,
    Memcpy,
    UnorderedAtomicMemcpy,
    DontUse // Dummy retval never to be used. Allows catching errors in retval
            // handling.
  };

  /// \name Countable Loop Idiom Handling
  /// @{

  bool runOnCountableLoop();
  bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                      SmallVectorImpl<BasicBlock *> &ExitBlocks);

  void collectStores(BasicBlock *BB);
  LegalStoreKind isLegalStore(StoreInst *SI);
  enum class ForMemset { No, Yes };
  bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
                         ForMemset For);
  bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);

  bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
                               MaybeAlign StoreAlignment, Value *StoredVal,
                               Instruction *TheStore,
                               SmallPtrSetImpl<Instruction *> &Stores,
                               const SCEVAddRecExpr *Ev, const SCEV *BECount,
                               bool NegStride, bool IsLoopMemset = false);
  bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);
  bool avoidLIRForMultiBlockLoop(bool IsMemset = false,
                                 bool IsLoopMemset = false);

  /// @}
  /// \name Noncountable Loop Idiom Handling
  /// @{

  bool runOnNoncountableLoop();

  bool recognizePopcount();
  void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
                               PHINode *CntPhi, Value *Var);
  bool recognizeAndInsertFFS(); /// Find First Set: ctlz or cttz
  void transformLoopToCountable(Intrinsic::ID IntrinID, BasicBlock *PreCondBB,
                                Instruction *CntInst, PHINode *CntPhi,
                                Value *Var, Instruction *DefX,
                                const DebugLoc &DL, bool ZeroCheck,
                                bool IsCntPhiUsedOutsideLoop);

  /// @}
};

class LoopIdiomRecognizeLegacyPass : public LoopPass {
public:
  static char ID;

  explicit LoopIdiomRecognizeLegacyPass() : LoopPass(ID) {
    initializeLoopIdiomRecognizeLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
            *L->getHeader()->getParent());
    const TargetTransformInfo *TTI =
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
            *L->getHeader()->getParent());
    const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout();

    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass. Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());

    LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, DL, ORE);
    return LIR.runOnLoop(L);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char LoopIdiomRecognizeLegacyPass::ID = 0;

PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
                                              LoopStandardAnalysisResults &AR,
                                              LPMUpdater &) {
  const auto *DL = &L.getHeader()->getModule()->getDataLayout();

  const auto &FAM =
      AM.getResult<FunctionAnalysisManagerLoopProxy>(L, AR).getManager();
  Function *F = L.getHeader()->getParent();

  auto *ORE = FAM.getCachedResult<OptimizationRemarkEmitterAnalysis>(*F);
  // FIXME: This should probably be optional rather than required.
  if (!ORE)
    report_fatal_error(
        "LoopIdiomRecognizePass: OptimizationRemarkEmitterAnalysis not cached "
        "at a higher level");

  LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI, DL,
                         *ORE);
  if (!LIR.runOnLoop(&L))
    return PreservedAnalyses::all();

  return getLoopPassPreservedAnalyses();
}

INITIALIZE_PASS_BEGIN(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                      "Recognize loop idioms", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                    "Recognize loop idioms", false, false)

Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognizeLegacyPass(); }

static void deleteDeadInstruction(Instruction *I) {
  I->replaceAllUsesWith(UndefValue::get(I->getType()));
  I->eraseFromParent();
}

//===----------------------------------------------------------------------===//
//
// Implementation of LoopIdiomRecognize
//
//===----------------------------------------------------------------------===//

bool LoopIdiomRecognize::runOnLoop(Loop *L) {
  CurLoop = L;
  // If the loop could not be converted to canonical form, it must have an
  // indirectbr in it, just give up.
  if (!L->getLoopPreheader())
    return false;

  // Disable loop idiom recognition if the function's name is a common idiom.
  StringRef Name = L->getHeader()->getParent()->getName();
  if (Name == "memset" || Name == "memcpy")
    return false;

  // Determine if code size heuristics need to be applied.
  ApplyCodeSizeHeuristics =
      L->getHeader()->getParent()->hasOptSize() && UseLIRCodeSizeHeurs;

  HasMemset = TLI->has(LibFunc_memset);
  HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
  HasMemcpy = TLI->has(LibFunc_memcpy);

  if (HasMemset || HasMemsetPattern || HasMemcpy)
    if (SE->hasLoopInvariantBackedgeTakenCount(L))
      return runOnCountableLoop();

  return runOnNoncountableLoop();
}

bool LoopIdiomRecognize::runOnCountableLoop() {
  const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
  assert(!isa<SCEVCouldNotCompute>(BECount) &&
         "runOnCountableLoop() called on a loop without a predictable "
         "backedge-taken count");

  // If this loop executes exactly one time, then it should be peeled, not
  // optimized by this pass.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    if (BECst->getAPInt() == 0)
      return false;

  SmallVector<BasicBlock *, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F["
                    << CurLoop->getHeader()->getParent()->getName()
                    << "] Countable Loop %" << CurLoop->getHeader()->getName()
                    << "\n");

  bool MadeChange = false;

  // The following transforms hoist stores/memsets into the loop pre-header.
  // Give up if the loop has instructions that may throw.
  SimpleLoopSafetyInfo SafetyInfo;
  SafetyInfo.computeLoopSafetyInfo(CurLoop);
  if (SafetyInfo.anyBlockMayThrow())
    return MadeChange;

  // Scan all the blocks in the loop that are not in subloops.
  for (auto *BB : CurLoop->getBlocks()) {
    // Ignore blocks in subloops.
    if (LI->getLoopFor(BB) != CurLoop)
      continue;

    MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
  }
  return MadeChange;
}

static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) {
  const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1));
  return ConstStride->getAPInt();
}

/// getMemSetPatternValue - If a strided store of the specified value is safe to
/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
/// be passed in. Otherwise, return null.
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
  // FIXME: This could check for UndefValue because it can be merged into any
  // other valid pattern.

  // If the value isn't a constant, we can't promote it to being in a constant
  // array. We could theoretically do a store to an alloca or something, but
  // that doesn't seem worthwhile.
  Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return nullptr;

  // Only handle simple values that are a power of two bytes in size.
  uint64_t Size = DL->getTypeSizeInBits(V->getType());
  if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
    return nullptr;

  // Don't care enough about darwin/ppc to implement this.
  if (DL->isBigEndian())
    return nullptr;

  // Convert to size in bytes.
  Size /= 8;

  // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
  // if the top and bottom are the same (e.g. for vectors and large integers).
  if (Size > 16)
    return nullptr;

  // If the constant is exactly 16 bytes, just use it.
  if (Size == 16)
    return C;

  // Otherwise, we'll use an array of the constants.
  unsigned ArraySize = 16 / Size;
  ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
  return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
}

LoopIdiomRecognize::LegalStoreKind
LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
  // Don't touch volatile stores.
  if (SI->isVolatile())
    return LegalStoreKind::None;
  // We only want simple or unordered-atomic stores.
  if (!SI->isUnordered())
    return LegalStoreKind::None;

  // Don't convert stores of non-integral pointer types to memsets (which store
  // integers).
  if (DL->isNonIntegralPointerType(SI->getValueOperand()->getType()))
    return LegalStoreKind::None;

  // Avoid merging nontemporal stores.
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return LegalStoreKind::None;

  Value *StoredVal = SI->getValueOperand();
  Value *StorePtr = SI->getPointerOperand();

  // Reject stores that are so large that they overflow an unsigned.
  uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
  if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
    return LegalStoreKind::None;

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *StoreEv =
      dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
    return LegalStoreKind::None;

  // Check to see if we have a constant stride.
  if (!isa<SCEVConstant>(StoreEv->getOperand(1)))
    return LegalStoreKind::None;

  // See if the store can be turned into a memset.

  // If the stored value is a byte-wise value (like i32 -1), then it may be
  // turned into a memset of i8 -1, assuming that all the consecutive bytes
  // are stored. A store of i32 0x01020304 can never be turned into a memset,
  // but it can be turned into memset_pattern if the target supports it.
  Value *SplatValue = isBytewiseValue(StoredVal, *DL);
  Constant *PatternValue = nullptr;

  // Note: memset and memset_pattern on unordered-atomic stores is not yet
  // supported.
  bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple();

  // If we're allowed to form a memset, and the stored value would be
  // acceptable for memset, use it.
  if (!UnorderedAtomic && HasMemset && SplatValue &&
      // Verify that the stored value is loop invariant. If not, we can't
      // promote the memset.
      CurLoop->isLoopInvariant(SplatValue)) {
    // It looks like we can use SplatValue.
    return LegalStoreKind::Memset;
  } else if (!UnorderedAtomic && HasMemsetPattern &&
             // Don't create memset_pattern16s with address spaces.
             StorePtr->getType()->getPointerAddressSpace() == 0 &&
             (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
    // It looks like we can use PatternValue!
    return LegalStoreKind::MemsetPattern;
  }

  // Otherwise, see if the store can be turned into a memcpy.
  if (HasMemcpy) {
    // Check to see if the stride matches the size of the store. If so, then we
    // know that every byte is touched in the loop.
    APInt Stride = getStoreStride(StoreEv);
    unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
    if (StoreSize != Stride && StoreSize != -Stride)
      return LegalStoreKind::None;

    // The store must be feeding a non-volatile load.
    LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());

    // Only allow non-volatile loads.
    if (!LI || LI->isVolatile())
      return LegalStoreKind::None;
    // Only allow simple or unordered-atomic loads.
    if (!LI->isUnordered())
      return LegalStoreKind::None;

    // See if the pointer expression is an AddRec like {base,+,1} on the current
    // loop, which indicates a strided load. If we have something else, it's a
    // random load we can't handle.
    const SCEVAddRecExpr *LoadEv =
        dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
    if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
      return LegalStoreKind::None;

    // The store and load must share the same stride.
    if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
      return LegalStoreKind::None;

    // Success. This store can be converted into a memcpy.
    UnorderedAtomic = UnorderedAtomic || LI->isAtomic();
    return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
                           : LegalStoreKind::Memcpy;
  }
  // This store can't be transformed into a memset/memcpy.
  return LegalStoreKind::None;
}

void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
  StoreRefsForMemset.clear();
  StoreRefsForMemsetPattern.clear();
  StoreRefsForMemcpy.clear();
  for (Instruction &I : *BB) {
    StoreInst *SI = dyn_cast<StoreInst>(&I);
    if (!SI)
      continue;

    // Make sure this is a strided store with a constant stride.
    switch (isLegalStore(SI)) {
    case LegalStoreKind::None:
      // Nothing to do
      break;
    case LegalStoreKind::Memset: {
      // Find the base pointer.
      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
      StoreRefsForMemset[Ptr].push_back(SI);
    } break;
    case LegalStoreKind::MemsetPattern: {
      // Find the base pointer.
      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
      StoreRefsForMemsetPattern[Ptr].push_back(SI);
    } break;
    case LegalStoreKind::Memcpy:
    case LegalStoreKind::UnorderedAtomicMemcpy:
      StoreRefsForMemcpy.push_back(SI);
      break;
    default:
      assert(false && "unhandled return value");
      break;
    }
  }
}

/// runOnLoopBlock - Process the specified block, which lives in a counted loop
/// with the specified backedge count. This block is known to be in the current
/// loop and not in any subloops.
bool LoopIdiomRecognize::runOnLoopBlock(
    BasicBlock *BB, const SCEV *BECount,
    SmallVectorImpl<BasicBlock *> &ExitBlocks) {
  // We can only promote stores in this block if they are unconditionally
  // executed in the loop. For a block to be unconditionally executed, it has
  // to dominate all the exit blocks of the loop. Verify this now.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (!DT->dominates(BB, ExitBlocks[i]))
      return false;

  bool MadeChange = false;
  // Look for store instructions, which may be optimized to memset/memcpy.
  collectStores(BB);

  // Look for a single store or sets of stores with a common base, which can be
  // optimized into a memset (memset_pattern). The latter most commonly happens
  // with structs and hand-unrolled loops.
  for (auto &SL : StoreRefsForMemset)
    MadeChange |= processLoopStores(SL.second, BECount, ForMemset::Yes);

  for (auto &SL : StoreRefsForMemsetPattern)
    MadeChange |= processLoopStores(SL.second, BECount, ForMemset::No);

  // Optimize the store into a memcpy, if it feeds a similarly strided load.
  for (auto &SI : StoreRefsForMemcpy)
    MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);

  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;
    // Look for memset instructions, which may be optimized to a larger memset.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
      WeakTrackingVH InstPtr(&*I);
      if (!processLoopMemSet(MSI, BECount))
        continue;
      MadeChange = true;

      // If processing the memset invalidated our iterator, start over from the
      // top of the block.
      if (!InstPtr)
        I = BB->begin();
      continue;
    }
  }

  return MadeChange;
}

/// See if this store, or set of stores, can be promoted to a memset.
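///
/// A minimal sketch of the kind of input this handles (hypothetical C-level
/// loop, not a specific test case): a hand-unrolled fill such as
/// \code
///   for (i = 0; i < n; i += 2) {
///     A[i] = 0;     // stride-2 store of a bytewise-splattable value
///     A[i + 1] = 0; // consecutive with the store above, same stride
///   }
/// \endcode
/// Each store alone does not cover its stride, but the chained pair does, so
/// the chain can be collapsed into one memset over the whole range.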
bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
                                           const SCEV *BECount, ForMemset For) {
  // Try to find consecutive stores that can be transformed into memsets.
  SetVector<StoreInst *> Heads, Tails;
  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  SmallVector<unsigned, 16> IndexQueue;
  for (unsigned i = 0, e = SL.size(); i < e; ++i) {
    assert(SL[i]->isSimple() && "Expected only non-volatile stores.");

    Value *FirstStoredVal = SL[i]->getValueOperand();
    Value *FirstStorePtr = SL[i]->getPointerOperand();
    const SCEVAddRecExpr *FirstStoreEv =
        cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr));
    APInt FirstStride = getStoreStride(FirstStoreEv);
    unsigned FirstStoreSize =
        DL->getTypeStoreSize(SL[i]->getValueOperand()->getType());

    // See if we can optimize just this store in isolation.
    if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
      Heads.insert(SL[i]);
      continue;
    }

    Value *FirstSplatValue = nullptr;
    Constant *FirstPatternValue = nullptr;

    if (For == ForMemset::Yes)
      FirstSplatValue = isBytewiseValue(FirstStoredVal, *DL);
    else
      FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL);

    assert((FirstSplatValue || FirstPatternValue) &&
           "Expected either splat value or pattern value.");

    IndexQueue.clear();
    // If a store has multiple consecutive store candidates, search the stores
    // array according to the sequence: from i+1 to e, then from i-1 to 0.
    // This is because pairing with the immediately succeeding or preceding
    // candidate usually creates the best chance to find a memset opportunity.
    unsigned j = 0;
    for (j = i + 1; j < e; ++j)
      IndexQueue.push_back(j);
    for (j = i; j > 0; --j)
      IndexQueue.push_back(j - 1);

    for (auto &k : IndexQueue) {
      assert(SL[k]->isSimple() && "Expected only non-volatile stores.");
      Value *SecondStorePtr = SL[k]->getPointerOperand();
      const SCEVAddRecExpr *SecondStoreEv =
          cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr));
      APInt SecondStride = getStoreStride(SecondStoreEv);

      if (FirstStride != SecondStride)
        continue;

      Value *SecondStoredVal = SL[k]->getValueOperand();
      Value *SecondSplatValue = nullptr;
      Constant *SecondPatternValue = nullptr;

      if (For == ForMemset::Yes)
        SecondSplatValue = isBytewiseValue(SecondStoredVal, *DL);
      else
        SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL);

      assert((SecondSplatValue || SecondPatternValue) &&
             "Expected either splat value or pattern value.");

      if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) {
        if (For == ForMemset::Yes) {
          if (isa<UndefValue>(FirstSplatValue))
            FirstSplatValue = SecondSplatValue;
          if (FirstSplatValue != SecondSplatValue)
            continue;
        } else {
          if (isa<UndefValue>(FirstPatternValue))
            FirstPatternValue = SecondPatternValue;
          if (FirstPatternValue != SecondPatternValue)
            continue;
        }
        Tails.insert(SL[k]);
        Heads.insert(SL[i]);
        ConsecutiveChain[SL[i]] = SL[k];
        break;
      }
    }
  }

  // We may run into multiple chains that merge into a single chain. We mark the
  // stores that we transformed so that we don't visit the same store twice.
  SmallPtrSet<Value *, 16> TransformedStores;
  bool Changed = false;

  // For stores that start but don't end a link in the chain:
  for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and try
    // to transform it.
    SmallPtrSet<Instruction *, 8> AdjacentStores;
    StoreInst *I = *it;

    StoreInst *HeadStore = I;
    unsigned StoreSize = 0;

    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (TransformedStores.count(I))
        break;
      AdjacentStores.insert(I);

      StoreSize += DL->getTypeStoreSize(I->getValueOperand()->getType());
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    Value *StoredVal = HeadStore->getValueOperand();
    Value *StorePtr = HeadStore->getPointerOperand();
    const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
    APInt Stride = getStoreStride(StoreEv);

    // Check to see if the stride matches the size of the stores. If so, then
    // we know that every byte is touched in the loop.
    if (StoreSize != Stride && StoreSize != -Stride)
      continue;

    bool NegStride = StoreSize == -Stride;

    if (processLoopStridedStore(StorePtr, StoreSize,
                                MaybeAlign(HeadStore->getAlignment()),
                                StoredVal, HeadStore, AdjacentStores, StoreEv,
                                BECount, NegStride)) {
      TransformedStores.insert(AdjacentStores.begin(), AdjacentStores.end());
      Changed = true;
    }
  }

  return Changed;
}

/// processLoopMemSet - See if this memset can be promoted to a large memset.
bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
                                           const SCEV *BECount) {
  // We can only handle non-volatile memsets with a constant size.
  if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
    return false;

  // If we're not allowed to hack on memset, we fail.
  if (!HasMemset)
    return false;

  Value *Pointer = MSI->getDest();

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
  if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
    return false;

  // Reject memsets that are so large that they overflow an unsigned.
  uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
  if ((SizeInBytes >> 32) != 0)
    return false;

  // Check to see if the stride matches the size of the memset. If so, then we
  // know that every byte is touched in the loop.
  const SCEVConstant *ConstStride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
  if (!ConstStride)
    return false;

  APInt Stride = ConstStride->getAPInt();
  if (SizeInBytes != Stride && SizeInBytes != -Stride)
    return false;

  // Verify that the memset value is loop invariant. If not, we can't promote
  // the memset.
  Value *SplatValue = MSI->getValue();
  if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue))
    return false;

  SmallPtrSet<Instruction *, 1> MSIs;
  MSIs.insert(MSI);
  bool NegStride = SizeInBytes == -Stride;
  return processLoopStridedStore(
      Pointer, (unsigned)SizeInBytes, MaybeAlign(MSI->getDestAlignment()),
      SplatValue, MSI, MSIs, Ev, BECount, NegStride, /*IsLoopMemset=*/true);
}

/// mayLoopAccessLocation - Return true if the specified loop might access the
/// specified pointer location, which is a loop-strided access. The 'Access'
/// argument specifies what the verboten forms of access are (read or write).
static bool
mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
                      const SCEV *BECount, unsigned StoreSize,
                      AliasAnalysis &AA,
                      SmallPtrSetImpl<Instruction *> &IgnoredStores) {
  // Get the location that may be stored across the loop. Since the access is
  // strided positively through memory, we say that the modified location starts
  // at the pointer and has infinite size.
  LocationSize AccessSize = LocationSize::unknown();

  // If the loop iterates a fixed number of times, we can refine the access size
  // to be exactly the size of the memset, which is (BECount+1)*StoreSize.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    AccessSize = LocationSize::precise((BECst->getValue()->getZExtValue() + 1) *
                                       StoreSize);

  // TODO: For this to be really effective, we have to dive into the pointer
  // operand in the store. A store to &A[i] over 100 iterations will always be
  // reported as may-alias with a store to &A[100]; we need StoreLoc to be "A"
  // with a size of 100, which will then no-alias a store to &A[100].
  MemoryLocation StoreLoc(Ptr, AccessSize);

  for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
       ++BI)
    for (Instruction &I : **BI)
      if (IgnoredStores.count(&I) == 0 &&
          isModOrRefSet(
              intersectModRef(AA.getModRefInfo(&I, StoreLoc), Access)))
        return true;

  return false;
}

// If we have a negative stride, Start refers to the end of the memory location
// we're trying to memset. Therefore, we need to recompute the base pointer,
// which is just Start - BECount*Size.
static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
                                        Type *IntPtr, unsigned StoreSize,
                                        ScalarEvolution *SE) {
  const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
  if (StoreSize != 1)
    Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize),
                           SCEV::FlagNUW);
  return SE->getMinusSCEV(Start, Index);
}

/// Compute the number of bytes as a SCEV from the backedge taken count.
///
/// This also maps the SCEV into the provided type and tries to handle the
/// computation in a way that will fold cleanly.
static const SCEV *getNumBytes(const SCEV *BECount, Type *IntPtr,
                               unsigned StoreSize, Loop *CurLoop,
                               const DataLayout *DL, ScalarEvolution *SE) {
  const SCEV *NumBytesS;
  // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
  // pointer size if it isn't already.
  //
  // If we're going to need to zero extend the BE count, check if we can add
  // one to it prior to zero extending without overflow. Provided this is safe,
  // it allows better simplification of the +1.
  if (DL->getTypeSizeInBits(BECount->getType()) <
          DL->getTypeSizeInBits(IntPtr) &&
      SE->isLoopEntryGuardedByCond(
          CurLoop, ICmpInst::ICMP_NE, BECount,
          SE->getNegativeSCEV(SE->getOne(BECount->getType())))) {
    NumBytesS = SE->getZeroExtendExpr(
        SE->getAddExpr(BECount, SE->getOne(BECount->getType()), SCEV::FlagNUW),
        IntPtr);
  } else {
    NumBytesS = SE->getAddExpr(SE->getTruncateOrZeroExtend(BECount, IntPtr),
                               SE->getOne(IntPtr), SCEV::FlagNUW);
  }

  // And scale it based on the store size.
  if (StoreSize != 1) {
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               SCEV::FlagNUW);
  }
  return NumBytesS;
}

/// processLoopStridedStore - We see a strided store of some value. If we can
/// transform this into a memset or memset_pattern in the loop preheader, do so.
bool LoopIdiomRecognize::processLoopStridedStore(
    Value *DestPtr, unsigned StoreSize, MaybeAlign StoreAlignment,
    Value *StoredVal, Instruction *TheStore,
    SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev,
    const SCEV *BECount, bool NegStride, bool IsLoopMemset) {
  Value *SplatValue = isBytewiseValue(StoredVal, *DL);
  Constant *PatternValue = nullptr;

  if (!SplatValue)
    PatternValue = getMemSetPatternValue(StoredVal, DL);

  assert((SplatValue || PatternValue) &&
         "Expected either splat value or pattern value.");

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate the
  // header. This allows us to insert code for them in the preheader.
  unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");

  Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
  Type *IntIdxTy = DL->getIndexType(DestPtr->getType());

  const SCEV *Start = Ev->getStart();
  // Handle negative strided loops.
  if (NegStride)
    Start = getStartForNegStride(Start, BECount, IntIdxTy, StoreSize, SE);

  // TODO: ideally we should still be able to generate memset if SCEV expander
  // is taught to generate the dependencies at the latest point.
  if (!isSafeToExpand(Start, *SE))
    return false;

  // Okay, we have a strided store "p[i]" of a splattable value. We can turn
  // this into a memset in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the aliased location. Check for any overlap by generating the
  // base pointer and checking the region.
  Value *BasePtr =
      Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());
  if (mayLoopAccessLocation(BasePtr, ModRefInfo::ModRef, CurLoop, BECount,
                            StoreSize, *AA, Stores)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
    return false;
  }

  if (avoidLIRForMultiBlockLoop(/*IsMemset=*/true, IsLoopMemset))
    return false;

  // Okay, everything looks good, insert the memset.

  const SCEV *NumBytesS =
      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);

  // TODO: ideally we should still be able to generate memset if SCEV expander
  // is taught to generate the dependencies at the latest point.
  if (!isSafeToExpand(NumBytesS, *SE))
    return false;

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());

  CallInst *NewCall;
  if (SplatValue) {
    NewCall = Builder.CreateMemSet(BasePtr, SplatValue, NumBytes,
                                   MaybeAlign(StoreAlignment));
  } else {
    // Everything is emitted in the default address space.
    Type *Int8PtrTy = DestInt8PtrTy;

    Module *M = TheStore->getModule();
    StringRef FuncName = "memset_pattern16";
    FunctionCallee MSP = M->getOrInsertFunction(FuncName, Builder.getVoidTy(),
                                                Int8PtrTy, Int8PtrTy, IntIdxTy);
    inferLibFuncAttributes(M, FuncName, *TLI);

    // Otherwise we should form a memset_pattern16. PatternValue is known to be
    // a constant array of 16 bytes. Plop the value into a mergable global.
    GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
                                            GlobalValue::PrivateLinkage,
                                            PatternValue, ".memset_pattern");
    GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these.
    GV->setAlignment(Align(16));
    Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
    NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
  }

  LLVM_DEBUG(dbgs() << "  Formed memset: " << *NewCall << "\n"
                    << "    from store to: " << *Ev << " at: " << *TheStore
                    << "\n");
  NewCall->setDebugLoc(TheStore->getDebugLoc());

  ORE.emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "ProcessLoopStridedStore",
                              NewCall->getDebugLoc(), Preheader)
           << "Transformed loop-strided store into a call to "
           << ore::NV("NewFunction", NewCall->getCalledFunction())
           << "() function";
  });

  // Okay, the memset has been formed. Zap the original store and anything that
  // feeds into it.
  for (auto *I : Stores)
    deleteDeadInstruction(I);
  ++NumMemSet;
  return true;
}

/// If the stored value is a strided load in the same loop with the same stride
/// this may be transformable into a memcpy. This kicks in for stuff like
///   for (i) A[i] = B[i];
bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
                                                    const SCEV *BECount) {
  assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores.");

  Value *StorePtr = SI->getPointerOperand();
  const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  APInt Stride = getStoreStride(StoreEv);
  unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
  bool NegStride = StoreSize == -Stride;

  // The store must be feeding a non-volatile load.
  LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
  assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads.");

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided load. If we have something else, it's a
  // random load we can't handle.
  const SCEVAddRecExpr *LoadEv =
      cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate the
  // header. This allows us to insert code for them in the preheader.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");

  const SCEV *StrStart = StoreEv->getStart();
  unsigned StrAS = SI->getPointerAddressSpace();
  Type *IntIdxTy = Builder.getIntNTy(DL->getIndexSizeInBits(StrAS));

  // Handle negative strided loops.
  if (NegStride)
    StrStart = getStartForNegStride(StrStart, BECount, IntIdxTy, StoreSize, SE);

  // Okay, we have a strided store "p[i]" of a loaded value. We can turn
  // this into a memcpy in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write the memory region we're storing to. This includes the load that
  // feeds the stores. Check for an alias by generating the base address and
  // checking everything.
  Value *StoreBasePtr = Expander.expandCodeFor(
      StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator());

  SmallPtrSet<Instruction *, 1> Stores;
  Stores.insert(SI);
  if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount,
                            StoreSize, *AA, Stores)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
    return false;
  }

  const SCEV *LdStart = LoadEv->getStart();
  unsigned LdAS = LI->getPointerAddressSpace();

  // Handle negative strided loops.
  if (NegStride)
    LdStart = getStartForNegStride(LdStart, BECount, IntIdxTy, StoreSize, SE);

  // For a memcpy, we have to make sure that the input array is not being
  // mutated by the loop.
  Value *LoadBasePtr = Expander.expandCodeFor(
      LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());

  if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount,
                            StoreSize, *AA, Stores)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
    RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
    return false;
  }

  if (avoidLIRForMultiBlockLoop())
    return false;

  // Okay, everything is safe, we can transform this!

  const SCEV *NumBytesS =
      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());

  CallInst *NewCall = nullptr;
  // Check whether to generate an unordered atomic memcpy:
  //  If the load or store are atomic, then they must necessarily be unordered
  //  by previous checks.
  if (!SI->isAtomic() && !LI->isAtomic())
    NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlign(), LoadBasePtr,
                                   LI->getAlign(), NumBytes);
  else {
    // We cannot allow unaligned ops for unordered load/store, so reject
    // anything where the alignment isn't at least the element size.
    const MaybeAlign StoreAlign = SI->getAlign();
    const MaybeAlign LoadAlign = LI->getAlign();
    if (StoreAlign == None || LoadAlign == None)
      return false;
    if (*StoreAlign < StoreSize || *LoadAlign < StoreSize)
      return false;

    // If the element.atomic memcpy is not lowered into explicit
    // loads/stores later, then it will be lowered into an element-size
    // specific lib call. If the lib call doesn't exist for our store size, then
    // we shouldn't generate the memcpy.
    if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
      return false;

    // Create the call.
    // Note that unordered atomic loads/stores are *required* by the spec to
    // have an alignment but non-atomic loads/stores may not.
    NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
        StoreBasePtr, *StoreAlign, LoadBasePtr, *LoadAlign, NumBytes,
        StoreSize);
  }
  NewCall->setDebugLoc(SI->getDebugLoc());

  LLVM_DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
                    << "    from load ptr=" << *LoadEv << " at: " << *LI << "\n"
                    << "    from store ptr=" << *StoreEv << " at: " << *SI
                    << "\n");

  ORE.emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "ProcessLoopStoreOfLoopLoad",
                              NewCall->getDebugLoc(), Preheader)
           << "Formed a call to "
           << ore::NV("NewFunction", NewCall->getCalledFunction())
           << "() function";
  });

  // Okay, the memcpy has been formed. Zap the original store and anything that
  // feeds into it.
  deleteDeadInstruction(SI);
  ++NumMemCpy;
  return true;
}

// When compiling for code size we avoid idiom recognition for a multi-block
// loop unless it is a loop_memset idiom or a memset/memcpy idiom in a nested
// loop.
bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset,
                                                   bool IsLoopMemset) {
  if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) {
    if (!CurLoop->getParentLoop() && (!IsMemset || !IsLoopMemset)) {
      LLVM_DEBUG(dbgs() << " " << CurLoop->getHeader()->getParent()->getName()
                        << " : LIR " << (IsMemset ? "Memset" : "Memcpy")
                        << " avoided: multi-block top-level loop\n");
      return true;
    }
  }

  return false;
}

bool LoopIdiomRecognize::runOnNoncountableLoop() {
  LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F["
                    << CurLoop->getHeader()->getParent()->getName()
                    << "] Noncountable Loop %"
                    << CurLoop->getHeader()->getName() << "\n");

  return recognizePopcount() || recognizeAndInsertFFS();
}

/// Check if the given conditional branch is based on a comparison between
/// a variable and zero: if the variable is non-zero (or zero, when JmpOnZero
/// is true), control transfers to the loop entry. If the branch matches this
/// behavior, the variable involved in the comparison is returned. This function
/// will be called to see if the precondition and postcondition of the loop are
/// in a desirable form.
static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry,
                             bool JmpOnZero = false) {
  if (!BI || !BI->isConditional())
    return nullptr;

  ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
  if (!Cond)
    return nullptr;

  ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
  if (!CmpZero || !CmpZero->isZero())
    return nullptr;

  BasicBlock *TrueSucc = BI->getSuccessor(0);
  BasicBlock *FalseSucc = BI->getSuccessor(1);
  if (JmpOnZero)
    std::swap(TrueSucc, FalseSucc);

  ICmpInst::Predicate Pred = Cond->getPredicate();
  if ((Pred == ICmpInst::ICMP_NE && TrueSucc == LoopEntry) ||
      (Pred == ICmpInst::ICMP_EQ && FalseSucc == LoopEntry))
    return Cond->getOperand(0);

  return nullptr;
}

// Check if the recurrence variable `VarX` is in the right form to create
// the idiom. Returns the value coerced to a PHINode if so.
static PHINode *getRecurrenceVar(Value *VarX, Instruction *DefX,
                                 BasicBlock *LoopEntry) {
  auto *PhiX = dyn_cast<PHINode>(VarX);
  if (PhiX && PhiX->getParent() == LoopEntry &&
      (PhiX->getOperand(0) == DefX || PhiX->getOperand(1) == DefX))
    return PhiX;
  return nullptr;
}

/// Return true iff the idiom is detected in the loop.
///
/// Additionally:
/// 1) \p CntInst is set to the instruction counting the population bit.
/// 2) \p CntPhi is set to the corresponding phi node.
/// 3) \p Var is set to the value whose population bits are being counted.
///
/// The core idiom we are trying to detect is:
/// \code
///    if (x0 != 0)
///      goto loop-exit // the precondition of the loop
///    cnt0 = init-val;
///    do {
///       x1 = phi (x0, x2);
///       cnt1 = phi(cnt0, cnt2);
///
///       cnt2 = cnt1 + 1;
///        ...
///       x2 = x1 & (x1 - 1);
///        ...
///    } while(x != 0);
///
/// loop-exit:
/// \endcode
static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
                                Instruction *&CntInst, PHINode *&CntPhi,
                                Value *&Var) {
  // step 1: Check to see if the loop-back branch matches this pattern:
  //   "if (a != 0) goto loop-entry".
  BasicBlock *LoopEntry;
  Instruction *DefX2, *CountInst;
  Value *VarX1, *VarX0;
  PHINode *PhiX, *CountPhi;

  DefX2 = CountInst = nullptr;
  VarX1 = VarX0 = nullptr;
  PhiX = CountPhi = nullptr;
  LoopEntry = *(CurLoop->block_begin());

  // step 1: Check if the loop-back branch is in desirable form.
  {
    if (Value *T = matchCondition(
            dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
      DefX2 = dyn_cast<Instruction>(T);
    else
      return false;
  }

  // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
  {
    if (!DefX2 || DefX2->getOpcode() != Instruction::And)
      return false;

    BinaryOperator *SubOneOp;

    if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
      VarX1 = DefX2->getOperand(1);
    else {
      VarX1 = DefX2->getOperand(0);
      SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
    }
    if (!SubOneOp || SubOneOp->getOperand(0) != VarX1)
      return false;

    ConstantInt *Dec = dyn_cast<ConstantInt>(SubOneOp->getOperand(1));
    if (!Dec ||
        !((SubOneOp->getOpcode() == Instruction::Sub && Dec->isOne()) ||
          (SubOneOp->getOpcode() == Instruction::Add &&
           Dec->isMinusOne()))) {
      return false;
    }
  }

  // step 3: Check the recurrence of variable X
  PhiX = getRecurrenceVar(VarX1, DefX2, LoopEntry);
  if (!PhiX)
    return false;

  // step 4: Find the instruction which counts the population: cnt2 = cnt1 + 1
  {
    CountInst = nullptr;
    for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
                              IterE = LoopEntry->end();
         Iter != IterE; Iter++) {
      Instruction *Inst = &*Iter;
      if (Inst->getOpcode() != Instruction::Add)
        continue;

      ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
      if (!Inc || !Inc->isOne())
        continue;

      PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
      if (!Phi)
        continue;

      // Check if the result of the instruction is live outside the loop.
      bool LiveOutLoop = false;
      for (User *U : Inst->users()) {
        if ((cast<Instruction>(U))->getParent() != LoopEntry) {
          LiveOutLoop = true;
          break;
        }
      }

      if (LiveOutLoop) {
        CountInst = Inst;
        CountPhi = Phi;
        break;
      }
    }

    if (!CountInst)
      return false;
  }

  // step 5: check if the precondition is in this form:
  //   "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
  {
    auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
    Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
    if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
      return false;

    CntInst = CountInst;
    CntPhi = CountPhi;
    Var = T;
  }

  return true;
}

/// Return true if the idiom is detected in the loop.
///
/// Additionally:
/// 1) \p CntInst is set to the instruction Counting Leading Zeros (CTLZ)
///    or nullptr if there is no such instruction.
/// 2) \p CntPhi is set to the corresponding phi node
///    or nullptr if there is no such node.
/// 3) \p Var is set to the value whose CTLZ could be used.
/// 4) \p DefX is set to the instruction calculating the loop exit condition.
///
/// The core idiom we are trying to detect is:
/// \code
///    if (x0 == 0)
///      goto loop-exit // the precondition of the loop
///    cnt0 = init-val;
///    do {
///       x = phi (x0, x.next);   // PhiX
///       cnt = phi(cnt0, cnt.next);
///
///       cnt.next = cnt + 1;
///        ...
///       x.next = x >> 1;   // DefX
///        ...
///    } while(x.next != 0);
///
/// loop-exit:
/// \endcode
static bool detectShiftUntilZeroIdiom(Loop *CurLoop, const DataLayout &DL,
                                      Intrinsic::ID &IntrinID, Value *&InitX,
                                      Instruction *&CntInst, PHINode *&CntPhi,
                                      Instruction *&DefX) {
  BasicBlock *LoopEntry;
  Value *VarX = nullptr;

  DefX = nullptr;
  CntInst = nullptr;
  CntPhi = nullptr;
  LoopEntry = *(CurLoop->block_begin());

  // step 1: Check if the loop-back branch is in desirable form.
  if (Value *T = matchCondition(
          dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
    DefX = dyn_cast<Instruction>(T);
  else
    return false;

  // step 2: detect instructions corresponding to "x.next = x >> 1 or x << 1"
  if (!DefX || !DefX->isShift())
    return false;
  IntrinID = DefX->getOpcode() == Instruction::Shl ? Intrinsic::cttz :
                                                     Intrinsic::ctlz;
  ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1));
  if (!Shft || !Shft->isOne())
    return false;
  VarX = DefX->getOperand(0);

  // step 3: Check the recurrence of variable X
  PHINode *PhiX = getRecurrenceVar(VarX, DefX, LoopEntry);
  if (!PhiX)
    return false;

  InitX = PhiX->getIncomingValueForBlock(CurLoop->getLoopPreheader());

  // Make sure the initial value can't be negative, otherwise the ashr in the
  // loop might never reach zero, which would make the loop infinite.
  if (DefX->getOpcode() == Instruction::AShr && !isKnownNonNegative(InitX, DL))
    return false;

  // step 4: Find the instruction which counts the CTLZ: cnt.next = cnt + 1
  //         TODO: We can skip the step. If loop trip count is known (CTLZ),
  //               then all uses of "cnt.next" could be optimized to the trip
  //               count plus "cnt0". Currently it is not optimized.
  //               This step could be used to detect POPCNT instruction:
  //               cnt.next = cnt + (x.next & 1)
  for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
                            IterE = LoopEntry->end();
       Iter != IterE; Iter++) {
    Instruction *Inst = &*Iter;
    if (Inst->getOpcode() != Instruction::Add)
      continue;

    ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
    if (!Inc || !Inc->isOne())
      continue;

    PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
    if (!Phi)
      continue;

    CntInst = Inst;
    CntPhi = Phi;
    break;
  }
  if (!CntInst)
    return false;

  return true;
}

/// Recognize a CTLZ or CTTZ idiom in a non-countable loop and convert the loop
/// to a countable one (with a CTLZ / CTTZ trip count). Returns true if CTLZ /
/// CTTZ is inserted as a new trip count; otherwise, returns false.
bool LoopIdiomRecognize::recognizeAndInsertFFS() {
  // Give up if the loop has multiple blocks or multiple backedges.
  if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
    return false;

  Intrinsic::ID IntrinID;
  Value *InitX;
  Instruction *DefX = nullptr;
  PHINode *CntPhi = nullptr;
  Instruction *CntInst = nullptr;
  // Help decide if the transformation is profitable. For the ShiftUntilZero
  // idiom, this is always 6.
  size_t IdiomCanonicalSize = 6;

  if (!detectShiftUntilZeroIdiom(CurLoop, *DL, IntrinID, InitX,
                                 CntInst, CntPhi, DefX))
    return false;

  bool IsCntPhiUsedOutsideLoop = false;
  for (User *U : CntPhi->users())
    if (!CurLoop->contains(cast<Instruction>(U))) {
      IsCntPhiUsedOutsideLoop = true;
      break;
    }
  bool IsCntInstUsedOutsideLoop = false;
  for (User *U : CntInst->users())
    if (!CurLoop->contains(cast<Instruction>(U))) {
      IsCntInstUsedOutsideLoop = true;
      break;
    }
  // If both CntInst and CntPhi are used outside the loop the profitability
  // is questionable.
  if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop)
    return false;

  // For some CPUs the result of the CTLZ(X) intrinsic is undefined
  // when X is 0. If we cannot guarantee X != 0, we need to check for this
  // when expanding.
  bool ZeroCheck = false;
  // It is safe to assume a preheader exists, as that was checked in the
  // parent function runOnLoop.
  BasicBlock *PH = CurLoop->getLoopPreheader();

  // If we are using the count instruction outside the loop, make sure we
  // have a zero check as a precondition. Without the check the loop would run
  // one iteration before any check of the input value. This means 0 and 1
  // would have identical behavior in the original loop and thus
  if (!IsCntPhiUsedOutsideLoop) {
    auto *PreCondBB = PH->getSinglePredecessor();
    if (!PreCondBB)
      return false;
    auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
    if (!PreCondBI)
      return false;
    if (matchCondition(PreCondBI, PH) != InitX)
      return false;
    ZeroCheck = true;
  }

  // Check if the CTLZ / CTTZ intrinsic is profitable. Assume it is always
  // profitable if we delete the loop.

  // The loop has only 6 instructions:
  //  %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
  //  %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
  //  %shr = ashr %n.addr.0, 1
  //  %tobool = icmp eq %shr, 0
  //  %inc = add nsw %i.0, 1
  //  br i1 %tobool

  const Value *Args[] =
      {InitX, ZeroCheck ? ConstantInt::getTrue(InitX->getContext())
                        : ConstantInt::getFalse(InitX->getContext())};

  // @llvm.dbg intrinsics don't count, as they have no semantic effect.
  auto InstWithoutDebugIt = CurLoop->getHeader()->instructionsWithoutDebug();
  uint32_t HeaderSize =
      std::distance(InstWithoutDebugIt.begin(), InstWithoutDebugIt.end());

  if (HeaderSize != IdiomCanonicalSize &&
      TTI->getIntrinsicCost(IntrinID, InitX->getType(), Args) >
          TargetTransformInfo::TCC_Basic)
    return false;

  transformLoopToCountable(IntrinID, PH, CntInst, CntPhi, InitX, DefX,
                           DefX->getDebugLoc(), ZeroCheck,
                           IsCntPhiUsedOutsideLoop);
  return true;
}

/// Recognizes a population count idiom in a non-countable loop.
///
/// If detected, transforms the relevant code to issue the popcount intrinsic
/// function call, and returns true; otherwise, returns false.
bool LoopIdiomRecognize::recognizePopcount() {
  if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
    return false;

  // Counting the population is usually done with a few arithmetic
  // instructions. Such instructions can be easily "absorbed" by vacant slots
  // in a non-compact loop. Therefore, recognizing the popcount idiom only
  // makes sense for a compact loop.

  // Give up if the loop has multiple blocks or multiple backedges.
  if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
    return false;

  BasicBlock *LoopBody = *(CurLoop->block_begin());
  if (LoopBody->size() >= 20) {
    // The loop is too big, bail out.
    return false;
  }

  // It should have a preheader containing nothing but an unconditional branch.
  BasicBlock *PH = CurLoop->getLoopPreheader();
  if (!PH || &PH->front() != PH->getTerminator())
    return false;
  auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
  if (!EntryBI || EntryBI->isConditional())
    return false;

  // It should have a precondition block where the generated popcount intrinsic
  // function can be inserted.
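  // As an informal sketch only (hypothetical block and value names, not taken
  // from real IR), the CFG shape being matched here looks roughly like:
  //
  //   precond:                          ; PreCondBB, conditional terminator
  //     %guard = icmp eq i32 %x, 0
  //     br i1 %guard, label %exit, label %preheader
  //   preheader:                        ; PH, unconditional branch only
  //     br label %loop
  //   loop:                             ; single block, single backedge
  //     ...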
  auto *PreCondBB = PH->getSinglePredecessor();
  if (!PreCondBB)
    return false;
  auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
  if (!PreCondBI || PreCondBI->isUnconditional())
    return false;

  Instruction *CntInst;
  PHINode *CntPhi;
  Value *Val;
  if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
    return false;

  transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
  return true;
}

static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
                                       const DebugLoc &DL) {
  Value *Ops[] = {Val};
  Type *Tys[] = {Val->getType()};

  Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
  CallInst *CI = IRBuilder.CreateCall(Func, Ops);
  CI->setDebugLoc(DL);

  return CI;
}

static CallInst *createFFSIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
                                    const DebugLoc &DL, bool ZeroCheck,
                                    Intrinsic::ID IID) {
  Value *Ops[] = {Val, ZeroCheck ? IRBuilder.getTrue() : IRBuilder.getFalse()};
  Type *Tys[] = {Val->getType()};

  Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, IID, Tys);
  CallInst *CI = IRBuilder.CreateCall(Func, Ops);
  CI->setDebugLoc(DL);

  return CI;
}

/// Transform the following loop (using CTLZ; CTTZ is similar):
///   loop:
///     CntPhi = PHI [Cnt0, CntInst]
///     PhiX = PHI [InitX, DefX]
///     CntInst = CntPhi + 1
///     DefX = PhiX >> 1
///     LOOP_BODY
///     Br: loop if (DefX != 0)
///   Use(CntPhi) or Use(CntInst)
///
/// Into:
///   If CntPhi used outside the loop:
///     CountPrev = BitWidth(InitX) - CTLZ(InitX >> 1)
///     Count = CountPrev + 1
///   else
///     Count = BitWidth(InitX) - CTLZ(InitX)
///   loop:
///     CntPhi = PHI [Cnt0, CntInst]
///     PhiX = PHI [InitX, DefX]
///     PhiCount = PHI [Count, Dec]
///     CntInst = CntPhi + 1
///     DefX = PhiX >> 1
///     Dec = PhiCount - 1
///     LOOP_BODY
///     Br: loop if (Dec != 0)
///   Use(CountPrev + Cnt0) // Use(CntPhi)
///   or
///   Use(Count + Cnt0) // Use(CntInst)
///
/// If LOOP_BODY is empty the loop will be deleted.
/// If CntInst and DefX are not used in LOOP_BODY they will be removed.
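///
/// As an informal, source-level illustration only (hypothetical function, not
/// taken from this pass), a loop in the original form corresponds to C code
/// such as:
/// \code
///   unsigned shift_count(unsigned x0, unsigned cnt0) { // requires x0 != 0
///     unsigned x = x0, cnt = cnt0;
///     do {
///       cnt += 1;    // CntInst
///       x >>= 1;     // DefX
///     } while (x != 0);
///     return cnt;    // use of CntInst outside the loop
///   }
/// \endcode
/// and the value returned after the transformation is conceptually
///   cnt0 + (BitWidth(x0) - ctlz(x0)).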
void LoopIdiomRecognize::transformLoopToCountable(
    Intrinsic::ID IntrinID, BasicBlock *Preheader, Instruction *CntInst,
    PHINode *CntPhi, Value *InitX, Instruction *DefX, const DebugLoc &DL,
    bool ZeroCheck, bool IsCntPhiUsedOutsideLoop) {
  BranchInst *PreheaderBr = cast<BranchInst>(Preheader->getTerminator());

  // Step 1: Insert the CTLZ/CTTZ instruction at the end of the preheader block
  IRBuilder<> Builder(PreheaderBr);
  Builder.SetCurrentDebugLocation(DL);
  Value *FFS, *Count, *CountPrev, *NewCount, *InitXNext;

  //   Count = BitWidth - CTLZ(InitX);
  // If there are uses of CntPhi create:
  //   CountPrev = BitWidth - CTLZ(InitX >> 1);
  if (IsCntPhiUsedOutsideLoop) {
    if (DefX->getOpcode() == Instruction::AShr)
      InitXNext =
          Builder.CreateAShr(InitX, ConstantInt::get(InitX->getType(), 1));
    else if (DefX->getOpcode() == Instruction::LShr)
      InitXNext =
          Builder.CreateLShr(InitX, ConstantInt::get(InitX->getType(), 1));
    else if (DefX->getOpcode() == Instruction::Shl) // cttz
      InitXNext =
          Builder.CreateShl(InitX, ConstantInt::get(InitX->getType(), 1));
    else
      llvm_unreachable("Unexpected opcode!");
  } else
    InitXNext = InitX;
  FFS = createFFSIntrinsic(Builder, InitXNext, DL, ZeroCheck, IntrinID);
  Count = Builder.CreateSub(
      ConstantInt::get(FFS->getType(),
                       FFS->getType()->getIntegerBitWidth()),
      FFS);
  if (IsCntPhiUsedOutsideLoop) {
    CountPrev = Count;
    Count = Builder.CreateAdd(
        CountPrev,
        ConstantInt::get(CountPrev->getType(), 1));
  }

  NewCount = Builder.CreateZExtOrTrunc(
      IsCntPhiUsedOutsideLoop ? CountPrev : Count,
      cast<IntegerType>(CntInst->getType()));

  // If the counter's initial value is not zero, insert an Add instruction.
  Value *CntInitVal = CntPhi->getIncomingValueForBlock(Preheader);
  ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
  if (!InitConst || !InitConst->isZero())
    NewCount = Builder.CreateAdd(NewCount, CntInitVal);

  // Step 2: Insert the new IV and loop condition:
  //   loop:
  //     ...
  //     PhiCount = PHI [Count, Dec]
  //     ...
  //     Dec = PhiCount - 1
  //     ...
  //     Br: loop if (Dec != 0)
  BasicBlock *Body = *(CurLoop->block_begin());
  auto *LbBr = cast<BranchInst>(Body->getTerminator());
  ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
  Type *Ty = Count->getType();

  PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());

  Builder.SetInsertPoint(LbCond);
  Instruction *TcDec = cast<Instruction>(
      Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
                        "tcdec", false, true));

  TcPhi->addIncoming(Count, Preheader);
  TcPhi->addIncoming(TcDec, Body);

  CmpInst::Predicate Pred =
      (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  LbCond->setPredicate(Pred);
  LbCond->setOperand(0, TcDec);
  LbCond->setOperand(1, ConstantInt::get(Ty, 0));

  // Step 3: All the references to the original counter outside
  //  the loop are replaced with the NewCount
  if (IsCntPhiUsedOutsideLoop)
    CntPhi->replaceUsesOutsideBlock(NewCount, Body);
  else
    CntInst->replaceUsesOutsideBlock(NewCount, Body);

  // Step 4: Forget the "non-computable" trip-count SCEV associated with the
  // loop. The loop would otherwise not be deleted even if it becomes empty.
  SE->forgetLoop(CurLoop);
}

void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
                                                 Instruction *CntInst,
                                                 PHINode *CntPhi, Value *Var) {
  BasicBlock *PreHead = CurLoop->getLoopPreheader();
  auto *PreCondBr = cast<BranchInst>(PreCondBB->getTerminator());
  const DebugLoc &DL = CntInst->getDebugLoc();

  // Before the transformation, the loop is assumed to look like:
  //   if (x) // the precondition
  //     do { cnt++; x &= x - 1; } while(x);

  // Step 1: Insert the ctpop instruction at the end of the precondition block
  IRBuilder<> Builder(PreCondBr);
  Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
  {
    PopCnt = createPopcntIntrinsic(Builder, Var, DL);
    NewCount = PopCntZext =
        Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));

    if (NewCount != PopCnt)
      (cast<Instruction>(NewCount))->setDebugLoc(DL);

    // TripCnt is exactly the number of iterations the loop has
    TripCnt = NewCount;

    // If the population counter's initial value is not zero, insert an Add
    // instruction.
    Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
    ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
    if (!InitConst || !InitConst->isZero()) {
      NewCount = Builder.CreateAdd(NewCount, CntInitVal);
      (cast<Instruction>(NewCount))->setDebugLoc(DL);
    }
  }

  // Step 2: Replace the precondition "if (x == 0) goto loop-exit" with
  //   "if (NewCount == 0) goto loop-exit". Without this change, the intrinsic
  //   call would be partially dead code, and downstream passes would drag it
  //   back from the precondition block to the preheader.
  {
    ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());

    Value *Opnd0 = PopCntZext;
    Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
    if (PreCond->getOperand(0) != Var)
      std::swap(Opnd0, Opnd1);

    ICmpInst *NewPreCond = cast<ICmpInst>(
        Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
    PreCondBr->setCondition(NewPreCond);

    RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
  }

  // Step 3: Note that the population count is exactly the trip count of the
  // loop in question, which enables us to convert the loop from a noncountable
  // loop into a countable one. The benefit is twofold:
  //
  //  - If the loop only counts population, the entire loop becomes dead after
  //    the transformation. It is a lot easier to prove a countable loop dead
  //    than to prove a noncountable one. (In some C dialects, an infinite loop
  //    isn't dead even if it computes nothing useful. In general, DCE needs
  //    to prove a noncountable loop finite before safely deleting it.)
  //
  //  - If the loop also performs something else, it remains alive.
  //    Since it is transformed to countable form, it can be aggressively
  //    optimized by some optimizations which are in general not applicable
  //    to a noncountable loop.
  //
  // After this step, this loop (conceptually) would look like the following:
  //    newcnt = __builtin_ctpop(x);
  //    t = newcnt;
  //    if (x)
  //      do { cnt++; x &= x - 1; t--; } while (t > 0);
  BasicBlock *Body = *(CurLoop->block_begin());
  {
    auto *LbBr = cast<BranchInst>(Body->getTerminator());
    ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
    Type *Ty = TripCnt->getType();

    PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());

    Builder.SetInsertPoint(LbCond);
    Instruction *TcDec = cast<Instruction>(
        Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
                          "tcdec", false, true));

    TcPhi->addIncoming(TripCnt, PreHead);
    TcPhi->addIncoming(TcDec, Body);

    CmpInst::Predicate Pred =
        (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
    LbCond->setPredicate(Pred);
    LbCond->setOperand(0, TcDec);
    LbCond->setOperand(1, ConstantInt::get(Ty, 0));
  }

  // Step 4: All the references to the original population counter outside
  //  the loop are replaced with the NewCount -- the value returned from
  //  __builtin_ctpop().
  CntInst->replaceUsesOutsideBlock(NewCount, Body);

  // Step 5: Forget the "non-computable" trip-count SCEV associated with the
  // loop. The loop would otherwise not be deleted even if it becomes empty.
  SE->forgetLoop(CurLoop);
}
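
// As an informal, source-level illustration only (hypothetical function names,
// not part of this pass), the overall effect of recognizePopcount() /
// transformLoopToPopcount() on a pure counting loop, once later cleanups have
// deleted the now-countable loop, is roughly:
//
//   int popcount_slow(unsigned x) {     // original: non-countable loop
//     int cnt = 0;
//     if (x)
//       do { cnt++; x &= x - 1; } while (x);
//     return cnt;
//   }
//
//   int popcount_fast(unsigned x) {     // conceptually, after the pass:
//     return __builtin_popcount(x);     // lowered via @llvm.ctpop
//   }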