//===-- LoopIdiomRecognize.cpp - Loop idiom recognition -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form. In cases where this kicks in, it can be a significant
// performance win.
//
//===----------------------------------------------------------------------===//
//
// TODO List:
//
// Future loop memory idioms to recognize:
//   memcmp, memmove, strlen, etc.
// Future floating point idioms to recognize in -ffast-math mode:
//   fpowi
// Future integer operation idioms to recognize:
//   ctpop, ctlz, cttz
//
// Beware that isel's default lowering for ctpop is highly inefficient for
// i64 and larger types when i64 is legal and the value has few bits set. It
// would be good to enhance isel to emit a loop for ctpop in this case.
//
// This could recognize common matrix multiplies and dot product idioms and
// replace them with calls to BLAS (if linked in??).
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
using namespace llvm;

#define DEBUG_TYPE "loop-idiom"

STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");

namespace {

class LoopIdiomRecognize : public LoopPass {
  Loop *CurLoop;
  AliasAnalysis *AA;
  DominatorTree *DT;
  LoopInfo *LI;
  ScalarEvolution *SE;
  TargetLibraryInfo *TLI;
  const TargetTransformInfo *TTI;
  const DataLayout *DL;

public:
  static char ID;
  explicit LoopIdiomRecognize() : LoopPass(ID) {
    initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override;

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG.
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }

private:
  typedef SmallVector<StoreInst *, 8> StoreList;
  typedef MapVector<Value *, StoreList> StoreListMap;
  StoreListMap StoreRefsForMemset;
  StoreListMap StoreRefsForMemsetPattern;
  StoreList StoreRefsForMemcpy;
  bool HasMemset;
  bool HasMemsetPattern;
  bool HasMemcpy;

  /// \name Countable Loop Idiom Handling
  /// @{

  bool runOnCountableLoop();
  bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                      SmallVectorImpl<BasicBlock *> &ExitBlocks);

  void collectStores(BasicBlock *BB);
  bool isLegalStore(StoreInst *SI, bool &ForMemset, bool &ForMemsetPattern,
                    bool &ForMemcpy);
  bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
                         bool ForMemset);
  bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);

  bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
                               unsigned StoreAlignment, Value *StoredVal,
                               Instruction *TheStore,
                               SmallPtrSetImpl<Instruction *> &Stores,
                               const SCEVAddRecExpr *Ev, const SCEV *BECount,
                               bool NegStride);
  bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);

  /// @}
  /// \name Noncountable Loop Idiom Handling
  /// @{

  bool runOnNoncountableLoop();

  bool recognizePopcount();
  void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
                               PHINode *CntPhi, Value *Var);

  /// @}
};

} // End anonymous namespace.

char LoopIdiomRecognize::ID = 0;
INITIALIZE_PASS_BEGIN(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
                    false, false)

Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognize(); }

static void deleteDeadInstruction(Instruction *I) {
  I->replaceAllUsesWith(UndefValue::get(I->getType()));
  I->eraseFromParent();
}

//===----------------------------------------------------------------------===//
//
// Implementation of LoopIdiomRecognize
//
//===----------------------------------------------------------------------===//

bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
  if (skipLoop(L))
    return false;

  CurLoop = L;
  // If the loop could not be converted to canonical form, it must have an
  // indirectbr in it; just give up.
  if (!L->getLoopPreheader())
    return false;

  // Disable loop idiom recognition if the function's name is a common idiom.
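  // Otherwise we could rewrite the body of such a function into a call to
  // itself. For example (illustrative), a freestanding implementation like
  //   void *memset(void *p, int v, size_t n) {
  //     for (size_t i = 0; i != n; ++i)
  //       ((unsigned char *)p)[i] = (unsigned char)v;
  //     return p;
  //   }
  // would otherwise be turned into an infinitely recursive call to memset.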
  StringRef Name = L->getHeader()->getParent()->getName();
  if (Name == "memset" || Name == "memcpy")
    return false;

  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
      *CurLoop->getHeader()->getParent());
  DL = &CurLoop->getHeader()->getModule()->getDataLayout();

  HasMemset = TLI->has(LibFunc::memset);
  HasMemsetPattern = TLI->has(LibFunc::memset_pattern16);
  HasMemcpy = TLI->has(LibFunc::memcpy);

  if (HasMemset || HasMemsetPattern || HasMemcpy)
    if (SE->hasLoopInvariantBackedgeTakenCount(L))
      return runOnCountableLoop();

  return runOnNoncountableLoop();
}

bool LoopIdiomRecognize::runOnCountableLoop() {
  const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
  assert(!isa<SCEVCouldNotCompute>(BECount) &&
         "runOnCountableLoop() called on a loop without a predictable "
         "backedge-taken count");

  // If this loop executes exactly one time, then it should be peeled, not
  // optimized by this pass.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    if (BECst->getAPInt() == 0)
      return false;

  SmallVector<BasicBlock *, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  DEBUG(dbgs() << "loop-idiom Scanning: F["
               << CurLoop->getHeader()->getParent()->getName() << "] Loop %"
               << CurLoop->getHeader()->getName() << "\n");

  bool MadeChange = false;
  // Scan all the blocks in the loop that are not in subloops.
  for (auto *BB : CurLoop->getBlocks()) {
    // Ignore blocks in subloops.
    if (LI->getLoopFor(BB) != CurLoop)
      continue;

    MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
  }
  return MadeChange;
}

static unsigned getStoreSizeInBytes(StoreInst *SI, const DataLayout *DL) {
  uint64_t SizeInBits = DL->getTypeSizeInBits(SI->getValueOperand()->getType());
  assert(((SizeInBits & 7) || (SizeInBits >> 32) == 0) &&
         "Don't overflow unsigned.");
  return (unsigned)SizeInBits >> 3;
}

static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) {
  const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1));
  return ConstStride->getAPInt();
}

/// getMemSetPatternValue - If a strided store of the specified value is safe to
/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
/// be passed in. Otherwise, return null.
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
  // If the value isn't a constant, we can't promote it to being in a constant
  // array. We could theoretically do a store to an alloca or something, but
  // that doesn't seem worthwhile.
  Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return nullptr;

  // Only handle simple values that are a power of two bytes in size.
  uint64_t Size = DL->getTypeSizeInBits(V->getType());
  if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
    return nullptr;

  // Don't care enough about darwin/ppc to implement this.
  if (DL->isBigEndian())
    return nullptr;

  // Convert to size in bytes.
  Size /= 8;

  // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
  // if the top and bottom are the same (e.g. for vectors and large integers).
  if (Size > 16)
    return nullptr;

  // If the constant is exactly 16 bytes, just use it.
  if (Size == 16)
    return C;

  // Otherwise, we'll use an array of the constants.
  unsigned ArraySize = 16 / Size;
  ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
  return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
}

bool LoopIdiomRecognize::isLegalStore(StoreInst *SI, bool &ForMemset,
                                      bool &ForMemsetPattern, bool &ForMemcpy) {
  // Don't touch volatile stores.
  if (!SI->isSimple())
    return false;

  // Avoid merging nontemporal stores.
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  Value *StoredVal = SI->getValueOperand();
  Value *StorePtr = SI->getPointerOperand();

  // Reject stores that are so large that they overflow an unsigned.
  uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
  if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
    return false;

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *StoreEv =
      dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
    return false;

  // Check to see if we have a constant stride.
  if (!isa<SCEVConstant>(StoreEv->getOperand(1)))
    return false;

  // See if the store can be turned into a memset.

  // If the stored value is a byte-wise value (like i32 -1), then it may be
  // turned into a memset of i8 -1, assuming that all the consecutive bytes
  // are stored. A store of i32 0x01020304 can never be turned into a memset,
  // but it can be turned into memset_pattern if the target supports it.
  Value *SplatValue = isBytewiseValue(StoredVal);
  Constant *PatternValue = nullptr;

  // If we're allowed to form a memset, and the stored value would be
  // acceptable for memset, use it.
  if (HasMemset && SplatValue &&
      // Verify that the stored value is loop invariant. If not, we can't
      // promote the memset.
      CurLoop->isLoopInvariant(SplatValue)) {
    // It looks like we can use SplatValue.
    ForMemset = true;
    return true;
  } else if (HasMemsetPattern &&
             // Don't create memset_pattern16s with address spaces.
             StorePtr->getType()->getPointerAddressSpace() == 0 &&
             (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
    // It looks like we can use PatternValue!
    ForMemsetPattern = true;
    return true;
  }

  // Otherwise, see if the store can be turned into a memcpy.
  if (HasMemcpy) {
    // Check to see if the stride matches the size of the store. If so, then
    // we know that every byte is touched in the loop.
    APInt Stride = getStoreStride(StoreEv);
    unsigned StoreSize = getStoreSizeInBytes(SI, DL);
    if (StoreSize != Stride && StoreSize != -Stride)
      return false;

    // The store must be feeding a non-volatile load.
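    // (i.e. the stored value is itself a strided load, the
    // "for (i) A[i] = B[i];" pattern that processLoopStoreOfLoopLoad below
    // turns into a memcpy).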
    LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());
    if (!LI || !LI->isSimple())
      return false;

    // See if the pointer expression is an AddRec like {base,+,1} on the
    // current loop, which indicates a strided load. If we have something
    // else, it's a random load we can't handle.
    const SCEVAddRecExpr *LoadEv =
        dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
    if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
      return false;

    // The store and load must share the same stride.
    if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
      return false;

    // Success. This store can be converted into a memcpy.
    ForMemcpy = true;
    return true;
  }
  // This store can't be transformed into a memset/memcpy.
  return false;
}

void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
  StoreRefsForMemset.clear();
  StoreRefsForMemsetPattern.clear();
  StoreRefsForMemcpy.clear();
  for (Instruction &I : *BB) {
    StoreInst *SI = dyn_cast<StoreInst>(&I);
    if (!SI)
      continue;

    bool ForMemset = false;
    bool ForMemsetPattern = false;
    bool ForMemcpy = false;
    // Make sure this is a strided store with a constant stride.
    if (!isLegalStore(SI, ForMemset, ForMemsetPattern, ForMemcpy))
      continue;

    // Save the store locations.
    if (ForMemset) {
      // Find the base pointer.
      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
      StoreRefsForMemset[Ptr].push_back(SI);
    } else if (ForMemsetPattern) {
      // Find the base pointer.
      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
      StoreRefsForMemsetPattern[Ptr].push_back(SI);
    } else if (ForMemcpy)
      StoreRefsForMemcpy.push_back(SI);
  }
}

/// runOnLoopBlock - Process the specified block, which lives in a counted loop
/// with the specified backedge count. This block is known to be in the current
/// loop and not in any subloops.
bool LoopIdiomRecognize::runOnLoopBlock(
    BasicBlock *BB, const SCEV *BECount,
    SmallVectorImpl<BasicBlock *> &ExitBlocks) {
  // We can only promote stores in this block if they are unconditionally
  // executed in the loop. For a block to be unconditionally executed, it has
  // to dominate all the exit blocks of the loop. Verify this now.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (!DT->dominates(BB, ExitBlocks[i]))
      return false;

  bool MadeChange = false;
  // Look for store instructions, which may be optimized to memset/memcpy.
  collectStores(BB);

  // Look for a single store or sets of stores with a common base, which can be
  // optimized into a memset (memset_pattern). The latter most commonly happens
  // with structs and hand-unrolled loops.
  for (auto &SL : StoreRefsForMemset)
    MadeChange |= processLoopStores(SL.second, BECount, true);

  for (auto &SL : StoreRefsForMemsetPattern)
    MadeChange |= processLoopStores(SL.second, BECount, false);

  // Optimize the store into a memcpy, if it feeds a similarly strided load.
  for (auto &SI : StoreRefsForMemcpy)
    MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);

  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;
    // Look for memset instructions, which may be optimized to a larger memset.
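    // For example (illustrative): a loop that calls memset(&A[i], 0, 4) every
    // iteration with a pointer stride of 4 bytes can be widened into one
    // memset covering the whole range.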
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
      WeakVH InstPtr(&*I);
      if (!processLoopMemSet(MSI, BECount))
        continue;
      MadeChange = true;

      // If processing the memset invalidated our iterator, start over from the
      // top of the block.
      if (!InstPtr)
        I = BB->begin();
      continue;
    }
  }

  return MadeChange;
}

/// processLoopStores - See if the given stores can be promoted to a memset.
bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
                                           const SCEV *BECount,
                                           bool ForMemset) {
  // Try to find consecutive stores that can be transformed into memsets.
  SetVector<StoreInst *> Heads, Tails;
  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  SmallVector<unsigned, 16> IndexQueue;
  for (unsigned i = 0, e = SL.size(); i < e; ++i) {
    assert(SL[i]->isSimple() && "Expected only non-volatile stores.");

    Value *FirstStoredVal = SL[i]->getValueOperand();
    Value *FirstStorePtr = SL[i]->getPointerOperand();
    const SCEVAddRecExpr *FirstStoreEv =
        cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr));
    APInt FirstStride = getStoreStride(FirstStoreEv);
    unsigned FirstStoreSize = getStoreSizeInBytes(SL[i], DL);

    // See if we can optimize just this store in isolation.
    if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
      Heads.insert(SL[i]);
      continue;
    }

    Value *FirstSplatValue = nullptr;
    Constant *FirstPatternValue = nullptr;

    if (ForMemset)
      FirstSplatValue = isBytewiseValue(FirstStoredVal);
    else
      FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL);

    assert((FirstSplatValue || FirstPatternValue) &&
           "Expected either splat value or pattern value.");

    IndexQueue.clear();
    // If a store has multiple consecutive store candidates, search the Stores
    // array according to the sequence: from i+1 to e, then from i-1 to 0.
    // This is because pairing with the immediately succeeding or preceding
    // candidate usually creates the best chance of finding a memset
    // opportunity.
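    // For example (illustrative), with i == 3 and e == 6 the candidates are
    // visited in the order IndexQueue == {4, 5, 2, 1, 0}.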
    unsigned j = 0;
    for (j = i + 1; j < e; ++j)
      IndexQueue.push_back(j);
    for (j = i; j > 0; --j)
      IndexQueue.push_back(j - 1);

    for (auto &k : IndexQueue) {
      assert(SL[k]->isSimple() && "Expected only non-volatile stores.");
      Value *SecondStorePtr = SL[k]->getPointerOperand();
      const SCEVAddRecExpr *SecondStoreEv =
          cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr));
      APInt SecondStride = getStoreStride(SecondStoreEv);

      if (FirstStride != SecondStride)
        continue;

      Value *SecondStoredVal = SL[k]->getValueOperand();
      Value *SecondSplatValue = nullptr;
      Constant *SecondPatternValue = nullptr;

      if (ForMemset)
        SecondSplatValue = isBytewiseValue(SecondStoredVal);
      else
        SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL);

      assert((SecondSplatValue || SecondPatternValue) &&
             "Expected either splat value or pattern value.");

      if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) {
        if (ForMemset) {
          if (FirstSplatValue != SecondSplatValue)
            continue;
        } else {
          if (FirstPatternValue != SecondPatternValue)
            continue;
        }
        Tails.insert(SL[k]);
        Heads.insert(SL[i]);
        ConsecutiveChain[SL[i]] = SL[k];
        break;
      }
    }
  }

  // We may run into multiple chains that merge into a single chain. We mark
  // the stores that we transformed so that we don't visit the same store
  // twice.
  SmallPtrSet<Value *, 16> TransformedStores;
  bool Changed = false;

  // For stores that start but don't end a link in the chain:
  for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and
    // try to transform it.
    SmallPtrSet<Instruction *, 8> AdjacentStores;
    StoreInst *I = *it;

    StoreInst *HeadStore = I;
    unsigned StoreSize = 0;

    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (TransformedStores.count(I))
        break;
      AdjacentStores.insert(I);

      StoreSize += getStoreSizeInBytes(I, DL);
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    Value *StoredVal = HeadStore->getValueOperand();
    Value *StorePtr = HeadStore->getPointerOperand();
    const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
    APInt Stride = getStoreStride(StoreEv);

    // Check to see if the stride matches the size of the stores. If so, then
    // we know that every byte is touched in the loop.
    if (StoreSize != Stride && StoreSize != -Stride)
      continue;

    bool NegStride = StoreSize == -Stride;

    if (processLoopStridedStore(StorePtr, StoreSize, HeadStore->getAlignment(),
                                StoredVal, HeadStore, AdjacentStores, StoreEv,
                                BECount, NegStride)) {
      TransformedStores.insert(AdjacentStores.begin(), AdjacentStores.end());
      Changed = true;
    }
  }

  return Changed;
}

/// processLoopMemSet - See if this memset can be promoted to a large memset.
bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
                                           const SCEV *BECount) {
  // We can only handle non-volatile memsets with a constant size.
  if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
    return false;

  // If we're not allowed to hack on memset, we fail.
  if (!HasMemset)
    return false;

  Value *Pointer = MSI->getDest();

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
  if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
    return false;

  // Reject memsets that are so large that they overflow an unsigned.
  uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
  if ((SizeInBytes >> 32) != 0)
    return false;

  // Check to see if the stride matches the size of the memset. If so, then we
  // know that every byte is touched in the loop.
  const SCEVConstant *ConstStride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
  if (!ConstStride)
    return false;

  APInt Stride = ConstStride->getAPInt();
  if (SizeInBytes != Stride && SizeInBytes != -Stride)
    return false;

  // Verify that the memset value is loop invariant. If not, we can't promote
  // the memset.
  Value *SplatValue = MSI->getValue();
  if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue))
    return false;

  SmallPtrSet<Instruction *, 1> MSIs;
  MSIs.insert(MSI);
  bool NegStride = SizeInBytes == -Stride;
  return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
                                 MSI->getAlignment(), SplatValue, MSI, MSIs, Ev,
                                 BECount, NegStride);
}

/// mayLoopAccessLocation - Return true if the specified loop might access the
/// specified pointer location, which is a loop-strided access. The 'Access'
/// argument specifies what the verboten forms of access are (read or write).
static bool
mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
                      const SCEV *BECount, unsigned StoreSize,
                      AliasAnalysis &AA,
                      SmallPtrSetImpl<Instruction *> &IgnoredStores) {
  // Get the location that may be stored across the loop. Since the access is
  // strided positively through memory, we say that the modified location
  // starts at the pointer and has infinite size.
  uint64_t AccessSize = MemoryLocation::UnknownSize;

  // If the loop iterates a fixed number of times, we can refine the access
  // size to be exactly the size of the memset, which is (BECount+1)*StoreSize.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    AccessSize = (BECst->getValue()->getZExtValue() + 1) * StoreSize;

  // TODO: For this to be really effective, we have to dive into the pointer
  // operand in the store. A store to &A[i] of 100 elements will always return
  // "may alias" with a store to &A[100]; we need StoreLoc to be "A" with a
  // size of 100, which will then no-alias a store to &A[100].
  MemoryLocation StoreLoc(Ptr, AccessSize);

  for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
       ++BI)
    for (Instruction &I : **BI)
      if (IgnoredStores.count(&I) == 0 &&
          (AA.getModRefInfo(&I, StoreLoc) & Access))
        return true;

  return false;
}

// If we have a negative stride, Start refers to the end of the memory location
// we're trying to memset. Therefore, we need to recompute the base pointer,
// which is just Start - BECount*Size.
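//
// For example (illustrative): if each iteration stores one byte and the loop
// walks downward from &A[9] with BECount == 9, then Start is &A[9] and the
// recomputed base is &A[9] - 9*1 == &A[0].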
static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
                                        Type *IntPtr, unsigned StoreSize,
                                        ScalarEvolution *SE) {
  const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
  if (StoreSize != 1)
    Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize),
                           SCEV::FlagNUW);
  return SE->getMinusSCEV(Start, Index);
}

/// processLoopStridedStore - We see a strided store of some value. If we can
/// transform this into a memset or memset_pattern in the loop preheader, do so.
bool LoopIdiomRecognize::processLoopStridedStore(
    Value *DestPtr, unsigned StoreSize, unsigned StoreAlignment,
    Value *StoredVal, Instruction *TheStore,
    SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev,
    const SCEV *BECount, bool NegStride) {
  Value *SplatValue = isBytewiseValue(StoredVal);
  Constant *PatternValue = nullptr;

  if (!SplatValue)
    PatternValue = getMemSetPatternValue(StoredVal, DL);

  assert((SplatValue || PatternValue) &&
         "Expected either splat value or pattern value.");

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate the
  // header. This allows us to insert code for them in the preheader.
  unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");

  Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
  Type *IntPtr = Builder.getIntPtrTy(*DL, DestAS);

  const SCEV *Start = Ev->getStart();
  // Handle negative strided loops.
  if (NegStride)
    Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);

  // Okay, we have a strided store "p[i]" of a splattable value. We can turn
  // this into a memset in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the aliased location. Check for any overlap by generating the
  // base pointer and checking the region.
  Value *BasePtr =
      Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());
  if (mayLoopAccessLocation(BasePtr, MRI_ModRef, CurLoop, BECount, StoreSize,
                            *AA, Stores)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
    return false;
  }

  // Okay, everything looks good, insert the memset.

  // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
  // pointer size if it isn't already.
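  // For example (illustrative): a loop of i32 stores with BECount == 9 runs 10
  // iterations and writes (9 + 1) * 4 == 40 bytes.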
  BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);

  const SCEV *NumBytesS =
      SE->getAddExpr(BECount, SE->getOne(IntPtr), SCEV::FlagNUW);
  if (StoreSize != 1) {
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               SCEV::FlagNUW);
  }

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());

  CallInst *NewCall;
  if (SplatValue) {
    NewCall =
        Builder.CreateMemSet(BasePtr, SplatValue, NumBytes, StoreAlignment);
  } else {
    // Everything is emitted in the default address space.
    Type *Int8PtrTy = DestInt8PtrTy;

    Module *M = TheStore->getModule();
    Value *MSP =
        M->getOrInsertFunction("memset_pattern16", Builder.getVoidTy(),
                               Int8PtrTy, Int8PtrTy, IntPtr, (void *)nullptr);
    inferLibFuncAttributes(*M->getFunction("memset_pattern16"), *TLI);

    // Otherwise we should form a memset_pattern16. PatternValue is known to
    // be a constant array of 16 bytes. Plop the value into a mergeable global.
    GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
                                            GlobalValue::PrivateLinkage,
                                            PatternValue, ".memset_pattern");
    GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these.
    GV->setAlignment(16);
    Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
    NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
  }

  DEBUG(dbgs() << "  Formed memset: " << *NewCall << "\n"
               << "    from store to: " << *Ev << " at: " << *TheStore << "\n");
  NewCall->setDebugLoc(TheStore->getDebugLoc());

  // Okay, the memset has been formed. Zap the original store and anything
  // that feeds into it.
  for (auto *I : Stores)
    deleteDeadInstruction(I);
  ++NumMemSet;
  return true;
}

/// If the stored value is a strided load in the same loop with the same stride
/// this may be transformable into a memcpy. This kicks in for stuff like
///   for (i) A[i] = B[i];
bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
                                                    const SCEV *BECount) {
  assert(SI->isSimple() && "Expected only non-volatile stores.");

  Value *StorePtr = SI->getPointerOperand();
  const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  APInt Stride = getStoreStride(StoreEv);
  unsigned StoreSize = getStoreSizeInBytes(SI, DL);
  bool NegStride = StoreSize == -Stride;

  // The store must be feeding a non-volatile load.
  LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
  assert(LI->isSimple() && "Expected only non-volatile loads.");

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided load. If we have something else, it's a
  // random load we can't handle.
  const SCEVAddRecExpr *LoadEv =
      cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate the
  // header. This allows us to insert code for them in the preheader.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");

  const SCEV *StrStart = StoreEv->getStart();
  unsigned StrAS = SI->getPointerAddressSpace();
  Type *IntPtrTy = Builder.getIntPtrTy(*DL, StrAS);

  // Handle negative strided loops.
  if (NegStride)
    StrStart = getStartForNegStride(StrStart, BECount, IntPtrTy, StoreSize, SE);

  // Okay, we have a strided store "p[i]" of a loaded value. We can turn
  // this into a memcpy in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write the memory region we're storing to. This includes the load that
  // feeds the stores. Check for an alias by generating the base address and
  // checking everything.
  Value *StoreBasePtr = Expander.expandCodeFor(
      StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator());

  SmallPtrSet<Instruction *, 1> Stores;
  Stores.insert(SI);
  if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount,
                            StoreSize, *AA, Stores)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
    return false;
  }

  const SCEV *LdStart = LoadEv->getStart();
  unsigned LdAS = LI->getPointerAddressSpace();

  // Handle negative strided loops.
  if (NegStride)
    LdStart = getStartForNegStride(LdStart, BECount, IntPtrTy, StoreSize, SE);

  // For a memcpy, we have to make sure that the input array is not being
  // mutated by the loop.
  Value *LoadBasePtr = Expander.expandCodeFor(
      LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());

  if (mayLoopAccessLocation(LoadBasePtr, MRI_Mod, CurLoop, BECount, StoreSize,
                            *AA, Stores)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
    RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
    return false;
  }

  // Okay, everything is safe, we can transform this!

  // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
  // pointer size if it isn't already.
  BECount = SE->getTruncateOrZeroExtend(BECount, IntPtrTy);

  const SCEV *NumBytesS =
      SE->getAddExpr(BECount, SE->getOne(IntPtrTy), SCEV::FlagNUW);
  if (StoreSize != 1)
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtrTy, StoreSize),
                               SCEV::FlagNUW);

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());

  CallInst *NewCall =
      Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes,
                           std::min(SI->getAlignment(), LI->getAlignment()));
  NewCall->setDebugLoc(SI->getDebugLoc());

  DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
               << "    from load ptr=" << *LoadEv << " at: " << *LI << "\n"
               << "    from store ptr=" << *StoreEv << " at: " << *SI << "\n");

  // Okay, the memcpy has been formed. Zap the original store and anything
  // that feeds into it.
  deleteDeadInstruction(SI);
  ++NumMemCpy;
  return true;
}

bool LoopIdiomRecognize::runOnNoncountableLoop() {
  return recognizePopcount();
}

/// Check if the given conditional branch is based on the comparison between
/// a variable and zero, and if the variable is non-zero, the control yields to
/// the loop entry. If the branch matches the behavior, the variable involved
/// in the comparison is returned. This function will be called to see if the
/// precondition and postcondition of the loop are in desirable form.
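///
/// For example (illustrative IR), with LoopEntry being %loop.body:
/// \code
///   %cmp = icmp ne i32 %x, 0
///   br i1 %cmp, label %loop.body, label %exit
/// \endcode
/// the returned value is %x.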
static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry) {
  if (!BI || !BI->isConditional())
    return nullptr;

  ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
  if (!Cond)
    return nullptr;

  ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
  if (!CmpZero || !CmpZero->isZero())
    return nullptr;

  ICmpInst::Predicate Pred = Cond->getPredicate();
  if ((Pred == ICmpInst::ICMP_NE && BI->getSuccessor(0) == LoopEntry) ||
      (Pred == ICmpInst::ICMP_EQ && BI->getSuccessor(1) == LoopEntry))
    return Cond->getOperand(0);

  return nullptr;
}

/// Return true iff the idiom is detected in the loop.
///
/// Additionally:
/// 1) \p CntInst is set to the instruction counting the population bit.
/// 2) \p CntPhi is set to the corresponding phi node.
/// 3) \p Var is set to the value whose population bits are being counted.
///
/// The core idiom we are trying to detect is:
/// \code
///    if (x0 == 0)
///      goto loop-exit // the precondition of the loop
///    cnt0 = init-val;
///    do {
///       x1 = phi (x0, x2);
///       cnt1 = phi(cnt0, cnt2);
///
///       cnt2 = cnt1 + 1;
///        ...
///       x2 = x1 & (x1 - 1);
///        ...
///    } while(x != 0);
///
/// loop-exit:
/// \endcode
static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
                                Instruction *&CntInst, PHINode *&CntPhi,
                                Value *&Var) {
  // step 1: Check to see if the loop-back branch matches this pattern:
  //    "if (a != 0) goto loop-entry".
  BasicBlock *LoopEntry;
  Instruction *DefX2, *CountInst;
  Value *VarX1, *VarX0;
  PHINode *PhiX, *CountPhi;

  DefX2 = CountInst = nullptr;
  VarX1 = VarX0 = nullptr;
  PhiX = CountPhi = nullptr;
  LoopEntry = *(CurLoop->block_begin());

  // step 1: Check if the loop-back branch is in desirable form.
  {
    if (Value *T = matchCondition(
            dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
      DefX2 = dyn_cast<Instruction>(T);
    else
      return false;
  }

  // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
  {
    if (!DefX2 || DefX2->getOpcode() != Instruction::And)
      return false;

    BinaryOperator *SubOneOp;

    if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
      VarX1 = DefX2->getOperand(1);
    else {
      VarX1 = DefX2->getOperand(0);
      SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
    }
    if (!SubOneOp)
      return false;

    Instruction *SubInst = cast<Instruction>(SubOneOp);
    ConstantInt *Dec = dyn_cast<ConstantInt>(SubInst->getOperand(1));
    if (!Dec ||
        !((SubInst->getOpcode() == Instruction::Sub && Dec->isOne()) ||
          (SubInst->getOpcode() == Instruction::Add &&
           Dec->isAllOnesValue()))) {
      return false;
    }
  }

  // step 3: Check the recurrence of variable X
  {
    PhiX = dyn_cast<PHINode>(VarX1);
    if (!PhiX ||
        (PhiX->getOperand(0) != DefX2 && PhiX->getOperand(1) != DefX2)) {
      return false;
    }
  }

  // step 4: Find the instruction which counts the population: cnt2 = cnt1 + 1
  {
    CountInst = nullptr;
    for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
                              IterE = LoopEntry->end();
         Iter != IterE; Iter++) {
      Instruction *Inst = &*Iter;
      if (Inst->getOpcode() != Instruction::Add)
        continue;

      ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
      if (!Inc || !Inc->isOne())
        continue;

      PHINode *Phi = dyn_cast<PHINode>(Inst->getOperand(0));
      if (!Phi || Phi->getParent() != LoopEntry)
        continue;

      // Check if the result of the instruction is live out of the loop.
      bool LiveOutLoop = false;
      for (User *U : Inst->users()) {
        if ((cast<Instruction>(U))->getParent() != LoopEntry) {
          LiveOutLoop = true;
          break;
        }
      }

      if (LiveOutLoop) {
        CountInst = Inst;
        CountPhi = Phi;
        break;
      }
    }

    if (!CountInst)
      return false;
  }

  // step 5: check if the precondition is in this form:
  //   "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
  {
    auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
    Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
    if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
      return false;

    CntInst = CountInst;
    CntPhi = CountPhi;
    Var = T;
  }

  return true;
}

/// Recognizes a population count idiom in a non-countable loop.
///
/// If detected, transforms the relevant code to issue the popcount intrinsic
/// function call, and returns true; otherwise, returns false.
bool LoopIdiomRecognize::recognizePopcount() {
  if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
    return false;

  // Counting the population is usually done with only a few arithmetic
  // instructions. Such instructions can be easily "absorbed" by vacant slots
  // in a non-compact loop. Therefore, recognizing the popcount idiom only
  // makes sense in a compact loop.

  // Give up if the loop has multiple blocks or multiple backedges.
  if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
    return false;

  BasicBlock *LoopBody = *(CurLoop->block_begin());
  if (LoopBody->size() >= 20) {
    // The loop is too big, bail out.
    return false;
  }

  // It should have a preheader containing nothing but an unconditional branch.
  BasicBlock *PH = CurLoop->getLoopPreheader();
  if (!PH)
    return false;
  if (&PH->front() != PH->getTerminator())
    return false;
  auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
  if (!EntryBI || EntryBI->isConditional())
    return false;

  // It should have a precondition block where the generated popcount intrinsic
  // function can be inserted.
  auto *PreCondBB = PH->getSinglePredecessor();
  if (!PreCondBB)
    return false;
  auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
  if (!PreCondBI || PreCondBI->isUnconditional())
    return false;

  Instruction *CntInst;
  PHINode *CntPhi;
  Value *Val;
  if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
    return false;

  transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
  return true;
}

static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
                                       const DebugLoc &DL) {
  Value *Ops[] = {Val};
  Type *Tys[] = {Val->getType()};

  Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
  Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
  CallInst *CI = IRBuilder.CreateCall(Func, Ops);
  CI->setDebugLoc(DL);

  return CI;
}

void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
                                                 Instruction *CntInst,
                                                 PHINode *CntPhi, Value *Var) {
  BasicBlock *PreHead = CurLoop->getLoopPreheader();
  auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
  const DebugLoc DL = CntInst->getDebugLoc();

  // Before the transformation, the loop is assumed to look like this:
  //   if (x) // the precondition
  //     do { cnt++; x &= x - 1; } while (x);

  // Step 1: Insert the ctpop instruction at the end of the precondition block.
  IRBuilder<> Builder(PreCondBr);
  Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
  {
    PopCnt = createPopcntIntrinsic(Builder, Var, DL);
    NewCount = PopCntZext =
        Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));

    if (NewCount != PopCnt)
      (cast<Instruction>(NewCount))->setDebugLoc(DL);

    // TripCnt is exactly the number of iterations the loop has.
    TripCnt = NewCount;

    // If the population counter's initial value is not zero, insert an Add
    // instruction.
    Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
    ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
    if (!InitConst || !InitConst->isZero()) {
      NewCount = Builder.CreateAdd(NewCount, CntInitVal);
      (cast<Instruction>(NewCount))->setDebugLoc(DL);
    }
  }

  // Step 2: Replace the precondition "if (x == 0) goto loop-exit" with
  // "if (NewCount == 0) goto loop-exit". Without this change, the intrinsic
  // call would be partially dead code, and downstream passes would drag it
  // back from the precondition block to the preheader.
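  // The rewritten compare tests the zero-extended popcount result instead of
  // x itself; the two are zero for exactly the same inputs, so the branch
  // behavior is unchanged.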
  {
    ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());

    Value *Opnd0 = PopCntZext;
    Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
    if (PreCond->getOperand(0) != Var)
      std::swap(Opnd0, Opnd1);

    ICmpInst *NewPreCond = cast<ICmpInst>(
        Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
    PreCondBr->setCondition(NewPreCond);

    RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
  }

  // Step 3: Note that the population count is exactly the trip count of the
  // loop in question, which enables us to convert the loop from a noncountable
  // loop into a countable one. The benefit is twofold:
  //
  //  - If the loop only counts the population, the entire loop becomes dead
  //    after the transformation. It is a lot easier to prove a countable loop
  //    dead than to prove a noncountable one. (In some C dialects, an infinite
  //    loop isn't dead even if it computes nothing useful. In general, DCE
  //    needs to prove a noncountable loop finite before safely deleting it.)
  //
  //  - If the loop also performs something else, it remains alive.
  //    Since it is transformed to countable form, it can be aggressively
  //    optimized by some optimizations which are in general not applicable
  //    to a noncountable loop.
  //
  // After this step, this loop (conceptually) would look like the following:
  //   newcnt = __builtin_ctpop(x);
  //   t = newcnt;
  //   if (x)
  //     do { cnt++; x &= x - 1; t--; } while (t > 0);
  BasicBlock *Body = *(CurLoop->block_begin());
  {
    auto *LbBr = dyn_cast<BranchInst>(Body->getTerminator());
    ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
    Type *Ty = TripCnt->getType();

    PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());

    Builder.SetInsertPoint(LbCond);
    Instruction *TcDec = cast<Instruction>(
        Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
                          "tcdec", false, true));

    TcPhi->addIncoming(TripCnt, PreHead);
    TcPhi->addIncoming(TcDec, Body);

    CmpInst::Predicate Pred =
        (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
    LbCond->setPredicate(Pred);
    LbCond->setOperand(0, TcDec);
    LbCond->setOperand(1, ConstantInt::get(Ty, 0));
  }

  // Step 4: All the references to the original population counter outside
  // the loop are replaced with NewCount -- the value returned from
  // __builtin_ctpop().
  CntInst->replaceUsesOutsideBlock(NewCount, Body);

  // Step 5: Forget the "non-computable" trip-count SCEV associated with the
  // loop. The loop would otherwise not be deleted even if it becomes empty.
  SE->forgetLoop(CurLoop);
}