//===- LoopIdiomRecognize.cpp - Loop idiom recognition --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form. In cases where this kicks in, it can be a significant
// performance win.
//
// If compiling for code size we avoid idiom recognition if the resulting
// code could be larger than the code for the original loop. One way this could
// happen is if the loop is not removable after idiom recognition due to the
// presence of non-idiom instructions. The initial implementation of the
// heuristics applies to idioms in multi-block loops.
//
//===----------------------------------------------------------------------===//
//
// TODO List:
//
// Future loop memory idioms to recognize:
//   memcmp, strlen, etc.
// Future floating point idioms to recognize in -ffast-math mode:
//   fpowi
// Future integer operation idioms to recognize:
//   ctpop
//
// Beware that isel's default lowering for ctpop is highly inefficient for
// i64 and larger types when i64 is legal and the value has few bits set. It
// would be good to enhance isel to emit a loop for ctpop in this case.
//
// This could recognize common matrix multiplies and dot product idioms and
// replace them with calls to BLAS (if linked in??).
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopIdiomRecognize.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "loop-idiom"

STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");
STATISTIC(NumMemMove, "Number of memmove's formed from loop load+stores");
STATISTIC(
    NumShiftUntilBitTest,
    "Number of uncountable loops recognized as 'shift until bittest' idiom");
STATISTIC(NumShiftUntilZero,
          "Number of uncountable loops recognized as 'shift until zero' idiom");

bool DisableLIRP::All;
static cl::opt<bool, true>
    DisableLIRPAll("disable-" DEBUG_TYPE "-all",
                   cl::desc("Options to disable Loop Idiom Recognize Pass."),
                   cl::location(DisableLIRP::All), cl::init(false),
                   cl::ReallyHidden);

bool DisableLIRP::Memset;
static cl::opt<bool, true>
    DisableLIRPMemset("disable-" DEBUG_TYPE "-memset",
                      cl::desc("Proceed with loop idiom recognize pass, but do "
                               "not convert loop(s) to memset."),
                      cl::location(DisableLIRP::Memset), cl::init(false),
                      cl::ReallyHidden);

bool DisableLIRP::Memcpy;
static cl::opt<bool, true>
    DisableLIRPMemcpy("disable-" DEBUG_TYPE "-memcpy",
                      cl::desc("Proceed with loop idiom recognize pass, but do "
                               "not convert loop(s) to memcpy."),
                      cl::location(DisableLIRP::Memcpy), cl::init(false),
                      cl::ReallyHidden);

static cl::opt<bool> UseLIRCodeSizeHeurs(
    "use-lir-code-size-heurs",
    cl::desc("Use loop idiom recognition code size heuristics when compiling "
             "with -Os/-Oz"),
    cl::init(true), cl::Hidden);

namespace {

class LoopIdiomRecognize {
  Loop *CurLoop = nullptr;
  AliasAnalysis *AA;
  DominatorTree *DT;
  LoopInfo *LI;
  ScalarEvolution *SE;
  TargetLibraryInfo *TLI;
  const TargetTransformInfo *TTI;
  const DataLayout *DL;
  OptimizationRemarkEmitter &ORE;
  bool ApplyCodeSizeHeuristics;
  std::unique_ptr<MemorySSAUpdater> MSSAU;

public:
  explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT,
                              LoopInfo *LI, ScalarEvolution *SE,
                              TargetLibraryInfo *TLI,
                              const TargetTransformInfo *TTI, MemorySSA *MSSA,
                              const DataLayout *DL,
                              OptimizationRemarkEmitter &ORE)
      : AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL), ORE(ORE) {
    if (MSSA)
      MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
  }

  bool runOnLoop(Loop *L);

private:
  using StoreList = SmallVector<StoreInst *, 8>;
  using StoreListMap = MapVector<Value *, StoreList>;

  StoreListMap StoreRefsForMemset;
  StoreListMap StoreRefsForMemsetPattern;
  StoreList StoreRefsForMemcpy;
  bool HasMemset;
  bool HasMemsetPattern;
  bool HasMemcpy;

  /// Return code for isLegalStore()
  enum LegalStoreKind {
    None = 0,
    Memset,
    MemsetPattern,
    Memcpy,
    UnorderedAtomicMemcpy,
    DontUse // Dummy retval never to be used. Allows catching errors in retval
            // handling.
  };

  /// \name Countable Loop Idiom Handling
  /// @{

  bool runOnCountableLoop();
  bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                      SmallVectorImpl<BasicBlock *> &ExitBlocks);

  void collectStores(BasicBlock *BB);
  LegalStoreKind isLegalStore(StoreInst *SI);
  enum class ForMemset { No, Yes };
  bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
                         ForMemset For);

  template <typename MemInst>
  bool processLoopMemIntrinsic(
      BasicBlock *BB,
      bool (LoopIdiomRecognize::*Processor)(MemInst *, const SCEV *),
      const SCEV *BECount);
  bool processLoopMemCpy(MemCpyInst *MCI, const SCEV *BECount);
  bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);

  bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
                               MaybeAlign StoreAlignment, Value *StoredVal,
                               Instruction *TheStore,
                               SmallPtrSetImpl<Instruction *> &Stores,
                               const SCEVAddRecExpr *Ev, const SCEV *BECount,
                               bool NegStride, bool IsLoopMemset = false);
  bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);
  bool processLoopStoreOfLoopLoad(Value *DestPtr, Value *SourcePtr,
                                  unsigned StoreSize, MaybeAlign StoreAlign,
                                  MaybeAlign LoadAlign, Instruction *TheStore,
                                  Instruction *TheLoad,
                                  const SCEVAddRecExpr *StoreEv,
                                  const SCEVAddRecExpr *LoadEv,
                                  const SCEV *BECount);
  bool avoidLIRForMultiBlockLoop(bool IsMemset = false,
                                 bool IsLoopMemset = false);

  /// @}
  /// \name Noncountable Loop Idiom Handling
  /// @{

  bool runOnNoncountableLoop();

  bool recognizePopcount();
  void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
                               PHINode *CntPhi, Value *Var);
  bool recognizeAndInsertFFS(); /// Find First Set: ctlz or cttz
  void transformLoopToCountable(Intrinsic::ID IntrinID, BasicBlock *PreCondBB,
                                Instruction *CntInst, PHINode *CntPhi,
                                Value *Var, Instruction *DefX,
                                const DebugLoc &DL, bool ZeroCheck,
                                bool IsCntPhiUsedOutsideLoop);

  bool recognizeShiftUntilBitTest();
  bool recognizeShiftUntilZero();

  /// @}
};

class LoopIdiomRecognizeLegacyPass : public LoopPass {
public:
  static char ID;

  explicit LoopIdiomRecognizeLegacyPass() : LoopPass(ID) {
    initializeLoopIdiomRecognizeLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (DisableLIRP::All)
      return false;

    if (skipLoop(L))
      return false;

    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
            *L->getHeader()->getParent());
    const TargetTransformInfo *TTI =
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
            *L->getHeader()->getParent());
    const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout();
    auto *MSSAAnalysis = getAnalysisIfAvailable<MemorySSAWrapperPass>();
    MemorySSA *MSSA = nullptr;
    if (MSSAAnalysis)
      MSSA = &MSSAAnalysis->getMSSA();

    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass. Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());

    LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, MSSA, DL, ORE);
    return LIR.runOnLoop(L);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
    getLoopAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char LoopIdiomRecognizeLegacyPass::ID = 0;

PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
                                              LoopStandardAnalysisResults &AR,
                                              LPMUpdater &) {
  if (DisableLIRP::All)
    return PreservedAnalyses::all();

  const auto *DL = &L.getHeader()->getModule()->getDataLayout();

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(L.getHeader()->getParent());

  LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI,
                         AR.MSSA, DL, ORE);
  if (!LIR.runOnLoop(&L))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

INITIALIZE_PASS_BEGIN(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                      "Recognize loop idioms", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                    "Recognize loop idioms", false, false)

Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognizeLegacyPass(); }

static void deleteDeadInstruction(Instruction *I) {
  I->replaceAllUsesWith(UndefValue::get(I->getType()));
  I->eraseFromParent();
}

//===----------------------------------------------------------------------===//
//
//          Implementation of LoopIdiomRecognize
//
//===----------------------------------------------------------------------===//

bool LoopIdiomRecognize::runOnLoop(Loop *L) {
  CurLoop = L;
  // If the loop could not be converted to canonical form, it must have an
  // indirectbr in it; just give up.
  if (!L->getLoopPreheader())
    return false;

  // Disable loop idiom recognition if the function's name is a common idiom.
  StringRef Name = L->getHeader()->getParent()->getName();
  if (Name == "memset" || Name == "memcpy")
    return false;

  // Determine if code size heuristics need to be applied.
  ApplyCodeSizeHeuristics =
      L->getHeader()->getParent()->hasOptSize() && UseLIRCodeSizeHeurs;

  HasMemset = TLI->has(LibFunc_memset);
  HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
  HasMemcpy = TLI->has(LibFunc_memcpy);

  if (HasMemset || HasMemsetPattern || HasMemcpy)
    if (SE->hasLoopInvariantBackedgeTakenCount(L))
      return runOnCountableLoop();

  return runOnNoncountableLoop();
}

bool LoopIdiomRecognize::runOnCountableLoop() {
  const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
  assert(!isa<SCEVCouldNotCompute>(BECount) &&
         "runOnCountableLoop() called on a loop without a predictable "
         "backedge-taken count");

  // If this loop executes exactly one time, then it should be peeled, not
  // optimized by this pass.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    if (BECst->getAPInt() == 0)
      return false;

  SmallVector<BasicBlock *, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F["
                    << CurLoop->getHeader()->getParent()->getName()
                    << "] Countable Loop %" << CurLoop->getHeader()->getName()
                    << "\n");

  // The following transforms hoist stores/memsets into the loop pre-header.
  // Give up if the loop has instructions that may throw.
  SimpleLoopSafetyInfo SafetyInfo;
  SafetyInfo.computeLoopSafetyInfo(CurLoop);
  if (SafetyInfo.anyBlockMayThrow())
    return false;

  bool MadeChange = false;

  // Scan all the blocks in the loop that are not in subloops.
  for (auto *BB : CurLoop->getBlocks()) {
    // Ignore blocks in subloops.
    if (LI->getLoopFor(BB) != CurLoop)
      continue;

    MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
  }
  return MadeChange;
}

static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) {
  const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1));
  return ConstStride->getAPInt();
}

/// getMemSetPatternValue - If a strided store of the specified value is safe to
/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
/// be passed in. Otherwise, return null.
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
  // FIXME: This could check for UndefValue because it can be merged into any
  // other valid pattern.

  // If the value isn't a constant, we can't promote it to being in a constant
  // array. We could theoretically do a store to an alloca or something, but
  // that doesn't seem worthwhile.
  Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return nullptr;

  // Only handle simple values that are a power of two bytes in size.
  uint64_t Size = DL->getTypeSizeInBits(V->getType());
  if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
    return nullptr;

  // Don't care enough about darwin/ppc to implement this.
  if (DL->isBigEndian())
    return nullptr;

  // Convert to size in bytes.
  Size /= 8;

  // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
  // if the top and bottom are the same (e.g. for vectors and large integers).
  if (Size > 16)
    return nullptr;

  // If the constant is exactly 16 bytes, just use it.
  if (Size == 16)
    return C;

  // Otherwise, we'll use an array of the constants.
  unsigned ArraySize = 16 / Size;
  ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
  return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
}

LoopIdiomRecognize::LegalStoreKind
LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
  // Don't touch volatile stores.
  if (SI->isVolatile())
    return LegalStoreKind::None;
  // We only want simple or unordered-atomic stores.
  if (!SI->isUnordered())
    return LegalStoreKind::None;

  // Avoid merging nontemporal stores.
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return LegalStoreKind::None;

  Value *StoredVal = SI->getValueOperand();
  Value *StorePtr = SI->getPointerOperand();

  // Don't convert stores of non-integral pointer types to memsets (which
  // store integers).
  if (DL->isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
    return LegalStoreKind::None;

  // Reject stores that are so large that they overflow an unsigned.
  // When storing out scalable vectors we bail out for now, since the code
  // below currently only works for constant strides.
  TypeSize SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
  if (SizeInBits.isScalable() || (SizeInBits.getFixedSize() & 7) ||
      (SizeInBits.getFixedSize() >> 32) != 0)
    return LegalStoreKind::None;

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *StoreEv =
      dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
    return LegalStoreKind::None;

  // Check to see if we have a constant stride.
  if (!isa<SCEVConstant>(StoreEv->getOperand(1)))
    return LegalStoreKind::None;

  // See if the store can be turned into a memset.

  // If the stored value is a byte-wise value (like i32 -1), then it may be
  // turned into a memset of i8 -1, assuming that all the consecutive bytes
  // are stored. A store of i32 0x01020304 can never be turned into a memset,
  // but it can be turned into memset_pattern if the target supports it.
  Value *SplatValue = isBytewiseValue(StoredVal, *DL);

  // Note: memset and memset_pattern on unordered-atomic is not yet supported.
  bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple();

  // If we're allowed to form a memset, and the stored value would be
  // acceptable for memset, use it.
  if (!UnorderedAtomic && HasMemset && SplatValue && !DisableLIRP::Memset &&
      // Verify that the stored value is loop invariant. If not, we can't
      // promote the memset.
      CurLoop->isLoopInvariant(SplatValue)) {
    // It looks like we can use SplatValue.
    return LegalStoreKind::Memset;
  }
  if (!UnorderedAtomic && HasMemsetPattern && !DisableLIRP::Memset &&
      // Don't create memset_pattern16s with address spaces.
      StorePtr->getType()->getPointerAddressSpace() == 0 &&
      getMemSetPatternValue(StoredVal, DL)) {
    // It looks like we can use PatternValue!
    return LegalStoreKind::MemsetPattern;
  }

  // Otherwise, see if the store can be turned into a memcpy.
  if (HasMemcpy && !DisableLIRP::Memcpy) {
    // Check to see if the stride matches the size of the store. If so, then we
    // know that every byte is touched in the loop.
    APInt Stride = getStoreStride(StoreEv);
    unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
    if (StoreSize != Stride && StoreSize != -Stride)
      return LegalStoreKind::None;

    // The store must be feeding a non-volatile load.
    LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());

    // Only allow non-volatile loads.
    if (!LI || LI->isVolatile())
      return LegalStoreKind::None;
    // Only allow simple or unordered-atomic loads.
    if (!LI->isUnordered())
      return LegalStoreKind::None;

    // See if the pointer expression is an AddRec like {base,+,1} on the current
    // loop, which indicates a strided load. If we have something else, it's a
    // random load we can't handle.
    const SCEVAddRecExpr *LoadEv =
        dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
    if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
      return LegalStoreKind::None;

    // The store and load must share the same stride.
    if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
      return LegalStoreKind::None;

    // Success. This store can be converted into a memcpy.
    UnorderedAtomic = UnorderedAtomic || LI->isAtomic();
    return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
                           : LegalStoreKind::Memcpy;
  }
  // This store can't be transformed into a memset/memcpy.
  return LegalStoreKind::None;
}

void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
  StoreRefsForMemset.clear();
  StoreRefsForMemsetPattern.clear();
  StoreRefsForMemcpy.clear();
  for (Instruction &I : *BB) {
    StoreInst *SI = dyn_cast<StoreInst>(&I);
    if (!SI)
      continue;

    // Make sure this is a strided store with a constant stride.
    switch (isLegalStore(SI)) {
    case LegalStoreKind::None:
      // Nothing to do
      break;
    case LegalStoreKind::Memset: {
      // Find the base pointer.
      Value *Ptr = getUnderlyingObject(SI->getPointerOperand());
      StoreRefsForMemset[Ptr].push_back(SI);
    } break;
    case LegalStoreKind::MemsetPattern: {
      // Find the base pointer.
      Value *Ptr = getUnderlyingObject(SI->getPointerOperand());
      StoreRefsForMemsetPattern[Ptr].push_back(SI);
    } break;
    case LegalStoreKind::Memcpy:
    case LegalStoreKind::UnorderedAtomicMemcpy:
      StoreRefsForMemcpy.push_back(SI);
      break;
    default:
      assert(false && "unhandled return value");
      break;
    }
  }
}

/// runOnLoopBlock - Process the specified block, which lives in a counted loop
/// with the specified backedge count. This block is known to be in the current
/// loop and not in any subloops.
bool LoopIdiomRecognize::runOnLoopBlock(
    BasicBlock *BB, const SCEV *BECount,
    SmallVectorImpl<BasicBlock *> &ExitBlocks) {
  // We can only promote stores in this block if they are unconditionally
  // executed in the loop. For a block to be unconditionally executed, it has
  // to dominate all the exit blocks of the loop. Verify this now.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (!DT->dominates(BB, ExitBlocks[i]))
      return false;

  bool MadeChange = false;
  // Look for store instructions, which may be optimized to memset/memcpy.
  collectStores(BB);

  // Look for a single store or sets of stores with a common base, which can be
  // optimized into a memset (memset_pattern). The latter most commonly happens
  // with structs and hand-unrolled loops.
  for (auto &SL : StoreRefsForMemset)
    MadeChange |= processLoopStores(SL.second, BECount, ForMemset::Yes);

  for (auto &SL : StoreRefsForMemsetPattern)
    MadeChange |= processLoopStores(SL.second, BECount, ForMemset::No);

  // Optimize the store into a memcpy, if it feeds a similarly strided load.
  for (auto &SI : StoreRefsForMemcpy)
    MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);

  MadeChange |= processLoopMemIntrinsic<MemCpyInst>(
      BB, &LoopIdiomRecognize::processLoopMemCpy, BECount);
  MadeChange |= processLoopMemIntrinsic<MemSetInst>(
      BB, &LoopIdiomRecognize::processLoopMemSet, BECount);

  return MadeChange;
}

/// See if these stores can be promoted to a memset.
bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
                                           const SCEV *BECount, ForMemset For) {
  // Try to find consecutive stores that can be transformed into memsets.
  SetVector<StoreInst *> Heads, Tails;
  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  SmallVector<unsigned, 16> IndexQueue;
  for (unsigned i = 0, e = SL.size(); i < e; ++i) {
    assert(SL[i]->isSimple() && "Expected only non-volatile stores.");

    Value *FirstStoredVal = SL[i]->getValueOperand();
    Value *FirstStorePtr = SL[i]->getPointerOperand();
    const SCEVAddRecExpr *FirstStoreEv =
        cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr));
    APInt FirstStride = getStoreStride(FirstStoreEv);
    unsigned FirstStoreSize =
        DL->getTypeStoreSize(SL[i]->getValueOperand()->getType());

    // See if we can optimize just this store in isolation.
    if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
      Heads.insert(SL[i]);
      continue;
    }

    Value *FirstSplatValue = nullptr;
    Constant *FirstPatternValue = nullptr;

    if (For == ForMemset::Yes)
      FirstSplatValue = isBytewiseValue(FirstStoredVal, *DL);
    else
      FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL);

    assert((FirstSplatValue || FirstPatternValue) &&
           "Expected either splat value or pattern value.");

    IndexQueue.clear();
    // If a store has multiple consecutive store candidates, search the Stores
    // array according to the sequence: from i+1 to e, then from i-1 to 0.
    // This is because pairing with the immediately succeeding or preceding
    // candidate usually creates the best chance of finding a memset
    // opportunity.
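    // (Illustrative walk-through, not from the original source: with i == 2
    // and e == 5, the loops below fill IndexQueue with {3, 4, 1, 0}, so the
    // immediate neighbors of SL[i] are tried first in each direction.)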
    unsigned j = 0;
    for (j = i + 1; j < e; ++j)
      IndexQueue.push_back(j);
    for (j = i; j > 0; --j)
      IndexQueue.push_back(j - 1);

    for (auto &k : IndexQueue) {
      assert(SL[k]->isSimple() && "Expected only non-volatile stores.");
      Value *SecondStorePtr = SL[k]->getPointerOperand();
      const SCEVAddRecExpr *SecondStoreEv =
          cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr));
      APInt SecondStride = getStoreStride(SecondStoreEv);

      if (FirstStride != SecondStride)
        continue;

      Value *SecondStoredVal = SL[k]->getValueOperand();
      Value *SecondSplatValue = nullptr;
      Constant *SecondPatternValue = nullptr;

      if (For == ForMemset::Yes)
        SecondSplatValue = isBytewiseValue(SecondStoredVal, *DL);
      else
        SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL);

      assert((SecondSplatValue || SecondPatternValue) &&
             "Expected either splat value or pattern value.");

      if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) {
        if (For == ForMemset::Yes) {
          if (isa<UndefValue>(FirstSplatValue))
            FirstSplatValue = SecondSplatValue;
          if (FirstSplatValue != SecondSplatValue)
            continue;
        } else {
          if (isa<UndefValue>(FirstPatternValue))
            FirstPatternValue = SecondPatternValue;
          if (FirstPatternValue != SecondPatternValue)
            continue;
        }
        Tails.insert(SL[k]);
        Heads.insert(SL[i]);
        ConsecutiveChain[SL[i]] = SL[k];
        break;
      }
    }
  }

  // We may run into multiple chains that merge into a single chain. We mark the
  // stores that we transformed so that we don't visit the same store twice.
  SmallPtrSet<Value *, 16> TransformedStores;
  bool Changed = false;

  // For stores that start but don't end a link in the chain:
  for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and try
    // to transform it.
    SmallPtrSet<Instruction *, 8> AdjacentStores;
    StoreInst *I = *it;

    StoreInst *HeadStore = I;
    unsigned StoreSize = 0;

    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (TransformedStores.count(I))
        break;
      AdjacentStores.insert(I);

      StoreSize += DL->getTypeStoreSize(I->getValueOperand()->getType());
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    Value *StoredVal = HeadStore->getValueOperand();
    Value *StorePtr = HeadStore->getPointerOperand();
    const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
    APInt Stride = getStoreStride(StoreEv);

    // Check to see if the stride matches the size of the stores. If so, then
    // we know that every byte is touched in the loop.
    if (StoreSize != Stride && StoreSize != -Stride)
      continue;

    bool NegStride = StoreSize == -Stride;

    if (processLoopStridedStore(StorePtr, StoreSize,
                                MaybeAlign(HeadStore->getAlignment()),
                                StoredVal, HeadStore, AdjacentStores, StoreEv,
                                BECount, NegStride)) {
      TransformedStores.insert(AdjacentStores.begin(), AdjacentStores.end());
      Changed = true;
    }
  }

  return Changed;
}

/// processLoopMemIntrinsic - Template function for calling different processor
/// functions based on mem intrinsic type.
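///
/// If a processor deletes instructions (detected when the WeakTrackingVH on
/// the next instruction goes null), iteration restarts from the top of the
/// block rather than continuing through a dangling iterator.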
template <typename MemInst>
bool LoopIdiomRecognize::processLoopMemIntrinsic(
    BasicBlock *BB,
    bool (LoopIdiomRecognize::*Processor)(MemInst *, const SCEV *),
    const SCEV *BECount) {
  bool MadeChange = false;
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;
    // Look for memory instructions, which may be optimized to a larger one.
    if (MemInst *MI = dyn_cast<MemInst>(Inst)) {
      WeakTrackingVH InstPtr(&*I);
      if (!(this->*Processor)(MI, BECount))
        continue;
      MadeChange = true;

      // If processing the instruction invalidated our iterator, start over
      // from the top of the block.
      if (!InstPtr)
        I = BB->begin();
    }
  }
  return MadeChange;
}

/// processLoopMemCpy - See if this memcpy can be promoted to a large memcpy.
bool LoopIdiomRecognize::processLoopMemCpy(MemCpyInst *MCI,
                                           const SCEV *BECount) {
  // We can only handle non-volatile memcpys with a constant size.
  if (MCI->isVolatile() || !isa<ConstantInt>(MCI->getLength()))
    return false;

  // If we're not allowed to hack on memcpy, we fail.
  if ((!HasMemcpy && !isa<MemCpyInlineInst>(MCI)) || DisableLIRP::Memcpy)
    return false;

  Value *Dest = MCI->getDest();
  Value *Source = MCI->getSource();
  if (!Dest || !Source)
    return false;

  // See if the load and store pointer expressions are AddRec like {base,+,1}
  // on the current loop, which indicates a strided load and store. If we have
  // something else, it's a random load or store we can't handle.
  const SCEVAddRecExpr *StoreEv = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Dest));
  if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
    return false;
  const SCEVAddRecExpr *LoadEv = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Source));
  if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
    return false;

  // Reject memcpys that are so large that they overflow an unsigned.
  uint64_t SizeInBytes = cast<ConstantInt>(MCI->getLength())->getZExtValue();
  if ((SizeInBytes >> 32) != 0)
    return false;

  // Check if the stride matches the size of the memcpy. If so, then we know
  // that every byte is touched in the loop.
  const SCEVConstant *StoreStride =
      dyn_cast<SCEVConstant>(StoreEv->getOperand(1));
  const SCEVConstant *LoadStride =
      dyn_cast<SCEVConstant>(LoadEv->getOperand(1));
  if (!StoreStride || !LoadStride)
    return false;

  APInt StoreStrideValue = StoreStride->getAPInt();
  APInt LoadStrideValue = LoadStride->getAPInt();
  // Huge stride value - give up.
  if (StoreStrideValue.getBitWidth() > 64 || LoadStrideValue.getBitWidth() > 64)
    return false;

  if (SizeInBytes != StoreStrideValue && SizeInBytes != -StoreStrideValue) {
    ORE.emit([&]() {
      return OptimizationRemarkMissed(DEBUG_TYPE, "SizeStrideUnequal", MCI)
             << ore::NV("Inst", "memcpy") << " in "
             << ore::NV("Function", MCI->getFunction())
             << " function will not be hoisted: "
             << ore::NV("Reason", "memcpy size is not equal to stride");
    });
    return false;
  }

  int64_t StoreStrideInt = StoreStrideValue.getSExtValue();
  int64_t LoadStrideInt = LoadStrideValue.getSExtValue();
  // Check if the load stride matches the store stride.
  if (StoreStrideInt != LoadStrideInt)
    return false;

  return processLoopStoreOfLoopLoad(Dest, Source, (unsigned)SizeInBytes,
                                    MCI->getDestAlign(), MCI->getSourceAlign(),
                                    MCI, MCI, StoreEv, LoadEv, BECount);
}

/// processLoopMemSet - See if this memset can be promoted to a large memset.
bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
                                           const SCEV *BECount) {
  // We can only handle non-volatile memsets with a constant size.
  if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
    return false;

  // If we're not allowed to hack on memset, we fail.
  if (!HasMemset || DisableLIRP::Memset)
    return false;

  Value *Pointer = MSI->getDest();

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
  if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
    return false;

  // Reject memsets that are so large that they overflow an unsigned.
  uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
  if ((SizeInBytes >> 32) != 0)
    return false;

  // Check to see if the stride matches the size of the memset. If so, then we
  // know that every byte is touched in the loop.
  const SCEVConstant *ConstStride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
  if (!ConstStride)
    return false;

  APInt Stride = ConstStride->getAPInt();
  if (SizeInBytes != Stride && SizeInBytes != -Stride)
    return false;

  // Verify that the memset value is loop invariant. If not, we can't promote
  // the memset.
  Value *SplatValue = MSI->getValue();
  if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue))
    return false;

  SmallPtrSet<Instruction *, 1> MSIs;
  MSIs.insert(MSI);
  bool NegStride = SizeInBytes == -Stride;
  return processLoopStridedStore(
      Pointer, (unsigned)SizeInBytes, MaybeAlign(MSI->getDestAlignment()),
      SplatValue, MSI, MSIs, Ev, BECount, NegStride, /*IsLoopMemset=*/true);
}

/// mayLoopAccessLocation - Return true if the specified loop might access the
/// specified pointer location, which is a loop-strided access. The 'Access'
/// argument specifies what the verboten forms of access are (read or write).
static bool
mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
                      const SCEV *BECount, unsigned StoreSize,
                      AliasAnalysis &AA,
                      SmallPtrSetImpl<Instruction *> &IgnoredStores) {
  // Get the location that may be stored across the loop. Since the access is
  // strided positively through memory, we say that the modified location
  // starts at the pointer and has infinite size.
  LocationSize AccessSize = LocationSize::afterPointer();

  // If the loop iterates a fixed number of times, we can refine the access
  // size to be exactly the size of the memset, which is (BECount+1)*StoreSize.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    AccessSize = LocationSize::precise((BECst->getValue()->getZExtValue() + 1) *
                                       StoreSize);

  // TODO: For this to be really effective, we have to dive into the pointer
  // operand in the store. A store to &A[i] in a loop of 100 iterations will
  // always be reported as may-alias with a store to &A[100]; we need StoreLoc
  // to be "A" with size 100, which would then no-alias a store to &A[100].
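  // (Illustrative instance of the above, not from the original source: for
  // i8 stores with BECount equal to the constant 99, StoreLoc below covers
  // exactly [Ptr, Ptr+100); for a non-constant trip count it covers
  // everything from Ptr onward.)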
  MemoryLocation StoreLoc(Ptr, AccessSize);

  for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
       ++BI)
    for (Instruction &I : **BI)
      if (IgnoredStores.count(&I) == 0 &&
          isModOrRefSet(
              intersectModRef(AA.getModRefInfo(&I, StoreLoc), Access)))
        return true;

  return false;
}

// If we have a negative stride, Start refers to the end of the memory location
// we're trying to memset. Therefore, we need to recompute the base pointer,
// which is just Start - BECount*Size.
static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
                                        Type *IntPtr, unsigned StoreSize,
                                        ScalarEvolution *SE) {
  const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
  if (StoreSize != 1)
    Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize),
                           SCEV::FlagNUW);
  return SE->getMinusSCEV(Start, Index);
}

/// Compute the number of bytes as a SCEV from the backedge taken count.
///
/// This also maps the SCEV into the provided type and tries to handle the
/// computation in a way that will fold cleanly.
static const SCEV *getNumBytes(const SCEV *BECount, Type *IntPtr,
                               unsigned StoreSize, Loop *CurLoop,
                               const DataLayout *DL, ScalarEvolution *SE) {
  const SCEV *NumBytesS;
  // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
  // pointer size if it isn't already.
  //
  // If we're going to need to zero extend the BE count, check if we can add
  // one to it prior to zero extending without overflow. Provided this is safe,
  // it allows better simplification of the +1.
  if (DL->getTypeSizeInBits(BECount->getType()).getFixedSize() <
          DL->getTypeSizeInBits(IntPtr).getFixedSize() &&
      SE->isLoopEntryGuardedByCond(
          CurLoop, ICmpInst::ICMP_NE, BECount,
          SE->getNegativeSCEV(SE->getOne(BECount->getType())))) {
    NumBytesS = SE->getZeroExtendExpr(
        SE->getAddExpr(BECount, SE->getOne(BECount->getType()), SCEV::FlagNUW),
        IntPtr);
  } else {
    NumBytesS = SE->getAddExpr(SE->getTruncateOrZeroExtend(BECount, IntPtr),
                               SE->getOne(IntPtr), SCEV::FlagNUW);
  }

  // And scale it based on the store size.
  if (StoreSize != 1) {
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               SCEV::FlagNUW);
  }
  return NumBytesS;
}

/// processLoopStridedStore - We see a strided store of some value. If we can
/// transform this into a memset or memset_pattern in the loop preheader, do
/// so.
bool LoopIdiomRecognize::processLoopStridedStore(
    Value *DestPtr, unsigned StoreSize, MaybeAlign StoreAlignment,
    Value *StoredVal, Instruction *TheStore,
    SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev,
    const SCEV *BECount, bool NegStride, bool IsLoopMemset) {
  Value *SplatValue = isBytewiseValue(StoredVal, *DL);
  Constant *PatternValue = nullptr;

  if (!SplatValue)
    PatternValue = getMemSetPatternValue(StoredVal, DL);

  assert((SplatValue || PatternValue) &&
         "Expected either splat value or pattern value.");

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate
  // the header. This allows us to insert code for them in the preheader.
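  // (Illustrative, not from the original source: for a store to p[i] with
  // addrec {p,+,4}, both the start 'p' and the byte count derived from
  // BECount can be materialized in the preheader before the loop runs.)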
  unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");
  SCEVExpanderCleaner ExpCleaner(Expander, *DT);

  Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
  Type *IntIdxTy = DL->getIndexType(DestPtr->getType());

  bool Changed = false;
  const SCEV *Start = Ev->getStart();
  // Handle negative strided loops.
  if (NegStride)
    Start = getStartForNegStride(Start, BECount, IntIdxTy, StoreSize, SE);

  // TODO: ideally we should still be able to generate memset if SCEV expander
  // is taught to generate the dependencies at the latest point.
  if (!isSafeToExpand(Start, *SE))
    return Changed;

  // Okay, we have a strided store "p[i]" of a splattable value. We can turn
  // this into a memset in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the aliased location. Check for any overlap by generating the
  // base pointer and checking the region.
  Value *BasePtr =
      Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());

  // From here on out, conservatively report to the pass manager that we've
  // changed the IR, even if we later clean up these added instructions. There
  // may be structural differences e.g. in the order of use lists not accounted
  // for in just a textual dump of the IR. This is written as a variable, even
  // though statically all the places this dominates could be replaced with
  // 'true', with the hope that anyone trying to be clever / "more precise" with
  // the return value will read this comment, and leave them alone.
  Changed = true;

  if (mayLoopAccessLocation(BasePtr, ModRefInfo::ModRef, CurLoop, BECount,
                            StoreSize, *AA, Stores))
    return Changed;

  if (avoidLIRForMultiBlockLoop(/*IsMemset=*/true, IsLoopMemset))
    return Changed;

  // Okay, everything looks good, insert the memset.

  const SCEV *NumBytesS =
      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);

  // TODO: ideally we should still be able to generate memset if SCEV expander
  // is taught to generate the dependencies at the latest point.
  if (!isSafeToExpand(NumBytesS, *SE))
    return Changed;

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());

  CallInst *NewCall;
  if (SplatValue) {
    NewCall = Builder.CreateMemSet(BasePtr, SplatValue, NumBytes,
                                   MaybeAlign(StoreAlignment));
  } else {
    // Everything is emitted in the default address space.
    Type *Int8PtrTy = DestInt8PtrTy;

    Module *M = TheStore->getModule();
    StringRef FuncName = "memset_pattern16";
    FunctionCallee MSP = M->getOrInsertFunction(FuncName, Builder.getVoidTy(),
                                                Int8PtrTy, Int8PtrTy, IntIdxTy);
    inferLibFuncAttributes(M, FuncName, *TLI);

    // Otherwise we should form a memset_pattern16. PatternValue is known to be
    // a constant array of 16 bytes. Plop the value into a mergeable global.
    GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
                                            GlobalValue::PrivateLinkage,
                                            PatternValue, ".memset_pattern");
    GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these.
    GV->setAlignment(Align(16));
    Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
    NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
  }
  NewCall->setDebugLoc(TheStore->getDebugLoc());

  if (MSSAU) {
    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
        NewCall, nullptr, NewCall->getParent(), MemorySSA::BeforeTerminator);
    MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
  }

  LLVM_DEBUG(dbgs() << "  Formed memset: " << *NewCall << "\n"
                    << "    from store to: " << *Ev << " at: " << *TheStore
                    << "\n");

  ORE.emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "ProcessLoopStridedStore",
                              NewCall->getDebugLoc(), Preheader)
           << "Transformed loop-strided store in "
           << ore::NV("Function", TheStore->getFunction())
           << " function into a call to "
           << ore::NV("NewFunction", NewCall->getCalledFunction())
           << "() intrinsic";
  });

  // Okay, the memset has been formed. Zap the original store and anything that
  // feeds into it.
  for (auto *I : Stores) {
    if (MSSAU)
      MSSAU->removeMemoryAccess(I, true);
    deleteDeadInstruction(I);
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  ++NumMemSet;
  ExpCleaner.markResultUsed();
  return true;
}

/// If the stored value is a strided load in the same loop with the same stride
/// this may be transformable into a memcpy. This kicks in for stuff like
///   for (i) A[i] = B[i];
bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
                                                    const SCEV *BECount) {
  assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores.");

  Value *StorePtr = SI->getPointerOperand();
  const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());

  // The store must be feeding a non-volatile load.
  LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
  assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads.");

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided load. If we have something else, it's a
  // random load we can't handle.
  Value *LoadPtr = LI->getPointerOperand();
  const SCEVAddRecExpr *LoadEv = cast<SCEVAddRecExpr>(SE->getSCEV(LoadPtr));
  return processLoopStoreOfLoopLoad(StorePtr, LoadPtr, StoreSize,
                                    SI->getAlign(), LI->getAlign(), SI, LI,
                                    StoreEv, LoadEv, BECount);
}

bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
    Value *DestPtr, Value *SourcePtr, unsigned StoreSize, MaybeAlign StoreAlign,
    MaybeAlign LoadAlign, Instruction *TheStore, Instruction *TheLoad,
    const SCEVAddRecExpr *StoreEv, const SCEVAddRecExpr *LoadEv,
    const SCEV *BECount) {

  // FIXME: until llvm.memcpy.inline supports dynamic sizes, we need to
  // conservatively bail here, since otherwise we may have to transform
  // llvm.memcpy.inline into llvm.memcpy which is illegal.
  if (isa<MemCpyInlineInst>(TheStore))
    return false;

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate
  // the header. This allows us to insert code for them in the preheader.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");

  SCEVExpanderCleaner ExpCleaner(Expander, *DT);

  bool Changed = false;
  const SCEV *StrStart = StoreEv->getStart();
  unsigned StrAS = DestPtr->getType()->getPointerAddressSpace();
  Type *IntIdxTy = Builder.getIntNTy(DL->getIndexSizeInBits(StrAS));

  APInt Stride = getStoreStride(StoreEv);
  bool NegStride = StoreSize == -Stride;

  // Handle negative strided loops.
  if (NegStride)
    StrStart = getStartForNegStride(StrStart, BECount, IntIdxTy, StoreSize, SE);

  // Okay, we have a strided store "p[i]" of a loaded value. We can turn
  // this into a memcpy in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write the memory region we're storing to. This includes the load that
  // feeds the stores. Check for an alias by generating the base address and
  // checking everything.
  Value *StoreBasePtr = Expander.expandCodeFor(
      StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator());

  // From here on out, conservatively report to the pass manager that we've
  // changed the IR, even if we later clean up these added instructions. There
  // may be structural differences e.g. in the order of use lists not accounted
  // for in just a textual dump of the IR. This is written as a variable, even
  // though statically all the places this dominates could be replaced with
  // 'true', with the hope that anyone trying to be clever / "more precise" with
  // the return value will read this comment, and leave them alone.
  Changed = true;

  SmallPtrSet<Instruction *, 2> Stores;
  Stores.insert(TheStore);

  bool IsMemCpy = isa<MemCpyInst>(TheStore);
  const StringRef InstRemark = IsMemCpy ? "memcpy" : "load and store";

  bool UseMemMove =
      mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount,
                            StoreSize, *AA, Stores);
  if (UseMemMove) {
    Stores.insert(TheLoad);
    if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop,
                              BECount, StoreSize, *AA, Stores)) {
      ORE.emit([&]() {
        return OptimizationRemarkMissed(DEBUG_TYPE, "LoopMayAccessStore",
                                        TheStore)
               << ore::NV("Inst", InstRemark) << " in "
               << ore::NV("Function", TheStore->getFunction())
               << " function will not be hoisted: "
               << ore::NV("Reason", "The loop may access store location");
      });
      return Changed;
    }
    Stores.erase(TheLoad);
  }

  const SCEV *LdStart = LoadEv->getStart();
  unsigned LdAS = SourcePtr->getType()->getPointerAddressSpace();

  // Handle negative strided loops.
  if (NegStride)
    LdStart = getStartForNegStride(LdStart, BECount, IntIdxTy, StoreSize, SE);

  // For a memcpy, we have to make sure that the input array is not being
  // mutated by the loop.
  Value *LoadBasePtr = Expander.expandCodeFor(
      LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());

  // If the store is a memcpy instruction, we must check if it will write to
  // the load memory locations. So remove it from the ignored stores.
  if (IsMemCpy)
    Stores.erase(TheStore);
  if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount,
                            StoreSize, *AA, Stores)) {
    ORE.emit([&]() {
      return OptimizationRemarkMissed(DEBUG_TYPE, "LoopMayAccessLoad", TheLoad)
             << ore::NV("Inst", InstRemark) << " in "
             << ore::NV("Function", TheStore->getFunction())
             << " function will not be hoisted: "
             << ore::NV("Reason", "The loop may access load location");
    });
    return Changed;
  }
  if (UseMemMove) {
    // Ensure that LoadBasePtr is after StoreBasePtr or before StoreBasePtr
    // for negative stride. LoadBasePtr shouldn't overlap with StoreBasePtr.
    int64_t LoadOff = 0, StoreOff = 0;
    const Value *BP1 = llvm::GetPointerBaseWithConstantOffset(
        LoadBasePtr->stripPointerCasts(), LoadOff, *DL);
    const Value *BP2 = llvm::GetPointerBaseWithConstantOffset(
        StoreBasePtr->stripPointerCasts(), StoreOff, *DL);
    int64_t LoadSize =
        DL->getTypeSizeInBits(TheLoad->getType()).getFixedSize() / 8;
    if (BP1 != BP2 || LoadSize != int64_t(StoreSize))
      return Changed;
    if ((!NegStride && LoadOff < StoreOff + int64_t(StoreSize)) ||
        (NegStride && LoadOff + LoadSize > StoreOff))
      return Changed;
  }

  if (avoidLIRForMultiBlockLoop())
    return Changed;

  // Okay, everything is safe, we can transform this!

  const SCEV *NumBytesS =
      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());

  CallInst *NewCall = nullptr;
  // Check whether to generate an unordered atomic memcpy:
  //  If the load or store is atomic, it must necessarily be unordered by the
  //  previous checks.
  if (!TheStore->isAtomic() && !TheLoad->isAtomic()) {
    if (UseMemMove)
      NewCall = Builder.CreateMemMove(StoreBasePtr, StoreAlign, LoadBasePtr,
                                      LoadAlign, NumBytes);
    else
      NewCall = Builder.CreateMemCpy(StoreBasePtr, StoreAlign, LoadBasePtr,
                                     LoadAlign, NumBytes);
  } else {
    // For now don't support unordered atomic memmove.
    if (UseMemMove)
      return Changed;
    // We cannot allow unaligned ops for unordered load/store, so reject
    // anything where the alignment isn't at least the element size.
    assert((StoreAlign.hasValue() && LoadAlign.hasValue()) &&
           "Expect unordered load/store to have align.");
    if (StoreAlign.getValue() < StoreSize || LoadAlign.getValue() < StoreSize)
      return Changed;

    // If the element.atomic memcpy is not lowered into explicit
    // loads/stores later, then it will be lowered into an element-size
    // specific lib call. If the lib call doesn't exist for our store size,
    // then we shouldn't generate the memcpy.
    if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
      return Changed;

    // Create the call.
    // Note that unordered atomic loads/stores are *required* by the spec to
    // have an alignment but non-atomic loads/stores may not.
    NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
        StoreBasePtr, StoreAlign.getValue(), LoadBasePtr, LoadAlign.getValue(),
        NumBytes, StoreSize);
  }
  NewCall->setDebugLoc(TheStore->getDebugLoc());

  if (MSSAU) {
    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
        NewCall, nullptr, NewCall->getParent(), MemorySSA::BeforeTerminator);
    MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
  }

  LLVM_DEBUG(dbgs() << "  Formed new call: " << *NewCall << "\n"
                    << "    from load ptr=" << *LoadEv << " at: " << *TheLoad
                    << "\n"
                    << "    from store ptr=" << *StoreEv << " at: " << *TheStore
                    << "\n");

  ORE.emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "ProcessLoopStoreOfLoopLoad",
                              NewCall->getDebugLoc(), Preheader)
           << "Formed a call to "
           << ore::NV("NewFunction", NewCall->getCalledFunction())
           << "() intrinsic from " << ore::NV("Inst", InstRemark)
           << " instruction in " << ore::NV("Function", TheStore->getFunction())
           << " function";
  });

  // Okay, the memcpy has been formed. Zap the original store and anything that
  // feeds into it.
  if (MSSAU)
    MSSAU->removeMemoryAccess(TheStore, true);
  deleteDeadInstruction(TheStore);
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  if (UseMemMove)
    ++NumMemMove;
  else
    ++NumMemCpy;
  ExpCleaner.markResultUsed();
  return true;
}

// When compiling for code size we avoid idiom recognition for a multi-block
// loop unless it is a loop_memset idiom or a memset/memcpy idiom in a nested
// loop.
bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset,
                                                   bool IsLoopMemset) {
  if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) {
    if (CurLoop->isOutermost() && (!IsMemset || !IsLoopMemset)) {
      LLVM_DEBUG(dbgs() << " " << CurLoop->getHeader()->getParent()->getName()
                        << " : LIR " << (IsMemset ? "Memset" : "Memcpy")
                        << " avoided: multi-block top-level loop\n");
      return true;
    }
  }

  return false;
}

bool LoopIdiomRecognize::runOnNoncountableLoop() {
  LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F["
                    << CurLoop->getHeader()->getParent()->getName()
                    << "] Noncountable Loop %"
                    << CurLoop->getHeader()->getName() << "\n");

  return recognizePopcount() || recognizeAndInsertFFS() ||
         recognizeShiftUntilBitTest() || recognizeShiftUntilZero();
}

/// Check if the given conditional branch is based on a comparison between
/// a variable and zero: if the variable is non-zero (or zero, when JmpOnZero
/// is true), control branches to the loop entry. If the branch matches this
/// behavior, the variable involved in the comparison is returned. This
/// function is called to see if the precondition and postcondition of the
/// loop are in a desirable form.
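///
/// For example (illustrative IR, not from this file), with \p JmpOnZero false:
/// \code
///   %cmp = icmp ne i32 %x, 0
///   br i1 %cmp, label %loop.entry, label %exit
/// \endcode
/// returns %x when \p LoopEntry is %loop.entry.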
1429 static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry,
1430                              bool JmpOnZero = false) {
1431   if (!BI || !BI->isConditional())
1432     return nullptr;
1433
1434   ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
1435   if (!Cond)
1436     return nullptr;
1437
1438   ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
1439   if (!CmpZero || !CmpZero->isZero())
1440     return nullptr;
1441
1442   BasicBlock *TrueSucc = BI->getSuccessor(0);
1443   BasicBlock *FalseSucc = BI->getSuccessor(1);
1444   if (JmpOnZero)
1445     std::swap(TrueSucc, FalseSucc);
1446
1447   ICmpInst::Predicate Pred = Cond->getPredicate();
1448   if ((Pred == ICmpInst::ICMP_NE && TrueSucc == LoopEntry) ||
1449       (Pred == ICmpInst::ICMP_EQ && FalseSucc == LoopEntry))
1450     return Cond->getOperand(0);
1451
1452   return nullptr;
1453 }
1454
1455 // Check if the recurrence variable `VarX` is in the right form to create
1456 // the idiom. Returns the value coerced to a PHINode if so.
1457 static PHINode *getRecurrenceVar(Value *VarX, Instruction *DefX,
1458                                  BasicBlock *LoopEntry) {
1459   auto *PhiX = dyn_cast<PHINode>(VarX);
1460   if (PhiX && PhiX->getParent() == LoopEntry &&
1461       (PhiX->getOperand(0) == DefX || PhiX->getOperand(1) == DefX))
1462     return PhiX;
1463   return nullptr;
1464 }
1465
1466 /// Return true iff the idiom is detected in the loop.
1467 ///
1468 /// Additionally:
1469 /// 1) \p CntInst is set to the instruction counting the population bits.
1470 /// 2) \p CntPhi is set to the corresponding phi node.
1471 /// 3) \p Var is set to the value whose population bits are being counted.
1472 ///
1473 /// The core idiom we are trying to detect is:
1474 /// \code
1475 ///    if (x0 == 0)
1476 ///      goto loop-exit // the precondition of the loop
1477 ///    cnt0 = init-val;
1478 ///    do {
1479 ///       x1 = phi (x0, x2);
1480 ///       cnt1 = phi(cnt0, cnt2);
1481 ///
1482 ///       cnt2 = cnt1 + 1;
1483 ///        ...
1484 ///       x2 = x1 & (x1 - 1);
1485 ///        ...
1486 ///    } while(x2 != 0);
1487 ///
1488 /// loop-exit:
1489 /// \endcode
1490 static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
1491                                 Instruction *&CntInst, PHINode *&CntPhi,
1492                                 Value *&Var) {
1493   // The loop-back branch must match this pattern:
1494   //    "if (a != 0) goto loop-entry".
1495   BasicBlock *LoopEntry;
1496   Instruction *DefX2, *CountInst;
1497   Value *VarX1, *VarX0;
1498   PHINode *PhiX, *CountPhi;
1499
1500   DefX2 = CountInst = nullptr;
1501   VarX1 = VarX0 = nullptr;
1502   PhiX = CountPhi = nullptr;
1503   LoopEntry = *(CurLoop->block_begin());
1504
1505   // step 1: Check if the loop-back branch is in desirable form.
1506   {
1507     if (Value *T = matchCondition(
1508             dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
1509       DefX2 = dyn_cast<Instruction>(T);
1510     else
1511       return false;
1512   }
1513
1514   // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
1515   {
1516     if (!DefX2 || DefX2->getOpcode() != Instruction::And)
1517       return false;
1518
1519     BinaryOperator *SubOneOp;
1520
1521     if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
1522       VarX1 = DefX2->getOperand(1);
1523     else {
1524       VarX1 = DefX2->getOperand(0);
1525       SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
1526     }
1527     if (!SubOneOp || SubOneOp->getOperand(0) != VarX1)
1528       return false;
1529
1530     ConstantInt *Dec = dyn_cast<ConstantInt>(SubOneOp->getOperand(1));
1531     if (!Dec ||
1532         !((SubOneOp->getOpcode() == Instruction::Sub && Dec->isOne()) ||
1533           (SubOneOp->getOpcode() == Instruction::Add &&
1534            Dec->isMinusOne()))) {
1535       return false;
1536     }
1537   }
1538
1539   // step 3: Check the recurrence of variable X
1540   PhiX = getRecurrenceVar(VarX1, DefX2, LoopEntry);
1541   if (!PhiX)
1542     return false;
1543
1544   // step 4: Find the instruction which counts the population: cnt2 = cnt1 + 1
1545   {
1546     CountInst = nullptr;
1547     for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
1548                               IterE = LoopEntry->end();
1549          Iter != IterE; Iter++) {
1550       Instruction *Inst = &*Iter;
1551       if (Inst->getOpcode() != Instruction::Add)
1552         continue;
1553
1554       ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
1555       if (!Inc || !Inc->isOne())
1556         continue;
1557
1558       PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
1559       if (!Phi)
1560         continue;
1561
1562       // Check if the result of the instruction is live out of the loop.
1563       bool LiveOutLoop = false;
1564       for (User *U : Inst->users()) {
1565         if ((cast<Instruction>(U))->getParent() != LoopEntry) {
1566           LiveOutLoop = true;
1567           break;
1568         }
1569       }
1570
1571       if (LiveOutLoop) {
1572         CountInst = Inst;
1573         CountPhi = Phi;
1574         break;
1575       }
1576     }
1577
1578     if (!CountInst)
1579       return false;
1580   }
1581
1582   // step 5: check if the precondition is in this form:
1583   //   "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
1584   {
1585     auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1586     Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
1587     if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
1588       return false;
1589
1590     CntInst = CountInst;
1591     CntPhi = CountPhi;
1592     Var = T;
1593   }
1594
1595   return true;
1596 }
1597
1598 /// Return true if the idiom is detected in the loop.
1599 ///
1600 /// Additionally:
1601 /// 1) \p CntInst is set to the instruction counting leading zeros (CTLZ),
1602 ///    or nullptr if there is no such instruction.
1603 /// 2) \p CntPhi is set to the corresponding phi node,
1604 ///    or nullptr if there is no such node.
1605 /// 3) \p Var is set to the value whose CTLZ could be used.
1606 /// 4) \p DefX is set to the instruction calculating the loop exit condition.
1607 ///
1608 /// The core idiom we are trying to detect is:
1609 /// \code
1610 ///    if (x0 == 0)
1611 ///      goto loop-exit // the precondition of the loop
1612 ///    cnt0 = init-val;
1613 ///    do {
1614 ///       x = phi (x0, x.next);   //PhiX
1615 ///       cnt = phi(cnt0, cnt.next);
1616 ///
1617 ///       cnt.next = cnt + 1;
1618 ///        ...
1619 ///       x.next = x >> 1;   // DefX
1620 ///        ...
1621 ///    } while(x.next != 0);
1622 ///
1623 /// loop-exit:
1624 /// \endcode
1625 static bool detectShiftUntilZeroIdiom(Loop *CurLoop, const DataLayout &DL,
1626                                       Intrinsic::ID &IntrinID, Value *&InitX,
1627                                       Instruction *&CntInst, PHINode *&CntPhi,
1628                                       Instruction *&DefX) {
1629   BasicBlock *LoopEntry;
1630   Value *VarX = nullptr;
1631
1632   DefX = nullptr;
1633   CntInst = nullptr;
1634   CntPhi = nullptr;
1635   LoopEntry = *(CurLoop->block_begin());
1636
1637   // step 1: Check if the loop-back branch is in desirable form.
1638   if (Value *T = matchCondition(
1639           dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
1640     DefX = dyn_cast<Instruction>(T);
1641   else
1642     return false;
1643
1644   // step 2: detect instructions corresponding to "x.next = x >> 1 or x << 1"
1645   if (!DefX || !DefX->isShift())
1646     return false;
1647   IntrinID = DefX->getOpcode() == Instruction::Shl ? Intrinsic::cttz :
1648                                                      Intrinsic::ctlz;
1649   ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1));
1650   if (!Shft || !Shft->isOne())
1651     return false;
1652   VarX = DefX->getOperand(0);
1653
1654   // step 3: Check the recurrence of variable X
1655   PHINode *PhiX = getRecurrenceVar(VarX, DefX, LoopEntry);
1656   if (!PhiX)
1657     return false;
1658
1659   InitX = PhiX->getIncomingValueForBlock(CurLoop->getLoopPreheader());
1660
1661   // Make sure the initial value can't be negative; otherwise the ashr in the
1662   // loop might never reach zero, which would make the loop infinite.
1663   if (DefX->getOpcode() == Instruction::AShr && !isKnownNonNegative(InitX, DL))
1664     return false;
1665
1666   // step 4: Find the instruction which does the counting: cnt.next = cnt + 1
1667   //         or cnt.next = cnt + -1.
1668   // TODO: We can skip this step. If the loop trip count is known (CTLZ),
1669   //       then all uses of "cnt.next" could be optimized to the trip count
1670   //       plus "cnt0". Currently it is not optimized.
1671   //       This step could also be used to detect a POPCNT instruction:
1672   //       cnt.next = cnt + (x.next & 1)
1673   for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
1674                             IterE = LoopEntry->end();
1675        Iter != IterE; Iter++) {
1676     Instruction *Inst = &*Iter;
1677     if (Inst->getOpcode() != Instruction::Add)
1678       continue;
1679
1680     ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
1681     if (!Inc || (!Inc->isOne() && !Inc->isMinusOne()))
1682       continue;
1683
1684     PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
1685     if (!Phi)
1686       continue;
1687
1688     CntInst = Inst;
1689     CntPhi = Phi;
1690     break;
1691   }
1692   if (!CntInst)
1693     return false;
1694
1695   return true;
1696 }
1697
1698 /// Recognize a CTLZ or CTTZ idiom in a non-countable loop and convert the loop
1699 /// to a countable one (with a CTLZ / CTTZ trip count). If a CTLZ / CTTZ is
1700 /// inserted as a new trip count, returns true; otherwise, returns false.
1701 bool LoopIdiomRecognize::recognizeAndInsertFFS() {
1702   // Give up if the loop has multiple blocks or multiple backedges.
1703   if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
1704     return false;
1705
1706   Intrinsic::ID IntrinID;
1707   Value *InitX;
1708   Instruction *DefX = nullptr;
1709   PHINode *CntPhi = nullptr;
1710   Instruction *CntInst = nullptr;
1711   // Helps decide if the transformation is profitable. For the ShiftUntilZero
1712   // idiom, this is always 6.
1713   size_t IdiomCanonicalSize = 6;
1714
1715   if (!detectShiftUntilZeroIdiom(CurLoop, *DL, IntrinID, InitX,
1716                                  CntInst, CntPhi, DefX))
1717     return false;
1718
1719   bool IsCntPhiUsedOutsideLoop = false;
1720   for (User *U : CntPhi->users())
1721     if (!CurLoop->contains(cast<Instruction>(U))) {
1722       IsCntPhiUsedOutsideLoop = true;
1723       break;
1724     }
1725   bool IsCntInstUsedOutsideLoop = false;
1726   for (User *U : CntInst->users())
1727     if (!CurLoop->contains(cast<Instruction>(U))) {
1728       IsCntInstUsedOutsideLoop = true;
1729       break;
1730     }
1731   // If both CntInst and CntPhi are used outside the loop the profitability
1732   // is questionable.
1733   if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop)
1734     return false;
1735
1736   // For some CPUs the result of the CTLZ(X) intrinsic is undefined when X is
1737   // 0. If we cannot guarantee X != 0, we need to check for this case when
1738   // expanding the intrinsic.
1739   bool ZeroCheck = false;
1740   // It is safe to assume the Preheader exists, as it was checked in the
1741   // parent function runOnLoop.
1742   BasicBlock *PH = CurLoop->getLoopPreheader();
1743
1744   // If we are using the count instruction outside the loop, make sure we
1745   // have a zero check as a precondition. Without the check the loop would run
1746   // one iteration before any check of the input value. This means 0 and 1
1747   // would have identical behavior in the original loop, while the replacement
        // count (BitWidth - CTLZ(InitX)) would differ for them, so without the
        // guard the transformed code would be wrong for an input of zero.
1748   if (!IsCntPhiUsedOutsideLoop) {
1749     auto *PreCondBB = PH->getSinglePredecessor();
1750     if (!PreCondBB)
1751       return false;
1752     auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1753     if (!PreCondBI)
1754       return false;
1755     if (matchCondition(PreCondBI, PH) != InitX)
1756       return false;
1757     ZeroCheck = true;
1758   }
1759
1760   // Check if the CTLZ / CTTZ intrinsic is profitable. Assume it is always
1761   // profitable if we delete the loop.
1762
1763   // The loop has only 6 instructions:
1764   //  %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
1765   //  %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
1766   //  %shr = ashr %n.addr.0, 1
1767   //  %tobool = icmp eq %shr, 0
1768   //  %inc = add nsw %i.0, 1
1769   //  br i1 %tobool
1770
1771   const Value *Args[] = {InitX,
1772                          ConstantInt::getBool(InitX->getContext(), ZeroCheck)};
1773
1774   // @llvm.dbg intrinsics don't count, as they have no semantic effect.
1775   auto InstWithoutDebugIt = CurLoop->getHeader()->instructionsWithoutDebug();
1776   uint32_t HeaderSize =
1777       std::distance(InstWithoutDebugIt.begin(), InstWithoutDebugIt.end());
1778
1779   IntrinsicCostAttributes Attrs(IntrinID, InitX->getType(), Args);
1780   InstructionCost Cost =
1781       TTI->getIntrinsicInstrCost(Attrs, TargetTransformInfo::TCK_SizeAndLatency);
1782   if (HeaderSize != IdiomCanonicalSize &&
1783       Cost > TargetTransformInfo::TCC_Basic)
1784     return false;
1785
1786   transformLoopToCountable(IntrinID, PH, CntInst, CntPhi, InitX, DefX,
1787                            DefX->getDebugLoc(), ZeroCheck,
1788                            IsCntPhiUsedOutsideLoop);
1789   return true;
1790 }
1791
1792 /// Recognizes a population count idiom in a non-countable loop.
1793 ///
1794 /// If detected, transforms the relevant code to issue the popcount intrinsic
1795 /// function call, and returns true; otherwise, returns false.
1796 bool LoopIdiomRecognize::recognizePopcount() {
1797   if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
1798     return false;
1799
1800   // Counting the population is usually done with a few arithmetic
1801   // instructions. Such instructions can easily be "absorbed" by vacant
1802   // slots in a non-compact loop.
  // Therefore, recognizing the popcount idiom only makes sense in a
1803   // compact loop.
1804
1805   // Give up if the loop has multiple blocks or multiple backedges.
1806   if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
1807     return false;
1808
1809   BasicBlock *LoopBody = *(CurLoop->block_begin());
1810   if (LoopBody->size() >= 20) {
1811     // The loop is too big, bail out.
1812     return false;
1813   }
1814
1815   // It should have a preheader containing nothing but an unconditional branch.
1816   BasicBlock *PH = CurLoop->getLoopPreheader();
1817   if (!PH || &PH->front() != PH->getTerminator())
1818     return false;
1819   auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
1820   if (!EntryBI || EntryBI->isConditional())
1821     return false;
1822
1823   // It should have a precondition block where the generated popcount intrinsic
1824   // function can be inserted.
1825   auto *PreCondBB = PH->getSinglePredecessor();
1826   if (!PreCondBB)
1827     return false;
1828   auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1829   if (!PreCondBI || PreCondBI->isUnconditional())
1830     return false;
1831
1832   Instruction *CntInst;
1833   PHINode *CntPhi;
1834   Value *Val;
1835   if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
1836     return false;
1837
1838   transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
1839   return true;
1840 }
1841
1842 static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
1843                                        const DebugLoc &DL) {
1844   Value *Ops[] = {Val};
1845   Type *Tys[] = {Val->getType()};
1846
1847   Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
1848   Function *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
1849   CallInst *CI = IRBuilder.CreateCall(Func, Ops);
1850   CI->setDebugLoc(DL);
1851
1852   return CI;
1853 }
1854
1855 static CallInst *createFFSIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
1856                                     const DebugLoc &DL, bool ZeroCheck,
1857                                     Intrinsic::ID IID) {
1858   Value *Ops[] = {Val, IRBuilder.getInt1(ZeroCheck)};
1859   Type *Tys[] = {Val->getType()};
1860
1861   Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
1862   Function *Func = Intrinsic::getDeclaration(M, IID, Tys);
1863   CallInst *CI = IRBuilder.CreateCall(Func, Ops);
1864   CI->setDebugLoc(DL);
1865
1866   return CI;
1867 }
1868
1869 /// Transform the following loop (using CTLZ; CTTZ is similar):
1870 ///   loop:
1871 ///     CntPhi = PHI [Cnt0, CntInst]
1872 ///     PhiX = PHI [InitX, DefX]
1873 ///     CntInst = CntPhi + 1
1874 ///     DefX = PhiX >> 1
1875 ///     LOOP_BODY
1876 ///     Br: loop if (DefX != 0)
1877 ///   Use(CntPhi) or Use(CntInst)
1878 ///
1879 /// Into:
1880 ///   If CntPhi used outside the loop:
1881 ///     CountPrev = BitWidth(InitX) - CTLZ(InitX >> 1)
1882 ///     Count = CountPrev + 1
1883 ///   else
1884 ///     Count = BitWidth(InitX) - CTLZ(InitX)
1885 ///   loop:
1886 ///     CntPhi = PHI [Cnt0, CntInst]
1887 ///     PhiX = PHI [InitX, DefX]
1888 ///     PhiCount = PHI [Count, Dec]
1889 ///     CntInst = CntPhi + 1
1890 ///     DefX = PhiX >> 1
1891 ///     Dec = PhiCount - 1
1892 ///     LOOP_BODY
1893 ///     Br: loop if (Dec != 0)
1894 ///   Use(CountPrev + Cnt0) // Use(CntPhi)
1895 ///   or
1896 ///   Use(Count + Cnt0) // Use(CntInst)
1897 ///
1898 /// If LOOP_BODY is empty the loop will be deleted.
1899 /// If CntInst and DefX are not used in LOOP_BODY they will be removed.
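///
/// As a source-level sketch (names illustrative only), this rewrites a loop
/// of the form
/// \code
///   int cnt = cnt0;
///   do { cnt++; x >>= 1; } while (x != 0);
/// \endcode
/// so that cnt is computed up front as cnt0 + (BitWidth(x) - CTLZ(x)), leaving
/// behind only a countable loop (or no loop at all, if the body is empty).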
1900 void LoopIdiomRecognize::transformLoopToCountable(
1901     Intrinsic::ID IntrinID, BasicBlock *Preheader, Instruction *CntInst,
1902     PHINode *CntPhi, Value *InitX, Instruction *DefX, const DebugLoc &DL,
1903     bool ZeroCheck, bool IsCntPhiUsedOutsideLoop) {
1904   BranchInst *PreheaderBr = cast<BranchInst>(Preheader->getTerminator());
1905
1906   // Step 1: Insert the CTLZ/CTTZ instruction at the end of the preheader block
1907   IRBuilder<> Builder(PreheaderBr);
1908   Builder.SetCurrentDebugLocation(DL);
1909
1910   // If CntPhi is not used outside the loop, create:
1911   //   Count = BitWidth - CTLZ(InitX);
1912   //   NewCount = Count;
1913   // If CntPhi is used outside the loop, create:
1914   //   NewCount = BitWidth - CTLZ(InitX >> 1);
1915   //   Count = NewCount + 1;
1916   Value *InitXNext;
1917   if (IsCntPhiUsedOutsideLoop) {
1918     if (DefX->getOpcode() == Instruction::AShr)
1919       InitXNext = Builder.CreateAShr(InitX, 1);
1920     else if (DefX->getOpcode() == Instruction::LShr)
1921       InitXNext = Builder.CreateLShr(InitX, 1);
1922     else if (DefX->getOpcode() == Instruction::Shl) // cttz
1923       InitXNext = Builder.CreateShl(InitX, 1);
1924     else
1925       llvm_unreachable("Unexpected opcode!");
1926   } else
1927     InitXNext = InitX;
1928   Value *Count =
1929       createFFSIntrinsic(Builder, InitXNext, DL, ZeroCheck, IntrinID);
1930   Type *CountTy = Count->getType();
1931   Count = Builder.CreateSub(
1932       ConstantInt::get(CountTy, CountTy->getIntegerBitWidth()), Count);
1933   Value *NewCount = Count;
1934   if (IsCntPhiUsedOutsideLoop)
1935     Count = Builder.CreateAdd(Count, ConstantInt::get(CountTy, 1));
1936
1937   NewCount = Builder.CreateZExtOrTrunc(NewCount, CntInst->getType());
1938
1939   Value *CntInitVal = CntPhi->getIncomingValueForBlock(Preheader);
1940   if (cast<ConstantInt>(CntInst->getOperand(1))->isOne()) {
1941     // If the counter was being incremented in the loop, add NewCount to the
1942     // counter's initial value, but only if the initial value is not zero.
1943     ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
1944     if (!InitConst || !InitConst->isZero())
1945       NewCount = Builder.CreateAdd(NewCount, CntInitVal);
1946   } else {
1947     // If the count was being decremented in the loop, subtract NewCount from
1948     // the counter's initial value.
1949     NewCount = Builder.CreateSub(CntInitVal, NewCount);
1950   }
1951
1952   // Step 2: Insert the new IV and loop condition:
1953   //   loop:
1954   //     ...
1955   //     PhiCount = PHI [Count, Dec]
1956   //     ...
1957   //     Dec = PhiCount - 1
1958   //     ...
1959   //     Br: loop if (Dec != 0)
1960   BasicBlock *Body = *(CurLoop->block_begin());
1961   auto *LbBr = cast<BranchInst>(Body->getTerminator());
1962   ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
1963
1964   PHINode *TcPhi = PHINode::Create(CountTy, 2, "tcphi", &Body->front());
1965
1966   Builder.SetInsertPoint(LbCond);
1967   Instruction *TcDec = cast<Instruction>(Builder.CreateSub(
1968       TcPhi, ConstantInt::get(CountTy, 1), "tcdec", false, true));
1969
1970   TcPhi->addIncoming(Count, Preheader);
1971   TcPhi->addIncoming(TcDec, Body);
1972
1973   CmpInst::Predicate Pred =
1974       (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
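  // (If the true successor is the loop body, the branch keeps looping while
  //  TcDec != 0; otherwise the branch exits the loop once TcDec == 0.)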
1975   LbCond->setPredicate(Pred);
1976   LbCond->setOperand(0, TcDec);
1977   LbCond->setOperand(1, ConstantInt::get(CountTy, 0));
1978
1979   // Step 3: All the references to the original counter outside
1980   //  the loop are replaced with the NewCount
1981   if (IsCntPhiUsedOutsideLoop)
1982     CntPhi->replaceUsesOutsideBlock(NewCount, Body);
1983   else
1984     CntInst->replaceUsesOutsideBlock(NewCount, Body);
1985
1986   // Step 4: Forget the "non-computable" trip-count SCEV associated with the
1987   //  loop. The loop would otherwise not be deleted even if it becomes empty.
1988   SE->forgetLoop(CurLoop);
1989 }
1990
1991 void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
1992                                                  Instruction *CntInst,
1993                                                  PHINode *CntPhi, Value *Var) {
1994   BasicBlock *PreHead = CurLoop->getLoopPreheader();
1995   auto *PreCondBr = cast<BranchInst>(PreCondBB->getTerminator());
1996   const DebugLoc &DL = CntInst->getDebugLoc();
1997
1998   // Assuming that before the transformation the loop looks like:
1999   //   if (x) // the precondition
2000   //     do { cnt++; x &= x - 1; } while(x);
2001
2002   // Step 1: Insert the ctpop instruction at the end of the precondition block
2003   IRBuilder<> Builder(PreCondBr);
2004   Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
2005   {
2006     PopCnt = createPopcntIntrinsic(Builder, Var, DL);
2007     NewCount = PopCntZext =
2008         Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));
2009
2010     if (NewCount != PopCnt)
2011       (cast<Instruction>(NewCount))->setDebugLoc(DL);
2012
2013     // TripCnt is exactly the number of iterations the loop has
2014     TripCnt = NewCount;
2015
2016     // If the population counter's initial value is not zero, insert an Add.
2017     Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
2018     ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
2019     if (!InitConst || !InitConst->isZero()) {
2020       NewCount = Builder.CreateAdd(NewCount, CntInitVal);
2021       (cast<Instruction>(NewCount))->setDebugLoc(DL);
2022     }
2023   }
2024
2025   // Step 2: Replace the precondition "if (x == 0) goto loop-exit" with
2026   //   "if (NewCount == 0) goto loop-exit". Without this change, the intrinsic
2027   //   call would be partially dead code, and downstream passes would drag
2028   //   it back from the precondition block to the preheader.
2029   {
2030     ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());
2031
2032     Value *Opnd0 = PopCntZext;
2033     Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
2034     if (PreCond->getOperand(0) != Var)
2035       std::swap(Opnd0, Opnd1);
2036
2037     ICmpInst *NewPreCond = cast<ICmpInst>(
2038         Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
2039     PreCondBr->setCondition(NewPreCond);
2040
2041     RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
2042   }
2043
2044   // Step 3: Note that the population count is exactly the trip count of the
2045   // loop in question, which enables us to convert the loop from a noncountable
2046   // loop into a countable one. The benefit is twofold:
2047   //
2048   //  - If the loop only counts population, the entire loop becomes dead after
2049   //    the transformation. It is a lot easier to prove a countable loop dead
2050   //    than to prove a noncountable one. (In some C dialects, an infinite loop
2051   //    isn't dead even if it computes nothing useful. In general, DCE needs
2052   //    to prove a noncountable loop finite before safely deleting it.)
2053   //
2054   //  - If the loop also performs something else, it remains alive.
2055   //    Since it is transformed into countable form, it can be aggressively
2056   //    optimized by some optimizations which are in general not applicable
2057   //    to a noncountable loop.
2058   //
2059   // After this step, this loop (conceptually) would look like the following:
2060   //   newcnt = __builtin_ctpop(x);
2061   //   t = newcnt;
2062   //   if (x)
2063   //     do { cnt++; x &= x-1; t--; } while (t > 0);
2064   BasicBlock *Body = *(CurLoop->block_begin());
2065   {
2066     auto *LbBr = cast<BranchInst>(Body->getTerminator());
2067     ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
2068     Type *Ty = TripCnt->getType();
2069
2070     PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());
2071
2072     Builder.SetInsertPoint(LbCond);
2073     Instruction *TcDec = cast<Instruction>(
2074         Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
2075                           "tcdec", false, true));
2076
2077     TcPhi->addIncoming(TripCnt, PreHead);
2078     TcPhi->addIncoming(TcDec, Body);
2079
2080     CmpInst::Predicate Pred =
2081         (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
2082     LbCond->setPredicate(Pred);
2083     LbCond->setOperand(0, TcDec);
2084     LbCond->setOperand(1, ConstantInt::get(Ty, 0));
2085   }
2086
2087   // Step 4: All the references to the original population counter outside
2088   //  the loop are replaced with the NewCount -- the value returned from
2089   //  __builtin_ctpop().
2090   CntInst->replaceUsesOutsideBlock(NewCount, Body);
2091
2092   // Step 5: Forget the "non-computable" trip-count SCEV associated with the
2093   //  loop. The loop would otherwise not be deleted even if it becomes empty.
2094   SE->forgetLoop(CurLoop);
2095 }
2096
2097 /// Match a loop-invariant value.
2098 template <typename SubPattern_t> struct match_LoopInvariant {
2099   SubPattern_t SubPattern;
2100   const Loop *L;
2101
2102   match_LoopInvariant(const SubPattern_t &SP, const Loop *L)
2103       : SubPattern(SP), L(L) {}
2104
2105   template <typename ITy> bool match(ITy *V) {
2106     return L->isLoopInvariant(V) && SubPattern.match(V);
2107   }
2108 };
2109
2110 /// Matches if the value is loop-invariant.
2111 template <typename Ty>
2112 inline match_LoopInvariant<Ty> m_LoopInvariant(const Ty &M, const Loop *L) {
2113   return match_LoopInvariant<Ty>(M, L);
2114 }
2115
2116 /// Return true if the idiom is detected in the loop.
2117 ///
2118 /// The core idiom we are trying to detect is:
2119 /// \code
2120 /// entry:
2121 ///   <...>
2122 ///   %bitmask = shl i32 1, %bitpos
2123 ///   br label %loop
2124 ///
2125 /// loop:
2126 ///   %x.curr = phi i32 [ %x, %entry ], [ %x.next, %loop ]
2127 ///   %x.curr.bitmasked = and i32 %x.curr, %bitmask
2128 ///   %x.curr.isbitunset = icmp eq i32 %x.curr.bitmasked, 0
2129 ///   %x.next = shl i32 %x.curr, 1
2130 ///   <...>
2131 ///   br i1 %x.curr.isbitunset, label %loop, label %end
2132 ///
2133 /// end:
2134 ///   %x.curr.res = phi i32 [ %x.curr, %loop ] <...>
2135 ///   %x.next.res = phi i32 [ %x.next, %loop ] <...>
2136 ///   <...>
2137 /// \endcode
2138 static bool detectShiftUntilBitTestIdiom(Loop *CurLoop, Value *&BaseX,
2139                                          Value *&BitMask, Value *&BitPos,
2140                                          Value *&CurrX, Instruction *&NextX) {
2141   LLVM_DEBUG(dbgs() << DEBUG_TYPE
2142              " Performing shift-until-bittest idiom detection.\n");
2143
2144   // Give up if the loop has multiple blocks or multiple backedges.
2145   if (CurLoop->getNumBlocks() != 1 || CurLoop->getNumBackEdges() != 1) {
2146     LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad block/backedge count.\n");
2147     return false;
2148   }
2149
2150   BasicBlock *LoopHeaderBB = CurLoop->getHeader();
2151   BasicBlock *LoopPreheaderBB = CurLoop->getLoopPreheader();
2152   assert(LoopPreheaderBB && "There is always a loop preheader.");
2153
2154   using namespace PatternMatch;
2155
2156   // Step 1: Check if the loop backedge is in desirable form.
2157
2158   ICmpInst::Predicate Pred;
2159   Value *CmpLHS, *CmpRHS;
2160   BasicBlock *TrueBB, *FalseBB;
2161   if (!match(LoopHeaderBB->getTerminator(),
2162              m_Br(m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS)),
2163                   m_BasicBlock(TrueBB), m_BasicBlock(FalseBB)))) {
2164     LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge structure.\n");
2165     return false;
2166   }
2167
2168   // Step 2: Check if the backedge's condition is in desirable form.
2169
2170   auto MatchVariableBitMask = [&]() {
2171     return ICmpInst::isEquality(Pred) && match(CmpRHS, m_Zero()) &&
2172            match(CmpLHS,
2173                  m_c_And(m_Value(CurrX),
2174                          m_CombineAnd(
2175                              m_Value(BitMask),
2176                              m_LoopInvariant(m_Shl(m_One(), m_Value(BitPos)),
2177                                              CurLoop))));
2178   };
2179   auto MatchConstantBitMask = [&]() {
2180     return ICmpInst::isEquality(Pred) && match(CmpRHS, m_Zero()) &&
2181            match(CmpLHS, m_And(m_Value(CurrX),
2182                                m_CombineAnd(m_Value(BitMask), m_Power2()))) &&
2183            (BitPos = ConstantExpr::getExactLogBase2(cast<Constant>(BitMask)));
2184   };
2185   auto MatchDecomposableConstantBitMask = [&]() {
2186     APInt Mask;
2187     return llvm::decomposeBitTestICmp(CmpLHS, CmpRHS, Pred, CurrX, Mask) &&
2188            ICmpInst::isEquality(Pred) && Mask.isPowerOf2() &&
2189            (BitMask = ConstantInt::get(CurrX->getType(), Mask)) &&
2190            (BitPos = ConstantInt::get(CurrX->getType(), Mask.logBase2()));
2191   };
2192
2193   if (!MatchVariableBitMask() && !MatchConstantBitMask() &&
2194       !MatchDecomposableConstantBitMask()) {
2195     LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge comparison.\n");
2196     return false;
2197   }
2198
2199   // Step 3: Check if the recurrence is in desirable form.
2200   auto *CurrXPN = dyn_cast<PHINode>(CurrX);
2201   if (!CurrXPN || CurrXPN->getParent() != LoopHeaderBB) {
2202     LLVM_DEBUG(dbgs() << DEBUG_TYPE " Not an expected PHI node.\n");
2203     return false;
2204   }
2205
2206   BaseX = CurrXPN->getIncomingValueForBlock(LoopPreheaderBB);
2207   NextX =
2208       dyn_cast<Instruction>(CurrXPN->getIncomingValueForBlock(LoopHeaderBB));
2209
2210   assert(CurLoop->isLoopInvariant(BaseX) &&
2211          "Expected BaseX to be available in the preheader!");
2212
2213   if (!NextX || !match(NextX, m_Shl(m_Specific(CurrX), m_One()))) {
2214     // FIXME: support right-shift?
2215     LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad recurrence.\n");
2216     return false;
2217   }
2218
2219   // Step 4: Check if the backedge's destinations are in desirable form.
2220
2221   assert(ICmpInst::isEquality(Pred) &&
2222          "Should only get equality predicates here.");
2223
2224   // cmp-br is commutative, so canonicalize to a single variant.
2225   if (Pred != ICmpInst::Predicate::ICMP_EQ) {
2226     Pred = ICmpInst::getInversePredicate(Pred);
2227     std::swap(TrueBB, FalseBB);
2228   }
2229
2230   // We expect to exit the loop when the comparison yields false,
2231   // so when it yields true we should branch back to the loop header.
2232   if (TrueBB != LoopHeaderBB) {
2233     LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge flow.\n");
2234     return false;
2235   }
2236
2237   // Okay, idiom checks out.
2238   return true;
2239 }
2240
2241 /// Look for the following loop:
2242 /// \code
2243 /// entry:
2244 ///   <...>
2245 ///   %bitmask = shl i32 1, %bitpos
2246 ///   br label %loop
2247 ///
2248 /// loop:
2249 ///   %x.curr = phi i32 [ %x, %entry ], [ %x.next, %loop ]
2250 ///   %x.curr.bitmasked = and i32 %x.curr, %bitmask
2251 ///   %x.curr.isbitunset = icmp eq i32 %x.curr.bitmasked, 0
2252 ///   %x.next = shl i32 %x.curr, 1
2253 ///   <...>
2254 ///   br i1 %x.curr.isbitunset, label %loop, label %end
2255 ///
2256 /// end:
2257 ///   %x.curr.res = phi i32 [ %x.curr, %loop ] <...>
2258 ///   %x.next.res = phi i32 [ %x.next, %loop ] <...>
2259 ///   <...>
2260 /// \endcode
2261 ///
2262 /// And transform it into:
2263 /// \code
2264 /// entry:
2265 ///   %bitmask = shl i32 1, %bitpos
2266 ///   %lowbitmask = add i32 %bitmask, -1
2267 ///   %mask = or i32 %lowbitmask, %bitmask
2268 ///   %x.masked = and i32 %x, %mask
2269 ///   %x.masked.numleadingzeros = call i32 @llvm.ctlz.i32(i32 %x.masked,
2270 ///                                                       i1 true)
2271 ///   %x.masked.numactivebits = sub i32 32, %x.masked.numleadingzeros
2272 ///   %x.masked.leadingonepos = add i32 %x.masked.numactivebits, -1
2273 ///   %backedgetakencount = sub i32 %bitpos, %x.masked.leadingonepos
2274 ///   %tripcount = add i32 %backedgetakencount, 1
2275 ///   %x.curr = shl i32 %x, %backedgetakencount
2276 ///   %x.next = shl i32 %x, %tripcount
2277 ///   br label %loop
2278 ///
2279 /// loop:
2280 ///   %loop.iv = phi i32 [ 0, %entry ], [ %loop.iv.next, %loop ]
2281 ///   %loop.iv.next = add nuw i32 %loop.iv, 1
2282 ///   %loop.ivcheck = icmp eq i32 %loop.iv.next, %tripcount
2283 ///   <...>
2284 ///   br i1 %loop.ivcheck, label %end, label %loop
2285 ///
2286 /// end:
2287 ///   %x.curr.res = phi i32 [ %x.curr, %loop ] <...>
2288 ///   %x.next.res = phi i32 [ %x.next, %loop ] <...>
2289 ///   <...>
2290 /// \endcode
2291 bool LoopIdiomRecognize::recognizeShiftUntilBitTest() {
2292   bool MadeChange = false;
2293
2294   Value *X, *BitMask, *BitPos, *XCurr;
2295   Instruction *XNext;
2296   if (!detectShiftUntilBitTestIdiom(CurLoop, X, BitMask, BitPos, XCurr,
2297                                     XNext)) {
2298     LLVM_DEBUG(dbgs() << DEBUG_TYPE
2299                " shift-until-bittest idiom detection failed.\n");
2300     return MadeChange;
2301   }
2302   LLVM_DEBUG(dbgs() << DEBUG_TYPE " shift-until-bittest idiom detected!\n");
2303
2304   // Ok, it is the idiom we were looking for: we *could* transform this loop,
2305   // but is it profitable to transform?
2306
2307   BasicBlock *LoopHeaderBB = CurLoop->getHeader();
2308   BasicBlock *LoopPreheaderBB = CurLoop->getLoopPreheader();
2309   assert(LoopPreheaderBB && "There is always a loop preheader.");
2310
2311   BasicBlock *SuccessorBB = CurLoop->getExitBlock();
2312   assert(SuccessorBB && "There is only a single successor.");
2313
2314   IRBuilder<> Builder(LoopPreheaderBB->getTerminator());
2315   Builder.SetCurrentDebugLocation(cast<Instruction>(XCurr)->getDebugLoc());
2316
2317   Intrinsic::ID IntrID = Intrinsic::ctlz;
2318   Type *Ty = X->getType();
2319   unsigned Bitwidth = Ty->getScalarSizeInBits();
2320
2321   TargetTransformInfo::TargetCostKind CostKind =
2322       TargetTransformInfo::TCK_SizeAndLatency;
2323
2324   // The rewrite is considered to be unprofitable if and only if the
2325   // intrinsic/shift we'll use is not cheap. Note that we are okay with *just*
2326   // making the loop countable, even if nothing else changes.
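  // Query the cost of the ctlz call we will emit: the i1 true operand is the
  // 'is_zero_undef' flag, matching the intrinsic created in Step 1 below.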
2327   IntrinsicCostAttributes Attrs(
2328       IntrID, Ty, {UndefValue::get(Ty), /*is_zero_undef=*/Builder.getTrue()});
2329   InstructionCost Cost = TTI->getIntrinsicInstrCost(Attrs, CostKind);
2330   if (Cost > TargetTransformInfo::TCC_Basic) {
2331     LLVM_DEBUG(dbgs() << DEBUG_TYPE
2332                " Intrinsic is too costly, not beneficial\n");
2333     return MadeChange;
2334   }
2335   if (TTI->getArithmeticInstrCost(Instruction::Shl, Ty, CostKind) >
2336       TargetTransformInfo::TCC_Basic) {
2337     LLVM_DEBUG(dbgs() << DEBUG_TYPE " Shift is too costly, not beneficial\n");
2338     return MadeChange;
2339   }
2340
2341   // Ok, the transform appears worthwhile.
2342   MadeChange = true;
2343
2344   // Step 1: Compute the loop trip count.
2345
2346   Value *LowBitMask = Builder.CreateAdd(BitMask, Constant::getAllOnesValue(Ty),
2347                                         BitPos->getName() + ".lowbitmask");
2348   Value *Mask =
2349       Builder.CreateOr(LowBitMask, BitMask, BitPos->getName() + ".mask");
2350   Value *XMasked = Builder.CreateAnd(X, Mask, X->getName() + ".masked");
2351   CallInst *XMaskedNumLeadingZeros = Builder.CreateIntrinsic(
2352       IntrID, Ty, {XMasked, /*is_zero_undef=*/Builder.getTrue()},
2353       /*FMFSource=*/nullptr, XMasked->getName() + ".numleadingzeros");
2354   Value *XMaskedNumActiveBits = Builder.CreateSub(
2355       ConstantInt::get(Ty, Ty->getScalarSizeInBits()), XMaskedNumLeadingZeros,
2356       XMasked->getName() + ".numactivebits", /*HasNUW=*/true,
2357       /*HasNSW=*/Bitwidth != 2);
2358   Value *XMaskedLeadingOnePos =
2359       Builder.CreateAdd(XMaskedNumActiveBits, Constant::getAllOnesValue(Ty),
2360                         XMasked->getName() + ".leadingonepos", /*HasNUW=*/false,
2361                         /*HasNSW=*/Bitwidth > 2);
2362
2363   Value *LoopBackedgeTakenCount = Builder.CreateSub(
2364       BitPos, XMaskedLeadingOnePos, CurLoop->getName() + ".backedgetakencount",
2365       /*HasNUW=*/true, /*HasNSW=*/true);
2366   // We know the loop's backedge-taken count, but what's the loop's trip count?
2367   // Note that NUW is always safe here, while NSW is only safe for bitwidths != 2.
2368   Value *LoopTripCount =
2369       Builder.CreateAdd(LoopBackedgeTakenCount, ConstantInt::get(Ty, 1),
2370                         CurLoop->getName() + ".tripcount", /*HasNUW=*/true,
2371                         /*HasNSW=*/Bitwidth != 2);
2372
2373   // Step 2: Compute the recurrence's final value without a loop.
2374
2375   // NewX is always safe to compute, because `LoopBackedgeTakenCount`
2376   // will always be smaller than `bitwidth(X)`, i.e. we never get poison.
2377   Value *NewX = Builder.CreateShl(X, LoopBackedgeTakenCount);
2378   NewX->takeName(XCurr);
2379   if (auto *I = dyn_cast<Instruction>(NewX))
2380     I->copyIRFlags(XNext, /*IncludeWrapFlags=*/true);
2381
2382   Value *NewXNext;
2383   // Rewriting XNext is more complicated, however, because `X << LoopTripCount`
2384   // will be poison iff `LoopTripCount == bitwidth(X)` (which will happen
2385   // iff `BitPos` is `bitwidth(x) - 1` and `X` is `1`). So unless we know
2386   // that isn't the case, we'll need to emit alternative, safe IR.
2387   if (XNext->hasNoSignedWrap() || XNext->hasNoUnsignedWrap() ||
2388       PatternMatch::match(
2389           BitPos, PatternMatch::m_SpecificInt_ICMP(
2390                       ICmpInst::ICMP_NE, APInt(Ty->getScalarSizeInBits(),
2391                                                Ty->getScalarSizeInBits() - 1))))
2392     NewXNext = Builder.CreateShl(X, LoopTripCount);
2393   else {
2394     // Otherwise, just additionally shift by one. It's the smallest solution;
2395     // alternatively, we could check that NewX is INT_MIN (or that BitPos is
2396     // `bitwidth(X) - 1`) and select 0 instead.
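    // (NewX is X << BECount, so one extra shl yields X << TripCount; in the
    //  problematic TripCount == bitwidth(X) case, the extra shift of the
    //  INT_MIN-patterned NewX simply wraps to 0, which is exactly the value
    //  the original loop would have produced.)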
2397     NewXNext = Builder.CreateShl(NewX, ConstantInt::get(Ty, 1));
2398   }
2399
2400   NewXNext->takeName(XNext);
2401   if (auto *I = dyn_cast<Instruction>(NewXNext))
2402     I->copyIRFlags(XNext, /*IncludeWrapFlags=*/true);
2403
2404   // Step 3: Adjust the successor basic block to receive the computed
2405   //   recurrence's final value instead of the recurrence itself.
2406
2407   XCurr->replaceUsesOutsideBlock(NewX, LoopHeaderBB);
2408   XNext->replaceUsesOutsideBlock(NewXNext, LoopHeaderBB);
2409
2410   // Step 4: Rewrite the loop into a countable form, with a canonical IV.
2411
2412   // The new canonical induction variable.
2413   Builder.SetInsertPoint(&LoopHeaderBB->front());
2414   auto *IV = Builder.CreatePHI(Ty, 2, CurLoop->getName() + ".iv");
2415
2416   // The induction itself.
2417   // Note that NUW is always safe here, while NSW is only safe for bitwidths != 2.
2418   Builder.SetInsertPoint(LoopHeaderBB->getTerminator());
2419   auto *IVNext =
2420       Builder.CreateAdd(IV, ConstantInt::get(Ty, 1), IV->getName() + ".next",
2421                         /*HasNUW=*/true, /*HasNSW=*/Bitwidth != 2);
2422
2423   // The loop trip count check.
2424   auto *IVCheck = Builder.CreateICmpEQ(IVNext, LoopTripCount,
2425                                        CurLoop->getName() + ".ivcheck");
2426   Builder.CreateCondBr(IVCheck, SuccessorBB, LoopHeaderBB);
2427   LoopHeaderBB->getTerminator()->eraseFromParent();
2428
2429   // Populate the IV PHI.
2430   IV->addIncoming(ConstantInt::get(Ty, 0), LoopPreheaderBB);
2431   IV->addIncoming(IVNext, LoopHeaderBB);
2432
2433   // Step 5: Forget the "non-computable" trip-count SCEV associated with the
2434   //   loop. The loop would otherwise not be deleted even if it becomes empty.
2435
2436   SE->forgetLoop(CurLoop);
2437
2438   // Other passes will take care of actually deleting the loop if possible.
2439
2440   LLVM_DEBUG(dbgs() << DEBUG_TYPE " shift-until-bittest idiom optimized!\n");
2441
2442   ++NumShiftUntilBitTest;
2443   return MadeChange;
2444 }
2445
2446 /// Return true if the idiom is detected in the loop.
2447 ///
2448 /// The core idiom we are trying to detect is:
2449 /// \code
2450 /// entry:
2451 ///   <...>
2452 ///   %start = <...>
2453 ///   %extraoffset = <...>
2454 ///   <...>
2455 ///   br label %for.cond
2456 ///
2457 /// loop:
2458 ///   %iv = phi i8 [ %start, %entry ], [ %iv.next, %for.cond ]
2459 ///   %nbits = add nsw i8 %iv, %extraoffset
2460 ///   %val.shifted = {{l,a}shr,shl} i8 %val, %nbits
2461 ///   %val.shifted.iszero = icmp eq i8 %val.shifted, 0
2462 ///   %iv.next = add i8 %iv, 1
2463 ///   <...>
2464 ///   br i1 %val.shifted.iszero, label %end, label %loop
2465 ///
2466 /// end:
2467 ///   %iv.res = phi i8 [ %iv, %loop ] <...>
2468 ///   %nbits.res = phi i8 [ %nbits, %loop ] <...>
2469 ///   %val.shifted.res = phi i8 [ %val.shifted, %loop ] <...>
2470 ///   %val.shifted.iszero.res = phi i1 [ %val.shifted.iszero, %loop ] <...>
2471 ///   %iv.next.res = phi i8 [ %iv.next, %loop ] <...>
2472 ///   <...>
2473 /// \endcode
2474 static bool detectShiftUntilZeroIdiom(Loop *CurLoop, ScalarEvolution *SE,
2475                                       Instruction *&ValShiftedIsZero,
2476                                       Intrinsic::ID &IntrinID, Instruction *&IV,
2477                                       Value *&Start, Value *&Val,
2478                                       const SCEV *&ExtraOffsetExpr,
2479                                       bool &InvertedCond) {
2480   LLVM_DEBUG(dbgs() << DEBUG_TYPE
2481              " Performing shift-until-zero idiom detection.\n");
2482
2483   // Give up if the loop has multiple blocks or multiple backedges.
2484   if (CurLoop->getNumBlocks() != 1 || CurLoop->getNumBackEdges() != 1) {
2485     LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad block/backedge count.\n");
2486     return false;
2487   }
2488
2489   Instruction *ValShifted, *NBits, *IVNext;
2490   Value *ExtraOffset;
2491
2492   BasicBlock *LoopHeaderBB = CurLoop->getHeader();
2493   BasicBlock *LoopPreheaderBB = CurLoop->getLoopPreheader();
2494   assert(LoopPreheaderBB && "There is always a loop preheader.");
2495
2496   using namespace PatternMatch;
2497
2498   // Step 1: Check if the loop backedge's condition is in desirable form.
2499
2500   ICmpInst::Predicate Pred;
2501   BasicBlock *TrueBB, *FalseBB;
2502   if (!match(LoopHeaderBB->getTerminator(),
2503              m_Br(m_Instruction(ValShiftedIsZero), m_BasicBlock(TrueBB),
2504                   m_BasicBlock(FalseBB))) ||
2505       !match(ValShiftedIsZero,
2506              m_ICmp(Pred, m_Instruction(ValShifted), m_Zero())) ||
2507       !ICmpInst::isEquality(Pred)) {
2508     LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge structure.\n");
2509     return false;
2510   }
2511
2512   // Step 2: Check if the comparison's operand is in desirable form.
2513   // FIXME: Val could be a one-input PHI node, which we should look past.
2514   if (!match(ValShifted, m_Shift(m_LoopInvariant(m_Value(Val), CurLoop),
2515                                  m_Instruction(NBits)))) {
2516     LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad comparison value computation.\n");
2517     return false;
2518   }
2519   IntrinID = ValShifted->getOpcode() == Instruction::Shl ? Intrinsic::cttz
2520                                                          : Intrinsic::ctlz;
2521
2522   // Step 3: Check if the shift amount is in desirable form.
2523
2524   if (match(NBits, m_c_Add(m_Instruction(IV),
2525                            m_LoopInvariant(m_Value(ExtraOffset), CurLoop))) &&
2526       (NBits->hasNoSignedWrap() || NBits->hasNoUnsignedWrap()))
2527     ExtraOffsetExpr = SE->getNegativeSCEV(SE->getSCEV(ExtraOffset));
2528   else if (match(NBits,
2529                  m_Sub(m_Instruction(IV),
2530                        m_LoopInvariant(m_Value(ExtraOffset), CurLoop))) &&
2531            NBits->hasNoSignedWrap())
2532     ExtraOffsetExpr = SE->getSCEV(ExtraOffset);
2533   else {
2534     IV = NBits;
2535     ExtraOffsetExpr = SE->getZero(NBits->getType());
2536   }
2537
2538   // Step 4: Check if the recurrence is in desirable form.
2539   auto *IVPN = dyn_cast<PHINode>(IV);
2540   if (!IVPN || IVPN->getParent() != LoopHeaderBB) {
2541     LLVM_DEBUG(dbgs() << DEBUG_TYPE " Not an expected PHI node.\n");
2542     return false;
2543   }
2544
2545   Start = IVPN->getIncomingValueForBlock(LoopPreheaderBB);
2546   IVNext = dyn_cast<Instruction>(IVPN->getIncomingValueForBlock(LoopHeaderBB));
2547
2548   if (!IVNext || !match(IVNext, m_Add(m_Specific(IVPN), m_One()))) {
2549     LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad recurrence.\n");
2550     return false;
2551   }
2552
2553   // Step 5: Check if the backedge's destinations are in desirable form.
2554
2555   assert(ICmpInst::isEquality(Pred) &&
2556          "Should only get equality predicates here.");
2557
2558   // cmp-br is commutative, so canonicalize to a single variant.
2559   InvertedCond = Pred != ICmpInst::Predicate::ICMP_EQ;
2560   if (InvertedCond) {
2561     Pred = ICmpInst::getInversePredicate(Pred);
2562     std::swap(TrueBB, FalseBB);
2563   }
2564
2565   // We expect to exit the loop when the comparison yields true,
2566   // so when it yields false we should branch back to the loop header.
2567   if (FalseBB != LoopHeaderBB) {
2568     LLVM_DEBUG(dbgs() << DEBUG_TYPE " Bad backedge flow.\n");
2569     return false;
2570   }
2571
2572   // The new, countable loop will certainly only run a known number of
2573   // iterations; it won't be infinite. But the old loop might be infinite
2574   // under certain conditions.
  // For logical shifts, the value will become zero
2575   // after at most bitwidth(%Val) loop iterations. However, for an arithmetic
2576   // right-shift, if the sign bit was set, the value will never become zero,
2577   // and the loop may never finish.
2578   if (ValShifted->getOpcode() == Instruction::AShr &&
2579       !isMustProgress(CurLoop) && !SE->isKnownNonNegative(SE->getSCEV(Val))) {
2580     LLVM_DEBUG(dbgs() << DEBUG_TYPE " Cannot prove the loop is finite.\n");
2581     return false;
2582   }
2583
2584   // Okay, idiom checks out.
2585   return true;
2586 }
2587
2588 /// Look for the following loop:
2589 /// \code
2590 /// entry:
2591 ///   <...>
2592 ///   %start = <...>
2593 ///   %extraoffset = <...>
2594 ///   <...>
2595 ///   br label %for.cond
2596 ///
2597 /// loop:
2598 ///   %iv = phi i8 [ %start, %entry ], [ %iv.next, %for.cond ]
2599 ///   %nbits = add nsw i8 %iv, %extraoffset
2600 ///   %val.shifted = {{l,a}shr,shl} i8 %val, %nbits
2601 ///   %val.shifted.iszero = icmp eq i8 %val.shifted, 0
2602 ///   %iv.next = add i8 %iv, 1
2603 ///   <...>
2604 ///   br i1 %val.shifted.iszero, label %end, label %loop
2605 ///
2606 /// end:
2607 ///   %iv.res = phi i8 [ %iv, %loop ] <...>
2608 ///   %nbits.res = phi i8 [ %nbits, %loop ] <...>
2609 ///   %val.shifted.res = phi i8 [ %val.shifted, %loop ] <...>
2610 ///   %val.shifted.iszero.res = phi i1 [ %val.shifted.iszero, %loop ] <...>
2611 ///   %iv.next.res = phi i8 [ %iv.next, %loop ] <...>
2612 ///   <...>
2613 /// \endcode
2614 ///
2615 /// And transform it into:
2616 /// \code
2617 /// entry:
2618 ///   <...>
2619 ///   %start = <...>
2620 ///   %extraoffset = <...>
2621 ///   <...>
2622 ///   %val.numleadingzeros = call i8 @llvm.ct{l,t}z.i8(i8 %val, i1 0)
2623 ///   %val.numactivebits = sub i8 8, %val.numleadingzeros
2624 ///   %extraoffset.neg = sub i8 0, %extraoffset
2625 ///   %tmp = add i8 %val.numactivebits, %extraoffset.neg
2626 ///   %iv.final = call i8 @llvm.smax.i8(i8 %tmp, i8 %start)
2627 ///   %loop.tripcount = sub i8 %iv.final, %start
2628 ///   br label %loop
2629 ///
2630 /// loop:
2631 ///   %loop.iv = phi i8 [ 0, %entry ], [ %loop.iv.next, %loop ]
2632 ///   %loop.iv.next = add i8 %loop.iv, 1
2633 ///   %loop.ivcheck = icmp eq i8 %loop.iv.next, %loop.tripcount
2634 ///   %iv = add i8 %loop.iv, %start
2635 ///   <...>
2636 ///   br i1 %loop.ivcheck, label %end, label %loop
2637 ///
2638 /// end:
2639 ///   %iv.res = phi i8 [ %iv.final, %loop ] <...>
2640 ///   <...>
2641 /// \endcode
2642 bool LoopIdiomRecognize::recognizeShiftUntilZero() {
2643   bool MadeChange = false;
2644
2645   Instruction *ValShiftedIsZero;
2646   Intrinsic::ID IntrID;
2647   Instruction *IV;
2648   Value *Start, *Val;
2649   const SCEV *ExtraOffsetExpr;
2650   bool InvertedCond;
2651   if (!detectShiftUntilZeroIdiom(CurLoop, SE, ValShiftedIsZero, IntrID, IV,
2652                                  Start, Val, ExtraOffsetExpr, InvertedCond)) {
2653     LLVM_DEBUG(dbgs() << DEBUG_TYPE
2654                " shift-until-zero idiom detection failed.\n");
2655     return MadeChange;
2656   }
2657   LLVM_DEBUG(dbgs() << DEBUG_TYPE " shift-until-zero idiom detected!\n");
2658
2659   // Ok, it is the idiom we were looking for: we *could* transform this loop,
2660   // but is it profitable to transform?
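  // (The cost model mirrors recognizeShiftUntilBitTest above: the transform
  //  is considered worthwhile whenever the ctlz/cttz intrinsic we emit is
  //  cheap.)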
2661
2662   BasicBlock *LoopHeaderBB = CurLoop->getHeader();
2663   BasicBlock *LoopPreheaderBB = CurLoop->getLoopPreheader();
2664   assert(LoopPreheaderBB && "There is always a loop preheader.");
2665
2666   BasicBlock *SuccessorBB = CurLoop->getExitBlock();
2667   assert(SuccessorBB && "There is only a single successor.");
2668
2669   IRBuilder<> Builder(LoopPreheaderBB->getTerminator());
2670   Builder.SetCurrentDebugLocation(IV->getDebugLoc());
2671
2672   Type *Ty = Val->getType();
2673   unsigned Bitwidth = Ty->getScalarSizeInBits();
2674
2675   TargetTransformInfo::TargetCostKind CostKind =
2676       TargetTransformInfo::TCK_SizeAndLatency;
2677
2678   // The rewrite is considered to be unprofitable if and only if the
2679   // intrinsic we'll use is not cheap. Note that we are okay with *just*
2680   // making the loop countable, even if nothing else changes.
2681   IntrinsicCostAttributes Attrs(
2682       IntrID, Ty, {UndefValue::get(Ty), /*is_zero_undef=*/Builder.getFalse()});
2683   InstructionCost Cost = TTI->getIntrinsicInstrCost(Attrs, CostKind);
2684   if (Cost > TargetTransformInfo::TCC_Basic) {
2685     LLVM_DEBUG(dbgs() << DEBUG_TYPE
2686                " Intrinsic is too costly, not beneficial\n");
2687     return MadeChange;
2688   }
2689
2690   // Ok, the transform appears worthwhile.
2691   MadeChange = true;
2692
2693   bool OffsetIsZero = false;
2694   if (auto *ExtraOffsetExprC = dyn_cast<SCEVConstant>(ExtraOffsetExpr))
2695     OffsetIsZero = ExtraOffsetExprC->isZero();
2696
2697   // Step 1: Compute the loop's final IV value / trip count.
2698
2699   CallInst *ValNumLeadingZeros = Builder.CreateIntrinsic(
2700       IntrID, Ty, {Val, /*is_zero_undef=*/Builder.getFalse()},
2701       /*FMFSource=*/nullptr, Val->getName() + ".numleadingzeros");
2702   Value *ValNumActiveBits = Builder.CreateSub(
2703       ConstantInt::get(Ty, Ty->getScalarSizeInBits()), ValNumLeadingZeros,
2704       Val->getName() + ".numactivebits", /*HasNUW=*/true,
2705       /*HasNSW=*/Bitwidth != 2);
2706
2707   SCEVExpander Expander(*SE, *DL, "loop-idiom");
2708   Expander.setInsertPoint(&*Builder.GetInsertPoint());
2709   Value *ExtraOffset = Expander.expandCodeFor(ExtraOffsetExpr);
2710
2711   Value *ValNumActiveBitsOffset = Builder.CreateAdd(
2712       ValNumActiveBits, ExtraOffset, ValNumActiveBits->getName() + ".offset",
2713       /*HasNUW=*/OffsetIsZero, /*HasNSW=*/true);
2714   Value *IVFinal = Builder.CreateIntrinsic(Intrinsic::smax, {Ty},
2715                                            {ValNumActiveBitsOffset, Start},
2716                                            /*FMFSource=*/nullptr, "iv.final");
2717
2718   auto *LoopBackedgeTakenCount = cast<Instruction>(Builder.CreateSub(
2719       IVFinal, Start, CurLoop->getName() + ".backedgetakencount",
2720       /*HasNUW=*/OffsetIsZero, /*HasNSW=*/true));
2721   // FIXME: or when the offset was `add nuw`
2722
2723   // We know the loop's backedge-taken count, but what's the loop's trip count?
2724   Value *LoopTripCount =
2725       Builder.CreateAdd(LoopBackedgeTakenCount, ConstantInt::get(Ty, 1),
2726                         CurLoop->getName() + ".tripcount", /*HasNUW=*/true,
2727                         /*HasNSW=*/Bitwidth != 2);
2728
2729   // Step 2: Adjust the successor basic block to receive the original
2730   //   induction variable's final value instead of the original IV itself.
2731
2732   IV->replaceUsesOutsideBlock(IVFinal, LoopHeaderBB);
2733
2734   // Step 3: Rewrite the loop into a countable form, with a canonical IV.
2735
2736   // The new canonical induction variable.
2737   Builder.SetInsertPoint(&LoopHeaderBB->front());
2738   auto *CIV = Builder.CreatePHI(Ty, 2, CurLoop->getName() + ".iv");
2739
2740   // The induction itself.
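  // (Note: this is inserted right after the PHI nodes rather than at the
  //  terminator, so that the values created here dominate the rebased IV's
  //  uses throughout the loop body.)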
2741   Builder.SetInsertPoint(LoopHeaderBB->getFirstNonPHI());
2742   auto *CIVNext =
2743       Builder.CreateAdd(CIV, ConstantInt::get(Ty, 1), CIV->getName() + ".next",
2744                         /*HasNUW=*/true, /*HasNSW=*/Bitwidth != 2);
2745
2746   // The loop trip count check.
2747   auto *CIVCheck = Builder.CreateICmpEQ(CIVNext, LoopTripCount,
2748                                         CurLoop->getName() + ".ivcheck");
2749   auto *NewIVCheck = CIVCheck;
2750   if (InvertedCond) {
2751     NewIVCheck = Builder.CreateNot(CIVCheck);
2752     NewIVCheck->takeName(ValShiftedIsZero);
2753   }
2754
2755   // The original IV, but rebased to be an offset to the CIV.
2756   auto *IVDePHId = Builder.CreateAdd(CIV, Start, "", /*HasNUW=*/false,
2757                                      /*HasNSW=*/true); // FIXME: what about NUW?
2758   IVDePHId->takeName(IV);
2759
2760   // The loop terminator.
2761   Builder.SetInsertPoint(LoopHeaderBB->getTerminator());
2762   Builder.CreateCondBr(CIVCheck, SuccessorBB, LoopHeaderBB);
2763   LoopHeaderBB->getTerminator()->eraseFromParent();
2764
2765   // Populate the IV PHI.
2766   CIV->addIncoming(ConstantInt::get(Ty, 0), LoopPreheaderBB);
2767   CIV->addIncoming(CIVNext, LoopHeaderBB);
2768
2769   // Step 4: Forget the "non-computable" trip-count SCEV associated with the
2770   //   loop. The loop would otherwise not be deleted even if it becomes empty.
2771
2772   SE->forgetLoop(CurLoop);
2773
2774   // Step 5: Try to clean up the loop's body somewhat.
2775   IV->replaceAllUsesWith(IVDePHId);
2776   IV->eraseFromParent();
2777
2778   ValShiftedIsZero->replaceAllUsesWith(NewIVCheck);
2779   ValShiftedIsZero->eraseFromParent();
2780
2781   // Other passes will take care of actually deleting the loop if possible.
2782
2783   LLVM_DEBUG(dbgs() << DEBUG_TYPE " shift-until-zero idiom optimized!\n");
2784
2785   ++NumShiftUntilZero;
2786   return MadeChange;
2787 }
2788