//===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal. Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "dse"

STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");
STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
STATISTIC(NumModifiedStores, "Number of stores modified");

static cl::opt<bool>
EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
  cl::init(true), cl::Hidden,
  cl::desc("Enable partial-overwrite tracking in DSE"));

static cl::opt<bool>
EnablePartialStoreMerging("enable-dse-partial-store-merging",
  cl::init(true), cl::Hidden,
  cl::desc("Enable partial store merging in DSE"));

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
using OverlapIntervalsTy = std::map<int64_t, int64_t>;
using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
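
// Note on the encoding used by OverlapIntervalsTy: each entry maps the *end*
// offset (exclusive) of a later-overwrite interval to its *start* offset, so
// a later store covering bytes [4, 12) of an earlier write is recorded as
// IM[12] = 4. See the interval-merging logic in isOverwrite() below.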

/// Delete this instruction. Before we do, go through and zero out all the
/// operands of this instruction. If any of them become dead, delete them and
/// the computation tree that feeds them.
/// If ValueSet is non-null, remove any deleted instructions from it as well.
static void
deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI,
                      MemoryDependenceResults &MD, const TargetLibraryInfo &TLI,
                      InstOverlapIntervalsTy &IOL,
                      DenseMap<Instruction*, size_t> *InstrOrdering,
                      SmallSetVector<Value *, 16> *ValueSet = nullptr) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Keeping the iterator straight is a pain, so we let this routine tell the
  // caller what the next instruction is after we're done mucking about.
  BasicBlock::iterator NewIter = *BBI;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // Try to preserve debug information attached to the dead instruction.
    salvageDebugInfo(*DeadInst);

    // This instruction is dead, zap it, in stages. Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, &TLI))
          NowDeadInsts.push_back(OpI);
    }

    if (ValueSet) ValueSet->remove(DeadInst);
    InstrOrdering->erase(DeadInst);
    IOL.erase(DeadInst);

    if (NewIter == DeadInst->getIterator())
      NewIter = DeadInst->eraseFromParent();
    else
      DeadInst->eraseFromParent();
  } while (!NowDeadInsts.empty());
  *BBI = NewIter;
}

/// Does this instruction write some memory? This only returns true for things
/// that we can analyze with other helpers below.
static bool hasAnalyzableMemoryWrite(Instruction *I,
                                     const TargetLibraryInfo &TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (auto CS = CallSite(I)) {
    if (Function *F = CS.getCalledFunction()) {
      StringRef FnName = F->getName();
      if (TLI.has(LibFunc_strcpy) && FnName == TLI.getName(LibFunc_strcpy))
        return true;
      if (TLI.has(LibFunc_strncpy) && FnName == TLI.getName(LibFunc_strncpy))
        return true;
      if (TLI.has(LibFunc_strcat) && FnName == TLI.getName(LibFunc_strcat))
        return true;
      if (TLI.has(LibFunc_strncat) && FnName == TLI.getName(LibFunc_strncat))
        return true;
    }
  }
  return false;
}
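
// For example, both of these instructions have analyzable writes:
//   store i32 0, i32* %p
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 16, i1 false)
// whereas an arbitrary call does not, unless it is one of the TLI-recognized
// string libcalls handled above (strcpy, strncpy, strcat, strncat).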

/// Return a Location stored to by the specified instruction. If isRemovable
/// returns true, this function and getLocForRead completely describe the
/// memory operations for this instruction.
static MemoryLocation getLocForWrite(Instruction *Inst) {

  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return MemoryLocation::get(SI);

  if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    MemoryLocation Loc = MemoryLocation::getForDest(MI);
    return Loc;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    default:
      return MemoryLocation(); // Unhandled intrinsic.
    case Intrinsic::init_trampoline:
      return MemoryLocation(II->getArgOperand(0));
    case Intrinsic::lifetime_end: {
      uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
      return MemoryLocation(II->getArgOperand(1), Len);
    }
    }
  }
  if (auto CS = CallSite(Inst))
    // All the supported TLI functions so far happen to have dest as their
    // first argument.
    return MemoryLocation(CS.getArgument(0));
  return MemoryLocation();
}

/// Return the location read by the specified "hasAnalyzableMemoryWrite"
/// instruction, if any.
static MemoryLocation getLocForRead(Instruction *Inst,
                                    const TargetLibraryInfo &TLI) {
  assert(hasAnalyzableMemoryWrite(Inst, TLI) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (auto *MTI = dyn_cast<AnyMemTransferInst>(Inst))
    return MemoryLocation::getForSource(MTI);
  return MemoryLocation();
}

/// If the value of this instruction and the memory it writes to is unused, may
/// we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove a dead lifetime_end, e.g. because it may be followed by
      // a free.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      return true;
    }
  }

  // Note: we only get here for calls with analyzable writes, i.e. libcalls.
  if (auto CS = CallSite(I))
    return CS.getInstruction()->use_empty();

  return false;
}

/// Returns true if the end of this instruction can be safely shortened in
/// length.
static bool isShortenableAtTheEnd(Instruction *I) {
  // Don't shorten stores for now.
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: return false;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      // Do shorten memory intrinsics.
      // FIXME: Add memmove if it's also safe to transform.
      return true;
    }
  }

  // Don't shorten libcall calls for now.

  return false;
}

/// Returns true if the beginning of this instruction can be safely shortened
/// in length.
static bool isShortenableAtTheBeginning(Instruction *I) {
  // FIXME: Only memset is handled for now; supporting memcpy/memmove should
  // be easily done by offsetting the source address.
  return isa<AnyMemSetInst>(I);
}

/// Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  //TODO: factor this to reuse getLocForWrite
  MemoryLocation Loc = getLocForWrite(I);
  assert(Loc.Ptr &&
         "unable to find pointer written for analyzable instruction?");
  // TODO: most APIs don't expect const Value *
  return const_cast<Value*>(Loc.Ptr);
}

static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI,
                               const Function *F) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.NullIsUnknownSize = NullPointerIsDefined(F);

  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

namespace {

enum OverwriteResult {
  OW_Begin,
  OW_Complete,
  OW_End,
  OW_PartialEarlierWithFullLater,
  OW_Unknown
};

} // end anonymous namespace

/// Return 'OW_Complete' if a store to the 'Later' location completely
/// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
/// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
/// beginning of the 'Earlier' location is overwritten by 'Later'.
/// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
/// overwritten by a later (smaller) store which doesn't write outside the big
/// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
static OverwriteResult isOverwrite(const MemoryLocation &Later,
                                   const MemoryLocation &Earlier,
                                   const DataLayout &DL,
                                   const TargetLibraryInfo &TLI,
                                   int64_t &EarlierOff, int64_t &LaterOff,
                                   Instruction *DepWrite,
                                   InstOverlapIntervalsTy &IOL,
                                   AliasAnalysis &AA,
                                   const Function *F) {
  // FIXME: Vet that this works for size upper-bounds. Seems unlikely that
  // we'll get imprecise values here, though (except for unknown sizes).
  if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise())
    return OW_Unknown;

  const uint64_t LaterSize = Later.Size.getValue();
  const uint64_t EarlierSize = Earlier.Size.getValue();

  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see
  // if the later store was larger than the earlier store.
  if (P1 == P2 || AA.isMustAlias(P1, P2)) {
    // Make sure that the Later size is >= the Earlier size.
    if (LaterSize >= EarlierSize)
      return OW_Complete;
  }

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument). If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OW_Unknown;

  // If the "Later" store is to a recognizable object, get its size.
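  // For example, a later store that writes all 12 bytes of a 12-byte alloca
  // completely overwrites any earlier (no larger) store into the same alloca.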
  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, F);
  if (ObjectSize != MemoryLocation::UnknownSize)
    if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
      return OW_Complete;

  // Okay, we have stores to two completely different pointers. Try to
  // decompose the pointer into a "base + constant_offset" form. If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different
  // stores.
  if (BP1 != BP2)
    return OW_Unknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but
  //    which still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      LaterSize >= EarlierSize &&
      uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
    return OW_Complete;

  // We may now overlap, although the overlap is not complete. There might also
  // be other incomplete overlaps, and together, they might cover the complete
  // earlier write.
  // Note: The correctness of this logic depends on the fact that this function
  // is never called with DepWrite when there are any intervening reads.
  if (EnablePartialOverwriteTracking &&
      LaterOff < int64_t(EarlierOff + EarlierSize) &&
      int64_t(LaterOff + LaterSize) >= EarlierOff) {

    // Insert our part of the overlap into the map.
    auto &IM = IOL[DepWrite];
    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
                      << ", " << int64_t(EarlierOff + EarlierSize)
                      << ") Later [" << LaterOff << ", "
                      << int64_t(LaterOff + LaterSize) << ")\n");

    // Make sure that we only insert non-overlapping intervals and combine
    // adjacent intervals. The intervals are stored in the map with the ending
    // offset as the key (in the half-open sense) and the starting offset as
    // the value.
    int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;

    // Find any intervals ending at, or after, LaterIntStart which start
    // before LaterIntEnd.
    auto ILI = IM.lower_bound(LaterIntStart);
    if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
      // This existing interval is overlapped with the current store somewhere
      // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
      // intervals and adjusting our start and end.
      LaterIntStart = std::min(LaterIntStart, ILI->second);
      LaterIntEnd = std::max(LaterIntEnd, ILI->first);
      ILI = IM.erase(ILI);

      // Continue erasing and adjusting our end in case other previous
      // intervals are also overlapped with the current store.
      //
      //     |--- earlier 1 ---|  |--- earlier 2 ---|
      //         |------- later --------|
      //
      while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
        assert(ILI->second > LaterIntStart && "Unexpected interval");
        LaterIntEnd = std::max(LaterIntEnd, ILI->first);
        ILI = IM.erase(ILI);
      }
    }

    IM[LaterIntEnd] = LaterIntStart;

    ILI = IM.begin();
    if (ILI->second <= EarlierOff &&
        ILI->first >= int64_t(EarlierOff + EarlierSize)) {
      LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
                        << EarlierOff << ", "
                        << int64_t(EarlierOff + EarlierSize)
                        << ") Composite Later [" << ILI->second << ", "
                        << ILI->first << ")\n");
      ++NumCompletePartials;
      return OW_Complete;
    }
  }

  // Check for an earlier store which writes to all the memory locations that
  // the later store writes to.
  if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
      int64_t(EarlierOff + EarlierSize) > LaterOff &&
      uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite of an earlier store ["
                      << EarlierOff << ", "
                      << int64_t(EarlierOff + EarlierSize)
                      << ") by a later store [" << LaterOff << ", "
                      << int64_t(LaterOff + LaterSize) << ")\n");
    // TODO: Maybe come up with a better name?
    return OW_PartialEarlierWithFullLater;
  }

  // Another interesting case is if the later store overwrites the end of the
  // earlier store.
  //
  //    |--earlier--|
  //                |-- later --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
  if (!EnablePartialOverwriteTracking &&
      (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
       int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
    return OW_End;

  // Finally, we also need to check if the later store overwrites the beginning
  // of the earlier store.
  //
  //                |--earlier--|
  //    |-- later --|
  //
  // In this case we may want to move the destination address and trim the size
  // of earlier to avoid generating writes to addresses which will definitely
  // be overwritten later.
  if (!EnablePartialOverwriteTracking &&
      (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
    assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
           "Expect to be handled as OW_Complete");
    return OW_Begin;
  }
  // Otherwise, they don't completely overlap.
  return OW_Unknown;
}
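
// Worked example of the partial-overwrite tracking above: for an earlier
// 8-byte store covering [0, 8), a later store of [4, 12) is recorded as
// IM[12] = 4. A subsequent later store of [0, 4) then merges with it to give
// IM[12] = 0, which covers all of [0, 8), so the earlier store is reported
// dead as OW_Complete through the "full overwrite from partials" path.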

/// If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense. Consider this case:
///
///   memmove(A <- B)
///   memmove(A <- A)
///
/// In this case, the second store to A does not make the first store to A dead.
/// The usual situation isn't an explicit A<-A store like this (which can be
/// trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE-inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const MemoryLocation &InstStoreLoc,
                               Instruction *DepWrite,
                               const TargetLibraryInfo &TLI,
                               AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory. Get the
  // location read.
  MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
  if (!InstReadLoc.Ptr)
    return false; // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc))
    return false;

  if (isa<AnyMemCpyInst>(Inst)) {
    // LLVM's memcpy overlap semantics are not fully fleshed out (see PR11763)
    // but in practice memcpy(A <- B) either means that A and B are disjoint or
    // are equal (i.e. there are no partial overlaps). Given that, if we have:
    //
    //   memcpy/memmove(A <- B)  // DepWrite
    //   memcpy(A <- B)          // Inst
    //
    // with Inst reading/writing a size >= DepWrite's, we can reason as
    // follows:
    //
    //   - If A == B then both the copies are no-ops, so the DepWrite can be
    //     removed.
    //   - If A != B then A and B are disjoint locations in Inst. Since
    //     Inst.size >= DepWrite.size, A and B are disjoint in DepWrite too.
    //     Therefore DepWrite can be removed.
    MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);

    if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
      return false;
  }

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}

/// Returns true if the memory which is accessed by the second instruction is
/// not modified between the first and the second instruction.
/// Precondition: Second instruction must be dominated by the first
/// instruction.
static bool memoryIsNotModifiedBetween(Instruction *FirstI,
                                       Instruction *SecondI,
                                       AliasAnalysis *AA) {
  SmallVector<BasicBlock *, 16> WorkList;
  SmallPtrSet<BasicBlock *, 8> Visited;
  BasicBlock::iterator FirstBBI(FirstI);
  ++FirstBBI;
  BasicBlock::iterator SecondBBI(SecondI);
  BasicBlock *FirstBB = FirstI->getParent();
  BasicBlock *SecondBB = SecondI->getParent();
  MemoryLocation MemLoc = MemoryLocation::get(SecondI);

  // Start checking the second instruction's block.
  WorkList.push_back(SecondBB);
  bool isFirstBlock = true;

  // Check all blocks going backward until we reach the first instruction's
  // block.
  while (!WorkList.empty()) {
    BasicBlock *B = WorkList.pop_back_val();

    // Ignore instructions before FirstI if this is FirstBB.
    BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());

    BasicBlock::iterator EI;
    if (isFirstBlock) {
      // Ignore instructions after SecondI if this is the first visit of
      // SecondBB.
      assert(B == SecondBB && "first block is not the store block");
      EI = SecondBBI;
      isFirstBlock = false;
    } else {
      // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
      // In this case we also have to look at instructions after SecondI.
      EI = B->end();
    }
    for (; BI != EI; ++BI) {
      Instruction *I = &*BI;
      if (I->mayWriteToMemory() && I != SecondI)
        if (isModSet(AA->getModRefInfo(I, MemLoc)))
          return false;
    }
    if (B != FirstBB) {
      assert(B != &FirstBB->getParent()->getEntryBlock() &&
             "Should not hit the entry block because SecondI must be "
             "dominated by FirstI");
      for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
        if (!Visited.insert(*PredI).second)
          continue;
        WorkList.push_back(*PredI);
      }
    }
  }
  return true;
}

/// Find all blocks that will unconditionally lead to the block BB and append
/// them to Blocks.
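/// For example, a predecessor terminating in an unconditional branch to BB
/// qualifies, while one terminating in a conditional branch does not, since
/// BB is not guaranteed to execute from there.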
static void findUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    Instruction *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}

/// Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
static bool handleFree(CallInst *F, AliasAnalysis *AA,
                       MemoryDependenceResults *MD, DominatorTree *DT,
                       const TargetLibraryInfo *TLI,
                       InstOverlapIntervalsTy &IOL,
                       DenseMap<Instruction*, size_t> *InstrOrdering) {
  bool MadeChange = false;

  MemoryLocation Loc = MemoryLocation(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());
  const DataLayout &DL = F->getModule()->getDataLayout();

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep =
        MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasAnalyzableMemoryWrite(Dependency, *TLI) ||
          !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      LLVM_DEBUG(
          dbgs() << "DSE: Dead Store to soon-to-be-freed memory:\n  DEAD: "
                 << *Dependency << '\n');

      // DCE instructions only used to calculate that store.
      BasicBlock::iterator BBI(Dependency);
      deleteDeadInstruction(Dependency, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted. Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0; // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, BBI, BB);
    }

    if (Dep.isNonLocal())
      findUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}

/// Check to see if the specified location may alias any of the stack objects
/// in the DeadStackObjects set. If so, they become live because the location
/// is being loaded.
static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
                                  SmallSetVector<Value *, 16> &DeadStackObjects,
                                  const DataLayout &DL, AliasAnalysis *AA,
                                  const TargetLibraryInfo *TLI,
                                  const Function *F) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](Value *I) {
    // See if the loaded location could alias the stack location.
    MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI, F));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}

/// Remove dead stores to stack-allocated locations in the function end block.
/// Ex:
///   %A = alloca i32
///   ...
///   store i32 1, i32* %A
///   ret void
static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
                           MemoryDependenceResults *MD,
                           const TargetLibraryInfo *TLI,
                           InstOverlapIntervalsTy &IOL,
                           DenseMap<Instruction*, size_t> *InstrOrdering) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock &Entry = BB.getParent()->front();
  for (Instruction &I : Entry) {
    if (isa<AllocaInst>(&I))
      DeadStackObjects.insert(&I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyway.
    else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
      DeadStackObjects.insert(&I);
  }

  // Treat byval or inalloca arguments the same: stores to them are dead at
  // the end of the function.
  for (Argument &AI : BB.getParent()->args())
    if (AI.hasByValOrInAllocaAttr())
      DeadStackObjects.insert(&AI);

  const DataLayout &DL = BB.getModule()->getDataLayout();

  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ) {
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasAnalyzableMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
      // See through pointer-to-pointer bitcasts.
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (Value *Pointer : Pointers)
        if (!DeadStackObjects.count(Pointer)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = &*BBI;

        LLVM_DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                          << *Dead << "\n  Objects: ";
                   for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                        E = Pointers.end(); I != E; ++I) {
                     dbgs() << **I;
                     if (std::next(I) != E)
                       dbgs() << ", ";
                   }
                   dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, InstrOrdering,
                              &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(&*BBI, TLI)) {
      LLVM_DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n  DEAD: "
                        << *&*BBI << '\n');
      deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, InstrOrdering,
                            &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(&*BBI);
      continue;
    }

    if (auto CS = CallSite(&*BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(&*BBI, TLI))
        DeadStackObjects.remove(&*BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        return isRefSet(AA->getModRefInfo(
            CS, I, getPointerSize(I, DL, *TLI, BB.getParent())));
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    // We can remove the dead stores, irrespective of the fence and its
    // ordering (release/acquire/seq_cst). Fences only constrain the ordering
    // of already-visible stores; they do not make a store visible to other
    // threads. So, skipping over a fence does not change a store from being
    // dead.
    if (isa<FenceInst>(*BBI))
      continue;

    MemoryLocation LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load
        break;
      LoadedLoc = MemoryLocation::get(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = MemoryLocation::get(V);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory. Note that stores that weren't
      // removed above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    removeAccessedObjects(LoadedLoc, DeadStackObjects, DL, AA, TLI,
                          BB.getParent());

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}

static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
                         int64_t &EarlierSize, int64_t LaterOffset,
                         int64_t LaterSize, bool IsOverwriteEnd) {
  // TODO: base this on the target vector size so that if the earlier store
  // was too small to get vector writes anyway, then it's likely a good idea
  // to shorten it.
  // Power-of-2 vector writes are probably always a bad idea to shorten, as
  // any store/memset/memcpy is likely using vector instructions, so
  // shortening it to a non-vector size is likely to be slower.
  auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
  unsigned EarlierWriteAlign = EarlierIntrinsic->getDestAlignment();
  if (!IsOverwriteEnd)
    LaterOffset = int64_t(LaterOffset + LaterSize);

  if (!(isPowerOf2_64(LaterOffset) && EarlierWriteAlign <= LaterOffset) &&
      !((EarlierWriteAlign != 0) && LaterOffset % EarlierWriteAlign == 0))
    return false;

  int64_t NewLength = IsOverwriteEnd
                          ? LaterOffset - EarlierOffset
                          : EarlierSize - (LaterOffset - EarlierOffset);

  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) {
    // When shortening an atomic memory intrinsic, the newly shortened
    // length must remain an integer multiple of the element size.
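    // For example, an atomic memcpy with 4-byte elements may be shortened to
    // 8 bytes but not to 10, since 10 is not a multiple of the element size.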
    const uint32_t ElementSize = AMI->getElementSizeInBytes();
    if (0 != NewLength % ElementSize)
      return false;
  }

  LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
                    << (IsOverwriteEnd ? "END" : "BEGIN") << ": "
                    << *EarlierWrite << "\n  KILLER (offset " << LaterOffset
                    << ", " << EarlierSize << ")\n");

  Value *EarlierWriteLength = EarlierIntrinsic->getLength();
  Value *TrimmedLength =
      ConstantInt::get(EarlierWriteLength->getType(), NewLength);
  EarlierIntrinsic->setLength(TrimmedLength);

  EarlierSize = NewLength;
  if (!IsOverwriteEnd) {
    int64_t OffsetMoved = (LaterOffset - EarlierOffset);
    Value *Indices[1] = {
        ConstantInt::get(EarlierWriteLength->getType(), OffsetMoved)};
    GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
        EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
    EarlierIntrinsic->setDest(NewDestGEP);
    EarlierOffset = EarlierOffset + OffsetMoved;
  }
  return true;
}

static bool tryToShortenEnd(Instruction *EarlierWrite,
                            OverlapIntervalsTy &IntervalMap,
                            int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = --IntervalMap.end();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart > EarlierStart && LaterStart < EarlierStart + EarlierSize &&
      LaterStart + LaterSize >= EarlierStart + EarlierSize) {
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, true)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool tryToShortenBegin(Instruction *EarlierWrite,
                              OverlapIntervalsTy &IntervalMap,
                              int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = IntervalMap.begin();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart <= EarlierStart && LaterStart + LaterSize > EarlierStart) {
    assert(LaterStart + LaterSize < EarlierStart + EarlierSize &&
           "Should have been handled as OW_Complete");
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, false)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool removePartiallyOverlappedStores(AliasAnalysis *AA,
                                            const DataLayout &DL,
                                            InstOverlapIntervalsTy &IOL) {
  bool Changed = false;
  for (auto OI : IOL) {
    Instruction *EarlierWrite = OI.first;
    MemoryLocation Loc = getLocForWrite(EarlierWrite);
    assert(isRemovable(EarlierWrite) && "Expect only removable instruction");

    const Value *Ptr = Loc.Ptr->stripPointerCasts();
    int64_t EarlierStart = 0;
    int64_t EarlierSize = int64_t(Loc.Size.getValue());
    GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
    OverlapIntervalsTy &IntervalMap = OI.second;
    Changed |=
        tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
    if (IntervalMap.empty())
      continue;
    Changed |=
        tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
  }
  return Changed;
}
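
// A "noop store" stores a value just loaded from the same pointer back to
// that pointer, for example:
//   %v = load i32, i32* %p
//   store i32 %v, i32* %p
// Such a store is redundant if %p's memory is not modified between the load
// and the store. The same routine also removes null stores into freshly
// calloc'ed objects, whose memory is already zero.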
static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
                               AliasAnalysis *AA, MemoryDependenceResults *MD,
                               const DataLayout &DL,
                               const TargetLibraryInfo *TLI,
                               InstOverlapIntervalsTy &IOL,
                               DenseMap<Instruction*, size_t> *InstrOrdering) {
  // Must be a store instruction.
  StoreInst *SI = dyn_cast<StoreInst>(Inst);
  if (!SI)
    return false;

  // If we're storing the same value back to a pointer that we just loaded
  // from, then the store can be removed.
  if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
    if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
        isRemovable(SI) && memoryIsNotModifiedBetween(DepLoad, SI, AA)) {

      LLVM_DEBUG(
          dbgs() << "DSE: Remove Store Of Load from same pointer:\n  LOAD: "
                 << *DepLoad << "\n  STORE: " << *SI << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumRedundantStores;
      return true;
    }
  }

  // Remove null stores into the calloc'ed objects.
  Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
  if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
    Instruction *UnderlyingPointer =
        dyn_cast<Instruction>(GetUnderlyingObject(SI->getPointerOperand(), DL));

    if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
        memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA)) {
      LLVM_DEBUG(
          dbgs() << "DSE: Remove null store to the calloc'ed object:\n  DEAD: "
                 << *Inst << "\n  OBJECT: " << *UnderlyingPointer << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumRedundantStores;
      return true;
    }
  }
  return false;
}

static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  const DataLayout &DL = BB.getModule()->getDataLayout();
  bool MadeChange = false;

  // FIXME: Maybe change this to use some abstraction like OrderedBasicBlock?
  // The current OrderedBasicBlock can't deal with mutation at the moment.
  size_t LastThrowingInstIndex = 0;
  DenseMap<Instruction*, size_t> InstrOrdering;
  size_t InstrIndex = 1;

  // A map of interval maps representing partially-overwritten value parts.
  InstOverlapIntervalsTy IOL;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(&*BBI, TLI)) {
      MadeChange |= handleFree(F, AA, MD, DT, TLI, IOL, &InstrOrdering);
      // Increment BBI after handleFree has potentially deleted instructions.
      // This ensures we maintain a valid iterator.
      ++BBI;
      continue;
    }

    Instruction *Inst = &*BBI++;

    size_t CurInstNumber = InstrIndex++;
    InstrOrdering.insert(std::make_pair(Inst, CurInstNumber));
    if (Inst->mayThrow()) {
      LastThrowingInstIndex = CurInstNumber;
      continue;
    }

    // Check to see if Inst writes to memory. If not, continue.
    if (!hasAnalyzableMemoryWrite(Inst, *TLI))
      continue;

    // eliminateNoopStore will update the iterator, if necessary.
    if (eliminateNoopStore(Inst, BBI, AA, MD, DL, TLI, IOL, &InstrOrdering)) {
      MadeChange = true;
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;

    // Figure out what location is being stored to.
    MemoryLocation Loc = getLocForWrite(Inst);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;

    // Loop until we find a store we can eliminate or a load that invalidates
    // the analysis. Without an upper bound on the number of instructions
    // examined, this analysis can become very time-consuming. However, the
    // potential gain diminishes as we process more instructions without
    // eliminating any of them. Therefore, we limit the number of instructions
    // we look at.
    auto Limit = MD->getDefaultBlockScanLimit();
    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on. MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with. If we
      // end up depending on a may- or must-aliased load, then we can't
      // optimize away the store and we bail out. However, if we depend on
      // something that overwrites the memory location we *can* potentially
      // optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      if (!hasAnalyzableMemoryWrite(DepWrite, *TLI))
        break;
      MemoryLocation DepLoc = getLocForWrite(DepWrite);
      // If we didn't get a useful location, bail out.
      if (!DepLoc.Ptr)
        break;

      // Make sure we don't look past a call which might throw. This is an
      // issue because MemoryDependenceAnalysis works in the wrong direction:
      // it finds instructions which dominate the current instruction, rather
      // than instructions which are post-dominated by the current instruction.
      //
      // If the underlying object is a non-escaping memory allocation, any
      // store to it is dead along the unwind edge. Otherwise, we need to
      // preserve the store.
      size_t DepIndex = InstrOrdering.lookup(DepWrite);
      assert(DepIndex && "Unexpected instruction");
      if (DepIndex <= LastThrowingInstIndex) {
        const Value* Underlying = GetUnderlyingObject(DepLoc.Ptr, DL);
        bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
        if (!IsStoreDeadOnUnwind) {
          // We're looking for a call to an allocation function where the
          // allocation doesn't escape before the last throwing instruction;
          // PointerMayBeCaptured is a reasonably fast approximation.
          IsStoreDeadOnUnwind = isAllocLikeFn(Underlying, TLI) &&
              !PointerMayBeCaptured(Underlying, false, true);
        }
        if (!IsStoreDeadOnUnwind)
          break;
      }

      // If we find a write that is a) removable (i.e., non-volatile), b) is
      // completely obliterated by the store to 'Loc', and c) which we know
      // that 'Inst' doesn't load from, then we can remove it.
      // Also try to merge two stores if a later one only touches memory
      // written to by the earlier one.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR = isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset,
                                         InstWriteOffset, DepWrite, IOL, *AA,
                                         BB.getParent());
        if (OR == OW_Complete) {
          LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *DepWrite
                            << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL, &InstrOrdering);
          ++NumFastStores;
          MadeChange = true;

          // We erased DepWrite; start over.
          InstDep = MD->getDependency(Inst);
          continue;
        } else if ((OR == OW_End && isShortenableAtTheEnd(DepWrite)) ||
                   ((OR == OW_Begin &&
                     isShortenableAtTheBeginning(DepWrite)))) {
          assert(!EnablePartialOverwriteTracking && "Do not expect to perform "
                                                    "when partial-overwrite "
                                                    "tracking is enabled");
          // The overwrite result is known, so these must be known, too.
          int64_t EarlierSize = DepLoc.Size.getValue();
          int64_t LaterSize = Loc.Size.getValue();
          bool IsOverwriteEnd = (OR == OW_End);
          MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
                                     InstWriteOffset, LaterSize,
                                     IsOverwriteEnd);
        } else if (EnablePartialStoreMerging &&
                   OR == OW_PartialEarlierWithFullLater) {
          auto *Earlier = dyn_cast<StoreInst>(DepWrite);
          auto *Later = dyn_cast<StoreInst>(Inst);
          if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
              Later && isa<ConstantInt>(Later->getValueOperand()) &&
              memoryIsNotModifiedBetween(Earlier, Later, AA)) {
            // If the store we find is:
            //   a) partially overwritten by the store to 'Loc',
            //   b) the later store is fully contained in the earlier one, and
            //   c) they both have a constant value,
            // then merge the two stores, replacing the earlier store's value
            // with a merge of both values.
            // TODO: Deal with other constant types (vectors, etc), and
            // probably some mem intrinsics (if needed).

            APInt EarlierValue =
                cast<ConstantInt>(Earlier->getValueOperand())->getValue();
            APInt LaterValue =
                cast<ConstantInt>(Later->getValueOperand())->getValue();
            unsigned LaterBits = LaterValue.getBitWidth();
            assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
            LaterValue = LaterValue.zext(EarlierValue.getBitWidth());

            // Offset of the smaller store inside the larger store.
            unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
            unsigned LShiftAmount =
                DL.isBigEndian()
                    ? EarlierValue.getBitWidth() - BitOffsetDiff - LaterBits
                    : BitOffsetDiff;
            APInt Mask =
                APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
                                  LShiftAmount + LaterBits);
            // Clear the bits we'll be replacing, then OR with the smaller
            // store, shifted appropriately.
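            // For example, merging a 32-bit earlier store of 0xAABBCCDD with
            // an 8-bit later store of 0x11 at byte offset 2 yields 0xAA11CCDD
            // on a little-endian target (LShiftAmount = 16) and 0xAABB11DD on
            // a big-endian one (LShiftAmount = 8).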
            APInt Merged =
                (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
            LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n  Earlier: " << *DepWrite
                              << "\n  Later: " << *Inst
                              << "\n  Merged Value: " << Merged << '\n');

            auto *SI = new StoreInst(
                ConstantInt::get(Earlier->getValueOperand()->getType(), Merged),
                Earlier->getPointerOperand(), false, Earlier->getAlignment(),
                Earlier->getOrdering(), Earlier->getSyncScopeID(), DepWrite);

            unsigned MDToKeep[] = {LLVMContext::MD_dbg, LLVMContext::MD_tbaa,
                                   LLVMContext::MD_alias_scope,
                                   LLVMContext::MD_noalias,
                                   LLVMContext::MD_nontemporal};
            SI->copyMetadata(*DepWrite, MDToKeep);
            ++NumModifiedStores;

            // Remove the earlier, wider store.
            size_t Idx = InstrOrdering.lookup(DepWrite);
            InstrOrdering.erase(DepWrite);
            InstrOrdering.insert(std::make_pair(SI, Idx));

            // Delete the old stores and now-dead instructions that feed them.
            deleteDeadInstruction(Inst, &BBI, *MD, *TLI, IOL, &InstrOrdering);
            deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL,
                                  &InstrOrdering);
            MadeChange = true;

            // We erased DepWrite and Inst (Loc); start over.
            break;
          }
        }
      }

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that
      // stores to the same location. For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and
      // Q alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (isRefSet(AA->getModRefInfo(DepWrite, Loc)))
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,
                                             DepWrite->getIterator(), &BB,
                                             /*QueryInst=*/ nullptr, &Limit);
    }
  }

  if (EnablePartialOverwriteTracking)
    MadeChange |= removePartiallyOverlappedStores(AA, DL, IOL);

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB, AA, MD, TLI, IOL, &InstrOrdering);

  return MadeChange;
}

static bool eliminateDeadStores(Function &F, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  for (BasicBlock &BB : F)
    // Only check non-dead blocks. Dead blocks may have strange pointer
    // cycles that will confuse alias analysis.
    if (DT->isReachableFromEntry(&BB))
      MadeChange |= eliminateDeadStores(BB, AA, MD, DT, TLI);

  return MadeChange;
}

//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
  AliasAnalysis *AA = &AM.getResult<AAManager>(F);
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  MemoryDependenceResults *MD = &AM.getResult<MemoryDependenceAnalysis>(F);
  const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F);

  if (!eliminateDeadStores(F, AA, MD, DT, TLI))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}

namespace {

/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
class DSELegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  DSELegacyPass() : FunctionPass(ID) {
    initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    MemoryDependenceResults *MD =
        &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

    return eliminateDeadStores(F, AA, MD, DT, TLI);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }
};

} // end anonymous namespace

char DSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
                    false)

FunctionPass *llvm::createDeadStoreEliminationPass() {
  return new DSELegacyPass();
}