//===- DeadStoreElimination.cpp - Fast Dead Store Elimination ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "dse"

STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");
STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
STATISTIC(NumModifiedStores, "Number of stores modified");

static cl::opt<bool>
EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
  cl::init(true), cl::Hidden,
  cl::desc("Enable partial-overwrite tracking in DSE"));

static cl::opt<bool>
EnablePartialStoreMerging("enable-dse-partial-store-merging",
  cl::init(true), cl::Hidden,
  cl::desc("Enable partial store merging in DSE"));

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

using OverlapIntervalsTy = std::map<int64_t, int64_t>;
using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;

/// Delete this instruction.  Before we do, go through and zero out all the
/// operands of this instruction.  If any of them become dead, delete them and
/// the computation tree that feeds them.
/// If ValueSet is non-null, remove any deleted instructions from it as well.
static void
deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI,
                      MemoryDependenceResults &MD, const TargetLibraryInfo &TLI,
                      InstOverlapIntervalsTy &IOL,
                      DenseMap<Instruction*, size_t> *InstrOrdering,
                      SmallSetVector<Value *, 16> *ValueSet = nullptr) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Keeping the iterator straight is a pain, so we let this routine tell the
  // caller what the next instruction is after we're done mucking about.
  BasicBlock::iterator NewIter = *BBI;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, &TLI))
          NowDeadInsts.push_back(OpI);
    }

    if (ValueSet) ValueSet->remove(DeadInst);
    InstrOrdering->erase(DeadInst);
    IOL.erase(DeadInst);

    if (NewIter == DeadInst->getIterator())
      NewIter = DeadInst->eraseFromParent();
    else
      DeadInst->eraseFromParent();
  } while (!NowDeadInsts.empty());
  *BBI = NewIter;
}

/// Does this instruction write some memory?  This only returns true for things
/// that we can analyze with other helpers below.
static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo &TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (auto CS = CallSite(I)) {
    if (Function *F = CS.getCalledFunction()) {
      StringRef FnName = F->getName();
      if (TLI.has(LibFunc_strcpy) && FnName == TLI.getName(LibFunc_strcpy))
        return true;
      if (TLI.has(LibFunc_strncpy) && FnName == TLI.getName(LibFunc_strncpy))
        return true;
      if (TLI.has(LibFunc_strcat) && FnName == TLI.getName(LibFunc_strcat))
        return true;
      if (TLI.has(LibFunc_strncat) && FnName == TLI.getName(LibFunc_strncat))
        return true;
    }
  }
  return false;
}

/// Return a Location stored to by the specified instruction. If isRemovable
/// returns true, this function and getLocForRead completely describe the
/// memory operations for this instruction.
static MemoryLocation getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return MemoryLocation::get(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    MemoryLocation Loc = MemoryLocation::getForDest(MI);
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (!II)
    return MemoryLocation();

  switch (II->getIntrinsicID()) {
  default:
    return MemoryLocation(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return MemoryLocation(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return MemoryLocation(II->getArgOperand(1), Len);
  }
  }
}

/// Return the location read by the specified "hasMemoryWrite" instruction if
/// any.
static MemoryLocation getLocForRead(Instruction *Inst,
                                    const TargetLibraryInfo &TLI) {
  assert(hasMemoryWrite(Inst, TLI) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
    return MemoryLocation::getForSource(MTI);
  return MemoryLocation();
}

/// If the value of this instruction and the memory it writes to are unused,
/// may we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end's, e.g. because a free may follow.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    }
  }

  if (auto CS = CallSite(I))
    return CS.getInstruction()->use_empty();

  return false;
}

/// Returns true if the end of this instruction can be safely shortened in
/// length.
static bool isShortenableAtTheEnd(Instruction *I) {
  // Don't shorten stores for now.
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
      default: return false;
      case Intrinsic::memset:
      case Intrinsic::memcpy:
        // Do shorten memory intrinsics.
        // FIXME: Add memmove if it's also safe to transform.
        return true;
    }
  }

  // Don't shorten library calls for now.

  return false;
}

/// Returns true if the beginning of this instruction can be safely shortened
/// in length.
static bool isShortenableAtTheBeginning(Instruction *I) {
  // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
  // easily done by offsetting the source address.
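  // For example, a memset(p, 0, 16) whose first 8 bytes are overwritten by a
  // later store can be rewritten by tryToShorten as memset(p + 8, 0, 8).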
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
  return II && II->getIntrinsicID() == Intrinsic::memset;
}

/// Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("Unexpected intrinsic!");
    case Intrinsic::init_trampoline:
      return II->getArgOperand(0);
    }
  }

  CallSite CS(I);
  // All the supported functions so far happen to have dest as their first
  // argument.
  return CS.getArgument(0);
}

static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI))
    return Size;
  return MemoryLocation::UnknownSize;
}

namespace {

enum OverwriteResult {
  OW_Begin,
  OW_Complete,
  OW_End,
  OW_PartialEarlierWithFullLater,
  OW_Unknown
};

} // end anonymous namespace

/// Return 'OW_Complete' if a store to the 'Later' location completely
/// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
/// 'Earlier' location is completely overwritten by 'Later', and 'OW_Begin' if
/// the beginning of the 'Earlier' location is overwritten by 'Later'.
/// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
/// overwritten by a later (smaller) store which doesn't write outside the big
/// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
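/// For example (illustrative IR), the earlier store here is OW_Complete:
///   store i16 1, i16* %p             ; earlier store covers [0, 2)
///   %q = bitcast i16* %p to i32*
///   store i32 0, i32* %q             ; later store covers [0, 4)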
static OverwriteResult isOverwrite(const MemoryLocation &Later,
                                   const MemoryLocation &Earlier,
                                   const DataLayout &DL,
                                   const TargetLibraryInfo &TLI,
                                   int64_t &EarlierOff, int64_t &LaterOff,
                                   Instruction *DepWrite,
                                   InstOverlapIntervalsTy &IOL) {
  // If we don't know the size of either access, then we can't do a comparison.
  if (Later.Size == MemoryLocation::UnknownSize ||
      Earlier.Size == MemoryLocation::UnknownSize)
    return OW_Unknown;

  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see if
  // the later store was larger than the earlier store.
  if (P1 == P2) {
    // Make sure that the Later size is >= the Earlier size.
    if (Later.Size >= Earlier.Size)
      return OW_Complete;
  }

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument).  If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OW_Unknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
  if (ObjectSize != MemoryLocation::UnknownSize)
    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
      return OW_Complete;

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different stores.
  if (BP1 != BP2)
    return OW_Unknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but which
  //    still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      Later.Size >= Earlier.Size &&
      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
    return OW_Complete;

  // We may now overlap, although the overlap is not complete. There might also
  // be other incomplete overlaps, and together, they might cover the complete
  // earlier write.
  // Note: The correctness of this logic depends on the fact that this function
  // is never called with DepWrite when there are any intervening reads.
  if (EnablePartialOverwriteTracking &&
      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
      int64_t(LaterOff + Later.Size) >= EarlierOff) {

    // Insert our part of the overlap into the map.
    auto &IM = IOL[DepWrite];
    DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff << ", " <<
                    int64_t(EarlierOff + Earlier.Size) << ") Later [" <<
                    LaterOff << ", " << int64_t(LaterOff + Later.Size) << ")\n");

    // Make sure that we only insert non-overlapping intervals and combine
    // adjacent intervals. The intervals are stored in the map with the ending
    // offset as the key (in the half-open sense) and the starting offset as
    // the value.
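    // For example, after recording writes to [0, 4) and [8, 12):
    //   IM = { 4 -> 0, 12 -> 8 }
    // A later write to [4, 8) merges with both, leaving IM = { 12 -> 0 }.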
    int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + Later.Size;

    // Find any intervals ending at, or after, LaterIntStart which start
    // before LaterIntEnd.
    auto ILI = IM.lower_bound(LaterIntStart);
    if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
      // This existing interval is overlapped with the current store somewhere
      // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
      // intervals and adjusting our start and end.
      LaterIntStart = std::min(LaterIntStart, ILI->second);
      LaterIntEnd = std::max(LaterIntEnd, ILI->first);
      ILI = IM.erase(ILI);

      // Continue erasing and adjusting our end in case other previous
      // intervals are also overlapped with the current store.
      //
      // |--- earlier 1 ---|  |--- earlier 2 ---|
      //     |------- later --------|
      //
      while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
        assert(ILI->second > LaterIntStart && "Unexpected interval");
        LaterIntEnd = std::max(LaterIntEnd, ILI->first);
        ILI = IM.erase(ILI);
      }
    }

    IM[LaterIntEnd] = LaterIntStart;

    ILI = IM.begin();
    if (ILI->second <= EarlierOff &&
        ILI->first >= int64_t(EarlierOff + Earlier.Size)) {
      DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier [" <<
                      EarlierOff << ", " <<
                      int64_t(EarlierOff + Earlier.Size) <<
                      ") Composite Later [" <<
                      ILI->second << ", " << ILI->first << ")\n");
      ++NumCompletePartials;
      return OW_Complete;
    }
  }

  // Check for an earlier store which writes to all the memory locations that
  // the later store writes to.
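  //
  //      |------- earlier -------|
  //          |--  later  --|
  //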
  if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
      int64_t(EarlierOff + Earlier.Size) > LaterOff &&
      uint64_t(LaterOff - EarlierOff) + Later.Size <= Earlier.Size) {
    DEBUG(dbgs() << "DSE: Partial overwrite of an earlier store [" << EarlierOff
                 << ", " << int64_t(EarlierOff + Earlier.Size)
                 << ") by a later store [" << LaterOff << ", "
                 << int64_t(LaterOff + Later.Size) << ")\n");
    // TODO: Maybe come up with a better name?
    return OW_PartialEarlierWithFullLater;
  }

  // Another interesting case is if the later store overwrites the end of the
  // earlier store.
  //
  //      |--earlier--|
  //                |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
  if (!EnablePartialOverwriteTracking &&
      (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + Earlier.Size) &&
       int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size)))
    return OW_End;

  // Finally, we also need to check if the later store overwrites the beginning
  // of the earlier store.
  //
  //                |--earlier--|
  //      |--   later   --|
  //
  // In this case we may want to move the destination address and trim the size
  // of earlier to avoid generating writes to addresses which will definitely
  // be overwritten later.
  if (!EnablePartialOverwriteTracking &&
      (LaterOff <= EarlierOff && int64_t(LaterOff + Later.Size) > EarlierOff)) {
    assert(int64_t(LaterOff + Later.Size) <
               int64_t(EarlierOff + Earlier.Size) &&
           "Expect to be handled as OW_Complete");
    return OW_Begin;
  }

  // Otherwise, they don't completely overlap.
  return OW_Unknown;
}

/// If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memcpy(A <- B)
///   memcpy(A <- A)
///
/// In this case, the second store to A does not make the first store to A dead.
/// The usual situation isn't an explicit A<-A store like this (which can be
/// trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const MemoryLocation &InstStoreLoc,
                               Instruction *DepWrite,
                               const TargetLibraryInfo &TLI,
                               AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
  if (!InstReadLoc.Ptr) return false;  // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;

  // Okay, 'Inst' may copy over itself.  However, we can still remove the
  // DepWrite instruction if we can prove that it reads from the same location
  // as Inst.  This handles useful cases like:
  //   memcpy(A <- B)
  //   memcpy(A <- B)
  // Here we don't know if A/B may alias, but we do know that B/B are must
  // aliases, so removing the first memcpy is safe (assuming it writes <= # of
  // bytes as the second one).
  MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);

  if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
    return false;

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}

/// Returns true if the memory accessed by the second instruction is not
/// modified between the first and the second instruction.
/// Precondition: The second instruction must be dominated by the first
/// instruction.
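/// For example (illustrative IR), with FirstI being the load and SecondI the
/// store, this returns false if @f may write to %P:
///   %v = load i32, i32* %P
///   call void @f()
///   store i32 %v, i32* %P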
static bool memoryIsNotModifiedBetween(Instruction *FirstI,
                                       Instruction *SecondI,
                                       AliasAnalysis *AA) {
  SmallVector<BasicBlock *, 16> WorkList;
  SmallPtrSet<BasicBlock *, 8> Visited;
  BasicBlock::iterator FirstBBI(FirstI);
  ++FirstBBI;
  BasicBlock::iterator SecondBBI(SecondI);
  BasicBlock *FirstBB = FirstI->getParent();
  BasicBlock *SecondBB = SecondI->getParent();
  MemoryLocation MemLoc = MemoryLocation::get(SecondI);

  // Start checking the block containing SecondI.
  WorkList.push_back(SecondBB);
  bool isFirstBlock = true;

  // Check all blocks going backward until we reach the block of FirstI.
  while (!WorkList.empty()) {
    BasicBlock *B = WorkList.pop_back_val();

    // Ignore instructions before FirstI if this is FirstBB.
    BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());

    BasicBlock::iterator EI;
    if (isFirstBlock) {
      // Ignore instructions after SecondI if this is the first visit of
      // SecondBB.
      assert(B == SecondBB && "first block is not the block of SecondI");
      EI = SecondBBI;
      isFirstBlock = false;
    } else {
      // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
      // In this case we also have to look at instructions after SecondI.
      EI = B->end();
    }
    for (; BI != EI; ++BI) {
      Instruction *I = &*BI;
      if (I->mayWriteToMemory() && I != SecondI)
        if (isModSet(AA->getModRefInfo(I, MemLoc)))
          return false;
    }
    if (B != FirstBB) {
      assert(B != &FirstBB->getParent()->getEntryBlock() &&
             "Should not hit the entry block because SecondI must be "
             "dominated by FirstI");
      for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
        if (!Visited.insert(*PredI).second)
          continue;
        WorkList.push_back(*PredI);
      }
    }
  }
  return true;
}

/// Find all blocks that will unconditionally lead to the block BB and append
/// them to Blocks.
static void findUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    TerminatorInst *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}

/// Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
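/// For example (illustrative IR), the store below is dead because the
/// stored-to memory is freed immediately afterwards:
///   store i32 0, i32* %p
///   %c = bitcast i32* %p to i8*
///   call void @free(i8* %c)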
static bool handleFree(CallInst *F, AliasAnalysis *AA,
                       MemoryDependenceResults *MD, DominatorTree *DT,
                       const TargetLibraryInfo *TLI,
                       InstOverlapIntervalsTy &IOL,
                       DenseMap<Instruction*, size_t> *InstrOrdering) {
  bool MadeChange = false;

  MemoryLocation Loc = MemoryLocation(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());
  const DataLayout &DL = F->getModule()->getDataLayout();

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep =
        MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasMemoryWrite(Dependency, *TLI) || !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      DEBUG(dbgs() << "DSE: Dead Store to soon-to-be-freed memory:\n  DEAD: "
                   << *Dependency << '\n');

      // DCE instructions only used to calculate that store.
      BasicBlock::iterator BBI(Dependency);
      deleteDeadInstruction(Dependency, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted. Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0; // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, BBI, BB);
    }

    if (Dep.isNonLocal())
      findUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}

/// Check to see if the specified location may alias any of the stack objects
/// in the DeadStackObjects set. If so, they become live because the location
/// is being loaded.
static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
                                  SmallSetVector<Value *, 16> &DeadStackObjects,
                                  const DataLayout &DL, AliasAnalysis *AA,
                                  const TargetLibraryInfo *TLI) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](Value *I) {
    // See if the loaded location could alias the stack location.
    MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}

/// Remove dead stores to stack-allocated locations in the function end block.
/// For example:
///   %A = alloca i32
///   ...
///   store i32 1, i32* %A
///   ret void
static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
                           MemoryDependenceResults *MD,
                           const TargetLibraryInfo *TLI,
                           InstOverlapIntervalsTy &IOL,
                           DenseMap<Instruction*, size_t> *InstrOrdering) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock &Entry = BB.getParent()->front();
  for (Instruction &I : Entry) {
    if (isa<AllocaInst>(&I))
      DeadStackObjects.insert(&I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyways.
    else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
      DeadStackObjects.insert(&I);
  }

  // Treat byval or inalloca arguments the same, stores to them are dead at the
  // end of the function.
  for (Argument &AI : BB.getParent()->args())
    if (AI.hasByValOrInAllocaAttr())
      DeadStackObjects.insert(&AI);

  const DataLayout &DL = BB.getModule()->getDataLayout();

  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ) {
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
      // See through pointer-to-pointer bitcasts.
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (Value *Pointer : Pointers)
        if (!DeadStackObjects.count(Pointer)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = &*BBI;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Objects: ";
              for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                   E = Pointers.end(); I != E; ++I) {
                dbgs() << **I;
                if (std::next(I) != E)
                  dbgs() << ", ";
              }
              dbgs() << '\n');

        // DCE instructions only used to calculate that store.
        deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, InstrOrdering,
                              &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(&*BBI, TLI)) {
      DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n  DEAD: "
                   << *&*BBI << '\n');
      deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, InstrOrdering,
                            &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(&*BBI);
      continue;
    }

    if (auto CS = CallSite(&*BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(&*BBI, TLI))
        DeadStackObjects.remove(&*BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        return isRefSet(AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI)));
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    // We can remove the dead stores, irrespective of the fence and its
    // ordering (release/acquire/seq_cst). Fences only constrain the order in
    // which already-visible stores become observable; they do not make a store
    // visible to other threads. So, skipping over a fence does not change a
    // store from being dead.
    if (isa<FenceInst>(*BBI))
      continue;

    MemoryLocation LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load.
        break;
      LoadedLoc = MemoryLocation::get(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = MemoryLocation::get(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = MemoryLocation::getForSource(MTI);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't
      // removed above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    removeAccessedObjects(LoadedLoc, DeadStackObjects, DL, AA, TLI);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}

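/// Trim \p EarlierWrite (a memory intrinsic) so that it no longer writes the
/// bytes overwritten by the later store at
/// [LaterOffset, LaterOffset + LaterSize). For example (illustrative), if a
/// later store covers the last 16 bytes of memset(p, 0, 32), the memset can
/// be shortened to memset(p, 0, 16).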
static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
                         int64_t &EarlierSize, int64_t LaterOffset,
                         int64_t LaterSize, bool IsOverwriteEnd) {
  // TODO: Base this on the target vector size so that if the earlier store
  // was too small to get vector writes anyway, then it's likely a good idea
  // to shorten it. Shortening a power-of-2-sized write is probably a bad
  // idea: any store/memset/memcpy of that size is likely to use vector
  // instructions, and shrinking it to a non-vector size is likely to be
  // slower.
  MemIntrinsic *EarlierIntrinsic = cast<MemIntrinsic>(EarlierWrite);
  unsigned EarlierWriteAlign = EarlierIntrinsic->getAlignment();
  if (!IsOverwriteEnd)
    LaterOffset = int64_t(LaterOffset + LaterSize);

  if (!(isPowerOf2_64(LaterOffset) && EarlierWriteAlign <= LaterOffset) &&
      !((EarlierWriteAlign != 0) && LaterOffset % EarlierWriteAlign == 0))
    return false;

  DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
               << (IsOverwriteEnd ? "END" : "BEGIN") << ": " << *EarlierWrite
               << "\n  KILLER (offset " << LaterOffset << ", " << EarlierSize
               << ")\n");

  int64_t NewLength = IsOverwriteEnd
                          ? LaterOffset - EarlierOffset
                          : EarlierSize - (LaterOffset - EarlierOffset);

  Value *EarlierWriteLength = EarlierIntrinsic->getLength();
  Value *TrimmedLength =
      ConstantInt::get(EarlierWriteLength->getType(), NewLength);
  EarlierIntrinsic->setLength(TrimmedLength);

  EarlierSize = NewLength;
  if (!IsOverwriteEnd) {
    int64_t OffsetMoved = (LaterOffset - EarlierOffset);
    Value *Indices[1] = {
        ConstantInt::get(EarlierWriteLength->getType(), OffsetMoved)};
    GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
        EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
    EarlierIntrinsic->setDest(NewDestGEP);
    EarlierOffset = EarlierOffset + OffsetMoved;
  }
  return true;
}

static bool tryToShortenEnd(Instruction *EarlierWrite,
                            OverlapIntervalsTy &IntervalMap,
                            int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = --IntervalMap.end();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart > EarlierStart && LaterStart < EarlierStart + EarlierSize &&
      LaterStart + LaterSize >= EarlierStart + EarlierSize) {
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, true)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool tryToShortenBegin(Instruction *EarlierWrite,
                              OverlapIntervalsTy &IntervalMap,
                              int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = IntervalMap.begin();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart <= EarlierStart && LaterStart + LaterSize > EarlierStart) {
    assert(LaterStart + LaterSize < EarlierStart + EarlierSize &&
           "Should have been handled as OW_Complete");
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, false)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool removePartiallyOverlappedStores(AliasAnalysis *AA,
                                            const DataLayout &DL,
                                            InstOverlapIntervalsTy &IOL) {
  bool Changed = false;
  for (auto OI : IOL) {
    Instruction *EarlierWrite = OI.first;
    MemoryLocation Loc = getLocForWrite(EarlierWrite, *AA);
    assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
    assert(Loc.Size != MemoryLocation::UnknownSize && "Unexpected mem loc");

    const Value *Ptr = Loc.Ptr->stripPointerCasts();
    int64_t EarlierStart = 0;
    int64_t EarlierSize = int64_t(Loc.Size);
    GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
    OverlapIntervalsTy &IntervalMap = OI.second;
    Changed |=
        tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
    if (IntervalMap.empty())
      continue;
    Changed |=
        tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
  }
  return Changed;
}

static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
                               AliasAnalysis *AA, MemoryDependenceResults *MD,
                               const DataLayout &DL,
                               const TargetLibraryInfo *TLI,
                               InstOverlapIntervalsTy &IOL,
                               DenseMap<Instruction*, size_t> *InstrOrdering) {
  // Must be a store instruction.
  StoreInst *SI = dyn_cast<StoreInst>(Inst);
  if (!SI)
    return false;

  // If we're storing the same value back to a pointer that we just loaded
  // from, then the store can be removed.
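  // For example (illustrative IR):
  //   %v = load i32, i32* %p
  //   store i32 %v, i32* %p      ; noop store, removable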
  if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
    if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
        isRemovable(SI) && memoryIsNotModifiedBetween(DepLoad, SI, AA)) {

      DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  LOAD: "
                   << *DepLoad << "\n  STORE: " << *SI << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumRedundantStores;
      return true;
    }
  }

  // Remove null stores into calloc'ed objects.
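  // For example (illustrative IR):
  //   %p = call i8* @calloc(i64 1, i64 4)
  //   %q = bitcast i8* %p to i32*
  //   store i32 0, i32* %q       ; dead: calloc already zeroed the memory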
  Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
  if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
    Instruction *UnderlyingPointer =
        dyn_cast<Instruction>(GetUnderlyingObject(SI->getPointerOperand(), DL));

    if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
        memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA)) {
      DEBUG(
          dbgs() << "DSE: Remove null store to the calloc'ed object:\n  DEAD: "
                 << *Inst << "\n  OBJECT: " << *UnderlyingPointer << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumRedundantStores;
      return true;
    }
  }
  return false;
}

static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  const DataLayout &DL = BB.getModule()->getDataLayout();
  bool MadeChange = false;

  // FIXME: Maybe change this to use some abstraction like OrderedBasicBlock?
  // The current OrderedBasicBlock can't deal with mutation at the moment.
  size_t LastThrowingInstIndex = 0;
  DenseMap<Instruction*, size_t> InstrOrdering;
  size_t InstrIndex = 1;

  // A map of interval maps representing partially-overwritten value parts.
  InstOverlapIntervalsTy IOL;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(&*BBI, TLI)) {
      MadeChange |= handleFree(F, AA, MD, DT, TLI, IOL, &InstrOrdering);
      // Increment BBI after handleFree has potentially deleted instructions.
      // This ensures we maintain a valid iterator.
      ++BBI;
      continue;
    }

    Instruction *Inst = &*BBI++;

    size_t CurInstNumber = InstrIndex++;
    InstrOrdering.insert(std::make_pair(Inst, CurInstNumber));
    if (Inst->mayThrow()) {
      LastThrowingInstIndex = CurInstNumber;
      continue;
    }

    // Check to see if Inst writes to memory.  If not, continue.
    if (!hasMemoryWrite(Inst, *TLI))
      continue;

    // eliminateNoopStore will update the iterator, if necessary.
    if (eliminateNoopStore(Inst, BBI, AA, MD, DL, TLI, IOL, &InstrOrdering)) {
      MadeChange = true;
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;

    // Figure out what location is being stored to.
    MemoryLocation Loc = getLocForWrite(Inst, *AA);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;

    // Loop until we find a store we can eliminate or a load that
    // invalidates the analysis. Without an upper bound on the number of
    // instructions examined, this analysis can become very time-consuming.
    // However, the potential gain diminishes as we process more instructions
    // without eliminating any of them. Therefore, we limit the number of
    // instructions we look at.
    auto Limit = MD->getDefaultBlockScanLimit();
    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out.  However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      MemoryLocation DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, bail out.
      if (!DepLoc.Ptr)
        break;

      // Make sure we don't look past a call which might throw. This is an
      // issue because MemoryDependenceAnalysis works in the wrong direction:
      // it finds instructions which dominate the current instruction, rather
      // than instructions which are post-dominated by the current instruction.
      //
      // If the underlying object is a non-escaping memory allocation, any store
      // to it is dead along the unwind edge. Otherwise, we need to preserve
      // the store.
      size_t DepIndex = InstrOrdering.lookup(DepWrite);
      assert(DepIndex && "Unexpected instruction");
      if (DepIndex <= LastThrowingInstIndex) {
        const Value *Underlying = GetUnderlyingObject(DepLoc.Ptr, DL);
        bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
        if (!IsStoreDeadOnUnwind) {
          // We're looking for a call to an allocation function where the
          // allocation doesn't escape before the last throwing instruction;
          // PointerMayBeCaptured is a reasonably fast approximation.
          IsStoreDeadOnUnwind = isAllocLikeFn(Underlying, TLI) &&
              !PointerMayBeCaptured(Underlying, false, true);
        }
        if (!IsStoreDeadOnUnwind)
          break;
      }

      // If we find a write that is a) removable (i.e., non-volatile), b)
      // completely obliterated by the store to 'Loc', and c) known not to be
      // loaded from by 'Inst', then we can remove it.
      // Also try to merge two stores if a later one only touches memory
      // written to by the earlier one.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR =
            isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset, InstWriteOffset,
                        DepWrite, IOL);
        if (OR == OW_Complete) {
          DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                << *DepWrite << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL, &InstrOrdering);
          ++NumFastStores;
          MadeChange = true;

          // We erased DepWrite; start over.
          InstDep = MD->getDependency(Inst);
          continue;
        } else if ((OR == OW_End && isShortenableAtTheEnd(DepWrite)) ||
                   ((OR == OW_Begin &&
                     isShortenableAtTheBeginning(DepWrite)))) {
          assert(!EnablePartialOverwriteTracking && "Do not expect to perform "
                                                    "when partial-overwrite "
                                                    "tracking is enabled");
          int64_t EarlierSize = DepLoc.Size;
          int64_t LaterSize = Loc.Size;
          bool IsOverwriteEnd = (OR == OW_End);
          MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
                                     InstWriteOffset, LaterSize,
                                     IsOverwriteEnd);
        } else if (EnablePartialStoreMerging &&
                   OR == OW_PartialEarlierWithFullLater) {
          auto *Earlier = dyn_cast<StoreInst>(DepWrite);
          auto *Later = dyn_cast<StoreInst>(Inst);
          if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
              Later && isa<ConstantInt>(Later->getValueOperand()) &&
              memoryIsNotModifiedBetween(Earlier, Later, AA)) {
            // At this point the store we found (the earlier one) is partially
            // overwritten by the later store to 'Loc', the later store is
            // fully contained in the earlier one, and both store constant
            // values. Merge the two stores, replacing the earlier store's
            // value with a merge of both values.
            // TODO: Deal with other constant types (vectors, etc), and
            // probably some mem intrinsics (if needed)
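            //
            // For example (little-endian, illustrative): merging an earlier
            // store i32 0x11223344 at offset 0 with a later store i16 0xAABB
            // at byte offset 2 gives BitOffsetDiff = 16, LShiftAmount = 16,
            // and Merged = 0xAABB3344 (memory bytes 44 33 BB AA).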

            APInt EarlierValue =
                cast<ConstantInt>(Earlier->getValueOperand())->getValue();
            APInt LaterValue =
                cast<ConstantInt>(Later->getValueOperand())->getValue();
            unsigned LaterBits = LaterValue.getBitWidth();
            assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
            LaterValue = LaterValue.zext(EarlierValue.getBitWidth());

            // Offset of the smaller store inside the larger store.
            unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
            unsigned LShiftAmount =
                DL.isBigEndian()
                    ? EarlierValue.getBitWidth() - BitOffsetDiff - LaterBits
                    : BitOffsetDiff;
            APInt Mask =
                APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
                                  LShiftAmount + LaterBits);
            // Clear the bits we'll be replacing, then OR with the smaller
            // store, shifted appropriately.
            APInt Merged =
                (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
            DEBUG(dbgs() << "DSE: Merge Stores:\n  Earlier: " << *DepWrite
                         << "\n  Later: " << *Inst
                         << "\n  Merged Value: " << Merged << '\n');

            auto *SI = new StoreInst(
                ConstantInt::get(Earlier->getValueOperand()->getType(), Merged),
                Earlier->getPointerOperand(), false, Earlier->getAlignment(),
                Earlier->getOrdering(), Earlier->getSyncScopeID(), DepWrite);

            unsigned MDToKeep[] = {LLVMContext::MD_dbg, LLVMContext::MD_tbaa,
                                   LLVMContext::MD_alias_scope,
                                   LLVMContext::MD_noalias,
                                   LLVMContext::MD_nontemporal};
            SI->copyMetadata(*DepWrite, MDToKeep);
            ++NumModifiedStores;

            // Remove the earlier, wider store.
            size_t Idx = InstrOrdering.lookup(DepWrite);
            InstrOrdering.erase(DepWrite);
            InstrOrdering.insert(std::make_pair(SI, Idx));

            // Delete the old stores and now-dead instructions that feed them.
            deleteDeadInstruction(Inst, &BBI, *MD, *TLI, IOL, &InstrOrdering);
            deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL,
                                  &InstrOrdering);
            MadeChange = true;

            // We erased DepWrite and Inst (Loc); start over.
            break;
          }
        }
      }

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (isRefSet(AA->getModRefInfo(DepWrite, Loc)))
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,
                                             DepWrite->getIterator(), &BB,
                                             /*QueryInst=*/ nullptr, &Limit);
    }
  }

  if (EnablePartialOverwriteTracking)
    MadeChange |= removePartiallyOverlappedStores(AA, DL, IOL);

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB, AA, MD, TLI, IOL, &InstrOrdering);

  return MadeChange;
}

static bool eliminateDeadStores(Function &F, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  for (BasicBlock &BB : F)
    // Only check non-dead blocks.  Dead blocks may have strange pointer
    // cycles that will confuse alias analysis.
    if (DT->isReachableFromEntry(&BB))
      MadeChange |= eliminateDeadStores(BB, AA, MD, DT, TLI);

  return MadeChange;
}

//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//

PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
  AliasAnalysis *AA = &AM.getResult<AAManager>(F);
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  MemoryDependenceResults *MD = &AM.getResult<MemoryDependenceAnalysis>(F);
  const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F);

  if (!eliminateDeadStores(F, AA, MD, DT, TLI))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}

namespace {

/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
class DSELegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  DSELegacyPass() : FunctionPass(ID) {
    initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    MemoryDependenceResults *MD =
        &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

    return eliminateDeadStores(F, AA, MD, DT, TLI);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }
};

} // end anonymous namespace

char DSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
                    false)

FunctionPass *llvm::createDeadStoreEliminationPass() {
  return new DSELegacyPass();
}