1 //===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements a trivial dead store elimination that only considers
11 // basic-block local redundant stores.
12 //
13 // FIXME: This should eventually be extended to be a post-dominator tree
14 // traversal.  Doing so would be pretty trivial.
15 //
16 //===----------------------------------------------------------------------===//
17 
18 #include "llvm/Transforms/Scalar/DeadStoreElimination.h"
19 #include "llvm/ADT/DenseMap.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SetVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/GlobalsModRef.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
28 #include "llvm/Analysis/TargetLibraryInfo.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/IR/Constants.h"
31 #include "llvm/IR/DataLayout.h"
32 #include "llvm/IR/Dominators.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/GlobalVariable.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/IR/IntrinsicInst.h"
37 #include "llvm/Pass.h"
38 #include "llvm/Support/CommandLine.h"
39 #include "llvm/Support/Debug.h"
40 #include "llvm/Support/raw_ostream.h"
41 #include "llvm/Transforms/Scalar.h"
42 #include "llvm/Transforms/Utils/Local.h"
43 #include <map>
44 using namespace llvm;
45 
46 #define DEBUG_TYPE "dse"
47 
48 STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
49 STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");
51 STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
52 
53 static cl::opt<bool>
54 EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
55   cl::init(true), cl::Hidden,
56   cl::desc("Enable partial-overwrite tracking in DSE"));
57 
58 
59 //===----------------------------------------------------------------------===//
60 // Helper functions
61 //===----------------------------------------------------------------------===//
62 typedef std::map<int64_t, int64_t> OverlapIntervalsTy;
63 typedef DenseMap<Instruction *, OverlapIntervalsTy> InstOverlapIntervalsTy;
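// An OverlapIntervalsTy records which byte ranges of an earlier store have
// been overwritten by later stores. The intervals are half-open and keyed by
// their end offset, with the start offset as the mapped value; e.g. recorded
// overwrites of bytes [0, 4) and [8, 12) are stored as {4 -> 0, 12 -> 8}.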
64 
65 /// Delete this instruction.  Before we do, go through and zero out all the
66 /// operands of this instruction.  If any of them become dead, delete them and
67 /// the computation tree that feeds them.
68 /// If ValueSet is non-null, remove any deleted instructions from it as well.
69 static void
70 deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI,
71                       MemoryDependenceResults &MD, const TargetLibraryInfo &TLI,
72                       InstOverlapIntervalsTy &IOL,
73                       DenseMap<Instruction*, size_t> *InstrOrdering,
74                       SmallSetVector<Value *, 16> *ValueSet = nullptr) {
75   SmallVector<Instruction*, 32> NowDeadInsts;
76 
77   NowDeadInsts.push_back(I);
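  // Pre-decrement NumFastOther so the net count excludes I itself: the loop
  // below increments it once per deleted instruction, and the caller counts
  // I separately (e.g. as NumFastStores).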
78   --NumFastOther;
79 
80   // Keeping the iterator straight is a pain, so we let this routine tell the
81   // caller what the next instruction is after we're done mucking about.
82   BasicBlock::iterator NewIter = *BBI;
83 
84   // Before we touch this instruction, remove it from memdep!
85   do {
86     Instruction *DeadInst = NowDeadInsts.pop_back_val();
87     ++NumFastOther;
88 
89     // This instruction is dead, zap it, in stages.  Start by removing it from
90     // MemDep, which needs to know the operands and needs it to be in the
91     // function.
92     MD.removeInstruction(DeadInst);
93 
94     for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
95       Value *Op = DeadInst->getOperand(op);
96       DeadInst->setOperand(op, nullptr);
97 
98       // If this operand just became dead, add it to the NowDeadInsts list.
99       if (!Op->use_empty()) continue;
100 
101       if (Instruction *OpI = dyn_cast<Instruction>(Op))
102         if (isInstructionTriviallyDead(OpI, &TLI))
103           NowDeadInsts.push_back(OpI);
104     }
105 
106     if (ValueSet) ValueSet->remove(DeadInst);
107     InstrOrdering->erase(DeadInst);
108     IOL.erase(DeadInst);
109 
110     if (NewIter == DeadInst->getIterator())
111       NewIter = DeadInst->eraseFromParent();
112     else
113       DeadInst->eraseFromParent();
114   } while (!NowDeadInsts.empty());
115   *BBI = NewIter;
116 }
117 
118 /// Does this instruction write some memory?  This only returns true for things
119 /// that we can analyze with other helpers below.
120 static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo &TLI) {
121   if (isa<StoreInst>(I))
122     return true;
123   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
124     switch (II->getIntrinsicID()) {
125     default:
126       return false;
127     case Intrinsic::memset:
128     case Intrinsic::memmove:
129     case Intrinsic::memcpy:
130     case Intrinsic::init_trampoline:
131     case Intrinsic::lifetime_end:
132       return true;
133     }
134   }
135   if (auto CS = CallSite(I)) {
136     if (Function *F = CS.getCalledFunction()) {
137       StringRef FnName = F->getName();
138       if (TLI.has(LibFunc_strcpy) && FnName == TLI.getName(LibFunc_strcpy))
139         return true;
140       if (TLI.has(LibFunc_strncpy) && FnName == TLI.getName(LibFunc_strncpy))
141         return true;
142       if (TLI.has(LibFunc_strcat) && FnName == TLI.getName(LibFunc_strcat))
143         return true;
144       if (TLI.has(LibFunc_strncat) && FnName == TLI.getName(LibFunc_strncat))
145         return true;
146     }
147   }
148   return false;
149 }
150 
151 /// Return a Location stored to by the specified instruction. If isRemovable
152 /// returns true, this function and getLocForRead completely describe the memory
153 /// operations for this instruction.
154 static MemoryLocation getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
155   if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
156     return MemoryLocation::get(SI);
157 
158   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
159     // memcpy/memmove/memset.
160     MemoryLocation Loc = MemoryLocation::getForDest(MI);
161     return Loc;
162   }
163 
164   IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
165   if (!II)
166     return MemoryLocation();
167 
168   switch (II->getIntrinsicID()) {
169   default:
170     return MemoryLocation(); // Unhandled intrinsic.
171   case Intrinsic::init_trampoline:
172     // FIXME: We don't know the size of the trampoline, so we can't really
173     // handle it here.
174     return MemoryLocation(II->getArgOperand(0));
175   case Intrinsic::lifetime_end: {
176     uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
177     return MemoryLocation(II->getArgOperand(1), Len);
178   }
179   }
180 }
181 
182 /// Return the location read by the specified "hasMemoryWrite" instruction if
183 /// any.
184 static MemoryLocation getLocForRead(Instruction *Inst,
185                                     const TargetLibraryInfo &TLI) {
186   assert(hasMemoryWrite(Inst, TLI) && "Unknown instruction case");
187 
188   // The only instructions that both read and write are the mem transfer
189   // instructions (memcpy/memmove).
190   if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
191     return MemoryLocation::getForSource(MTI);
192   return MemoryLocation();
193 }
194 
/// If the value of this instruction and the memory it writes to are unused,
/// may we delete this instruction?
197 static bool isRemovable(Instruction *I) {
198   // Don't remove volatile/atomic stores.
199   if (StoreInst *SI = dyn_cast<StoreInst>(I))
200     return SI->isUnordered();
201 
202   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
203     switch (II->getIntrinsicID()) {
204     default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
205     case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end intrinsics, e.g. because one may be
      // followed by a free.
208       return false;
209     case Intrinsic::init_trampoline:
210       // Always safe to remove init_trampoline.
211       return true;
212 
213     case Intrinsic::memset:
214     case Intrinsic::memmove:
215     case Intrinsic::memcpy:
216       // Don't remove volatile memory intrinsics.
217       return !cast<MemIntrinsic>(II)->isVolatile();
218     }
219   }
220 
221   if (auto CS = CallSite(I))
222     return CS.getInstruction()->use_empty();
223 
224   return false;
225 }
226 
227 
/// Returns true if this instruction's write can be safely shortened at the
/// end.
230 static bool isShortenableAtTheEnd(Instruction *I) {
231   // Don't shorten stores for now
232   if (isa<StoreInst>(I))
233     return false;
234 
235   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
236     switch (II->getIntrinsicID()) {
237       default: return false;
238       case Intrinsic::memset:
239       case Intrinsic::memcpy:
240         // Do shorten memory intrinsics.
241         // FIXME: Add memmove if it's also safe to transform.
242         return true;
243     }
244   }
245 
  // Don't shorten library calls for now.
247 
248   return false;
249 }
250 
/// Returns true if this instruction's write can be safely shortened at the
/// beginning.
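/// For example, when the first 8 bytes of
///   memset(p, 0, 32)
/// are overwritten by a later store, the memset can be rewritten as
///   memset(p + 8, 0, 24)
/// (see tryToShorten below).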
253 static bool isShortenableAtTheBeginning(Instruction *I) {
254   // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
255   // easily done by offsetting the source address.
256   IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
257   return II && II->getIntrinsicID() == Intrinsic::memset;
258 }
259 
260 /// Return the pointer that is being written to.
261 static Value *getStoredPointerOperand(Instruction *I) {
262   if (StoreInst *SI = dyn_cast<StoreInst>(I))
263     return SI->getPointerOperand();
264   if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
265     return MI->getDest();
266 
267   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
268     switch (II->getIntrinsicID()) {
269     default: llvm_unreachable("Unexpected intrinsic!");
270     case Intrinsic::init_trampoline:
271       return II->getArgOperand(0);
272     }
273   }
274 
275   CallSite CS(I);
276   // All the supported functions so far happen to have dest as their first
277   // argument.
278   return CS.getArgument(0);
279 }
280 
281 static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
282                                const TargetLibraryInfo &TLI) {
283   uint64_t Size;
284   if (getObjectSize(V, Size, DL, &TLI))
285     return Size;
286   return MemoryLocation::UnknownSize;
287 }
288 
289 namespace {
290 enum OverwriteResult { OW_Begin, OW_Complete, OW_End, OW_Unknown };
291 }
292 
293 /// Return 'OW_Complete' if a store to the 'Later' location completely
294 /// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
295 /// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
296 /// beginning of the 'Earlier' location is overwritten by 'Later', or
297 /// 'OW_Unknown' if nothing can be determined.
298 static OverwriteResult isOverwrite(const MemoryLocation &Later,
299                                    const MemoryLocation &Earlier,
300                                    const DataLayout &DL,
301                                    const TargetLibraryInfo &TLI,
302                                    int64_t &EarlierOff, int64_t &LaterOff,
303                                    Instruction *DepWrite,
304                                    InstOverlapIntervalsTy &IOL) {
305   // If we don't know the sizes of either access, then we can't do a comparison.
306   if (Later.Size == MemoryLocation::UnknownSize ||
307       Earlier.Size == MemoryLocation::UnknownSize)
308     return OW_Unknown;
309 
310   const Value *P1 = Earlier.Ptr->stripPointerCasts();
311   const Value *P2 = Later.Ptr->stripPointerCasts();
312 
313   // If the start pointers are the same, we just have to compare sizes to see if
314   // the later store was larger than the earlier store.
315   if (P1 == P2) {
316     // Make sure that the Later size is >= the Earlier size.
317     if (Later.Size >= Earlier.Size)
318       return OW_Complete;
319   }
320 
321   // Check to see if the later store is to the entire object (either a global,
322   // an alloca, or a byval/inalloca argument).  If so, then it clearly
323   // overwrites any other store to the same object.
324   const Value *UO1 = GetUnderlyingObject(P1, DL),
325               *UO2 = GetUnderlyingObject(P2, DL);
326 
327   // If we can't resolve the same pointers to the same object, then we can't
328   // analyze them at all.
329   if (UO1 != UO2)
330     return OW_Unknown;
331 
332   // If the "Later" store is to a recognizable object, get its size.
333   uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
334   if (ObjectSize != MemoryLocation::UnknownSize)
335     if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
336       return OW_Complete;
337 
338   // Okay, we have stores to two completely different pointers.  Try to
339   // decompose the pointer into a "base + constant_offset" form.  If the base
340   // pointers are equal, then we can reason about the two stores.
341   EarlierOff = 0;
342   LaterOff = 0;
343   const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
344   const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);
345 
346   // If the base pointers still differ, we have two completely different stores.
347   if (BP1 != BP2)
348     return OW_Unknown;
349 
350   // The later store completely overlaps the earlier store if:
351   //
352   // 1. Both start at the same offset and the later one's size is greater than
353   //    or equal to the earlier one's, or
354   //
355   //      |--earlier--|
356   //      |--   later   --|
357   //
358   // 2. The earlier store has an offset greater than the later offset, but which
359   //    still lies completely within the later store.
360   //
361   //        |--earlier--|
362   //    |-----  later  ------|
363   //
364   // We have to be careful here as *Off is signed while *.Size is unsigned.
365   if (EarlierOff >= LaterOff &&
366       Later.Size >= Earlier.Size &&
367       uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
368     return OW_Complete;
369 
370   // We may now overlap, although the overlap is not complete. There might also
371   // be other incomplete overlaps, and together, they might cover the complete
372   // earlier write.
  // Note: The correctness of this logic depends on the fact that this
  // function is never called with DepWrite when there are intervening reads
  // of the earlier location.
375   if (EnablePartialOverwriteTracking &&
376       LaterOff < int64_t(EarlierOff + Earlier.Size) &&
377       int64_t(LaterOff + Later.Size) >= EarlierOff) {
378 
379     // Insert our part of the overlap into the map.
380     auto &IM = IOL[DepWrite];
381     DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff << ", " <<
382                     int64_t(EarlierOff + Earlier.Size) << ") Later [" <<
383                     LaterOff << ", " << int64_t(LaterOff + Later.Size) << ")\n");
384 
385     // Make sure that we only insert non-overlapping intervals and combine
386     // adjacent intervals. The intervals are stored in the map with the ending
387     // offset as the key (in the half-open sense) and the starting offset as
388     // the value.
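    // For example, merging a later store over [6, 13) into existing
    // intervals {8 -> 4, 16 -> 12} (i.e. [4, 8) and [12, 16)) erases both
    // entries and leaves the single combined interval {16 -> 4}.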
389     int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + Later.Size;
390 
391     // Find any intervals ending at, or after, LaterIntStart which start
392     // before LaterIntEnd.
393     auto ILI = IM.lower_bound(LaterIntStart);
394     if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
395       // This existing interval is overlapped with the current store somewhere
396       // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
397       // intervals and adjusting our start and end.
398       LaterIntStart = std::min(LaterIntStart, ILI->second);
399       LaterIntEnd = std::max(LaterIntEnd, ILI->first);
400       ILI = IM.erase(ILI);
401 
402       // Continue erasing and adjusting our end in case other previous
403       // intervals are also overlapped with the current store.
404       //
      // |--- earlier 1 ---|  |--- earlier 2 ---|
406       //     |------- later---------|
407       //
408       while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
409         assert(ILI->second > LaterIntStart && "Unexpected interval");
410         LaterIntEnd = std::max(LaterIntEnd, ILI->first);
411         ILI = IM.erase(ILI);
412       }
413     }
414 
415     IM[LaterIntEnd] = LaterIntStart;
416 
417     ILI = IM.begin();
418     if (ILI->second <= EarlierOff &&
419         ILI->first >= int64_t(EarlierOff + Earlier.Size)) {
420       DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier [" <<
421                       EarlierOff << ", " <<
422                       int64_t(EarlierOff + Earlier.Size) <<
423                       ") Composite Later [" <<
424                       ILI->second << ", " << ILI->first << ")\n");
425       ++NumCompletePartials;
426       return OW_Complete;
427     }
428   }
429 
430   // Another interesting case is if the later store overwrites the end of the
431   // earlier store.
432   //
433   //      |--earlier--|
434   //                |--   later   --|
435   //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
438   if (!EnablePartialOverwriteTracking &&
439       (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + Earlier.Size) &&
440        int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size)))
441     return OW_End;
442 
443   // Finally, we also need to check if the later store overwrites the beginning
444   // of the earlier store.
445   //
446   //                |--earlier--|
447   //      |--   later   --|
448   //
449   // In this case we may want to move the destination address and trim the size
450   // of earlier to avoid generating writes to addresses which will definitely
451   // be overwritten later.
452   if (!EnablePartialOverwriteTracking &&
453       (LaterOff <= EarlierOff && int64_t(LaterOff + Later.Size) > EarlierOff)) {
454     assert(int64_t(LaterOff + Later.Size) <
455                int64_t(EarlierOff + Earlier.Size) &&
456            "Expect to be handled as OW_Complete");
457     return OW_Begin;
458   }
459   // Otherwise, they don't completely overlap.
460   return OW_Unknown;
461 }
462 
463 /// If 'Inst' might be a self read (i.e. a noop copy of a
464 /// memory region into an identical pointer) then it doesn't actually make its
465 /// input dead in the traditional sense.  Consider this case:
466 ///
467 ///   memcpy(A <- B)
468 ///   memcpy(A <- A)
469 ///
470 /// In this case, the second store to A does not make the first store to A dead.
471 /// The usual situation isn't an explicit A<-A store like this (which can be
472 /// trivially removed) but a case where two pointers may alias.
473 ///
474 /// This function detects when it is unsafe to remove a dependent instruction
475 /// because the DSE inducing instruction may be a self-read.
476 static bool isPossibleSelfRead(Instruction *Inst,
477                                const MemoryLocation &InstStoreLoc,
478                                Instruction *DepWrite,
479                                const TargetLibraryInfo &TLI,
480                                AliasAnalysis &AA) {
481   // Self reads can only happen for instructions that read memory.  Get the
482   // location read.
483   MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
484   if (!InstReadLoc.Ptr) return false;  // Not a reading instruction.
485 
486   // If the read and written loc obviously don't alias, it isn't a read.
487   if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;
488 
  // Okay, 'Inst' may copy over itself.  However, we can still remove the
  // DepWrite instruction if we can prove that it reads from the same location
  // as Inst.  This handles useful cases like:
  //   memcpy(A <- B)
  //   memcpy(A <- B)
  // Here we don't know if A/B may alias, but we do know that B/B are must
  // aliases, so removing the first memcpy is safe (assuming it writes <= the
  // number of bytes written by the second one).
497   MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);
498 
499   if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
500     return false;
501 
502   // If DepWrite doesn't read memory or if we can't prove it is a must alias,
503   // then it can't be considered dead.
504   return true;
505 }
506 
/// Returns true if the memory which is accessed by the second instruction is
/// not modified between the first and the second instruction.
509 /// Precondition: Second instruction must be dominated by the first
510 /// instruction.
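/// For example, with FirstI a load and SecondI a store separated by a
/// diamond in the CFG, both arms of the diamond (and the straight-line code
/// around it) are scanned for instructions that may write SecondI's memory
/// location.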
511 static bool memoryIsNotModifiedBetween(Instruction *FirstI,
512                                        Instruction *SecondI,
513                                        AliasAnalysis *AA) {
514   SmallVector<BasicBlock *, 16> WorkList;
515   SmallPtrSet<BasicBlock *, 8> Visited;
516   BasicBlock::iterator FirstBBI(FirstI);
517   ++FirstBBI;
518   BasicBlock::iterator SecondBBI(SecondI);
519   BasicBlock *FirstBB = FirstI->getParent();
520   BasicBlock *SecondBB = SecondI->getParent();
521   MemoryLocation MemLoc = MemoryLocation::get(SecondI);
522 
523   // Start checking the store-block.
524   WorkList.push_back(SecondBB);
525   bool isFirstBlock = true;
526 
527   // Check all blocks going backward until we reach the load-block.
528   while (!WorkList.empty()) {
529     BasicBlock *B = WorkList.pop_back_val();
530 
    // Ignore instructions before FirstI if this is the FirstBB.
532     BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());
533 
534     BasicBlock::iterator EI;
535     if (isFirstBlock) {
      // Ignore instructions after SecondI if this is the first visit of
      // SecondBB.
537       assert(B == SecondBB && "first block is not the store block");
538       EI = SecondBBI;
539       isFirstBlock = false;
540     } else {
      // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
      // In this case we also have to look at instructions after SecondI.
543       EI = B->end();
544     }
545     for (; BI != EI; ++BI) {
546       Instruction *I = &*BI;
547       if (I->mayWriteToMemory() && I != SecondI) {
548         auto Res = AA->getModRefInfo(I, MemLoc);
549         if (Res & MRI_Mod)
550           return false;
551       }
552     }
553     if (B != FirstBB) {
      assert(B != &FirstBB->getParent()->getEntryBlock() &&
             "Should not hit the entry block because SecondI must be "
             "dominated by FirstI");
556       for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
557         if (!Visited.insert(*PredI).second)
558           continue;
559         WorkList.push_back(*PredI);
560       }
561     }
562   }
563   return true;
564 }
565 
/// Find all blocks that will unconditionally lead to the block BB and append
/// them to Blocks.
568 static void findUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
569                                    BasicBlock *BB, DominatorTree *DT) {
570   for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
571     BasicBlock *Pred = *I;
572     if (Pred == BB) continue;
573     TerminatorInst *PredTI = Pred->getTerminator();
574     if (PredTI->getNumSuccessors() != 1)
575       continue;
576 
577     if (DT->isReachableFromEntry(Pred))
578       Blocks.push_back(Pred);
579   }
580 }
581 
582 /// Handle frees of entire structures whose dependency is a store
583 /// to a field of that structure.
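/// For example, both stores below are dead because the whole object is freed
/// immediately afterwards:
///   %s = call i8* @malloc(i64 2)
///   store i8 0, i8* %s
///   %s1 = getelementptr inbounds i8, i8* %s, i64 1
///   store i8 0, i8* %s1
///   call void @free(i8* %s)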
584 static bool handleFree(CallInst *F, AliasAnalysis *AA,
585                        MemoryDependenceResults *MD, DominatorTree *DT,
586                        const TargetLibraryInfo *TLI,
587                        InstOverlapIntervalsTy &IOL,
588                        DenseMap<Instruction*, size_t> *InstrOrdering) {
589   bool MadeChange = false;
590 
591   MemoryLocation Loc = MemoryLocation(F->getOperand(0));
592   SmallVector<BasicBlock *, 16> Blocks;
593   Blocks.push_back(F->getParent());
594   const DataLayout &DL = F->getModule()->getDataLayout();
595 
596   while (!Blocks.empty()) {
597     BasicBlock *BB = Blocks.pop_back_val();
598     Instruction *InstPt = BB->getTerminator();
599     if (BB == F->getParent()) InstPt = F;
600 
601     MemDepResult Dep =
602         MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
603     while (Dep.isDef() || Dep.isClobber()) {
604       Instruction *Dependency = Dep.getInst();
605       if (!hasMemoryWrite(Dependency, *TLI) || !isRemovable(Dependency))
606         break;
607 
608       Value *DepPointer =
609           GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);
610 
611       // Check for aliasing.
612       if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
613         break;
614 
615       DEBUG(dbgs() << "DSE: Dead Store to soon to be freed memory:\n  DEAD: "
616                    << *Dependency << '\n');
617 
618       // DCE instructions only used to calculate that store.
619       BasicBlock::iterator BBI(Dependency);
620       deleteDeadInstruction(Dependency, &BBI, *MD, *TLI, IOL, InstrOrdering);
621       ++NumFastStores;
622       MadeChange = true;
623 
624       // Inst's old Dependency is now deleted. Compute the next dependency,
625       // which may also be dead, as in
626       //    s[0] = 0;
627       //    s[1] = 0; // This has just been deleted.
628       //    free(s);
629       Dep = MD->getPointerDependencyFrom(Loc, false, BBI, BB);
630     }
631 
632     if (Dep.isNonLocal())
633       findUnconditionalPreds(Blocks, BB, DT);
634   }
635 
636   return MadeChange;
637 }
638 
639 /// Check to see if the specified location may alias any of the stack objects in
640 /// the DeadStackObjects set. If so, they become live because the location is
641 /// being loaded.
642 static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
643                                   SmallSetVector<Value *, 16> &DeadStackObjects,
644                                   const DataLayout &DL, AliasAnalysis *AA,
645                                   const TargetLibraryInfo *TLI) {
646   const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);
647 
648   // A constant can't be in the dead pointer set.
649   if (isa<Constant>(UnderlyingPointer))
650     return;
651 
652   // If the kill pointer can be easily reduced to an alloca, don't bother doing
653   // extraneous AA queries.
654   if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
655     DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
656     return;
657   }
658 
659   // Remove objects that could alias LoadedLoc.
660   DeadStackObjects.remove_if([&](Value *I) {
661     // See if the loaded location could alias the stack location.
662     MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI));
663     return !AA->isNoAlias(StackLoc, LoadedLoc);
664   });
665 }
666 
667 /// Remove dead stores to stack-allocated locations in the function end block.
668 /// Ex:
669 /// %A = alloca i32
670 /// ...
671 /// store i32 1, i32* %A
672 /// ret void
673 static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
674                              MemoryDependenceResults *MD,
675                              const TargetLibraryInfo *TLI,
676                              InstOverlapIntervalsTy &IOL,
677                              DenseMap<Instruction*, size_t> *InstrOrdering) {
678   bool MadeChange = false;
679 
680   // Keep track of all of the stack objects that are dead at the end of the
681   // function.
682   SmallSetVector<Value*, 16> DeadStackObjects;
683 
684   // Find all of the alloca'd pointers in the entry block.
685   BasicBlock &Entry = BB.getParent()->front();
686   for (Instruction &I : Entry) {
687     if (isa<AllocaInst>(&I))
688       DeadStackObjects.insert(&I);
689 
    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then the object is leaked by this function anyway.
692     else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
693       DeadStackObjects.insert(&I);
694   }
695 
  // Treat byval or inalloca arguments the same: stores to them are dead at
  // the end of the function.
698   for (Argument &AI : BB.getParent()->args())
699     if (AI.hasByValOrInAllocaAttr())
700       DeadStackObjects.insert(&AI);
701 
702   const DataLayout &DL = BB.getModule()->getDataLayout();
703 
704   // Scan the basic block backwards
705   for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
706     --BBI;
707 
708     // If we find a store, check to see if it points into a dead stack value.
709     if (hasMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
710       // See through pointer-to-pointer bitcasts
711       SmallVector<Value *, 4> Pointers;
712       GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);
713 
714       // Stores to stack values are valid candidates for removal.
715       bool AllDead = true;
716       for (Value *Pointer : Pointers)
717         if (!DeadStackObjects.count(Pointer)) {
718           AllDead = false;
719           break;
720         }
721 
722       if (AllDead) {
723         Instruction *Dead = &*BBI;
724 
725         DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
726                      << *Dead << "\n  Objects: ";
727               for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
728                    E = Pointers.end(); I != E; ++I) {
729                 dbgs() << **I;
730                 if (std::next(I) != E)
731                   dbgs() << ", ";
732               }
733               dbgs() << '\n');
734 
735         // DCE instructions only used to calculate that store.
736         deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, InstrOrdering, &DeadStackObjects);
737         ++NumFastStores;
738         MadeChange = true;
739         continue;
740       }
741     }
742 
743     // Remove any dead non-memory-mutating instructions.
744     if (isInstructionTriviallyDead(&*BBI, TLI)) {
745       DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n  DEAD: "
746                    << *&*BBI << '\n');
747       deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, InstrOrdering, &DeadStackObjects);
748       ++NumFastOther;
749       MadeChange = true;
750       continue;
751     }
752 
753     if (isa<AllocaInst>(BBI)) {
754       // Remove allocas from the list of dead stack objects; there can't be
755       // any references before the definition.
756       DeadStackObjects.remove(&*BBI);
757       continue;
758     }
759 
760     if (auto CS = CallSite(&*BBI)) {
761       // Remove allocation function calls from the list of dead stack objects;
762       // there can't be any references before the definition.
763       if (isAllocLikeFn(&*BBI, TLI))
764         DeadStackObjects.remove(&*BBI);
765 
766       // If this call does not access memory, it can't be loading any of our
767       // pointers.
768       if (AA->doesNotAccessMemory(CS))
769         continue;
770 
771       // If the call might load from any of our allocas, then any store above
772       // the call is live.
773       DeadStackObjects.remove_if([&](Value *I) {
774         // See if the call site touches the value.
775         ModRefInfo A = AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI));
776 
777         return A == MRI_ModRef || A == MRI_Ref;
778       });
779 
780       // If all of the allocas were clobbered by the call then we're not going
781       // to find anything else to process.
782       if (DeadStackObjects.empty())
783         break;
784 
785       continue;
786     }
787 
    // We can remove the dead stores, irrespective of the fence and its
    // ordering (release/acquire/seq_cst). Fences only constrain the ordering
    // of already visible stores; they do not make a store visible to other
    // threads. So, skipping over a fence does not change whether a store is
    // dead.
793     if (isa<FenceInst>(*BBI))
794       continue;
795 
796     MemoryLocation LoadedLoc;
797 
798     // If we encounter a use of the pointer, it is no longer considered dead
799     if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
800       if (!L->isUnordered()) // Be conservative with atomic/volatile load
801         break;
802       LoadedLoc = MemoryLocation::get(L);
803     } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
804       LoadedLoc = MemoryLocation::get(V);
805     } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
806       LoadedLoc = MemoryLocation::getForSource(MTI);
807     } else if (!BBI->mayReadFromMemory()) {
808       // Instruction doesn't read memory.  Note that stores that weren't removed
809       // above will hit this case.
810       continue;
811     } else {
812       // Unknown inst; assume it clobbers everything.
813       break;
814     }
815 
816     // Remove any allocas from the DeadPointer set that are loaded, as this
817     // makes any stores above the access live.
818     removeAccessedObjects(LoadedLoc, DeadStackObjects, DL, AA, TLI);
819 
820     // If all of the allocas were clobbered by the access then we're not going
821     // to find anything else to process.
822     if (DeadStackObjects.empty())
823       break;
824   }
825 
826   return MadeChange;
827 }
828 
829 static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
830                          int64_t &EarlierSize, int64_t LaterOffset,
831                          int64_t LaterSize, bool IsOverwriteEnd) {
  // TODO: Base this on the target vector size so that if the earlier store
  // was too small to get vector writes anyway, then it's likely a good idea
  // to shorten it.
  // Power-of-2 vector writes are probably always a bad idea to shorten, as
  // any store/memset/memcpy is likely using vector instructions, so
  // shortening it to a non-vector size is likely to be slower.
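  // For example, with IsOverwriteEnd == true:
  //   memset(p, 0, 32)        ; EarlierOffset 0, EarlierSize 32
  //   <8-byte store at p+24>  ; LaterOffset 24, LaterSize 8
  // the memset is trimmed to memset(p, 0, 24). With IsOverwriteEnd == false,
  // the overwritten prefix is dropped instead: an 8-byte store at p turns
  // memset(p, 0, 32) into memset(p + 8, 0, 24).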
838   MemIntrinsic *EarlierIntrinsic = cast<MemIntrinsic>(EarlierWrite);
839   unsigned EarlierWriteAlign = EarlierIntrinsic->getAlignment();
840   if (!IsOverwriteEnd)
841     LaterOffset = int64_t(LaterOffset + LaterSize);
842 
843   if (!(llvm::isPowerOf2_64(LaterOffset) && EarlierWriteAlign <= LaterOffset) &&
844       !((EarlierWriteAlign != 0) && LaterOffset % EarlierWriteAlign == 0))
845     return false;
846 
847   DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
848                << (IsOverwriteEnd ? "END" : "BEGIN") << ": " << *EarlierWrite
849                << "\n  KILLER (offset " << LaterOffset << ", " << EarlierSize
850                << ")\n");
851 
852   int64_t NewLength = IsOverwriteEnd
853                           ? LaterOffset - EarlierOffset
854                           : EarlierSize - (LaterOffset - EarlierOffset);
855 
856   Value *EarlierWriteLength = EarlierIntrinsic->getLength();
857   Value *TrimmedLength =
858       ConstantInt::get(EarlierWriteLength->getType(), NewLength);
859   EarlierIntrinsic->setLength(TrimmedLength);
860 
861   EarlierSize = NewLength;
862   if (!IsOverwriteEnd) {
863     int64_t OffsetMoved = (LaterOffset - EarlierOffset);
864     Value *Indices[1] = {
865         ConstantInt::get(EarlierWriteLength->getType(), OffsetMoved)};
866     GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
867         EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
868     EarlierIntrinsic->setDest(NewDestGEP);
869     EarlierOffset = EarlierOffset + OffsetMoved;
870   }
871   return true;
872 }
873 
874 static bool tryToShortenEnd(Instruction *EarlierWrite,
875                             OverlapIntervalsTy &IntervalMap,
876                             int64_t &EarlierStart, int64_t &EarlierSize) {
877   if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
878     return false;
879 
880   OverlapIntervalsTy::iterator OII = --IntervalMap.end();
881   int64_t LaterStart = OII->second;
882   int64_t LaterSize = OII->first - LaterStart;
883 
884   if (LaterStart > EarlierStart && LaterStart < EarlierStart + EarlierSize &&
885       LaterStart + LaterSize >= EarlierStart + EarlierSize) {
886     if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
887                      LaterSize, true)) {
888       IntervalMap.erase(OII);
889       return true;
890     }
891   }
892   return false;
893 }
894 
895 static bool tryToShortenBegin(Instruction *EarlierWrite,
896                               OverlapIntervalsTy &IntervalMap,
897                               int64_t &EarlierStart, int64_t &EarlierSize) {
898   if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
899     return false;
900 
901   OverlapIntervalsTy::iterator OII = IntervalMap.begin();
902   int64_t LaterStart = OII->second;
903   int64_t LaterSize = OII->first - LaterStart;
904 
905   if (LaterStart <= EarlierStart && LaterStart + LaterSize > EarlierStart) {
906     assert(LaterStart + LaterSize < EarlierStart + EarlierSize &&
907            "Should have been handled as OW_Complete");
908     if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
909                      LaterSize, false)) {
910       IntervalMap.erase(OII);
911       return true;
912     }
913   }
914   return false;
915 }
916 
917 static bool removePartiallyOverlappedStores(AliasAnalysis *AA,
918                                             const DataLayout &DL,
919                                             InstOverlapIntervalsTy &IOL) {
920   bool Changed = false;
  for (auto &OI : IOL) {
922     Instruction *EarlierWrite = OI.first;
923     MemoryLocation Loc = getLocForWrite(EarlierWrite, *AA);
924     assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
925     assert(Loc.Size != MemoryLocation::UnknownSize && "Unexpected mem loc");
926 
927     const Value *Ptr = Loc.Ptr->stripPointerCasts();
928     int64_t EarlierStart = 0;
929     int64_t EarlierSize = int64_t(Loc.Size);
930     GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
931     OverlapIntervalsTy &IntervalMap = OI.second;
932     Changed |=
933         tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
934     if (IntervalMap.empty())
935       continue;
936     Changed |=
937         tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
938   }
939   return Changed;
940 }
941 
942 static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
943                                AliasAnalysis *AA, MemoryDependenceResults *MD,
944                                const DataLayout &DL,
945                                const TargetLibraryInfo *TLI,
946                                InstOverlapIntervalsTy &IOL,
947                                DenseMap<Instruction*, size_t> *InstrOrdering) {
948   // Must be a store instruction.
949   StoreInst *SI = dyn_cast<StoreInst>(Inst);
950   if (!SI)
951     return false;
952 
953   // If we're storing the same value back to a pointer that we just loaded from,
954   // then the store can be removed.
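  // For example:
  //   %v = load i32, i32* %p
  //   store i32 %v, i32* %p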
955   if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
956     if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
957         isRemovable(SI) && memoryIsNotModifiedBetween(DepLoad, SI, AA)) {
958 
959       DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  LOAD: "
960                    << *DepLoad << "\n  STORE: " << *SI << '\n');
961 
962       deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
963       ++NumRedundantStores;
964       return true;
965     }
966   }
967 
  // Remove null stores into calloc'ed objects; such memory is already zero.
969   Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
970   if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
971     Instruction *UnderlyingPointer =
972         dyn_cast<Instruction>(GetUnderlyingObject(SI->getPointerOperand(), DL));
973 
974     if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
975         memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA)) {
976       DEBUG(
977           dbgs() << "DSE: Remove null store to the calloc'ed object:\n  DEAD: "
978                  << *Inst << "\n  OBJECT: " << *UnderlyingPointer << '\n');
979 
980       deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
981       ++NumRedundantStores;
982       return true;
983     }
984   }
985   return false;
986 }
987 
988 static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
989                                 MemoryDependenceResults *MD, DominatorTree *DT,
990                                 const TargetLibraryInfo *TLI) {
991   const DataLayout &DL = BB.getModule()->getDataLayout();
992   bool MadeChange = false;
993 
994   // FIXME: Maybe change this to use some abstraction like OrderedBasicBlock?
995   // The current OrderedBasicBlock can't deal with mutation at the moment.
996   size_t LastThrowingInstIndex = 0;
997   DenseMap<Instruction*, size_t> InstrOrdering;
998   size_t InstrIndex = 1;
999 
1000   // A map of interval maps representing partially-overwritten value parts.
1001   InstOverlapIntervalsTy IOL;
1002 
1003   // Do a top-down walk on the BB.
1004   for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
1005     // Handle 'free' calls specially.
1006     if (CallInst *F = isFreeCall(&*BBI, TLI)) {
1007       MadeChange |= handleFree(F, AA, MD, DT, TLI, IOL, &InstrOrdering);
1008       // Increment BBI after handleFree has potentially deleted instructions.
1009       // This ensures we maintain a valid iterator.
1010       ++BBI;
1011       continue;
1012     }
1013 
1014     Instruction *Inst = &*BBI++;
1015 
1016     size_t CurInstNumber = InstrIndex++;
1017     InstrOrdering.insert(std::make_pair(Inst, CurInstNumber));
1018     if (Inst->mayThrow()) {
1019       LastThrowingInstIndex = CurInstNumber;
1020       continue;
1021     }
1022 
1023     // Check to see if Inst writes to memory.  If not, continue.
1024     if (!hasMemoryWrite(Inst, *TLI))
1025       continue;
1026 
    // eliminateNoopStore will update the iterator, if necessary.
1028     if (eliminateNoopStore(Inst, BBI, AA, MD, DL, TLI, IOL, &InstrOrdering)) {
1029       MadeChange = true;
1030       continue;
1031     }
1032 
1033     // If we find something that writes memory, get its memory dependence.
1034     MemDepResult InstDep = MD->getDependency(Inst);
1035 
1036     // Ignore any store where we can't find a local dependence.
1037     // FIXME: cross-block DSE would be fun. :)
1038     if (!InstDep.isDef() && !InstDep.isClobber())
1039       continue;
1040 
1041     // Figure out what location is being stored to.
1042     MemoryLocation Loc = getLocForWrite(Inst, *AA);
1043 
1044     // If we didn't get a useful location, fail.
1045     if (!Loc.Ptr)
1046       continue;
1047 
1048     // Loop until we find a store we can eliminate or a load that
1049     // invalidates the analysis. Without an upper bound on the number of
1050     // instructions examined, this analysis can become very time-consuming.
1051     // However, the potential gain diminishes as we process more instructions
1052     // without eliminating any of them. Therefore, we limit the number of
1053     // instructions we look at.
1054     auto Limit = MD->getDefaultBlockScanLimit();
1055     while (InstDep.isDef() || InstDep.isClobber()) {
1056       // Get the memory clobbered by the instruction we depend on.  MemDep will
1057       // skip any instructions that 'Loc' clearly doesn't interact with.  If we
1058       // end up depending on a may- or must-aliased load, then we can't optimize
1059       // away the store and we bail out.  However, if we depend on something
1060       // that overwrites the memory location we *can* potentially optimize it.
1061       //
1062       // Find out what memory location the dependent instruction stores.
1063       Instruction *DepWrite = InstDep.getInst();
1064       MemoryLocation DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, bail out.
1066       if (!DepLoc.Ptr)
1067         break;
1068 
1069       // Make sure we don't look past a call which might throw. This is an
1070       // issue because MemoryDependenceAnalysis works in the wrong direction:
1071       // it finds instructions which dominate the current instruction, rather than
1072       // instructions which are post-dominated by the current instruction.
1073       //
1074       // If the underlying object is a non-escaping memory allocation, any store
1075       // to it is dead along the unwind edge. Otherwise, we need to preserve
1076       // the store.
1077       size_t DepIndex = InstrOrdering.lookup(DepWrite);
1078       assert(DepIndex && "Unexpected instruction");
1079       if (DepIndex <= LastThrowingInstIndex) {
1080         const Value* Underlying = GetUnderlyingObject(DepLoc.Ptr, DL);
1081         bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
1082         if (!IsStoreDeadOnUnwind) {
          // We're looking for a call to an allocation function where the
          // allocation doesn't escape before the last throwing instruction;
          // PointerMayBeCaptured is a reasonably fast approximation.
          IsStoreDeadOnUnwind = isAllocLikeFn(Underlying, TLI) &&
              !PointerMayBeCaptured(Underlying, false, true);
1089         }
1090         if (!IsStoreDeadOnUnwind)
1091           break;
1092       }
1093 
      // If we find a write that is a) removable (i.e., non-volatile), b)
      // completely obliterated by the store to 'Loc', and c) not read by
      // 'Inst', then we can remove it.
1097       if (isRemovable(DepWrite) &&
1098           !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
1099         int64_t InstWriteOffset, DepWriteOffset;
1100         OverwriteResult OR =
1101             isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset, InstWriteOffset,
1102                         DepWrite, IOL);
1103         if (OR == OW_Complete) {
1104           DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
1105                 << *DepWrite << "\n  KILLER: " << *Inst << '\n');
1106 
1107           // Delete the store and now-dead instructions that feed it.
1108           deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL, &InstrOrdering);
1109           ++NumFastStores;
1110           MadeChange = true;
1111 
1112           // We erased DepWrite; start over.
1113           InstDep = MD->getDependency(Inst);
1114           continue;
1115         } else if ((OR == OW_End && isShortenableAtTheEnd(DepWrite)) ||
1116                    ((OR == OW_Begin &&
1117                      isShortenableAtTheBeginning(DepWrite)))) {
1118           assert(!EnablePartialOverwriteTracking && "Do not expect to perform "
1119                                                     "when partial-overwrite "
1120                                                     "tracking is enabled");
1121           int64_t EarlierSize = DepLoc.Size;
1122           int64_t LaterSize = Loc.Size;
1123           bool IsOverwriteEnd = (OR == OW_End);
1124           MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
1125                                     InstWriteOffset, LaterSize, IsOverwriteEnd);
1126         }
1127       }
1128 
1129       // If this is a may-aliased store that is clobbering the store value, we
1130       // can keep searching past it for another must-aliased pointer that stores
1131       // to the same location.  For example, in:
1132       //   store -> P
1133       //   store -> Q
1134       //   store -> P
1135       // we can remove the first store to P even though we don't know if P and Q
1136       // alias.
1137       if (DepWrite == &BB.front()) break;
1138 
1139       // Can't look past this instruction if it might read 'Loc'.
1140       if (AA->getModRefInfo(DepWrite, Loc) & MRI_Ref)
1141         break;
1142 
1143       InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,
1144                                              DepWrite->getIterator(), &BB,
1145                                              /*QueryInst=*/ nullptr, &Limit);
1146     }
1147   }
1148 
1149   if (EnablePartialOverwriteTracking)
1150     MadeChange |= removePartiallyOverlappedStores(AA, DL, IOL);
1151 
1152   // If this block ends in a return, unwind, or unreachable, all allocas are
1153   // dead at its end, which means stores to them are also dead.
1154   if (BB.getTerminator()->getNumSuccessors() == 0)
1155     MadeChange |= handleEndBlock(BB, AA, MD, TLI, IOL, &InstrOrdering);
1156 
1157   return MadeChange;
1158 }
1159 
1160 static bool eliminateDeadStores(Function &F, AliasAnalysis *AA,
1161                                 MemoryDependenceResults *MD, DominatorTree *DT,
1162                                 const TargetLibraryInfo *TLI) {
1163   bool MadeChange = false;
1164   for (BasicBlock &BB : F)
1165     // Only check non-dead blocks.  Dead blocks may have strange pointer
1166     // cycles that will confuse alias analysis.
1167     if (DT->isReachableFromEntry(&BB))
1168       MadeChange |= eliminateDeadStores(BB, AA, MD, DT, TLI);
1169 
1170   return MadeChange;
1171 }
1172 
1173 //===----------------------------------------------------------------------===//
1174 // DSE Pass
1175 //===----------------------------------------------------------------------===//
1176 PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
1177   AliasAnalysis *AA = &AM.getResult<AAManager>(F);
1178   DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1179   MemoryDependenceResults *MD = &AM.getResult<MemoryDependenceAnalysis>(F);
1180   const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
1181 
1182   if (!eliminateDeadStores(F, AA, MD, DT, TLI))
1183     return PreservedAnalyses::all();
1184 
1185   PreservedAnalyses PA;
1186   PA.preserveSet<CFGAnalyses>();
1187   PA.preserve<GlobalsAA>();
1188   PA.preserve<MemoryDependenceAnalysis>();
1189   return PA;
1190 }
1191 
1192 namespace {
1193 /// A legacy pass for the legacy pass manager that wraps \c DSEPass.
1194 class DSELegacyPass : public FunctionPass {
1195 public:
1196   DSELegacyPass() : FunctionPass(ID) {
1197     initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
1198   }
1199 
1200   bool runOnFunction(Function &F) override {
1201     if (skipFunction(F))
1202       return false;
1203 
1204     DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1205     AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1206     MemoryDependenceResults *MD =
1207         &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
1208     const TargetLibraryInfo *TLI =
1209         &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
1210 
1211     return eliminateDeadStores(F, AA, MD, DT, TLI);
1212   }
1213 
1214   void getAnalysisUsage(AnalysisUsage &AU) const override {
1215     AU.setPreservesCFG();
1216     AU.addRequired<DominatorTreeWrapperPass>();
1217     AU.addRequired<AAResultsWrapperPass>();
1218     AU.addRequired<MemoryDependenceWrapperPass>();
1219     AU.addRequired<TargetLibraryInfoWrapperPass>();
1220     AU.addPreserved<DominatorTreeWrapperPass>();
1221     AU.addPreserved<GlobalsAAWrapperPass>();
1222     AU.addPreserved<MemoryDependenceWrapperPass>();
1223   }
1224 
1225   static char ID; // Pass identification, replacement for typeid
1226 };
1227 } // end anonymous namespace
1228 
1229 char DSELegacyPass::ID = 0;
1230 INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
1231                       false)
1232 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1233 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
1234 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
1235 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
1236 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1237 INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
1238                     false)
1239 
1240 FunctionPass *llvm::createDeadStoreEliminationPass() {
1241   return new DSELegacyPass();
1242 }
1243