//===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OrderedBasicBlock.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "dse"

STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");
STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
STATISTIC(NumModifiedStores, "Number of stores modified");

static cl::opt<bool>
EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
  cl::init(true), cl::Hidden,
  cl::desc("Enable partial-overwrite tracking in DSE"));

static cl::opt<bool>
EnablePartialStoreMerging("enable-dse-partial-store-merging",
  cl::init(true), cl::Hidden,
  cl::desc("Enable partial store merging in DSE"));

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
using OverlapIntervalsTy = std::map<int64_t, int64_t>;
using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
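
// Each interval map records the half-open byte ranges of an earlier write
// already covered by later stores, keyed by end offset with the start offset
// as the value (see the merge logic in isOverwrite). For example
// (illustrative), a later store covering bytes [4, 12) of an earlier write is
// recorded as IM[12] = 4.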

/// Delete this instruction.  Before we do, go through and zero out all the
/// operands of this instruction.  If any of them become dead, delete them and
/// the computation tree that feeds them.
/// If ValueSet is non-null, remove any deleted instructions from it as well.
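///
/// For example (illustrative IR), deleting the store below also deletes %a
/// when the store was %a's only use:
///
///   %a = add i32 %x, 1
///   store i32 %a, i32* %p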
static void
deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI,
                      MemoryDependenceResults &MD, const TargetLibraryInfo &TLI,
                      InstOverlapIntervalsTy &IOL, OrderedBasicBlock &OBB,
                      SmallSetVector<Value *, 16> *ValueSet = nullptr) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Keeping the iterator straight is a pain, so we let this routine tell the
  // caller what the next instruction is after we're done mucking about.
  BasicBlock::iterator NewIter = *BBI;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // Try to preserve debug information attached to the dead instruction.
    salvageDebugInfo(*DeadInst);

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, &TLI))
          NowDeadInsts.push_back(OpI);
    }

    if (ValueSet) ValueSet->remove(DeadInst);
    IOL.erase(DeadInst);
    OBB.eraseInstruction(DeadInst);

    if (NewIter == DeadInst->getIterator())
      NewIter = DeadInst->eraseFromParent();
    else
      DeadInst->eraseFromParent();
  } while (!NowDeadInsts.empty());
  *BBI = NewIter;
}

/// Does this instruction write some memory?  This only returns true for things
/// that we can analyze with other helpers below.
static bool hasAnalyzableMemoryWrite(Instruction *I,
                                     const TargetLibraryInfo &TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (auto CS = CallSite(I)) {
    if (Function *F = CS.getCalledFunction()) {
      StringRef FnName = F->getName();
      if (TLI.has(LibFunc_strcpy) && FnName == TLI.getName(LibFunc_strcpy))
        return true;
      if (TLI.has(LibFunc_strncpy) && FnName == TLI.getName(LibFunc_strncpy))
        return true;
      if (TLI.has(LibFunc_strcat) && FnName == TLI.getName(LibFunc_strcat))
        return true;
      if (TLI.has(LibFunc_strncat) && FnName == TLI.getName(LibFunc_strncat))
        return true;
    }
  }
  return false;
}

/// Return a Location stored to by the specified instruction. If isRemovable
/// returns true, this function and getLocForRead completely describe the memory
/// operations for this instruction.
static MemoryLocation getLocForWrite(Instruction *Inst) {

  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return MemoryLocation::get(SI);

  if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    MemoryLocation Loc = MemoryLocation::getForDest(MI);
    return Loc;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    default:
      return MemoryLocation(); // Unhandled intrinsic.
    case Intrinsic::init_trampoline:
      return MemoryLocation(II->getArgOperand(0));
    case Intrinsic::lifetime_end: {
      uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
      return MemoryLocation(II->getArgOperand(1), Len);
    }
    }
  }
  if (auto CS = CallSite(Inst))
    // All the supported TLI functions so far happen to have dest as their
    // first argument.
    return MemoryLocation(CS.getArgument(0));
  return MemoryLocation();
}

/// Return the location read by the specified "hasAnalyzableMemoryWrite"
/// instruction if any.
static MemoryLocation getLocForRead(Instruction *Inst,
                                    const TargetLibraryInfo &TLI) {
  assert(hasAnalyzableMemoryWrite(Inst, TLI) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (auto *MTI = dyn_cast<AnyMemTransferInst>(Inst))
    return MemoryLocation::getForSource(MTI);
  return MemoryLocation();
}

/// If the value of this instruction and the memory it writes to is unused, may
/// we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end's, e.g. because it is followed by a
      // free.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      return true;
    }
  }

  // note: only get here for calls with analyzable writes - i.e. libcalls
  if (auto CS = CallSite(I))
    return CS.getInstruction()->use_empty();

  return false;
}

/// Returns true if the end of this instruction can be safely shortened in
/// length.
static bool isShortenableAtTheEnd(Instruction *I) {
  // Don't shorten stores for now
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
      default: return false;
      case Intrinsic::memset:
      case Intrinsic::memcpy:
      case Intrinsic::memcpy_element_unordered_atomic:
      case Intrinsic::memset_element_unordered_atomic:
        // Do shorten memory intrinsics.
        // FIXME: Add memmove if it's also safe to transform.
        return true;
    }
  }

  // Don't shorten libcalls for now.

  return false;
}

/// Returns true if the beginning of this instruction can be safely shortened
/// in length.
static bool isShortenableAtTheBeginning(Instruction *I) {
  // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
  // easily done by offsetting the source address.
  return isa<AnyMemSetInst>(I);
}
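
// For example (illustrative IR), when a later store overwrites the first
// bytes of an earlier memset, the memset can be trimmed at the beginning by
// advancing its destination and shrinking its length:
//
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 16, i1 false)
//   store i64 1, i64* %p64        ; overwrites bytes [0, 8)
//
// leaving a memset of only the remaining bytes [8, 16).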

/// Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  //TODO: factor this to reuse getLocForWrite
  MemoryLocation Loc = getLocForWrite(I);
  assert(Loc.Ptr &&
         "unable to find pointer written for analyzable instruction?");
  // TODO: most APIs don't expect const Value *
  return const_cast<Value*>(Loc.Ptr);
}

static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI,
                               const Function *F) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.NullIsUnknownSize = NullPointerIsDefined(F);

  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

namespace {

enum OverwriteResult {
  OW_Begin,
  OW_Complete,
  OW_End,
  OW_PartialEarlierWithFullLater,
  OW_Unknown
};

} // end anonymous namespace

/// Return 'OW_Complete' if a store to the 'Later' location completely
/// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
/// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
/// beginning of the 'Earlier' location is overwritten by 'Later'.
/// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
/// overwritten by a later (smaller) store which doesn't write outside the big
/// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
static OverwriteResult isOverwrite(const MemoryLocation &Later,
                                   const MemoryLocation &Earlier,
                                   const DataLayout &DL,
                                   const TargetLibraryInfo &TLI,
                                   int64_t &EarlierOff, int64_t &LaterOff,
                                   Instruction *DepWrite,
                                   InstOverlapIntervalsTy &IOL,
                                   AliasAnalysis &AA,
                                   const Function *F) {
  // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
  // get imprecise values here, though (except for unknown sizes).
  if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise())
    return OW_Unknown;

  const uint64_t LaterSize = Later.Size.getValue();
  const uint64_t EarlierSize = Earlier.Size.getValue();

  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see if
  // the later store was larger than the earlier store.
  if (P1 == P2 || AA.isMustAlias(P1, P2)) {
    // Make sure that the Later size is >= the Earlier size.
    if (LaterSize >= EarlierSize)
      return OW_Complete;
  }

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument).  If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OW_Unknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, F);
  if (ObjectSize != MemoryLocation::UnknownSize)
    if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
      return OW_Complete;

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different stores.
  if (BP1 != BP2)
    return OW_Unknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but which
  //    still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      LaterSize >= EarlierSize &&
      uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
    return OW_Complete;

  // We may now overlap, although the overlap is not complete. There might also
  // be other incomplete overlaps, and together, they might cover the complete
  // earlier write.
  // Note: The correctness of this logic depends on the fact that this function
  // is not even called with DepWrite when there are any intervening reads.
  if (EnablePartialOverwriteTracking &&
      LaterOff < int64_t(EarlierOff + EarlierSize) &&
      int64_t(LaterOff + LaterSize) >= EarlierOff) {

    // Insert our part of the overlap into the map.
    auto &IM = IOL[DepWrite];
    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
                      << ", " << int64_t(EarlierOff + EarlierSize)
                      << ") Later [" << LaterOff << ", "
                      << int64_t(LaterOff + LaterSize) << ")\n");

    // Make sure that we only insert non-overlapping intervals and combine
    // adjacent intervals. The intervals are stored in the map with the ending
    // offset as the key (in the half-open sense) and the starting offset as
    // the value.
    int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;

    // Find any intervals ending at, or after, LaterIntStart which start
    // before LaterIntEnd.
    auto ILI = IM.lower_bound(LaterIntStart);
    if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
      // This existing interval is overlapped with the current store somewhere
      // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
      // intervals and adjusting our start and end.
      LaterIntStart = std::min(LaterIntStart, ILI->second);
      LaterIntEnd = std::max(LaterIntEnd, ILI->first);
      ILI = IM.erase(ILI);

      // Continue erasing and adjusting our end in case other previous
      // intervals are also overlapped with the current store.
      //
      // |--- earlier 1 ---|  |--- earlier 2 ---|
      //     |------- later---------|
      //
      while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
        assert(ILI->second > LaterIntStart && "Unexpected interval");
        LaterIntEnd = std::max(LaterIntEnd, ILI->first);
        ILI = IM.erase(ILI);
      }
    }

    IM[LaterIntEnd] = LaterIntStart;

    ILI = IM.begin();
    if (ILI->second <= EarlierOff &&
        ILI->first >= int64_t(EarlierOff + EarlierSize)) {
      LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
                        << EarlierOff << ", "
                        << int64_t(EarlierOff + EarlierSize)
                        << ") Composite Later [" << ILI->second << ", "
                        << ILI->first << ")\n");
      ++NumCompletePartials;
      return OW_Complete;
    }
  }

  // Check for an earlier store which writes to all the memory locations that
  // the later store writes to.
  if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
      int64_t(EarlierOff + EarlierSize) > LaterOff &&
      uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite of an earlier store ["
                      << EarlierOff << ", "
                      << int64_t(EarlierOff + EarlierSize)
                      << ") by a later store [" << LaterOff << ", "
                      << int64_t(LaterOff + LaterSize) << ")\n");
    // TODO: Maybe come up with a better name?
    return OW_PartialEarlierWithFullLater;
  }

  // Another interesting case is if the later store overwrites the end of the
  // earlier store.
  //
  //      |--earlier--|
  //                |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later
  if (!EnablePartialOverwriteTracking &&
      (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
       int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
    return OW_End;

  // Finally, we also need to check if the later store overwrites the beginning
  // of the earlier store.
  //
  //                |--earlier--|
  //      |--   later   --|
  //
  // In this case we may want to move the destination address and trim the size
  // of earlier to avoid generating writes to addresses which will definitely
  // be overwritten later.
  if (!EnablePartialOverwriteTracking &&
      (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
    assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
           "Expect to be handled as OW_Complete");
    return OW_Begin;
  }
  // Otherwise, they don't completely overlap.
  return OW_Unknown;
}

/// If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memmove(A <- B)
///   memmove(A <- A)
///
/// In this case, the second store to A does not make the first store to A dead.
/// The usual situation isn't an explicit A<-A store like this (which can be
/// trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const MemoryLocation &InstStoreLoc,
                               Instruction *DepWrite,
                               const TargetLibraryInfo &TLI,
                               AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
  if (!InstReadLoc.Ptr)
    return false; // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc))
    return false;

  if (isa<AnyMemCpyInst>(Inst)) {
    // LLVM's memcpy overlap semantics are not fully fleshed out (see PR11763)
    // but in practice memcpy(A <- B) either means that A and B are disjoint or
    // are equal (i.e. there are not partial overlaps).  Given that, if we have:
    //
    //   memcpy/memmove(A <- B)  // DepWrite
    //   memcpy(A <- B)  // Inst
    //
    // with Inst reading/writing a size >= DepWrite's, we can reason as
    // follows:
    //
    //   - If A == B then both the copies are no-ops, so the DepWrite can be
    //     removed.
    //   - If A != B then A and B are disjoint locations in Inst.  Since
    //     Inst.size >= DepWrite.size A and B are disjoint in DepWrite too.
    //     Therefore DepWrite can be removed.
    MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);

    if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
      return false;
  }

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}

/// Returns true if the memory accessed by the second instruction is not
/// modified between the first and the second instruction.
/// Precondition: The second instruction must be dominated by the first
/// instruction.
static bool memoryIsNotModifiedBetween(Instruction *FirstI,
                                       Instruction *SecondI,
                                       AliasAnalysis *AA) {
  SmallVector<BasicBlock *, 16> WorkList;
  SmallPtrSet<BasicBlock *, 8> Visited;
  BasicBlock::iterator FirstBBI(FirstI);
  ++FirstBBI;
  BasicBlock::iterator SecondBBI(SecondI);
  BasicBlock *FirstBB = FirstI->getParent();
  BasicBlock *SecondBB = SecondI->getParent();
  MemoryLocation MemLoc = MemoryLocation::get(SecondI);

  // Start checking the store-block.
  WorkList.push_back(SecondBB);
  bool isFirstBlock = true;

  // Check all blocks going backward until we reach the load-block.
  while (!WorkList.empty()) {
    BasicBlock *B = WorkList.pop_back_val();

    // Ignore instructions before FirstI if this is FirstBB.
    BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());

    BasicBlock::iterator EI;
    if (isFirstBlock) {
      // Ignore instructions after SecondI if this is the first visit of SecondBB.
      assert(B == SecondBB && "first block is not the store block");
      EI = SecondBBI;
      isFirstBlock = false;
    } else {
      // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
      // In this case we also have to look at instructions after SecondI.
      EI = B->end();
    }
    for (; BI != EI; ++BI) {
      Instruction *I = &*BI;
      if (I->mayWriteToMemory() && I != SecondI)
        if (isModSet(AA->getModRefInfo(I, MemLoc)))
          return false;
    }
    if (B != FirstBB) {
      assert(B != &FirstBB->getParent()->getEntryBlock() &&
          "Should not hit the entry block because SecondI must be dominated by FirstI");
      for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
        if (!Visited.insert(*PredI).second)
          continue;
        WorkList.push_back(*PredI);
      }
    }
  }
  return true;
}

/// Find all blocks that will unconditionally lead to the block BB and append
/// them to F.
static void findUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    Instruction *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}

/// Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
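///
/// For example (illustrative C-like pseudocode), the store below is dead
/// because the memory is about to be freed:
///
///   s->field = 0;
///   free(s);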
static bool handleFree(CallInst *F, AliasAnalysis *AA,
                       MemoryDependenceResults *MD, DominatorTree *DT,
                       const TargetLibraryInfo *TLI,
                       InstOverlapIntervalsTy &IOL, OrderedBasicBlock &OBB) {
  bool MadeChange = false;

  MemoryLocation Loc = MemoryLocation(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());
  const DataLayout &DL = F->getModule()->getDataLayout();

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep =
        MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasAnalyzableMemoryWrite(Dependency, *TLI) ||
          !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      LLVM_DEBUG(
          dbgs() << "DSE: Dead Store to soon to be freed memory:\n  DEAD: "
                 << *Dependency << '\n');

      // DCE instructions only used to calculate that store.
      BasicBlock::iterator BBI(Dependency);
      deleteDeadInstruction(Dependency, &BBI, *MD, *TLI, IOL, OBB);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted. Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0; // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, BBI, BB);
    }

    if (Dep.isNonLocal())
      findUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}

/// Check to see if the specified location may alias any of the stack objects in
/// the DeadStackObjects set. If so, they become live because the location is
/// being loaded.
static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
                                  SmallSetVector<Value *, 16> &DeadStackObjects,
                                  const DataLayout &DL, AliasAnalysis *AA,
                                  const TargetLibraryInfo *TLI,
                                  const Function *F) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](Value *I) {
    // See if the loaded location could alias the stack location.
    MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI, F));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}

/// Remove dead stores to stack-allocated locations in the function end block.
/// Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
                           MemoryDependenceResults *MD,
                           const TargetLibraryInfo *TLI,
                           InstOverlapIntervalsTy &IOL,
                           OrderedBasicBlock &OBB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock &Entry = BB.getParent()->front();
  for (Instruction &I : Entry) {
    if (isa<AllocaInst>(&I))
      DeadStackObjects.insert(&I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyways.
    else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
      DeadStackObjects.insert(&I);
  }

  // Treat byval or inalloca arguments the same, stores to them are dead at the
  // end of the function.
  for (Argument &AI : BB.getParent()->args())
    if (AI.hasByValOrInAllocaAttr())
      DeadStackObjects.insert(&AI);

  const DataLayout &DL = BB.getModule()->getDataLayout();

  // Scan the basic block backwards
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasAnalyzableMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
      // See through pointer-to-pointer bitcasts
      SmallVector<Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (Value *Pointer : Pointers)
        if (!DeadStackObjects.count(Pointer)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = &*BBI;

        LLVM_DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                          << *Dead << "\n  Objects: ";
                   for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
                        E = Pointers.end();
                        I != E; ++I) {
                     dbgs() << **I;
                     if (std::next(I) != E)
                       dbgs() << ", ";
                   } dbgs()
                   << '\n');

        // DCE instructions only used to calculate that store.
        deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, OBB,
                              &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(&*BBI, TLI)) {
      LLVM_DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n  DEAD: "
                        << *&*BBI << '\n');
      deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, OBB,
                            &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(&*BBI);
      continue;
    }

    if (auto *Call = dyn_cast<CallBase>(&*BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(&*BBI, TLI))
        DeadStackObjects.remove(&*BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(Call))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        return isRefSet(AA->getModRefInfo(
            Call, I, getPointerSize(I, DL, *TLI, BB.getParent())));
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    // We can remove the dead stores, irrespective of the fence and its ordering
    // (release/acquire/seq_cst). Fences only constrain the ordering of
    // already visible stores; they do not make a store visible to other
    // threads. So, skipping over a fence does not change a store from being
    // dead.
    if (isa<FenceInst>(*BBI))
      continue;

    MemoryLocation LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load
        break;
      LoadedLoc = MemoryLocation::get(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = MemoryLocation::get(V);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't removed
      // above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    removeAccessedObjects(LoadedLoc, DeadStackObjects, DL, AA, TLI, BB.getParent());

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}

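// For example (illustrative IR), shortening at the end rewrites
//
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 32, i1 false)
//   ...                             ; a later store overwrites bytes [16, 32)
//
// into a memset of length 16, so the tail bytes are not written twice.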
static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
                         int64_t &EarlierSize, int64_t LaterOffset,
                         int64_t LaterSize, bool IsOverwriteEnd) {
  // TODO: base this on the target vector size so that if the earlier
  // store was too small to get vector writes anyway then it's likely
  // a good idea to shorten it.
  // Power-of-2 vector writes are probably always a bad idea to optimize,
  // as any store/memset/memcpy is likely using vector instructions, so
  // shortening it to a non-vector size is likely to be slower.
  auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
  unsigned EarlierWriteAlign = EarlierIntrinsic->getDestAlignment();
  if (!IsOverwriteEnd)
    LaterOffset = int64_t(LaterOffset + LaterSize);

  if (!(isPowerOf2_64(LaterOffset) && EarlierWriteAlign <= LaterOffset) &&
      !((EarlierWriteAlign != 0) && LaterOffset % EarlierWriteAlign == 0))
    return false;

  int64_t NewLength = IsOverwriteEnd
                          ? LaterOffset - EarlierOffset
                          : EarlierSize - (LaterOffset - EarlierOffset);

  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) {
    // When shortening an atomic memory intrinsic, the newly shortened
    // length must remain an integer multiple of the element size.
    const uint32_t ElementSize = AMI->getElementSizeInBytes();
    if (0 != NewLength % ElementSize)
      return false;
  }

  LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
                    << (IsOverwriteEnd ? "END" : "BEGIN") << ": "
                    << *EarlierWrite << "\n  KILLER (offset " << LaterOffset
                    << ", " << EarlierSize << ")\n");

  Value *EarlierWriteLength = EarlierIntrinsic->getLength();
  Value *TrimmedLength =
      ConstantInt::get(EarlierWriteLength->getType(), NewLength);
  EarlierIntrinsic->setLength(TrimmedLength);

  EarlierSize = NewLength;
  if (!IsOverwriteEnd) {
    int64_t OffsetMoved = (LaterOffset - EarlierOffset);
    Value *Indices[1] = {
        ConstantInt::get(EarlierWriteLength->getType(), OffsetMoved)};
    GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
        EarlierIntrinsic->getRawDest()->getType()->getPointerElementType(),
        EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
    NewDestGEP->setDebugLoc(EarlierIntrinsic->getDebugLoc());
    EarlierIntrinsic->setDest(NewDestGEP);
    EarlierOffset = EarlierOffset + OffsetMoved;
  }
  return true;
}

static bool tryToShortenEnd(Instruction *EarlierWrite,
                            OverlapIntervalsTy &IntervalMap,
                            int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = --IntervalMap.end();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart > EarlierStart && LaterStart < EarlierStart + EarlierSize &&
      LaterStart + LaterSize >= EarlierStart + EarlierSize) {
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, true)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool tryToShortenBegin(Instruction *EarlierWrite,
                              OverlapIntervalsTy &IntervalMap,
                              int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = IntervalMap.begin();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart <= EarlierStart && LaterStart + LaterSize > EarlierStart) {
    assert(LaterStart + LaterSize < EarlierStart + EarlierSize &&
           "Should have been handled as OW_Complete");
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, false)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool removePartiallyOverlappedStores(AliasAnalysis *AA,
                                            const DataLayout &DL,
                                            InstOverlapIntervalsTy &IOL) {
  bool Changed = false;
  for (auto OI : IOL) {
    Instruction *EarlierWrite = OI.first;
    MemoryLocation Loc = getLocForWrite(EarlierWrite);
    assert(isRemovable(EarlierWrite) && "Expect only removable instruction");

    const Value *Ptr = Loc.Ptr->stripPointerCasts();
    int64_t EarlierStart = 0;
    int64_t EarlierSize = int64_t(Loc.Size.getValue());
    GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
    OverlapIntervalsTy &IntervalMap = OI.second;
    Changed |=
        tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
    if (IntervalMap.empty())
      continue;
    Changed |=
        tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
  }
  return Changed;
}

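// For example (illustrative IR), eliminateNoopStore removes the store below
// when nothing may modify %p between the load and the store:
//
//   %v = load i32, i32* %p
//   store i32 %v, i32* %p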
static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
                               AliasAnalysis *AA, MemoryDependenceResults *MD,
                               const DataLayout &DL,
                               const TargetLibraryInfo *TLI,
                               InstOverlapIntervalsTy &IOL,
                               OrderedBasicBlock &OBB) {
  // Must be a store instruction.
  StoreInst *SI = dyn_cast<StoreInst>(Inst);
  if (!SI)
    return false;

  // If we're storing the same value back to a pointer that we just loaded from,
  // then the store can be removed.
  if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
    if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
        isRemovable(SI) && memoryIsNotModifiedBetween(DepLoad, SI, AA)) {

      LLVM_DEBUG(
          dbgs() << "DSE: Remove Store Of Load from same pointer:\n  LOAD: "
                 << *DepLoad << "\n  STORE: " << *SI << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, OBB);
      ++NumRedundantStores;
      return true;
    }
  }

  // Remove null stores into the calloc'ed objects
  Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
  if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
    Instruction *UnderlyingPointer =
        dyn_cast<Instruction>(GetUnderlyingObject(SI->getPointerOperand(), DL));

    if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
        memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA)) {
      LLVM_DEBUG(
          dbgs() << "DSE: Remove null store to the calloc'ed object:\n  DEAD: "
                 << *Inst << "\n  OBJECT: " << *UnderlyingPointer << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, OBB);
      ++NumRedundantStores;
      return true;
    }
  }
  return false;
}

static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  const DataLayout &DL = BB.getModule()->getDataLayout();
  bool MadeChange = false;

  OrderedBasicBlock OBB(&BB);
  Instruction *LastThrowing = nullptr;

  // A map of interval maps representing partially-overwritten value parts.
  InstOverlapIntervalsTy IOL;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(&*BBI, TLI)) {
      MadeChange |= handleFree(F, AA, MD, DT, TLI, IOL, OBB);
      // Increment BBI after handleFree has potentially deleted instructions.
      // This ensures we maintain a valid iterator.
      ++BBI;
      continue;
    }

    Instruction *Inst = &*BBI++;

    if (Inst->mayThrow()) {
      LastThrowing = Inst;
      continue;
    }

    // Check to see if Inst writes to memory.  If not, continue.
    if (!hasAnalyzableMemoryWrite(Inst, *TLI))
      continue;

    // eliminateNoopStore will update the iterator, if necessary.
    if (eliminateNoopStore(Inst, BBI, AA, MD, DL, TLI, IOL, OBB)) {
      MadeChange = true;
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    MemDepResult InstDep = MD->getDependency(Inst, &OBB);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;

    // Figure out what location is being stored to.
    MemoryLocation Loc = getLocForWrite(Inst);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;

    // Loop until we find a store we can eliminate or a load that
    // invalidates the analysis. Without an upper bound on the number of
    // instructions examined, this analysis can become very time-consuming.
    // However, the potential gain diminishes as we process more instructions
    // without eliminating any of them. Therefore, we limit the number of
    // instructions we look at.
    auto Limit = MD->getDefaultBlockScanLimit();
    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out.  However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      if (!hasAnalyzableMemoryWrite(DepWrite, *TLI))
        break;
      MemoryLocation DepLoc = getLocForWrite(DepWrite);
      // If we didn't get a useful location, bail out.
      if (!DepLoc.Ptr)
        break;

      // Make sure we don't look past a call which might throw. This is an
      // issue because MemoryDependenceAnalysis works in the wrong direction:
      // it finds instructions which dominate the current instruction, rather than
      // instructions which are post-dominated by the current instruction.
      //
      // If the underlying object is a non-escaping memory allocation, any store
      // to it is dead along the unwind edge. Otherwise, we need to preserve
      // the store.
      if (LastThrowing && OBB.dominates(DepWrite, LastThrowing)) {
        const Value* Underlying = GetUnderlyingObject(DepLoc.Ptr, DL);
        bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
        if (!IsStoreDeadOnUnwind) {
            // We're looking for a call to an allocation function
            // where the allocation doesn't escape before the last
            // throwing instruction; PointerMayBeCaptured is a
            // reasonably fast approximation.
            IsStoreDeadOnUnwind = isAllocLikeFn(Underlying, TLI) &&
                !PointerMayBeCaptured(Underlying, false, true);
        }
        if (!IsStoreDeadOnUnwind)
          break;
      }

      // If we find a write that is a) removable (i.e., non-volatile), b) is
      // completely obliterated by the store to 'Loc', and c) which we know that
      // 'Inst' doesn't load from, then we can remove it.
      // Also try to merge two stores if a later one only touches memory written
      // to by the earlier one.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR = isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset,
                                         InstWriteOffset, DepWrite, IOL, *AA,
                                         BB.getParent());
        if (OR == OW_Complete) {
          LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *DepWrite
                            << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL, OBB);
          ++NumFastStores;
          MadeChange = true;

          // We erased DepWrite; start over.
          InstDep = MD->getDependency(Inst, &OBB);
          continue;
        } else if ((OR == OW_End && isShortenableAtTheEnd(DepWrite)) ||
                   ((OR == OW_Begin &&
                     isShortenableAtTheBeginning(DepWrite)))) {
          assert(!EnablePartialOverwriteTracking && "Do not expect to perform "
                                                    "when partial-overwrite "
                                                    "tracking is enabled");
          // The overwrite result is known, so these must be known, too.
          int64_t EarlierSize = DepLoc.Size.getValue();
          int64_t LaterSize = Loc.Size.getValue();
          bool IsOverwriteEnd = (OR == OW_End);
          MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
                                    InstWriteOffset, LaterSize, IsOverwriteEnd);
        } else if (EnablePartialStoreMerging &&
                   OR == OW_PartialEarlierWithFullLater) {
          auto *Earlier = dyn_cast<StoreInst>(DepWrite);
          auto *Later = dyn_cast<StoreInst>(Inst);
          if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
              Later && isa<ConstantInt>(Later->getValueOperand()) &&
              memoryIsNotModifiedBetween(Earlier, Later, AA)) {
            // If the store we find is:
            //   a) partially overwritten by the store to 'Loc'
            //   b) the later store is fully contained in the earlier one and
            //   c) they both have a constant value
            // Merge the two stores, replacing the earlier store's value with a
            // merge of both values.
            // TODO: Deal with other constant types (vectors, etc), and probably
            // some mem intrinsics (if needed)
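            //
            // For example (illustrative): an i32 store of 0 at offset 0
            // followed by an i8 store of 1 at offset 0 merges, on a
            // little-endian target, into a single i32 store of 0x00000001.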

            APInt EarlierValue =
                cast<ConstantInt>(Earlier->getValueOperand())->getValue();
            APInt LaterValue =
                cast<ConstantInt>(Later->getValueOperand())->getValue();
            unsigned LaterBits = LaterValue.getBitWidth();
            assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
            LaterValue = LaterValue.zext(EarlierValue.getBitWidth());

            // Offset of the smaller store inside the larger store
            unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
            unsigned LShiftAmount =
                DL.isBigEndian()
                    ? EarlierValue.getBitWidth() - BitOffsetDiff - LaterBits
                    : BitOffsetDiff;
            APInt Mask =
                APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
                                  LShiftAmount + LaterBits);
            // Clear the bits we'll be replacing, then OR with the smaller
            // store, shifted appropriately.
            APInt Merged =
                (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
            LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n  Earlier: " << *DepWrite
                              << "\n  Later: " << *Inst
                              << "\n  Merged Value: " << Merged << '\n');

            auto *SI = new StoreInst(
                ConstantInt::get(Earlier->getValueOperand()->getType(), Merged),
                Earlier->getPointerOperand(), false, Earlier->getAlignment(),
                Earlier->getOrdering(), Earlier->getSyncScopeID(), DepWrite);

            unsigned MDToKeep[] = {LLVMContext::MD_dbg, LLVMContext::MD_tbaa,
                                   LLVMContext::MD_alias_scope,
                                   LLVMContext::MD_noalias,
                                   LLVMContext::MD_nontemporal};
            SI->copyMetadata(*DepWrite, MDToKeep);
            ++NumModifiedStores;

            // Remove earlier, wider, store
            OBB.replaceInstruction(DepWrite, SI);

            // Delete the old stores and now-dead instructions that feed them.
            deleteDeadInstruction(Inst, &BBI, *MD, *TLI, IOL, OBB);
            deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL, OBB);
            MadeChange = true;

            // We erased DepWrite and Inst (Loc); start over.
            break;
          }
        }
      }

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (isRefSet(AA->getModRefInfo(DepWrite, Loc)))
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,
                                             DepWrite->getIterator(), &BB,
                                             /*QueryInst=*/ nullptr, &Limit);
    }
  }

  if (EnablePartialOverwriteTracking)
    MadeChange |= removePartiallyOverlappedStores(AA, DL, IOL);

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB, AA, MD, TLI, IOL, OBB);

  return MadeChange;
}

static bool eliminateDeadStores(Function &F, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  for (BasicBlock &BB : F)
    // Only check non-dead blocks.  Dead blocks may have strange pointer
    // cycles that will confuse alias analysis.
    if (DT->isReachableFromEntry(&BB))
      MadeChange |= eliminateDeadStores(BB, AA, MD, DT, TLI);

  return MadeChange;
}

//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
  AliasAnalysis *AA = &AM.getResult<AAManager>(F);
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  MemoryDependenceResults *MD = &AM.getResult<MemoryDependenceAnalysis>(F);
  const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F);

  if (!eliminateDeadStores(F, AA, MD, DT, TLI))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}

namespace {

/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
class DSELegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  DSELegacyPass() : FunctionPass(ID) {
    initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    MemoryDependenceResults *MD =
        &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

    return eliminateDeadStores(F, AA, MD, DT, TLI);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }
};

} // end anonymous namespace

char DSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
                    false)

FunctionPass *llvm::createDeadStoreEliminationPass() {
  return new DSELegacyPass();
}