1 //===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements a trivial dead store elimination that only considers
10 // basic-block local redundant stores.
11 //
12 // FIXME: This should eventually be extended to be a post-dominator tree
13 // traversal.  Doing so would be pretty trivial.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "llvm/Transforms/Scalar/DeadStoreElimination.h"
18 #include "llvm/ADT/APInt.h"
19 #include "llvm/ADT/DenseMap.h"
20 #include "llvm/ADT/MapVector.h"
21 #include "llvm/ADT/PostOrderIterator.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/ADT/StringRef.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/CaptureTracking.h"
29 #include "llvm/Analysis/GlobalsModRef.h"
30 #include "llvm/Analysis/MemoryBuiltins.h"
31 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
32 #include "llvm/Analysis/MemoryLocation.h"
33 #include "llvm/Analysis/MemorySSA.h"
34 #include "llvm/Analysis/MemorySSAUpdater.h"
35 #include "llvm/Analysis/PostDominators.h"
36 #include "llvm/Analysis/TargetLibraryInfo.h"
37 #include "llvm/Analysis/ValueTracking.h"
38 #include "llvm/IR/Argument.h"
39 #include "llvm/IR/BasicBlock.h"
40 #include "llvm/IR/Constant.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DataLayout.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/InstIterator.h"
46 #include "llvm/IR/InstrTypes.h"
47 #include "llvm/IR/Instruction.h"
48 #include "llvm/IR/Instructions.h"
49 #include "llvm/IR/IntrinsicInst.h"
50 #include "llvm/IR/Intrinsics.h"
51 #include "llvm/IR/LLVMContext.h"
52 #include "llvm/IR/Module.h"
53 #include "llvm/IR/PassManager.h"
54 #include "llvm/IR/PatternMatch.h"
55 #include "llvm/IR/Value.h"
56 #include "llvm/InitializePasses.h"
57 #include "llvm/Pass.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/CommandLine.h"
60 #include "llvm/Support/Debug.h"
61 #include "llvm/Support/DebugCounter.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Support/MathExtras.h"
64 #include "llvm/Support/raw_ostream.h"
65 #include "llvm/Transforms/Scalar.h"
66 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
67 #include "llvm/Transforms/Utils/Local.h"
68 #include <algorithm>
69 #include <cassert>
70 #include <cstddef>
71 #include <cstdint>
72 #include <iterator>
73 #include <map>
74 #include <utility>
75 
76 using namespace llvm;
77 using namespace PatternMatch;
78 
79 #define DEBUG_TYPE "dse"
80 
81 STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
82 STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
83 STATISTIC(NumFastStores, "Number of stores deleted");
84 STATISTIC(NumFastOther, "Number of other instrs removed");
85 STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
86 STATISTIC(NumModifiedStores, "Number of stores modified");
87 STATISTIC(NumCFGChecks, "Number of checks performed during CFG-based analysis");
88 STATISTIC(NumCFGTries, "Number of attempted CFG-based eliminations");
89 STATISTIC(NumCFGSuccess, "Number of successful CFG-based eliminations");
90 STATISTIC(NumGetDomMemoryDefPassed,
91           "Number of times a valid candidate is returned from getDomMemoryDef");
92 STATISTIC(NumDomMemDefChecks,
93           "Number of iterations checking for reads in getDomMemoryDef");
94 
95 DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
96               "Controls which MemoryDefs are eliminated.");
97 
98 static cl::opt<bool>
99 EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
100   cl::init(true), cl::Hidden,
101   cl::desc("Enable partial-overwrite tracking in DSE"));
102 
103 static cl::opt<bool>
104 EnablePartialStoreMerging("enable-dse-partial-store-merging",
105   cl::init(true), cl::Hidden,
106   cl::desc("Enable partial store merging in DSE"));
107 
108 static cl::opt<bool>
109     EnableMemorySSA("enable-dse-memoryssa", cl::init(true), cl::Hidden,
110                     cl::desc("Use the new MemorySSA-backed DSE."));
111 
112 static cl::opt<unsigned>
113     MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden,
114                        cl::desc("The number of memory instructions to scan for "
115                                 "dead store elimination (default = 150)"));
116 static cl::opt<unsigned> MemorySSAUpwardsStepLimit(
117     "dse-memoryssa-walklimit", cl::init(90), cl::Hidden,
118     cl::desc("The maximum number of steps while walking upwards to find "
119              "MemoryDefs that may be killed (default = 90)"));
120 
121 static cl::opt<unsigned> MemorySSAPartialStoreLimit(
122     "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden,
123     cl::desc("The maximum number of candidates that only partially overwrite the "
124              "killing MemoryDef to consider"
125              " (default = 5)"));
126 
127 static cl::opt<unsigned> MemorySSADefsPerBlockLimit(
128     "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden,
129     cl::desc("The number of MemoryDefs we consider as candidates to eliminate "
130              "other stores per basic block (default = 5000)"));
131 
132 static cl::opt<unsigned> MemorySSASameBBStepCost(
133     "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden,
134     cl::desc(
135         "The cost of a step in the same basic block as the killing MemoryDef "
136         "(default = 1)"));
137 
138 static cl::opt<unsigned>
139     MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5),
140                              cl::Hidden,
141                              cl::desc("The cost of a step in a different basic "
142                                       "block than the killing MemoryDef "
143                                       "(default = 5)"));
144 
145 static cl::opt<unsigned> MemorySSAPathCheckLimit(
146     "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden,
147     cl::desc("The maximum number of blocks to check when trying to prove that "
148              "all paths to an exit go through a killing block (default = 50)"));
149 
150 //===----------------------------------------------------------------------===//
151 // Helper functions
152 //===----------------------------------------------------------------------===//
153 using OverlapIntervalsTy = std::map<int64_t, int64_t>;
154 using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
155 
156 /// Delete this instruction.  Before we do, go through and zero out all the
157 /// operands of this instruction.  If any of them become dead, delete them and
158 /// the computation tree that feeds them.
159 /// If ValueSet is non-null, remove any deleted instructions from it as well.
160 static void
161 deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI,
162                       MemoryDependenceResults &MD, const TargetLibraryInfo &TLI,
163                       InstOverlapIntervalsTy &IOL,
164                       MapVector<Instruction *, bool> &ThrowableInst,
165                       SmallSetVector<const Value *, 16> *ValueSet = nullptr) {
166   SmallVector<Instruction*, 32> NowDeadInsts;
167 
168   NowDeadInsts.push_back(I);
169   --NumFastOther;
170 
171   // Keeping the iterator straight is a pain, so we let this routine tell the
172   // caller what the next instruction is after we're done mucking about.
173   BasicBlock::iterator NewIter = *BBI;
174 
175   // Before we touch this instruction, remove it from memdep!
176   do {
177     Instruction *DeadInst = NowDeadInsts.pop_back_val();
178     // Mark the DeadInst as dead in the list of throwable instructions.
179     auto It = ThrowableInst.find(DeadInst);
180     if (It != ThrowableInst.end())
181       ThrowableInst[It->first] = false;
182     ++NumFastOther;
183 
184     // Try to preserve debug information attached to the dead instruction.
185     salvageDebugInfo(*DeadInst);
186     salvageKnowledge(DeadInst);
187 
188     // This instruction is dead, zap it, in stages.  Start by removing it from
189     // MemDep, which needs to know the operands and needs it to be in the
190     // function.
191     MD.removeInstruction(DeadInst);
192 
193     for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
194       Value *Op = DeadInst->getOperand(op);
195       DeadInst->setOperand(op, nullptr);
196 
197       // If this operand just became dead, add it to the NowDeadInsts list.
198       if (!Op->use_empty()) continue;
199 
200       if (Instruction *OpI = dyn_cast<Instruction>(Op))
201         if (isInstructionTriviallyDead(OpI, &TLI))
202           NowDeadInsts.push_back(OpI);
203     }
204 
205     if (ValueSet) ValueSet->remove(DeadInst);
206     IOL.erase(DeadInst);
207 
208     if (NewIter == DeadInst->getIterator())
209       NewIter = DeadInst->eraseFromParent();
210     else
211       DeadInst->eraseFromParent();
212   } while (!NowDeadInsts.empty());
213   *BBI = NewIter;
214   // Pop dead entries from back of ThrowableInst till we find an alive entry.
215   while (!ThrowableInst.empty() && !ThrowableInst.back().second)
216     ThrowableInst.pop_back();
217 }
218 
219 /// Does this instruction write some memory?  This only returns true for things
220 /// that we can analyze with other helpers below.
221 static bool hasAnalyzableMemoryWrite(Instruction *I,
222                                      const TargetLibraryInfo &TLI) {
223   if (isa<StoreInst>(I))
224     return true;
225   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
226     switch (II->getIntrinsicID()) {
227     default:
228       return false;
229     case Intrinsic::memset:
230     case Intrinsic::memmove:
231     case Intrinsic::memcpy:
232     case Intrinsic::memcpy_inline:
233     case Intrinsic::memcpy_element_unordered_atomic:
234     case Intrinsic::memmove_element_unordered_atomic:
235     case Intrinsic::memset_element_unordered_atomic:
236     case Intrinsic::init_trampoline:
237     case Intrinsic::lifetime_end:
238     case Intrinsic::masked_store:
239       return true;
240     }
241   }
242   if (auto *CB = dyn_cast<CallBase>(I)) {
243     LibFunc LF;
244     if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
245       switch (LF) {
246       case LibFunc_strcpy:
247       case LibFunc_strncpy:
248       case LibFunc_strcat:
249       case LibFunc_strncat:
250         return true;
251       default:
252         return false;
253       }
254     }
255   }
256   return false;
257 }
258 
259 /// Return a Location stored to by the specified instruction. If isRemovable
260 /// returns true, this function and getLocForRead completely describe the memory
261 /// operations for this instruction.
262 static MemoryLocation getLocForWrite(Instruction *Inst,
263                                      const TargetLibraryInfo &TLI) {
264   if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
265     return MemoryLocation::get(SI);
266 
267   // memcpy/memmove/memset.
268   if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst))
269     return MemoryLocation::getForDest(MI);
270 
271   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
272     switch (II->getIntrinsicID()) {
273     default:
274       return MemoryLocation(); // Unhandled intrinsic.
275     case Intrinsic::init_trampoline:
276       return MemoryLocation::getAfter(II->getArgOperand(0));
277     case Intrinsic::masked_store:
278       return MemoryLocation::getForArgument(II, 1, TLI);
279     case Intrinsic::lifetime_end: {
280       uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
281       return MemoryLocation(II->getArgOperand(1), Len);
282     }
283     }
284   }
285   if (auto *CB = dyn_cast<CallBase>(Inst))
286     // All the supported TLI functions so far happen to have dest as their
287     // first argument.
288     return MemoryLocation::getAfter(CB->getArgOperand(0));
289   return MemoryLocation();
290 }
291 
292 /// Return the location read by the specified "hasAnalyzableMemoryWrite"
293 /// instruction if any.
294 static MemoryLocation getLocForRead(Instruction *Inst,
295                                     const TargetLibraryInfo &TLI) {
296   assert(hasAnalyzableMemoryWrite(Inst, TLI) && "Unknown instruction case");
297 
298   // The only instructions that both read and write are the mem transfer
299   // instructions (memcpy/memmove).
300   if (auto *MTI = dyn_cast<AnyMemTransferInst>(Inst))
301     return MemoryLocation::getForSource(MTI);
302   return MemoryLocation();
303 }
304 
305 /// If the value of this instruction and the memory it writes to are unused,
306 /// may we delete this instruction?
307 static bool isRemovable(Instruction *I) {
308   // Don't remove volatile/atomic stores.
309   if (StoreInst *SI = dyn_cast<StoreInst>(I))
310     return SI->isUnordered();
311 
312   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
313     switch (II->getIntrinsicID()) {
314     default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
315     case Intrinsic::lifetime_end:
316       // Never remove dead lifetime_end's, e.g. because it is followed by a
317       // free.
318       return false;
319     case Intrinsic::init_trampoline:
320       // Always safe to remove init_trampoline.
321       return true;
322     case Intrinsic::memset:
323     case Intrinsic::memmove:
324     case Intrinsic::memcpy:
325     case Intrinsic::memcpy_inline:
326       // Don't remove volatile memory intrinsics.
327       return !cast<MemIntrinsic>(II)->isVolatile();
328     case Intrinsic::memcpy_element_unordered_atomic:
329     case Intrinsic::memmove_element_unordered_atomic:
330     case Intrinsic::memset_element_unordered_atomic:
331     case Intrinsic::masked_store:
332       return true;
333     }
334   }
335 
336   // note: only get here for calls with analyzable writes - i.e. libcalls
337   if (auto *CB = dyn_cast<CallBase>(I))
338     return CB->use_empty();
339 
340   return false;
341 }
342 
343 /// Returns true if the end of this instruction can be safely shortened in
344 /// length.
345 static bool isShortenableAtTheEnd(Instruction *I) {
346   // Don't shorten stores for now
347   if (isa<StoreInst>(I))
348     return false;
349 
350   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
351     switch (II->getIntrinsicID()) {
352       default: return false;
353       case Intrinsic::memset:
354       case Intrinsic::memcpy:
355       case Intrinsic::memcpy_element_unordered_atomic:
356       case Intrinsic::memset_element_unordered_atomic:
357         // Do shorten memory intrinsics.
358         // FIXME: Add memmove if it's also safe to transform.
359         return true;
360     }
361   }
362 
363   // Don't shorten libcalls for now.
364 
365   return false;
366 }
367 
368 /// Returns true if the beginning of this instruction can be safely shortened
369 /// in length.
370 static bool isShortenableAtTheBeginning(Instruction *I) {
371   // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
372   // easily done by offsetting the source address.
373   return isa<AnyMemSetInst>(I);
374 }
375 
376 /// Return the pointer that is being written to.
377 static Value *getStoredPointerOperand(Instruction *I,
378                                       const TargetLibraryInfo &TLI) {
379   //TODO: factor this to reuse getLocForWrite
380   MemoryLocation Loc = getLocForWrite(I, TLI);
381   assert(Loc.Ptr &&
382          "unable to find pointer written for analyzable instruction?");
383   // TODO: most APIs don't expect const Value *
384   return const_cast<Value*>(Loc.Ptr);
385 }
386 
387 static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
388                                const TargetLibraryInfo &TLI,
389                                const Function *F) {
390   uint64_t Size;
391   ObjectSizeOpts Opts;
392   Opts.NullIsUnknownSize = NullPointerIsDefined(F);
393 
394   if (getObjectSize(V, Size, DL, &TLI, Opts))
395     return Size;
396   return MemoryLocation::UnknownSize;
397 }
398 
399 namespace {
400 
401 enum OverwriteResult {
402   OW_Begin,
403   OW_Complete,
404   OW_End,
405   OW_PartialEarlierWithFullLater,
406   OW_MaybePartial,
407   OW_Unknown
408 };
409 
410 } // end anonymous namespace
411 
412 /// Check if two instructions are masked stores that completely
413 /// overwrite one another. More specifically, \p Later has to
414 /// overwrite \p Earlier.
415 template <typename AATy>
416 static OverwriteResult isMaskedStoreOverwrite(const Instruction *Later,
417                                               const Instruction *Earlier,
418                                               AATy &AA) {
419   const auto *IIL = dyn_cast<IntrinsicInst>(Later);
420   const auto *IIE = dyn_cast<IntrinsicInst>(Earlier);
421   if (IIL == nullptr || IIE == nullptr)
422     return OW_Unknown;
423   if (IIL->getIntrinsicID() != Intrinsic::masked_store ||
424       IIE->getIntrinsicID() != Intrinsic::masked_store)
425     return OW_Unknown;
426   // Pointers.
427   Value *LP = IIL->getArgOperand(1)->stripPointerCasts();
428   Value *EP = IIE->getArgOperand(1)->stripPointerCasts();
429   if (LP != EP && !AA.isMustAlias(LP, EP))
430     return OW_Unknown;
431   // Masks.
432   // TODO: check that Later's mask is a superset of the Earlier's mask.
433   if (IIL->getArgOperand(3) != IIE->getArgOperand(3))
434     return OW_Unknown;
435   return OW_Complete;
436 }
437 
438 /// Return 'OW_Complete' if a store to the 'Later' location (by \p LaterI
439 /// instruction) completely overwrites a store to the 'Earlier' location
440 /// (by \p EarlierI instruction).
441 /// Return OW_MaybePartial if \p Later does not completely overwrite
442 /// \p Earlier, but they both write to the same underlying object. In that
443 /// case, use isPartialOverwrite to check if \p Later partially overwrites
444 /// \p Earlier. Returns 'OW_Unknown' if nothing can be determined.
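/// Illustrative example (hypothetical IR; %p and %q are must-aliased pointers
/// into the same object):
///   store i16 0, i16* %p    ; Earlier, 2 bytes
///   store i32 0, i32* %q    ; Later, 4 bytes
/// The later store covers every byte written by the earlier one, so the result
/// is OW_Complete.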
445 template <typename AATy>
446 static OverwriteResult
447 isOverwrite(const Instruction *LaterI, const Instruction *EarlierI,
448             const MemoryLocation &Later, const MemoryLocation &Earlier,
449             const DataLayout &DL, const TargetLibraryInfo &TLI,
450             int64_t &EarlierOff, int64_t &LaterOff, AATy &AA,
451             const Function *F) {
452   // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
453   // get imprecise values here, though (except for unknown sizes).
454   if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise()) {
455     // Masked stores have imprecise locations, but we can reason about them
456     // to some extent.
457     return isMaskedStoreOverwrite(LaterI, EarlierI, AA);
458   }
459 
460   const uint64_t LaterSize = Later.Size.getValue();
461   const uint64_t EarlierSize = Earlier.Size.getValue();
462 
463   const Value *P1 = Earlier.Ptr->stripPointerCasts();
464   const Value *P2 = Later.Ptr->stripPointerCasts();
465 
466   // If the start pointers are the same, we just have to compare sizes to see if
467   // the later store was larger than the earlier store.
468   if (P1 == P2 || AA.isMustAlias(P1, P2)) {
469     // Make sure that the Later size is >= the Earlier size.
470     if (LaterSize >= EarlierSize)
471       return OW_Complete;
472   }
473 
474   // Check to see if the later store is to the entire object (either a global,
475   // an alloca, or a byval/inalloca argument).  If so, then it clearly
476   // overwrites any other store to the same object.
477   const Value *UO1 = getUnderlyingObject(P1), *UO2 = getUnderlyingObject(P2);
478 
479   // If we can't resolve the same pointers to the same object, then we can't
480   // analyze them at all.
481   if (UO1 != UO2)
482     return OW_Unknown;
483 
484   // If the "Later" store is to a recognizable object, get its size.
485   uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, F);
486   if (ObjectSize != MemoryLocation::UnknownSize)
487     if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
488       return OW_Complete;
489 
490   // Okay, we have stores to two completely different pointers.  Try to
491   // decompose the pointer into a "base + constant_offset" form.  If the base
492   // pointers are equal, then we can reason about the two stores.
493   EarlierOff = 0;
494   LaterOff = 0;
495   const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
496   const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);
497 
498   // If the base pointers still differ, we have two completely different stores.
499   if (BP1 != BP2)
500     return OW_Unknown;
501 
502   // The later access completely overlaps the earlier store if and only if
503   // both start and end of the earlier one is "inside" the later one:
504   //    |<->|--earlier--|<->|
505   //    |-------later-------|
506   // Accesses may overlap if and only if start of one of them is "inside"
507   // another one:
508   //    |<->|--earlier--|<----->|
509   //    |-------later-------|
510   //           OR
511   //    |----- earlier -----|
512   //    |<->|---later---|<----->|
513   //
514   // We have to be careful here as *Off is signed while *.Size is unsigned.
515 
516   // Check if the earlier access starts "not before" the later one.
517   if (EarlierOff >= LaterOff) {
518     // If the earlier access ends "not after" the later access then the earlier
519     // one is completely overwritten by the later one.
520     if (uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
521       return OW_Complete;
522     // If start of the earlier access is "before" end of the later access then
523     // accesses overlap.
524     else if ((uint64_t)(EarlierOff - LaterOff) < LaterSize)
525       return OW_MaybePartial;
526   }
527   // If start of the later access is "before" end of the earlier access then
528   // accesses overlap.
529   else if ((uint64_t)(LaterOff - EarlierOff) < EarlierSize) {
530     return OW_MaybePartial;
531   }
532 
533   // Can reach here only if accesses are known not to overlap. There is no
534   // dedicated code to indicate no overlap so signal "unknown".
535   return OW_Unknown;
536 }
537 
538 /// Return 'OW_Complete' if a store to the 'Later' location completely
539 /// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
540 /// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
541 /// beginning of the 'Earlier' location is overwritten by 'Later'.
542 /// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
543 /// overwritten by a latter (smaller) store which doesn't write outside the big
544 /// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
545 /// NOTE: This function must only be called if both \p Later and \p Earlier
546 /// write to the same underlying object with valid \p EarlierOff and \p
547 /// LaterOff.
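/// Illustrative example (hypothetical IR; %q points one byte past %p into the
/// same object):
///   store i64 0, i64* %p    ; Earlier, bytes [0, 8)
///   store i8  1, i8*  %q    ; Later,  byte  [1, 2)
/// The later store is fully contained in the earlier one, so with partial
/// store merging enabled this returns OW_PartialEarlierWithFullLater.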
548 static OverwriteResult isPartialOverwrite(const MemoryLocation &Later,
549                                           const MemoryLocation &Earlier,
550                                           int64_t EarlierOff, int64_t LaterOff,
551                                           Instruction *DepWrite,
552                                           InstOverlapIntervalsTy &IOL) {
553   const uint64_t LaterSize = Later.Size.getValue();
554   const uint64_t EarlierSize = Earlier.Size.getValue();
555   // We may now overlap, although the overlap is not complete. There might also
556   // be other incomplete overlaps, and together, they might cover the complete
557   // earlier write.
558   // Note: The correctness of this logic depends on the fact that this function
559   // is not even called providing DepWrite when there are any intervening reads.
560   if (EnablePartialOverwriteTracking &&
561       LaterOff < int64_t(EarlierOff + EarlierSize) &&
562       int64_t(LaterOff + LaterSize) >= EarlierOff) {
563 
564     // Insert our part of the overlap into the map.
565     auto &IM = IOL[DepWrite];
566     LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
567                       << ", " << int64_t(EarlierOff + EarlierSize)
568                       << ") Later [" << LaterOff << ", "
569                       << int64_t(LaterOff + LaterSize) << ")\n");
570 
571     // Make sure that we only insert non-overlapping intervals and combine
572     // adjacent intervals. The intervals are stored in the map with the ending
573     // offset as the key (in the half-open sense) and the starting offset as
574     // the value.
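    // For example, an overwritten range covering bytes [4, 12) is recorded as
    // IM[12] = 4.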
575     int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;
576 
577     // Find any intervals ending at, or after, LaterIntStart which start
578     // before LaterIntEnd.
579     auto ILI = IM.lower_bound(LaterIntStart);
580     if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
581       // This existing interval is overlapped with the current store somewhere
582       // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
583       // intervals and adjusting our start and end.
584       LaterIntStart = std::min(LaterIntStart, ILI->second);
585       LaterIntEnd = std::max(LaterIntEnd, ILI->first);
586       ILI = IM.erase(ILI);
587 
588       // Continue erasing and adjusting our end in case other previous
589       // intervals are also overlapped with the current store.
590       //
591       // |--- earlier 1 ---|  |--- earlier 2 ---|
592       //     |------- later---------|
593       //
594       while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
595         assert(ILI->second > LaterIntStart && "Unexpected interval");
596         LaterIntEnd = std::max(LaterIntEnd, ILI->first);
597         ILI = IM.erase(ILI);
598       }
599     }
600 
601     IM[LaterIntEnd] = LaterIntStart;
602 
603     ILI = IM.begin();
604     if (ILI->second <= EarlierOff &&
605         ILI->first >= int64_t(EarlierOff + EarlierSize)) {
606       LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
607                         << EarlierOff << ", "
608                         << int64_t(EarlierOff + EarlierSize)
609                         << ") Composite Later [" << ILI->second << ", "
610                         << ILI->first << ")\n");
611       ++NumCompletePartials;
612       return OW_Complete;
613     }
614   }
615 
616   // Check for an earlier store which writes to all the memory locations that
617   // the later store writes to.
618   if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
619       int64_t(EarlierOff + EarlierSize) > LaterOff &&
620       uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
621     LLVM_DEBUG(dbgs() << "DSE: Partial overwrite an earlier load ["
622                       << EarlierOff << ", "
623                       << int64_t(EarlierOff + EarlierSize)
624                       << ") by a later store [" << LaterOff << ", "
625                       << int64_t(LaterOff + LaterSize) << ")\n");
626     // TODO: Maybe come up with a better name?
627     return OW_PartialEarlierWithFullLater;
628   }
629 
630   // Another interesting case is if the later store overwrites the end of the
631   // earlier store.
632   //
633   //      |--earlier--|
634   //                |--   later   --|
635   //
636   // In this case we may want to trim the size of earlier to avoid generating
637   // writes to addresses which will definitely be overwritten later
638   if (!EnablePartialOverwriteTracking &&
639       (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
640        int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
641     return OW_End;
642 
643   // Finally, we also need to check if the later store overwrites the beginning
644   // of the earlier store.
645   //
646   //                |--earlier--|
647   //      |--   later   --|
648   //
649   // In this case we may want to move the destination address and trim the size
650   // of earlier to avoid generating writes to addresses which will definitely
651   // be overwritten later.
652   if (!EnablePartialOverwriteTracking &&
653       (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
654     assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
655            "Expect to be handled as OW_Complete");
656     return OW_Begin;
657   }
658   // Otherwise, they don't completely overlap.
659   return OW_Unknown;
660 }
661 
662 /// If 'Inst' might be a self read (i.e. a noop copy of a
663 /// memory region into an identical pointer) then it doesn't actually make its
664 /// input dead in the traditional sense.  Consider this case:
665 ///
666 ///   memmove(A <- B)
667 ///   memmove(A <- A)
668 ///
669 /// In this case, the second store to A does not make the first store to A dead.
670 /// The usual situation isn't an explicit A<-A store like this (which can be
671 /// trivially removed) but a case where two pointers may alias.
672 ///
673 /// This function detects when it is unsafe to remove a dependent instruction
674 /// because the DSE inducing instruction may be a self-read.
675 static bool isPossibleSelfRead(Instruction *Inst,
676                                const MemoryLocation &InstStoreLoc,
677                                Instruction *DepWrite,
678                                const TargetLibraryInfo &TLI,
679                                AliasAnalysis &AA) {
680   // Self reads can only happen for instructions that read memory.  Get the
681   // location read.
682   MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
683   if (!InstReadLoc.Ptr)
684     return false; // Not a reading instruction.
685 
686   // If the read and written loc obviously don't alias, it isn't a read.
687   if (AA.isNoAlias(InstReadLoc, InstStoreLoc))
688     return false;
689 
690   if (isa<AnyMemCpyInst>(Inst)) {
691     // LLVM's memcpy overlap semantics are not fully fleshed out (see PR11763)
692     // but in practice memcpy(A <- B) either means that A and B are disjoint or
693     // are equal (i.e. there are not partial overlaps).  Given that, if we have:
694     //
695     //   memcpy/memmove(A <- B)  // DepWrite
696     //   memcpy(A <- B)  // Inst
697     //
698     // with Inst reading/writing a size >= that of DepWrite, we can reason as
699     // follows:
700     //
701     //   - If A == B then both the copies are no-ops, so the DepWrite can be
702     //     removed.
703     //   - If A != B then A and B are disjoint locations in Inst.  Since
704     //     Inst.size >= DepWrite.size A and B are disjoint in DepWrite too.
705     //     Therefore DepWrite can be removed.
706     MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);
707 
708     if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
709       return false;
710   }
711 
712   // If DepWrite doesn't read memory or if we can't prove it is a must alias,
713   // then it can't be considered dead.
714   return true;
715 }
716 
717 /// Returns true if the memory which is accessed by the second instruction is not
718 /// modified between the first and the second instruction.
719 /// Precondition: Second instruction must be dominated by the first
720 /// instruction.
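/// Illustrative example (hypothetical IR):
///   %v = load i32, i32* %p        ; FirstI
///   call void @unknown()          ; may write to %p
///   store i32 %v, i32* %p         ; SecondI
/// Here the call may clobber the location accessed by SecondI, so this returns
/// false; without the intervening call it would return true.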
721 template <typename AATy>
722 static bool
723 memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI, AATy &AA,
724                            const DataLayout &DL, DominatorTree *DT) {
725   // Do a backwards scan through the CFG from SecondI to FirstI. Look for
726   // instructions which can modify the memory location accessed by SecondI.
727   //
728   // While doing the walk keep track of the address to check. It might be
729   // different in different basic blocks due to PHI translation.
730   using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
731   SmallVector<BlockAddressPair, 16> WorkList;
732   // Keep track of the address we visited each block with. Bail out if we
733   // visit a block with different addresses.
734   DenseMap<BasicBlock *, Value *> Visited;
735 
736   BasicBlock::iterator FirstBBI(FirstI);
737   ++FirstBBI;
738   BasicBlock::iterator SecondBBI(SecondI);
739   BasicBlock *FirstBB = FirstI->getParent();
740   BasicBlock *SecondBB = SecondI->getParent();
741   MemoryLocation MemLoc = MemoryLocation::get(SecondI);
742   auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr);
743 
744   // Start checking the SecondBB.
745   WorkList.push_back(
746       std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr)));
747   bool isFirstBlock = true;
748 
749   // Check all blocks going backward until we reach the FirstBB.
750   while (!WorkList.empty()) {
751     BlockAddressPair Current = WorkList.pop_back_val();
752     BasicBlock *B = Current.first;
753     PHITransAddr &Addr = Current.second;
754     Value *Ptr = Addr.getAddr();
755 
756     // Ignore instructions before FirstI if this is the FirstBB.
757     BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());
758 
759     BasicBlock::iterator EI;
760     if (isFirstBlock) {
761       // Ignore instructions after SecondI if this is the first visit of SecondBB.
762       assert(B == SecondBB && "first block is not the store block");
763       EI = SecondBBI;
764       isFirstBlock = false;
765     } else {
766       // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
767       // In this case we also have to look at instructions after SecondI.
768       EI = B->end();
769     }
770     for (; BI != EI; ++BI) {
771       Instruction *I = &*BI;
772       if (I->mayWriteToMemory() && I != SecondI)
773         if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
774           return false;
775     }
776     if (B != FirstBB) {
777       assert(B != &FirstBB->getParent()->getEntryBlock() &&
778           "Should not hit the entry block because SI must be dominated by LI");
779       for (BasicBlock *Pred : predecessors(B)) {
780         PHITransAddr PredAddr = Addr;
781         if (PredAddr.NeedsPHITranslationFromBlock(B)) {
782           if (!PredAddr.IsPotentiallyPHITranslatable())
783             return false;
784           if (PredAddr.PHITranslateValue(B, Pred, DT, false))
785             return false;
786         }
787         Value *TranslatedPtr = PredAddr.getAddr();
788         auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr));
789         if (!Inserted.second) {
790           // We already visited this block before. If it was with a different
791           // address - bail out!
792           if (TranslatedPtr != Inserted.first->second)
793             return false;
794           // ... otherwise just skip it.
795           continue;
796         }
797         WorkList.push_back(std::make_pair(Pred, PredAddr));
798       }
799     }
800   }
801   return true;
802 }
803 
804 /// Find all blocks that will unconditionally lead to the block BB and append
805 /// them to Blocks.
806 static void findUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
807                                    BasicBlock *BB, DominatorTree *DT) {
808   for (BasicBlock *Pred : predecessors(BB)) {
809     if (Pred == BB) continue;
810     Instruction *PredTI = Pred->getTerminator();
811     if (PredTI->getNumSuccessors() != 1)
812       continue;
813 
814     if (DT->isReachableFromEntry(Pred))
815       Blocks.push_back(Pred);
816   }
817 }
818 
819 /// Handle frees of entire structures whose dependency is a store
820 /// to a field of that structure.
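/// Illustrative example (hypothetical IR; %s is the pointer being freed):
///   %f = getelementptr inbounds i8, i8* %s, i64 4
///   store i8 0, i8* %f
///   call void @free(i8* %s)
/// The store writes into memory that is freed immediately afterwards without
/// being read, so it can be deleted.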
821 static bool handleFree(CallInst *F, AliasAnalysis *AA,
822                        MemoryDependenceResults *MD, DominatorTree *DT,
823                        const TargetLibraryInfo *TLI,
824                        InstOverlapIntervalsTy &IOL,
825                        MapVector<Instruction *, bool> &ThrowableInst) {
826   bool MadeChange = false;
827 
828   MemoryLocation Loc = MemoryLocation::getAfter(F->getOperand(0));
829   SmallVector<BasicBlock *, 16> Blocks;
830   Blocks.push_back(F->getParent());
831 
832   while (!Blocks.empty()) {
833     BasicBlock *BB = Blocks.pop_back_val();
834     Instruction *InstPt = BB->getTerminator();
835     if (BB == F->getParent()) InstPt = F;
836 
837     MemDepResult Dep =
838         MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
839     while (Dep.isDef() || Dep.isClobber()) {
840       Instruction *Dependency = Dep.getInst();
841       if (!hasAnalyzableMemoryWrite(Dependency, *TLI) ||
842           !isRemovable(Dependency))
843         break;
844 
845       Value *DepPointer =
846           getUnderlyingObject(getStoredPointerOperand(Dependency, *TLI));
847 
848       // Check for aliasing.
849       if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
850         break;
851 
852       LLVM_DEBUG(
853           dbgs() << "DSE: Dead Store to soon to be freed memory:\n  DEAD: "
854                  << *Dependency << '\n');
855 
856       // DCE instructions only used to calculate that store.
857       BasicBlock::iterator BBI(Dependency);
858       deleteDeadInstruction(Dependency, &BBI, *MD, *TLI, IOL,
859                             ThrowableInst);
860       ++NumFastStores;
861       MadeChange = true;
862 
863       // Inst's old Dependency is now deleted. Compute the next dependency,
864       // which may also be dead, as in
865       //    s[0] = 0;
866       //    s[1] = 0; // This has just been deleted.
867       //    free(s);
868       Dep = MD->getPointerDependencyFrom(Loc, false, BBI, BB);
869     }
870 
871     if (Dep.isNonLocal())
872       findUnconditionalPreds(Blocks, BB, DT);
873   }
874 
875   return MadeChange;
876 }
877 
878 /// Check to see if the specified location may alias any of the stack objects in
879 /// the DeadStackObjects set. If so, they become live because the location is
880 /// being loaded.
881 static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
882                                   SmallSetVector<const Value *, 16> &DeadStackObjects,
883                                   const DataLayout &DL, AliasAnalysis *AA,
884                                   const TargetLibraryInfo *TLI,
885                                   const Function *F) {
886   const Value *UnderlyingPointer = getUnderlyingObject(LoadedLoc.Ptr);
887 
888   // A constant can't be in the dead pointer set.
889   if (isa<Constant>(UnderlyingPointer))
890     return;
891 
892   // If the kill pointer can be easily reduced to an alloca, don't bother doing
893   // extraneous AA queries.
894   if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
895     DeadStackObjects.remove(UnderlyingPointer);
896     return;
897   }
898 
899   // Remove objects that could alias LoadedLoc.
900   DeadStackObjects.remove_if([&](const Value *I) {
901     // See if the loaded location could alias the stack location.
902     MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI, F));
903     return !AA->isNoAlias(StackLoc, LoadedLoc);
904   });
905 }
906 
907 /// Remove dead stores to stack-allocated locations in the function end block.
908 /// Ex:
909 /// %A = alloca i32
910 /// ...
911 /// store i32 1, i32* %A
912 /// ret void
913 static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
914                            MemoryDependenceResults *MD,
915                            const TargetLibraryInfo *TLI,
916                            InstOverlapIntervalsTy &IOL,
917                            MapVector<Instruction *, bool> &ThrowableInst) {
918   bool MadeChange = false;
919 
920   // Keep track of all of the stack objects that are dead at the end of the
921   // function.
922   SmallSetVector<const Value*, 16> DeadStackObjects;
923 
924   // Find all of the alloca'd pointers in the entry block.
925   BasicBlock &Entry = BB.getParent()->front();
926   for (Instruction &I : Entry) {
927     if (isa<AllocaInst>(&I))
928       DeadStackObjects.insert(&I);
929 
930     // Okay, so these are dead heap objects, but if the pointer never escapes
931     // then it's leaked by this function anyway.
932     else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
933       DeadStackObjects.insert(&I);
934   }
935 
936   // Treat byval or inalloca arguments the same, stores to them are dead at the
937   // end of the function.
938   for (Argument &AI : BB.getParent()->args())
939     if (AI.hasPassPointeeByValueCopyAttr())
940       DeadStackObjects.insert(&AI);
941 
942   const DataLayout &DL = BB.getModule()->getDataLayout();
943 
944   // Scan the basic block backwards
945   for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
946     --BBI;
947 
948     // If we find a store, check to see if it points into a dead stack value.
949     if (hasAnalyzableMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
950       // See through pointer-to-pointer bitcasts
951       SmallVector<const Value *, 4> Pointers;
952       getUnderlyingObjects(getStoredPointerOperand(&*BBI, *TLI), Pointers);
953 
954       // Stores to stack values are valid candidates for removal.
955       bool AllDead = true;
956       for (const Value *Pointer : Pointers)
957         if (!DeadStackObjects.count(Pointer)) {
958           AllDead = false;
959           break;
960         }
961 
962       if (AllDead) {
963         Instruction *Dead = &*BBI;
964 
965         LLVM_DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
966                           << *Dead << "\n  Objects: ";
967                    for (SmallVectorImpl<const Value *>::iterator I =
968                             Pointers.begin(),
969                         E = Pointers.end();
970                         I != E; ++I) {
971                      dbgs() << **I;
972                      if (std::next(I) != E)
973                        dbgs() << ", ";
974                    } dbgs()
975                    << '\n');
976 
977         // DCE instructions only used to calculate that store.
978         deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, ThrowableInst,
979                               &DeadStackObjects);
980         ++NumFastStores;
981         MadeChange = true;
982         continue;
983       }
984     }
985 
986     // Remove any dead non-memory-mutating instructions.
987     if (isInstructionTriviallyDead(&*BBI, TLI)) {
988       LLVM_DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n  DEAD: "
989                         << *&*BBI << '\n');
990       deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, ThrowableInst,
991                             &DeadStackObjects);
992       ++NumFastOther;
993       MadeChange = true;
994       continue;
995     }
996 
997     if (isa<AllocaInst>(BBI)) {
998       // Remove allocas from the list of dead stack objects; there can't be
999       // any references before the definition.
1000       DeadStackObjects.remove(&*BBI);
1001       continue;
1002     }
1003 
1004     if (auto *Call = dyn_cast<CallBase>(&*BBI)) {
1005       // Remove allocation function calls from the list of dead stack objects;
1006       // there can't be any references before the definition.
1007       if (isAllocLikeFn(&*BBI, TLI))
1008         DeadStackObjects.remove(&*BBI);
1009 
1010       // If this call does not access memory, it can't be loading any of our
1011       // pointers.
1012       if (AA->doesNotAccessMemory(Call))
1013         continue;
1014 
1015       // If the call might load from any of our allocas, then any store above
1016       // the call is live.
1017       DeadStackObjects.remove_if([&](const Value *I) {
1018         // See if the call site touches the value.
1019         return isRefSet(AA->getModRefInfo(
1020             Call, I, getPointerSize(I, DL, *TLI, BB.getParent())));
1021       });
1022 
1023       // If all of the allocas were clobbered by the call then we're not going
1024       // to find anything else to process.
1025       if (DeadStackObjects.empty())
1026         break;
1027 
1028       continue;
1029     }
1030 
1031     // We can remove the dead stores, irrespective of the fence and its ordering
1032     // (release/acquire/seq_cst). Fences only constrain the ordering of
1033     // already visible stores; they do not make a store visible to other
1034     // threads. So, skipping over a fence does not change a store from being
1035     // dead.
1036     if (isa<FenceInst>(*BBI))
1037       continue;
1038 
1039     MemoryLocation LoadedLoc;
1040 
1041     // If we encounter a use of the pointer, it is no longer considered dead
1042     if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
1043       if (!L->isUnordered()) // Be conservative with atomic/volatile load
1044         break;
1045       LoadedLoc = MemoryLocation::get(L);
1046     } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
1047       LoadedLoc = MemoryLocation::get(V);
1048     } else if (!BBI->mayReadFromMemory()) {
1049       // Instruction doesn't read memory.  Note that stores that weren't removed
1050       // above will hit this case.
1051       continue;
1052     } else {
1053       // Unknown inst; assume it clobbers everything.
1054       break;
1055     }
1056 
1057     // Remove any allocas from the DeadPointer set that are loaded, as this
1058     // makes any stores above the access live.
1059     removeAccessedObjects(LoadedLoc, DeadStackObjects, DL, AA, TLI, BB.getParent());
1060 
1061     // If all of the allocas were clobbered by the access then we're not going
1062     // to find anything else to process.
1063     if (DeadStackObjects.empty())
1064       break;
1065   }
1066 
1067   return MadeChange;
1068 }
1069 
1070 static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierStart,
1071                          uint64_t &EarlierSize, int64_t LaterStart,
1072                          uint64_t LaterSize, bool IsOverwriteEnd) {
1073   auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
1074   Align PrefAlign = EarlierIntrinsic->getDestAlign().valueOrOne();
1075 
1076   // We assume that memset/memcpy operates in chunks of the "largest" native
1077   // type size, aligned on that same value. That means the optimal start and
1078   // size of a memset/memcpy should be a multiple of the preferred alignment of
1079   // that type, i.e. there is no sense in trying to reduce the store size any
1080   // further since any "extra" stores come for free anyway.
1081   // On the other hand, maximum alignment we can achieve is limited by alignment
1082   // of initial store.
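  // Illustrative worked example (hypothetical numbers): for a 32-byte memset
  // with PrefAlign = 8 whose last 10 bytes are overwritten by a later store, we
  // only trim 8 of those 10 bytes so that the remaining length (24) stays a
  // multiple of the preferred alignment; the two "extra" bytes come for free.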
1083 
1084   // TODO: Limit maximum alignment by preferred (or abi?) alignment of the
1085   // "largest" native type.
1086   // Note: What is the proper way to get that value?
1087   // Should TargetTransformInfo::getRegisterBitWidth be used or anything else?
1088   // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign);
1089 
1090   int64_t ToRemoveStart = 0;
1091   uint64_t ToRemoveSize = 0;
1092   // Compute start and size of the region to remove. Make sure 'PrefAlign' is
1093   // maintained on the remaining store.
1094   if (IsOverwriteEnd) {
1095     // Calculate the required adjustment for 'LaterStart' in order to keep the
1096     // remaining store size aligned on 'PrefAlign'.
1097     uint64_t Off =
1098         offsetToAlignment(uint64_t(LaterStart - EarlierStart), PrefAlign);
1099     ToRemoveStart = LaterStart + Off;
1100     if (EarlierSize <= uint64_t(ToRemoveStart - EarlierStart))
1101       return false;
1102     ToRemoveSize = EarlierSize - uint64_t(ToRemoveStart - EarlierStart);
1103   } else {
1104     ToRemoveStart = EarlierStart;
1105     assert(LaterSize >= uint64_t(EarlierStart - LaterStart) &&
1106            "Not overlapping accesses?");
1107     ToRemoveSize = LaterSize - uint64_t(EarlierStart - LaterStart);
1108     // Calculate the required adjustment for 'ToRemoveSize' in order to keep
1109     // the start of the remaining store aligned on 'PrefAlign'.
1110     uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign);
1111     if (Off != 0) {
1112       if (ToRemoveSize <= (PrefAlign.value() - Off))
1113         return false;
1114       ToRemoveSize -= PrefAlign.value() - Off;
1115     }
1116     assert(isAligned(PrefAlign, ToRemoveSize) &&
1117            "Should preserve selected alignment");
1118   }
1119 
1120   assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove");
1121   assert(EarlierSize > ToRemoveSize && "Can't remove more than original size");
1122 
1123   uint64_t NewSize = EarlierSize - ToRemoveSize;
1124   if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) {
1125     // When shortening an atomic memory intrinsic, the newly shortened
1126     // length must remain an integer multiple of the element size.
1127     const uint32_t ElementSize = AMI->getElementSizeInBytes();
1128     if (0 != NewSize % ElementSize)
1129       return false;
1130   }
1131 
1132   LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
1133                     << (IsOverwriteEnd ? "END" : "BEGIN") << ": "
1134                     << *EarlierWrite << "\n  KILLER [" << ToRemoveStart << ", "
1135                     << int64_t(ToRemoveStart + ToRemoveSize) << ")\n");
1136 
1137   Value *EarlierWriteLength = EarlierIntrinsic->getLength();
1138   Value *TrimmedLength =
1139       ConstantInt::get(EarlierWriteLength->getType(), NewSize);
1140   EarlierIntrinsic->setLength(TrimmedLength);
1141   EarlierIntrinsic->setDestAlignment(PrefAlign);
1142 
1143   if (!IsOverwriteEnd) {
1144     Value *Indices[1] = {
1145         ConstantInt::get(EarlierWriteLength->getType(), ToRemoveSize)};
1146     GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
1147         EarlierIntrinsic->getRawDest()->getType()->getPointerElementType(),
1148         EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
1149     NewDestGEP->setDebugLoc(EarlierIntrinsic->getDebugLoc());
1150     EarlierIntrinsic->setDest(NewDestGEP);
1151   }
1152 
1153   // Finally update start and size of earlier access.
1154   if (!IsOverwriteEnd)
1155     EarlierStart += ToRemoveSize;
1156   EarlierSize = NewSize;
1157 
1158   return true;
1159 }
1160 
1161 static bool tryToShortenEnd(Instruction *EarlierWrite,
1162                             OverlapIntervalsTy &IntervalMap,
1163                             int64_t &EarlierStart, uint64_t &EarlierSize) {
1164   if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
1165     return false;
1166 
1167   OverlapIntervalsTy::iterator OII = --IntervalMap.end();
1168   int64_t LaterStart = OII->second;
1169   uint64_t LaterSize = OII->first - LaterStart;
1170 
1171   assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
1172 
1173   if (LaterStart > EarlierStart &&
1174       // Note: "LaterStart - EarlierStart" is known to be positive due to
1175       // preceding check.
1176       (uint64_t)(LaterStart - EarlierStart) < EarlierSize &&
1177       // Note: "EarlierSize - (uint64_t)(LaterStart - EarlierStart)" is known to
1178       // be non negative due to preceding checks.
1179       LaterSize >= EarlierSize - (uint64_t)(LaterStart - EarlierStart)) {
1180     if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
1181                      LaterSize, true)) {
1182       IntervalMap.erase(OII);
1183       return true;
1184     }
1185   }
1186   return false;
1187 }
1188 
1189 static bool tryToShortenBegin(Instruction *EarlierWrite,
1190                               OverlapIntervalsTy &IntervalMap,
1191                               int64_t &EarlierStart, uint64_t &EarlierSize) {
1192   if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
1193     return false;
1194 
1195   OverlapIntervalsTy::iterator OII = IntervalMap.begin();
1196   int64_t LaterStart = OII->second;
1197   uint64_t LaterSize = OII->first - LaterStart;
1198 
1199   assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
1200 
1201   if (LaterStart <= EarlierStart &&
1202       // Note: "EarlierStart - LaterStart" is known to be non negative due to
1203       // preceding check.
1204       LaterSize > (uint64_t)(EarlierStart - LaterStart)) {
1205     // Note: "LaterSize - (uint64_t)(EarlierStart - LaterStart)" is known to be
1206     // positive due to preceding checks.
1207     assert(LaterSize - (uint64_t)(EarlierStart - LaterStart) < EarlierSize &&
1208            "Should have been handled as OW_Complete");
1209     if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
1210                      LaterSize, false)) {
1211       IntervalMap.erase(OII);
1212       return true;
1213     }
1214   }
1215   return false;
1216 }
1217 
1218 static bool removePartiallyOverlappedStores(const DataLayout &DL,
1219                                             InstOverlapIntervalsTy &IOL,
1220                                             const TargetLibraryInfo &TLI) {
1221   bool Changed = false;
1222   for (auto OI : IOL) {
1223     Instruction *EarlierWrite = OI.first;
1224     MemoryLocation Loc = getLocForWrite(EarlierWrite, TLI);
1225     assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
1226 
1227     const Value *Ptr = Loc.Ptr->stripPointerCasts();
1228     int64_t EarlierStart = 0;
1229     uint64_t EarlierSize = Loc.Size.getValue();
1230     GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
1231     OverlapIntervalsTy &IntervalMap = OI.second;
1232     Changed |=
1233         tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
1234     if (IntervalMap.empty())
1235       continue;
1236     Changed |=
1237         tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
1238   }
1239   return Changed;
1240 }
1241 
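/// Remove stores that write a value which is already present in memory: either
/// a store of a value just loaded from the same pointer, or a store of zero
/// into memory known to come from calloc, provided the location is not
/// modified in between (a summary of the two cases handled below).
/// Illustrative example (hypothetical IR):
///   %v = load i32, i32* %p
///   store i32 %v, i32* %p    ; removable if %p is not written in between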
1242 static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
1243                                AliasAnalysis *AA, MemoryDependenceResults *MD,
1244                                const DataLayout &DL,
1245                                const TargetLibraryInfo *TLI,
1246                                InstOverlapIntervalsTy &IOL,
1247                                MapVector<Instruction *, bool> &ThrowableInst,
1248                                DominatorTree *DT) {
1249   // Must be a store instruction.
1250   StoreInst *SI = dyn_cast<StoreInst>(Inst);
1251   if (!SI)
1252     return false;
1253 
1254   // If we're storing the same value back to a pointer that we just loaded from,
1255   // then the store can be removed.
1256   if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
1257     if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
1258         isRemovable(SI) &&
1259         memoryIsNotModifiedBetween(DepLoad, SI, *AA, DL, DT)) {
1260 
1261       LLVM_DEBUG(
1262           dbgs() << "DSE: Remove Store Of Load from same pointer:\n  LOAD: "
1263                  << *DepLoad << "\n  STORE: " << *SI << '\n');
1264 
1265       deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, ThrowableInst);
1266       ++NumRedundantStores;
1267       return true;
1268     }
1269   }
1270 
1271   // Remove null stores into the calloc'ed objects
1272   Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
1273   if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
1274     Instruction *UnderlyingPointer =
1275         dyn_cast<Instruction>(getUnderlyingObject(SI->getPointerOperand()));
1276 
1277     if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
1278         memoryIsNotModifiedBetween(UnderlyingPointer, SI, *AA, DL, DT)) {
1279       LLVM_DEBUG(
1280           dbgs() << "DSE: Remove null store to the calloc'ed object:\n  DEAD: "
1281                  << *Inst << "\n  OBJECT: " << *UnderlyingPointer << '\n');
1282 
1283       deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, ThrowableInst);
1284       ++NumRedundantStores;
1285       return true;
1286     }
1287   }
1288   return false;
1289 }
1290 
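/// Illustrative worked example (little-endian, hypothetical values, and
/// assuming memory is not modified in between): merging
///   store i32 0, i32* %p    ; Earlier, DepWriteOffset = 0
///   store i8 -1, i8* %q     ; Later, InstWriteOffset = 1
/// gives BitOffsetDiff = 8, so the later value 0xFF is shifted left by 8 and
/// OR'd into the masked earlier value, producing the merged constant
/// 0x0000FF00 for the earlier i32 store.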
1291 template <typename AATy>
1292 static Constant *tryToMergePartialOverlappingStores(
1293     StoreInst *Earlier, StoreInst *Later, int64_t InstWriteOffset,
1294     int64_t DepWriteOffset, const DataLayout &DL, AATy &AA, DominatorTree *DT) {
1295 
1296   if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
1297       DL.typeSizeEqualsStoreSize(Earlier->getValueOperand()->getType()) &&
1298       Later && isa<ConstantInt>(Later->getValueOperand()) &&
1299       DL.typeSizeEqualsStoreSize(Later->getValueOperand()->getType()) &&
1300       memoryIsNotModifiedBetween(Earlier, Later, AA, DL, DT)) {
1301     // If the store we find is:
1302     //   a) partially overwritten by the store to 'Loc'
1303     //   b) the later store is fully contained in the earlier one and
1304     //   c) they both have a constant value
1305     //   d) none of the two stores need padding
1306     // Merge the two stores, replacing the earlier store's value with a
1307     // merge of both values.
1308     // TODO: Deal with other constant types (vectors, etc), and probably
1309     // some mem intrinsics (if needed)
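    //
    // As a rough illustration (hypothetical values, little-endian layout):
    // an earlier store of i32 0x11223344 at offset 0 partially overwritten by
    // a later store of i8 0xAA at offset 1 gives BitOffsetDiff = 8, so the i8
    // value is zero-extended, shifted left by 8 and merged into 0x1122AA44.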
1310 
1311     APInt EarlierValue =
1312         cast<ConstantInt>(Earlier->getValueOperand())->getValue();
1313     APInt LaterValue = cast<ConstantInt>(Later->getValueOperand())->getValue();
1314     unsigned LaterBits = LaterValue.getBitWidth();
1315     assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
1316     LaterValue = LaterValue.zext(EarlierValue.getBitWidth());
1317 
1318     // Offset of the smaller store inside the larger store
1319     unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
1320     unsigned LShiftAmount = DL.isBigEndian() ? EarlierValue.getBitWidth() -
1321                                                    BitOffsetDiff - LaterBits
1322                                              : BitOffsetDiff;
1323     APInt Mask = APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
1324                                    LShiftAmount + LaterBits);
1325     // Clear the bits we'll be replacing, then OR with the smaller
1326     // store, shifted appropriately.
1327     APInt Merged = (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
1328     LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n  Earlier: " << *Earlier
1329                       << "\n  Later: " << *Later
1330                       << "\n  Merged Value: " << Merged << '\n');
1331     return ConstantInt::get(Earlier->getValueOperand()->getType(), Merged);
1332   }
1333   return nullptr;
1334 }
1335 
1336 static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
1337                                 MemoryDependenceResults *MD, DominatorTree *DT,
1338                                 const TargetLibraryInfo *TLI) {
1339   const DataLayout &DL = BB.getModule()->getDataLayout();
1340   bool MadeChange = false;
1341 
1342   MapVector<Instruction *, bool> ThrowableInst;
1343 
1344   // A map of interval maps representing partially-overwritten value parts.
1345   InstOverlapIntervalsTy IOL;
1346 
1347   // Do a top-down walk on the BB.
1348   for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
1349     // Handle 'free' calls specially.
1350     if (CallInst *F = isFreeCall(&*BBI, TLI)) {
1351       MadeChange |= handleFree(F, AA, MD, DT, TLI, IOL, ThrowableInst);
1352       // Increment BBI after handleFree has potentially deleted instructions.
1353       // This ensures we maintain a valid iterator.
1354       ++BBI;
1355       continue;
1356     }
1357 
1358     Instruction *Inst = &*BBI++;
1359 
1360     if (Inst->mayThrow()) {
1361       ThrowableInst[Inst] = true;
1362       continue;
1363     }
1364 
1365     // Check to see if Inst writes to memory.  If not, continue.
1366     if (!hasAnalyzableMemoryWrite(Inst, *TLI))
1367       continue;
1368 
    // eliminateNoopStore will update the iterator, if necessary.
1370     if (eliminateNoopStore(Inst, BBI, AA, MD, DL, TLI, IOL,
1371                            ThrowableInst, DT)) {
1372       MadeChange = true;
1373       continue;
1374     }
1375 
1376     // If we find something that writes memory, get its memory dependence.
1377     MemDepResult InstDep = MD->getDependency(Inst);
1378 
1379     // Ignore any store where we can't find a local dependence.
1380     // FIXME: cross-block DSE would be fun. :)
1381     if (!InstDep.isDef() && !InstDep.isClobber())
1382       continue;
1383 
1384     // Figure out what location is being stored to.
1385     MemoryLocation Loc = getLocForWrite(Inst, *TLI);
1386 
1387     // If we didn't get a useful location, fail.
1388     if (!Loc.Ptr)
1389       continue;
1390 
1391     // Loop until we find a store we can eliminate or a load that
1392     // invalidates the analysis. Without an upper bound on the number of
1393     // instructions examined, this analysis can become very time-consuming.
1394     // However, the potential gain diminishes as we process more instructions
1395     // without eliminating any of them. Therefore, we limit the number of
1396     // instructions we look at.
1397     auto Limit = MD->getDefaultBlockScanLimit();
1398     while (InstDep.isDef() || InstDep.isClobber()) {
1399       // Get the memory clobbered by the instruction we depend on.  MemDep will
1400       // skip any instructions that 'Loc' clearly doesn't interact with.  If we
1401       // end up depending on a may- or must-aliased load, then we can't optimize
1402       // away the store and we bail out.  However, if we depend on something
1403       // that overwrites the memory location we *can* potentially optimize it.
1404       //
1405       // Find out what memory location the dependent instruction stores.
1406       Instruction *DepWrite = InstDep.getInst();
1407       if (!hasAnalyzableMemoryWrite(DepWrite, *TLI))
1408         break;
1409       MemoryLocation DepLoc = getLocForWrite(DepWrite, *TLI);
      // If we didn't get a useful location, bail out.
1411       if (!DepLoc.Ptr)
1412         break;
1413 
      // Find the last throwing instruction not removed by a call to
      // deleteDeadInstruction.
1416       Instruction *LastThrowing = nullptr;
1417       if (!ThrowableInst.empty())
1418         LastThrowing = ThrowableInst.back().first;
1419 
1420       // Make sure we don't look past a call which might throw. This is an
1421       // issue because MemoryDependenceAnalysis works in the wrong direction:
      // it finds instructions which dominate the current instruction, rather
      // than instructions which are post-dominated by the current instruction.
1424       //
1425       // If the underlying object is a non-escaping memory allocation, any store
1426       // to it is dead along the unwind edge. Otherwise, we need to preserve
1427       // the store.
1428       if (LastThrowing && DepWrite->comesBefore(LastThrowing)) {
1429         const Value *Underlying = getUnderlyingObject(DepLoc.Ptr);
1430         bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
1431         if (!IsStoreDeadOnUnwind) {
            // We're looking for a call to an allocation function
            // where the allocation doesn't escape before the last
            // throwing instruction; PointerMayBeCaptured is a
            // reasonably fast approximation.
1436             IsStoreDeadOnUnwind = isAllocLikeFn(Underlying, TLI) &&
1437                 !PointerMayBeCaptured(Underlying, false, true);
1438         }
1439         if (!IsStoreDeadOnUnwind)
1440           break;
1441       }
1442 
1443       // If we find a write that is a) removable (i.e., non-volatile), b) is
1444       // completely obliterated by the store to 'Loc', and c) which we know that
1445       // 'Inst' doesn't load from, then we can remove it.
1446       // Also try to merge two stores if a later one only touches memory written
1447       // to by the earlier one.
1448       if (isRemovable(DepWrite) &&
1449           !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
1450         int64_t InstWriteOffset, DepWriteOffset;
1451         OverwriteResult OR = isOverwrite(Inst, DepWrite, Loc, DepLoc, DL, *TLI,
1452                                          DepWriteOffset, InstWriteOffset, *AA,
1453                                          BB.getParent());
1454         if (OR == OW_MaybePartial)
1455           OR = isPartialOverwrite(Loc, DepLoc, DepWriteOffset, InstWriteOffset,
1456                                   DepWrite, IOL);
1457 
1458         if (OR == OW_Complete) {
1459           LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *DepWrite
1460                             << "\n  KILLER: " << *Inst << '\n');
1461 
1462           // Delete the store and now-dead instructions that feed it.
1463           deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL,
1464                                 ThrowableInst);
1465           ++NumFastStores;
1466           MadeChange = true;
1467 
1468           // We erased DepWrite; start over.
1469           InstDep = MD->getDependency(Inst);
1470           continue;
1471         } else if ((OR == OW_End && isShortenableAtTheEnd(DepWrite)) ||
1472                    ((OR == OW_Begin &&
1473                      isShortenableAtTheBeginning(DepWrite)))) {
          assert(!EnablePartialOverwriteTracking &&
                 "Do not expect to shorten stores when partial-overwrite "
                 "tracking is enabled");
1477           // The overwrite result is known, so these must be known, too.
1478           uint64_t EarlierSize = DepLoc.Size.getValue();
1479           uint64_t LaterSize = Loc.Size.getValue();
1480           bool IsOverwriteEnd = (OR == OW_End);
1481           MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
1482                                     InstWriteOffset, LaterSize, IsOverwriteEnd);
1483         } else if (EnablePartialStoreMerging &&
1484                    OR == OW_PartialEarlierWithFullLater) {
1485           auto *Earlier = dyn_cast<StoreInst>(DepWrite);
1486           auto *Later = dyn_cast<StoreInst>(Inst);
1487           if (Constant *C = tryToMergePartialOverlappingStores(
1488                   Earlier, Later, InstWriteOffset, DepWriteOffset, DL, *AA,
1489                   DT)) {
1490             auto *SI = new StoreInst(
1491                 C, Earlier->getPointerOperand(), false, Earlier->getAlign(),
1492                 Earlier->getOrdering(), Earlier->getSyncScopeID(), DepWrite);
1493 
1494             unsigned MDToKeep[] = {LLVMContext::MD_dbg, LLVMContext::MD_tbaa,
1495                                    LLVMContext::MD_alias_scope,
1496                                    LLVMContext::MD_noalias,
1497                                    LLVMContext::MD_nontemporal};
1498             SI->copyMetadata(*DepWrite, MDToKeep);
1499             ++NumModifiedStores;
1500 
1501             // Delete the old stores and now-dead instructions that feed them.
1502             deleteDeadInstruction(Inst, &BBI, *MD, *TLI, IOL,
1503                                   ThrowableInst);
1504             deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL,
1505                                   ThrowableInst);
1506             MadeChange = true;
1507 
1508             // We erased DepWrite and Inst (Loc); start over.
1509             break;
1510           }
1511         }
1512       }
1513 
1514       // If this is a may-aliased store that is clobbering the store value, we
1515       // can keep searching past it for another must-aliased pointer that stores
1516       // to the same location.  For example, in:
1517       //   store -> P
1518       //   store -> Q
1519       //   store -> P
1520       // we can remove the first store to P even though we don't know if P and Q
1521       // alias.
1522       if (DepWrite == &BB.front()) break;
1523 
1524       // Can't look past this instruction if it might read 'Loc'.
1525       if (isRefSet(AA->getModRefInfo(DepWrite, Loc)))
1526         break;
1527 
1528       InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,
1529                                              DepWrite->getIterator(), &BB,
1530                                              /*QueryInst=*/ nullptr, &Limit);
1531     }
1532   }
1533 
1534   if (EnablePartialOverwriteTracking)
1535     MadeChange |= removePartiallyOverlappedStores(DL, IOL, *TLI);
1536 
1537   // If this block ends in a return, unwind, or unreachable, all allocas are
1538   // dead at its end, which means stores to them are also dead.
1539   if (BB.getTerminator()->getNumSuccessors() == 0)
1540     MadeChange |= handleEndBlock(BB, AA, MD, TLI, IOL, ThrowableInst);
1541 
1542   return MadeChange;
1543 }
1544 
1545 static bool eliminateDeadStores(Function &F, AliasAnalysis *AA,
1546                                 MemoryDependenceResults *MD, DominatorTree *DT,
1547                                 const TargetLibraryInfo *TLI) {
1548   bool MadeChange = false;
1549   for (BasicBlock &BB : F)
1550     // Only check non-dead blocks.  Dead blocks may have strange pointer
1551     // cycles that will confuse alias analysis.
1552     if (DT->isReachableFromEntry(&BB))
1553       MadeChange |= eliminateDeadStores(BB, AA, MD, DT, TLI);
1554 
1555   return MadeChange;
1556 }
1557 
1558 namespace {
1559 //=============================================================================
1560 // MemorySSA backed dead store elimination.
1561 //
1562 // The code below implements dead store elimination using MemorySSA. It uses
1563 // the following general approach: given a MemoryDef, walk upwards to find
1564 // clobbering MemoryDefs that may be killed by the starting def. Then check
1565 // that there are no uses that may read the location of the original MemoryDef
1566 // in between both MemoryDefs. A bit more concretely:
1567 //
1568 // For all MemoryDefs StartDef:
1569 // 1. Get the next dominating clobbering MemoryDef (EarlierAccess) by walking
1570 //    upwards.
1571 // 2. Check that there are no reads between EarlierAccess and the StartDef by
1572 //    checking all uses starting at EarlierAccess and walking until we see
1573 //    StartDef.
1574 // 3. For each found CurrentDef, check that:
1575 //   1. There are no barrier instructions between CurrentDef and StartDef (like
1576 //       throws or stores with ordering constraints).
1577 //   2. StartDef is executed whenever CurrentDef is executed.
1578 //   3. StartDef completely overwrites CurrentDef.
1579 // 4. Erase CurrentDef from the function and MemorySSA.
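//
// As a minimal illustration (hypothetical IR), given
//   store i32 0, i32* %p   ; 1 = MemoryDef(liveOnEntry)  <- EarlierAccess
//   store i32 1, i32* %p   ; 2 = MemoryDef(1)            <- StartDef
// with no read of %p between the two defs, the walk starting at the second
// store finds the first one as a killable candidate and erases it.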
1580 
// Returns true if \p I is an intrinsic that does not read or write memory.
1582 bool isNoopIntrinsic(Instruction *I) {
1583   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1584     switch (II->getIntrinsicID()) {
1585     case Intrinsic::lifetime_start:
1586     case Intrinsic::lifetime_end:
1587     case Intrinsic::invariant_end:
1588     case Intrinsic::launder_invariant_group:
1589     case Intrinsic::assume:
1590       return true;
1591     case Intrinsic::dbg_addr:
1592     case Intrinsic::dbg_declare:
1593     case Intrinsic::dbg_label:
1594     case Intrinsic::dbg_value:
1595       llvm_unreachable("Intrinsic should not be modeled in MemorySSA");
1596     default:
1597       return false;
1598     }
1599   }
1600   return false;
1601 }
1602 
1603 // Check if we can ignore \p D for DSE.
1604 bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) {
1605   Instruction *DI = D->getMemoryInst();
1606   // Calls that only access inaccessible memory cannot read or write any memory
1607   // locations we consider for elimination.
1608   if (auto *CB = dyn_cast<CallBase>(DI))
1609     if (CB->onlyAccessesInaccessibleMemory())
1610       return true;
1611 
1612   // We can eliminate stores to locations not visible to the caller across
1613   // throwing instructions.
1614   if (DI->mayThrow() && !DefVisibleToCaller)
1615     return true;
1616 
  // We can remove dead stores irrespective of the fence and its ordering
  // (release/acquire/seq_cst). Fences only constrain the ordering of
  // already visible stores; they do not make a store visible to other
  // threads. So skipping over a fence does not change a store from being
  // dead.
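  //
  // For instance (hypothetical IR), in
  //   store i32 0, i32* %p
  //   fence release
  //   store i32 1, i32* %p
  // the first store is still dead; the fence does not make it visible on its
  // own.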
1622   if (isa<FenceInst>(DI))
1623     return true;
1624 
1625   // Skip intrinsics that do not really read or modify memory.
1626   if (isNoopIntrinsic(D->getMemoryInst()))
1627     return true;
1628 
1629   return false;
1630 }
1631 
1632 struct DSEState {
1633   Function &F;
1634   AliasAnalysis &AA;
1635 
1636   /// The single BatchAA instance that is used to cache AA queries. It will
1637   /// not be invalidated over the whole run. This is safe, because:
1638   /// 1. Only memory writes are removed, so the alias cache for memory
1639   ///    locations remains valid.
1640   /// 2. No new instructions are added (only instructions removed), so cached
1641   ///    information for a deleted value cannot be accessed by a re-used new
1642   ///    value pointer.
1643   BatchAAResults BatchAA;
1644 
1645   MemorySSA &MSSA;
1646   DominatorTree &DT;
1647   PostDominatorTree &PDT;
1648   const TargetLibraryInfo &TLI;
1649   const DataLayout &DL;
1650 
1651   // All MemoryDefs that potentially could kill other MemDefs.
1652   SmallVector<MemoryDef *, 64> MemDefs;
  // Memory accesses that should be skipped because they have already been
  // deleted.
1654   SmallPtrSet<MemoryAccess *, 4> SkipStores;
1655   // Keep track of all of the objects that are invisible to the caller before
1656   // the function returns.
1658   DenseMap<const Value *, bool> InvisibleToCallerBeforeRet;
1659   // Keep track of all of the objects that are invisible to the caller after
1660   // the function returns.
1661   DenseMap<const Value *, bool> InvisibleToCallerAfterRet;
1662   // Keep track of blocks with throwing instructions not modeled in MemorySSA.
1663   SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
1664   // Post-order numbers for each basic block. Used to figure out if memory
1665   // accesses are executed before another access.
1666   DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
1667 
1668   /// Keep track of instructions (partly) overlapping with killing MemoryDefs per
1669   /// basic block.
1670   DenseMap<BasicBlock *, InstOverlapIntervalsTy> IOLs;
1671 
1672   DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
1673            PostDominatorTree &PDT, const TargetLibraryInfo &TLI)
1674       : F(F), AA(AA), BatchAA(AA), MSSA(MSSA), DT(DT), PDT(PDT), TLI(TLI),
1675         DL(F.getParent()->getDataLayout()) {}
1676 
1677   static DSEState get(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
1678                       DominatorTree &DT, PostDominatorTree &PDT,
1679                       const TargetLibraryInfo &TLI) {
1680     DSEState State(F, AA, MSSA, DT, PDT, TLI);
1681     // Collect blocks with throwing instructions not modeled in MemorySSA and
1682     // alloc-like objects.
1683     unsigned PO = 0;
1684     for (BasicBlock *BB : post_order(&F)) {
1685       State.PostOrderNumbers[BB] = PO++;
1686       for (Instruction &I : *BB) {
1687         MemoryAccess *MA = MSSA.getMemoryAccess(&I);
1688         if (I.mayThrow() && !MA)
1689           State.ThrowingBlocks.insert(I.getParent());
1690 
1691         auto *MD = dyn_cast_or_null<MemoryDef>(MA);
1692         if (MD && State.MemDefs.size() < MemorySSADefsPerBlockLimit &&
1693             (State.getLocForWriteEx(&I) || State.isMemTerminatorInst(&I)))
1694           State.MemDefs.push_back(MD);
1695       }
1696     }
1697 
    // Treat byval or inalloca arguments the same as allocas: stores to them
    // are dead at the end of the function.
1700     for (Argument &AI : F.args())
1701       if (AI.hasPassPointeeByValueCopyAttr()) {
1702         // For byval, the caller doesn't know the address of the allocation.
1703         if (AI.hasByValAttr())
1704           State.InvisibleToCallerBeforeRet.insert({&AI, true});
1705         State.InvisibleToCallerAfterRet.insert({&AI, true});
1706       }
1707 
1708     return State;
1709   }
1710 
1711   bool isInvisibleToCallerAfterRet(const Value *V) {
1712     if (isa<AllocaInst>(V))
1713       return true;
1714     auto I = InvisibleToCallerAfterRet.insert({V, false});
1715     if (I.second) {
1716       if (!isInvisibleToCallerBeforeRet(V)) {
1717         I.first->second = false;
1718       } else {
1719         auto *Inst = dyn_cast<Instruction>(V);
1720         if (Inst && isAllocLikeFn(Inst, &TLI))
1721           I.first->second = !PointerMayBeCaptured(V, true, false);
1722       }
1723     }
1724     return I.first->second;
1725   }
1726 
1727   bool isInvisibleToCallerBeforeRet(const Value *V) {
1728     if (isa<AllocaInst>(V))
1729       return true;
1730     auto I = InvisibleToCallerBeforeRet.insert({V, false});
1731     if (I.second) {
1732       auto *Inst = dyn_cast<Instruction>(V);
1733       if (Inst && isAllocLikeFn(Inst, &TLI))
1734         // NOTE: This could be made more precise by PointerMayBeCapturedBefore
1735         // with the killing MemoryDef. But we refrain from doing so for now to
1736         // limit compile-time and this does not cause any changes to the number
1737         // of stores removed on a large test set in practice.
1738         I.first->second = !PointerMayBeCaptured(V, false, true);
1739     }
1740     return I.first->second;
1741   }
1742 
1743   Optional<MemoryLocation> getLocForWriteEx(Instruction *I) const {
1744     if (!I->mayWriteToMemory())
1745       return None;
1746 
1747     if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I))
1748       return {MemoryLocation::getForDest(MTI)};
1749 
1750     if (auto *CB = dyn_cast<CallBase>(I)) {
1751       // If the functions may write to memory we do not know about, bail out.
1752       if (!CB->onlyAccessesArgMemory() &&
1753           !CB->onlyAccessesInaccessibleMemOrArgMem())
1754         return None;
1755 
1756       LibFunc LF;
1757       if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
1758         switch (LF) {
1759         case LibFunc_strcpy:
1760         case LibFunc_strncpy:
1761         case LibFunc_strcat:
1762         case LibFunc_strncat:
1763           return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1764         default:
1765           break;
1766         }
1767       }
1768       switch (CB->getIntrinsicID()) {
1769       case Intrinsic::init_trampoline:
1770         return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1771       case Intrinsic::masked_store:
1772         return {MemoryLocation::getForArgument(CB, 1, TLI)};
1773       default:
1774         break;
1775       }
1776       return None;
1777     }
1778 
1779     return MemoryLocation::getOrNone(I);
1780   }
1781 
1782   /// Returns true if \p UseInst completely overwrites \p DefLoc
1783   /// (stored by \p DefInst).
1784   bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst,
1785                            Instruction *UseInst) {
1786     // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
1787     // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
1788     // MemoryDef.
1789     if (!UseInst->mayWriteToMemory())
1790       return false;
1791 
1792     if (auto *CB = dyn_cast<CallBase>(UseInst))
1793       if (CB->onlyAccessesInaccessibleMemory())
1794         return false;
1795 
1796     int64_t InstWriteOffset, DepWriteOffset;
1797     if (auto CC = getLocForWriteEx(UseInst))
1798       return isOverwrite(UseInst, DefInst, *CC, DefLoc, DL, TLI, DepWriteOffset,
1799                          InstWriteOffset, BatchAA, &F) == OW_Complete;
1800     return false;
1801   }
1802 
1803   /// Returns true if \p Def is not read before returning from the function.
1804   bool isWriteAtEndOfFunction(MemoryDef *Def) {
1805     LLVM_DEBUG(dbgs() << "  Check if def " << *Def << " ("
1806                       << *Def->getMemoryInst()
1807                       << ") is at the end the function \n");
1808 
1809     auto MaybeLoc = getLocForWriteEx(Def->getMemoryInst());
1810     if (!MaybeLoc) {
1811       LLVM_DEBUG(dbgs() << "  ... could not get location for write.\n");
1812       return false;
1813     }
1814 
1815     SmallVector<MemoryAccess *, 4> WorkList;
1816     SmallPtrSet<MemoryAccess *, 8> Visited;
1817     auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) {
1818       if (!Visited.insert(Acc).second)
1819         return;
1820       for (Use &U : Acc->uses())
1821         WorkList.push_back(cast<MemoryAccess>(U.getUser()));
1822     };
1823     PushMemUses(Def);
1824     for (unsigned I = 0; I < WorkList.size(); I++) {
1825       if (WorkList.size() >= MemorySSAScanLimit) {
1826         LLVM_DEBUG(dbgs() << "  ... hit exploration limit.\n");
1827         return false;
1828       }
1829 
1830       MemoryAccess *UseAccess = WorkList[I];
1831       // Simply adding the users of MemoryPhi to the worklist is not enough,
1832       // because we might miss read clobbers in different iterations of a loop,
1833       // for example.
1834       // TODO: Add support for phi translation to handle the loop case.
1835       if (isa<MemoryPhi>(UseAccess))
1836         return false;
1837 
1838       // TODO: Checking for aliasing is expensive. Consider reducing the amount
1839       // of times this is called and/or caching it.
1840       Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1841       if (isReadClobber(*MaybeLoc, UseInst)) {
1842         LLVM_DEBUG(dbgs() << "  ... hit read clobber " << *UseInst << ".\n");
1843         return false;
1844       }
1845 
1846       if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
1847         PushMemUses(UseDef);
1848     }
1849     return true;
1850   }
1851 
  /// If \p I is a memory terminator like llvm.lifetime.end or free, return a
1853   /// pair with the MemoryLocation terminated by \p I and a boolean flag
1854   /// indicating whether \p I is a free-like call.
1855   Optional<std::pair<MemoryLocation, bool>>
1856   getLocForTerminator(Instruction *I) const {
1857     uint64_t Len;
1858     Value *Ptr;
1859     if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len),
1860                                                       m_Value(Ptr))))
1861       return {std::make_pair(MemoryLocation(Ptr, Len), false)};
1862 
1863     if (auto *CB = dyn_cast<CallBase>(I)) {
1864       if (isFreeCall(I, &TLI))
1865         return {std::make_pair(MemoryLocation::getAfter(CB->getArgOperand(0)),
1866                                true)};
1867     }
1868 
1869     return None;
1870   }
1871 
1872   /// Returns true if \p I is a memory terminator instruction like
1873   /// llvm.lifetime.end or free.
1874   bool isMemTerminatorInst(Instruction *I) const {
1875     IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1876     return (II && II->getIntrinsicID() == Intrinsic::lifetime_end) ||
1877            isFreeCall(I, &TLI);
1878   }
1879 
1880   /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from
1881   /// instruction \p AccessI.
1882   bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI,
1883                        Instruction *MaybeTerm) {
1884     Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
1885         getLocForTerminator(MaybeTerm);
1886 
1887     if (!MaybeTermLoc)
1888       return false;
1889 
1890     // If the terminator is a free-like call, all accesses to the underlying
1891     // object can be considered terminated.
1892     if (getUnderlyingObject(Loc.Ptr) !=
1893         getUnderlyingObject(MaybeTermLoc->first.Ptr))
1894       return false;
1895 
1896     auto TermLoc = MaybeTermLoc->first;
1897     if (MaybeTermLoc->second) {
1898       const Value *LocUO = getUnderlyingObject(Loc.Ptr);
1899       return BatchAA.isMustAlias(TermLoc.Ptr, LocUO);
1900     }
1901     int64_t InstWriteOffset, DepWriteOffset;
1902     return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, DL, TLI,
1903                        DepWriteOffset, InstWriteOffset, BatchAA,
1904                        &F) == OW_Complete;
1905   }
1906 
1907   // Returns true if \p Use may read from \p DefLoc.
1908   bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) {
1909     if (isNoopIntrinsic(UseInst))
1910       return false;
1911 
    // Monotonic or weaker atomic stores can be re-ordered and do not need to
    // be treated as read clobbers.
1914     if (auto SI = dyn_cast<StoreInst>(UseInst))
1915       return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic);
1916 
1917     if (!UseInst->mayReadFromMemory())
1918       return false;
1919 
1920     if (auto *CB = dyn_cast<CallBase>(UseInst))
1921       if (CB->onlyAccessesInaccessibleMemory())
1922         return false;
1923 
1924     // NOTE: For calls, the number of stores removed could be slightly improved
1925     // by using AA.callCapturesBefore(UseInst, DefLoc, &DT), but that showed to
1926     // be expensive compared to the benefits in practice. For now, avoid more
1927     // expensive analysis to limit compile-time.
1928     return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc));
1929   }
1930 
1931   /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
1932   /// loop. In particular, this guarantees that it only references a single
1933   /// MemoryLocation during execution of the containing function.
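  ///
  /// For example (hypothetical IR), a pointer such as
  ///   %p = getelementptr i32, i32* %base, i64 %iv
  /// computed inside a loop may refer to a different location on each
  /// iteration, whereas an alloca, a pointer defined in the entry block, or a
  /// GEP with all-constant indices on such a base always names a single
  /// location.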
1934   bool IsGuaranteedLoopInvariant(Value *Ptr) {
1935     auto IsGuaranteedLoopInvariantBase = [this](Value *Ptr) {
1936       Ptr = Ptr->stripPointerCasts();
1937       if (auto *I = dyn_cast<Instruction>(Ptr)) {
1938         if (isa<AllocaInst>(Ptr))
1939           return true;
1940 
1941         if (isAllocLikeFn(I, &TLI))
1942           return true;
1943 
1944         return false;
1945       }
1946       return true;
1947     };
1948 
1949     Ptr = Ptr->stripPointerCasts();
1950     if (auto *I = dyn_cast<Instruction>(Ptr)) {
1951       if (I->getParent() == &I->getFunction()->getEntryBlock()) {
1952         return true;
1953       }
1954     }
1955     if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
1956       return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
1957              GEP->hasAllConstantIndices();
1958     }
1959     return IsGuaranteedLoopInvariantBase(Ptr);
1960   }
1961 
1962   // Find a MemoryDef writing to \p DefLoc and dominating \p StartAccess, with
1963   // no read access between them or on any other path to a function exit block
1964   // if \p DefLoc is not accessible after the function returns. If there is no
1965   // such MemoryDef, return None. The returned value may not (completely)
1966   // overwrite \p DefLoc. Currently we bail out when we encounter an aliasing
1967   // MemoryUse (read).
1968   Optional<MemoryAccess *>
1969   getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess,
1970                   const MemoryLocation &DefLoc, const Value *DefUO,
1971                   unsigned &ScanLimit, unsigned &WalkerStepLimit,
1972                   bool IsMemTerm, unsigned &PartialLimit) {
1973     if (ScanLimit == 0 || WalkerStepLimit == 0) {
1974       LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
1975       return None;
1976     }
1977 
1978     MemoryAccess *Current = StartAccess;
1979     Instruction *KillingI = KillingDef->getMemoryInst();
1980     bool StepAgain;
1981     LLVM_DEBUG(dbgs() << "  trying to get dominating access\n");
1982 
1983     // Find the next clobbering Mod access for DefLoc, starting at StartAccess.
1984     Optional<MemoryLocation> CurrentLoc;
1985     do {
1986       StepAgain = false;
1987       LLVM_DEBUG({
1988         dbgs() << "   visiting " << *Current;
1989         if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current))
1990           dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst()
1991                  << ")";
1992         dbgs() << "\n";
1993       });
1994 
1995       // Reached TOP.
1996       if (MSSA.isLiveOnEntryDef(Current)) {
1997         LLVM_DEBUG(dbgs() << "   ...  found LiveOnEntryDef\n");
1998         return None;
1999       }
2000 
2001       // Cost of a step. Accesses in the same block are more likely to be valid
2002       // candidates for elimination, hence consider them cheaper.
2003       unsigned StepCost = KillingDef->getBlock() == Current->getBlock()
2004                               ? MemorySSASameBBStepCost
2005                               : MemorySSAOtherBBStepCost;
2006       if (WalkerStepLimit <= StepCost) {
2007         LLVM_DEBUG(dbgs() << "   ...  hit walker step limit\n");
2008         return None;
2009       }
2010       WalkerStepLimit -= StepCost;
2011 
2012       // Return for MemoryPhis. They cannot be eliminated directly and the
2013       // caller is responsible for traversing them.
2014       if (isa<MemoryPhi>(Current)) {
2015         LLVM_DEBUG(dbgs() << "   ...  found MemoryPhi\n");
2016         return Current;
2017       }
2018 
2019       // Below, check if CurrentDef is a valid candidate to be eliminated by
2020       // KillingDef. If it is not, check the next candidate.
2021       MemoryDef *CurrentDef = cast<MemoryDef>(Current);
2022       Instruction *CurrentI = CurrentDef->getMemoryInst();
2023 
2024       if (canSkipDef(CurrentDef, !isInvisibleToCallerBeforeRet(DefUO))) {
2025         StepAgain = true;
2026         Current = CurrentDef->getDefiningAccess();
2027         continue;
2028       }
2029 
2030       // Before we try to remove anything, check for any extra throwing
2031       // instructions that block us from DSEing
2032       if (mayThrowBetween(KillingI, CurrentI, DefUO)) {
2033         LLVM_DEBUG(dbgs() << "  ... skip, may throw!\n");
2034         return None;
2035       }
2036 
2037       // Check for anything that looks like it will be a barrier to further
2038       // removal
2039       if (isDSEBarrier(DefUO, CurrentI)) {
2040         LLVM_DEBUG(dbgs() << "  ... skip, barrier\n");
2041         return None;
2042       }
2043 
      // If Current is known to be on a path that reads DefLoc or is a read
2045       // clobber, bail out, as the path is not profitable. We skip this check
2046       // for intrinsic calls, because the code knows how to handle memcpy
2047       // intrinsics.
2048       if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(DefLoc, CurrentI))
2049         return None;
2050 
2051       // Quick check if there are direct uses that are read-clobbers.
2052       if (any_of(Current->uses(), [this, &DefLoc, StartAccess](Use &U) {
2053             if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
2054               return !MSSA.dominates(StartAccess, UseOrDef) &&
2055                      isReadClobber(DefLoc, UseOrDef->getMemoryInst());
2056             return false;
2057           })) {
2058         LLVM_DEBUG(dbgs() << "   ...  found a read clobber\n");
2059         return None;
2060       }
2061 
2062       // If Current cannot be analyzed or is not removable, check the next
2063       // candidate.
2064       if (!hasAnalyzableMemoryWrite(CurrentI, TLI) || !isRemovable(CurrentI)) {
2065         StepAgain = true;
2066         Current = CurrentDef->getDefiningAccess();
2067         continue;
2068       }
2069 
2070       // If Current does not have an analyzable write location, skip it
2071       CurrentLoc = getLocForWriteEx(CurrentI);
2072       if (!CurrentLoc) {
2073         StepAgain = true;
2074         Current = CurrentDef->getDefiningAccess();
2075         continue;
2076       }
2077 
2078       // AliasAnalysis does not account for loops. Limit elimination to
2079       // candidates for which we can guarantee they always store to the same
2080       // memory location and not multiple locations in a loop.
2081       if (Current->getBlock() != KillingDef->getBlock() &&
2082           !IsGuaranteedLoopInvariant(const_cast<Value *>(CurrentLoc->Ptr))) {
2083         StepAgain = true;
2084         Current = CurrentDef->getDefiningAccess();
2085         WalkerStepLimit -= 1;
2086         continue;
2087       }
2088 
2089       if (IsMemTerm) {
2090         // If the killing def is a memory terminator (e.g. lifetime.end), check
        // the next candidate if Current does not write the same underlying
        // object as the terminator.
2093         if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) {
2094           StepAgain = true;
2095           Current = CurrentDef->getDefiningAccess();
2096         }
2097         continue;
2098       } else {
2099         int64_t InstWriteOffset, DepWriteOffset;
2100         auto OR = isOverwrite(KillingI, CurrentI, DefLoc, *CurrentLoc, DL, TLI,
2101                               DepWriteOffset, InstWriteOffset, BatchAA, &F);
2102         // If Current does not write to the same object as KillingDef, check
2103         // the next candidate.
2104         if (OR == OW_Unknown) {
2105           StepAgain = true;
2106           Current = CurrentDef->getDefiningAccess();
2107         } else if (OR == OW_MaybePartial) {
2108           // If KillingDef only partially overwrites Current, check the next
2109           // candidate if the partial step limit is exceeded. This aggressively
2110           // limits the number of candidates for partial store elimination,
2111           // which are less likely to be removable in the end.
2112           if (PartialLimit <= 1) {
2113             StepAgain = true;
2114             Current = CurrentDef->getDefiningAccess();
2115             WalkerStepLimit -= 1;
2116             continue;
2117           }
2118           PartialLimit -= 1;
2119         }
2120       }
2121     } while (StepAgain);
2122 
2123     // Accesses to objects accessible after the function returns can only be
2124     // eliminated if the access is killed along all paths to the exit. Collect
    // the blocks with killing (i.e. completely overwriting) MemoryDefs and
    // check if they cover all paths from EarlierAccess to any function exit.
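    //
    // Roughly, if EarlierAccess is in a block with successors B and C,
    // killing defs in both B and C cover all paths to the exit, whereas a
    // killing def only in B leaves a path through C on which the earlier
    // store may still be visible at a function exit.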
2127     SmallPtrSet<Instruction *, 16> KillingDefs;
2128     KillingDefs.insert(KillingDef->getMemoryInst());
2129     MemoryAccess *EarlierAccess = Current;
2130     Instruction *EarlierMemInst =
2131         cast<MemoryDef>(EarlierAccess)->getMemoryInst();
2132     LLVM_DEBUG(dbgs() << "  Checking for reads of " << *EarlierAccess << " ("
2133                       << *EarlierMemInst << ")\n");
2134 
2135     SmallSetVector<MemoryAccess *, 32> WorkList;
2136     auto PushMemUses = [&WorkList](MemoryAccess *Acc) {
2137       for (Use &U : Acc->uses())
2138         WorkList.insert(cast<MemoryAccess>(U.getUser()));
2139     };
2140     PushMemUses(EarlierAccess);
2141 
2142     // Optimistically collect all accesses for reads. If we do not find any
2143     // read clobbers, add them to the cache.
2144     SmallPtrSet<MemoryAccess *, 16> KnownNoReads;
2145     if (!EarlierMemInst->mayReadFromMemory())
2146       KnownNoReads.insert(EarlierAccess);
    // Check if EarlierAccess may be read.
2148     for (unsigned I = 0; I < WorkList.size(); I++) {
2149       MemoryAccess *UseAccess = WorkList[I];
2150 
2151       LLVM_DEBUG(dbgs() << "   " << *UseAccess);
2152       // Bail out if the number of accesses to check exceeds the scan limit.
2153       if (ScanLimit < (WorkList.size() - I)) {
2154         LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
2155         return None;
2156       }
2157       --ScanLimit;
2158       NumDomMemDefChecks++;
2159       KnownNoReads.insert(UseAccess);
2160 
2161       if (isa<MemoryPhi>(UseAccess)) {
2162         if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) {
2163               return DT.properlyDominates(KI->getParent(),
2164                                           UseAccess->getBlock());
2165             })) {
2166           LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
2167           continue;
2168         }
2169         LLVM_DEBUG(dbgs() << "\n    ... adding PHI uses\n");
2170         PushMemUses(UseAccess);
2171         continue;
2172       }
2173 
2174       Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
2175       LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
2176 
2177       if (any_of(KillingDefs, [this, UseInst](Instruction *KI) {
2178             return DT.dominates(KI, UseInst);
2179           })) {
2180         LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
2181         continue;
2182       }
2183 
      // A memory terminator kills all preceding MemoryDefs and all succeeding
      // MemoryAccesses. We do not have to check its users.
2186       if (isMemTerminator(*CurrentLoc, EarlierMemInst, UseInst)) {
2187         LLVM_DEBUG(
2188             dbgs()
2189             << " ... skipping, memterminator invalidates following accesses\n");
2190         continue;
2191       }
2192 
2193       if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) {
2194         LLVM_DEBUG(dbgs() << "    ... adding uses of intrinsic\n");
2195         PushMemUses(UseAccess);
2196         continue;
2197       }
2198 
2199       if (UseInst->mayThrow() && !isInvisibleToCallerBeforeRet(DefUO)) {
2200         LLVM_DEBUG(dbgs() << "  ... found throwing instruction\n");
2201         return None;
2202       }
2203 
      // Uses which may read the original MemoryDef mean we cannot eliminate
      // the original MD. Stop the walk.
2206       if (isReadClobber(*CurrentLoc, UseInst)) {
2207         LLVM_DEBUG(dbgs() << "    ... found read clobber\n");
2208         return None;
2209       }
2210 
2211       // For the KillingDef and EarlierAccess we only have to check if it reads
2212       // the memory location.
2213       // TODO: It would probably be better to check for self-reads before
2214       // calling the function.
2215       if (KillingDef == UseAccess || EarlierAccess == UseAccess) {
2216         LLVM_DEBUG(dbgs() << "    ... skipping killing def/dom access\n");
2217         continue;
2218       }
2219 
2220       // Check all uses for MemoryDefs, except for defs completely overwriting
2221       // the original location. Otherwise we have to check uses of *all*
2222       // MemoryDefs we discover, including non-aliasing ones. Otherwise we might
2223       // miss cases like the following
2224       //   1 = Def(LoE) ; <----- EarlierDef stores [0,1]
2225       //   2 = Def(1)   ; (2, 1) = NoAlias,   stores [2,3]
2226       //   Use(2)       ; MayAlias 2 *and* 1, loads [0, 3].
2227       //                  (The Use points to the *first* Def it may alias)
2228       //   3 = Def(1)   ; <---- Current  (3, 2) = NoAlias, (3,1) = MayAlias,
2229       //                  stores [0,1]
2230       if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
2231         if (isCompleteOverwrite(*CurrentLoc, EarlierMemInst, UseInst)) {
2232           if (!isInvisibleToCallerAfterRet(DefUO) &&
2233               UseAccess != EarlierAccess) {
2234             BasicBlock *MaybeKillingBlock = UseInst->getParent();
2235             if (PostOrderNumbers.find(MaybeKillingBlock)->second <
2236                 PostOrderNumbers.find(EarlierAccess->getBlock())->second) {
2237 
2238               LLVM_DEBUG(dbgs()
2239                          << "    ... found killing def " << *UseInst << "\n");
2240               KillingDefs.insert(UseInst);
2241             }
2242           }
2243         } else
2244           PushMemUses(UseDef);
2245       }
2246     }
2247 
2248     // For accesses to locations visible after the function returns, make sure
2249     // that the location is killed (=overwritten) along all paths from
2250     // EarlierAccess to the exit.
2251     if (!isInvisibleToCallerAfterRet(DefUO)) {
2252       SmallPtrSet<BasicBlock *, 16> KillingBlocks;
2253       for (Instruction *KD : KillingDefs)
2254         KillingBlocks.insert(KD->getParent());
2255       assert(!KillingBlocks.empty() &&
2256              "Expected at least a single killing block");
2257 
2258       // Find the common post-dominator of all killing blocks.
2259       BasicBlock *CommonPred = *KillingBlocks.begin();
2260       for (auto I = std::next(KillingBlocks.begin()), E = KillingBlocks.end();
2261            I != E; I++) {
2262         if (!CommonPred)
2263           break;
2264         CommonPred = PDT.findNearestCommonDominator(CommonPred, *I);
2265       }
2266 
2267       // If CommonPred is in the set of killing blocks, just check if it
2268       // post-dominates EarlierAccess.
2269       if (KillingBlocks.count(CommonPred)) {
2270         if (PDT.dominates(CommonPred, EarlierAccess->getBlock()))
2271           return {EarlierAccess};
2272         return None;
2273       }
2274 
2275       // If the common post-dominator does not post-dominate EarlierAccess,
2276       // there is a path from EarlierAccess to an exit not going through a
2277       // killing block.
2278       if (PDT.dominates(CommonPred, EarlierAccess->getBlock())) {
2279         SetVector<BasicBlock *> WorkList;
2280 
2281         // If CommonPred is null, there are multiple exits from the function.
2282         // They all have to be added to the worklist.
2283         if (CommonPred)
2284           WorkList.insert(CommonPred);
2285         else
2286           for (BasicBlock *R : PDT.roots())
2287             WorkList.insert(R);
2288 
2289         NumCFGTries++;
2290         // Check if all paths starting from an exit node go through one of the
2291         // killing blocks before reaching EarlierAccess.
2292         for (unsigned I = 0; I < WorkList.size(); I++) {
2293           NumCFGChecks++;
2294           BasicBlock *Current = WorkList[I];
2295           if (KillingBlocks.count(Current))
2296             continue;
2297           if (Current == EarlierAccess->getBlock())
2298             return None;
2299 
2300           // EarlierAccess is reachable from the entry, so we don't have to
2301           // explore unreachable blocks further.
2302           if (!DT.isReachableFromEntry(Current))
2303             continue;
2304 
2305           for (BasicBlock *Pred : predecessors(Current))
2306             WorkList.insert(Pred);
2307 
2308           if (WorkList.size() >= MemorySSAPathCheckLimit)
2309             return None;
2310         }
2311         NumCFGSuccess++;
2312         return {EarlierAccess};
2313       }
2314       return None;
2315     }
2316 
2317     // No aliasing MemoryUses of EarlierAccess found, EarlierAccess is
2318     // potentially dead.
2319     return {EarlierAccess};
2320   }
2321 
2322   // Delete dead memory defs
2323   void deleteDeadInstruction(Instruction *SI) {
2324     MemorySSAUpdater Updater(&MSSA);
2325     SmallVector<Instruction *, 32> NowDeadInsts;
2326     NowDeadInsts.push_back(SI);
2327     --NumFastOther;
2328 
2329     while (!NowDeadInsts.empty()) {
2330       Instruction *DeadInst = NowDeadInsts.pop_back_val();
2331       ++NumFastOther;
2332 
2333       // Try to preserve debug information attached to the dead instruction.
2334       salvageDebugInfo(*DeadInst);
2335       salvageKnowledge(DeadInst);
2336 
2337       // Remove the Instruction from MSSA.
2338       if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) {
2339         if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) {
2340           SkipStores.insert(MD);
2341         }
2342         Updater.removeMemoryAccess(MA);
2343       }
2344 
2345       auto I = IOLs.find(DeadInst->getParent());
2346       if (I != IOLs.end())
2347         I->second.erase(DeadInst);
2348       // Remove its operands
2349       for (Use &O : DeadInst->operands())
2350         if (Instruction *OpI = dyn_cast<Instruction>(O)) {
2351           O = nullptr;
2352           if (isInstructionTriviallyDead(OpI, &TLI))
2353             NowDeadInsts.push_back(OpI);
2354         }
2355 
2356       DeadInst->eraseFromParent();
2357     }
2358   }
2359 
  // Check for any extra throws between SI and NI that block DSE. This only
  // checks extra maythrows (those that aren't MemoryDefs). MemoryDefs that may
  // throw are handled during the walk from one def to the next.
2363   bool mayThrowBetween(Instruction *SI, Instruction *NI,
2364                        const Value *SILocUnd) {
2365     // First see if we can ignore it by using the fact that SI is an
2366     // alloca/alloca like object that is not visible to the caller during
2367     // execution of the function.
2368     if (SILocUnd && isInvisibleToCallerBeforeRet(SILocUnd))
2369       return false;
2370 
2371     if (SI->getParent() == NI->getParent())
2372       return ThrowingBlocks.count(SI->getParent());
2373     return !ThrowingBlocks.empty();
2374   }
2375 
2376   // Check if \p NI acts as a DSE barrier for \p SI. The following instructions
2377   // act as barriers:
2378   //  * A memory instruction that may throw and \p SI accesses a non-stack
2379   //  object.
  //  * Atomic stores stronger than monotonic.
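  //
  // For example (hypothetical IR), a
  //   store atomic i32 0, i32* %q seq_cst, align 4
  // between two stores to %p stops the walk, so the earlier store to %p is
  // conservatively kept.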
2381   bool isDSEBarrier(const Value *SILocUnd, Instruction *NI) {
    // If NI may throw it acts as a barrier, unless SI accesses an alloca or
    // alloca-like object that does not escape.
2384     if (NI->mayThrow() && !isInvisibleToCallerBeforeRet(SILocUnd))
2385       return true;
2386 
2387     // If NI is an atomic load/store stronger than monotonic, do not try to
2388     // eliminate/reorder it.
2389     if (NI->isAtomic()) {
2390       if (auto *LI = dyn_cast<LoadInst>(NI))
2391         return isStrongerThanMonotonic(LI->getOrdering());
2392       if (auto *SI = dyn_cast<StoreInst>(NI))
2393         return isStrongerThanMonotonic(SI->getOrdering());
2394       if (auto *ARMW = dyn_cast<AtomicRMWInst>(NI))
2395         return isStrongerThanMonotonic(ARMW->getOrdering());
2396       if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(NI))
2397         return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) ||
2398                isStrongerThanMonotonic(CmpXchg->getFailureOrdering());
2399       llvm_unreachable("other instructions should be skipped in MemorySSA");
2400     }
2401     return false;
2402   }
2403 
2404   /// Eliminate writes to objects that are not visible in the caller and are not
2405   /// accessed before returning from the function.
2406   bool eliminateDeadWritesAtEndOfFunction() {
2407     bool MadeChange = false;
2408     LLVM_DEBUG(
2409         dbgs()
2410         << "Trying to eliminate MemoryDefs at the end of the function\n");
2411     for (int I = MemDefs.size() - 1; I >= 0; I--) {
2412       MemoryDef *Def = MemDefs[I];
2413       if (SkipStores.contains(Def) || !isRemovable(Def->getMemoryInst()))
2414         continue;
2415 
2416       Instruction *DefI = Def->getMemoryInst();
2417       SmallVector<const Value *, 4> Pointers;
2418       auto DefLoc = getLocForWriteEx(DefI);
2419       if (!DefLoc)
2420         continue;
2421 
2422       // NOTE: Currently eliminating writes at the end of a function is limited
2423       // to MemoryDefs with a single underlying object, to save compile-time. In
2424       // practice it appears the case with multiple underlying objects is very
2425       // uncommon. If it turns out to be important, we can use
2426       // getUnderlyingObjects here instead.
2427       const Value *UO = getUnderlyingObject(DefLoc->Ptr);
2428       if (!UO || !isInvisibleToCallerAfterRet(UO))
2429         continue;
2430 
2431       if (isWriteAtEndOfFunction(Def)) {
2432         // See through pointer-to-pointer bitcasts
2433         LLVM_DEBUG(dbgs() << "   ... MemoryDef is not accessed until the end "
2434                              "of the function\n");
2435         deleteDeadInstruction(DefI);
2436         ++NumFastStores;
2437         MadeChange = true;
2438       }
2439     }
2440     return MadeChange;
2441   }
2442 
2443   /// \returns true if \p Def is a no-op store, either because it
2444   /// directly stores back a loaded value or stores zero to a calloced object.
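  ///
  /// For example (hypothetical IR), something like
  ///   %v = load i32, i32* %p
  ///   store i32 %v, i32* %p
  /// is a no-op if the location is not modified between the load and the
  /// store, as is storing zero into memory freshly returned by calloc.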
2445   bool storeIsNoop(MemoryDef *Def, const MemoryLocation &DefLoc,
2446                    const Value *DefUO) {
2447     StoreInst *Store = dyn_cast<StoreInst>(Def->getMemoryInst());
2448     if (!Store)
2449       return false;
2450 
2451     if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
2452       if (LoadI->getPointerOperand() == Store->getOperand(1)) {
2453         // Get the defining access for the load.
2454         auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
2455         // Fast path: the defining accesses are the same.
2456         if (LoadAccess == Def->getDefiningAccess())
2457           return true;
2458 
2459         // Look through phi accesses. Recursively scan all phi accesses by
2460         // adding them to a worklist. Bail when we run into a memory def that
2461         // does not match LoadAccess.
2462         SetVector<MemoryAccess *> ToCheck;
2463         MemoryAccess *Current =
2464             MSSA.getWalker()->getClobberingMemoryAccess(Def);
2465         // We don't want to bail when we run into the store memory def. But,
2466         // the phi access may point to it. So, pretend like we've already
2467         // checked it.
2468         ToCheck.insert(Def);
2469         ToCheck.insert(Current);
2470         // Start at current (1) to simulate already having checked Def.
2471         for (unsigned I = 1; I < ToCheck.size(); ++I) {
2472           Current = ToCheck[I];
2473           if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) {
2474             // Check all the operands.
2475             for (auto &Use : PhiAccess->incoming_values())
2476               ToCheck.insert(cast<MemoryAccess>(&Use));
2477             continue;
2478           }
2479 
2480           // If we found a memory def, bail. This happens when we have an
2481           // unrelated write in between an otherwise noop store.
2482           assert(isa<MemoryDef>(Current) &&
2483                  "Only MemoryDefs should reach here.");
2484           // TODO: Skip no alias MemoryDefs that have no aliasing reads.
2485           // We are searching for the definition of the store's destination.
2486           // So, if that is the same definition as the load, then this is a
2487           // noop. Otherwise, fail.
2488           if (LoadAccess != Current)
2489             return false;
2490         }
2491         return true;
2492       }
2493     }
2494 
2495     Constant *StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
2496     if (StoredConstant && StoredConstant->isNullValue()) {
2497       auto *DefUOInst = dyn_cast<Instruction>(DefUO);
2498       if (DefUOInst && isCallocLikeFn(DefUOInst, &TLI)) {
2499         auto *UnderlyingDef = cast<MemoryDef>(MSSA.getMemoryAccess(DefUOInst));
2500         // If UnderlyingDef is the clobbering access of Def, no instructions
2501         // between them can modify the memory location.
2502         auto *ClobberDef =
2503             MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def);
2504         return UnderlyingDef == ClobberDef;
2505       }
2506     }
2507     return false;
2508   }
2509 };
2510 
2511 bool eliminateDeadStoresMemorySSA(Function &F, AliasAnalysis &AA,
2512                                   MemorySSA &MSSA, DominatorTree &DT,
2513                                   PostDominatorTree &PDT,
2514                                   const TargetLibraryInfo &TLI) {
2515   bool MadeChange = false;
2516 
2517   DSEState State = DSEState::get(F, AA, MSSA, DT, PDT, TLI);
2518   // For each store:
2519   for (unsigned I = 0; I < State.MemDefs.size(); I++) {
2520     MemoryDef *KillingDef = State.MemDefs[I];
2521     if (State.SkipStores.count(KillingDef))
2522       continue;
2523     Instruction *SI = KillingDef->getMemoryInst();
2524 
2525     Optional<MemoryLocation> MaybeSILoc;
2526     if (State.isMemTerminatorInst(SI))
2527       MaybeSILoc = State.getLocForTerminator(SI).map(
2528           [](const std::pair<MemoryLocation, bool> &P) { return P.first; });
2529     else
2530       MaybeSILoc = State.getLocForWriteEx(SI);
2531 
2532     if (!MaybeSILoc) {
2533       LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
2534                         << *SI << "\n");
2535       continue;
2536     }
2537     MemoryLocation SILoc = *MaybeSILoc;
2538     assert(SILoc.Ptr && "SILoc should not be null");
2539     const Value *SILocUnd = getUnderlyingObject(SILoc.Ptr);
2540 
2541     MemoryAccess *Current = KillingDef;
2542     LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "
2543                       << *Current << " (" << *SI << ")\n");
2544 
2545     unsigned ScanLimit = MemorySSAScanLimit;
2546     unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit;
2547     unsigned PartialLimit = MemorySSAPartialStoreLimit;
2548     // Worklist of MemoryAccesses that may be killed by KillingDef.
2549     SetVector<MemoryAccess *> ToCheck;
2550 
2551     if (SILocUnd)
2552       ToCheck.insert(KillingDef->getDefiningAccess());
2553 
2554     bool Shortend = false;
2555     bool IsMemTerm = State.isMemTerminatorInst(SI);
2556     // Check if MemoryAccesses in the worklist are killed by KillingDef.
2557     for (unsigned I = 0; I < ToCheck.size(); I++) {
2558       Current = ToCheck[I];
2559       if (State.SkipStores.count(Current))
2560         continue;
2561 
2562       Optional<MemoryAccess *> Next = State.getDomMemoryDef(
2563           KillingDef, Current, SILoc, SILocUnd, ScanLimit, WalkerStepLimit,
2564           IsMemTerm, PartialLimit);
2565 
2566       if (!Next) {
2567         LLVM_DEBUG(dbgs() << "  finished walk\n");
2568         continue;
2569       }
2570 
2571       MemoryAccess *EarlierAccess = *Next;
2572       LLVM_DEBUG(dbgs() << " Checking if we can kill " << *EarlierAccess);
2573       if (isa<MemoryPhi>(EarlierAccess)) {
2574         LLVM_DEBUG(dbgs() << "\n  ... adding incoming values to worklist\n");
2575         for (Value *V : cast<MemoryPhi>(EarlierAccess)->incoming_values()) {
2576           MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
2577           BasicBlock *IncomingBlock = IncomingAccess->getBlock();
2578           BasicBlock *PhiBlock = EarlierAccess->getBlock();
2579 
2580           // We only consider incoming MemoryAccesses that come before the
2581           // MemoryPhi. Otherwise we could discover candidates that do not
2582           // strictly dominate our starting def.
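          //
          // E.g. for a MemoryPhi in a loop header, the incoming access from
          // the preheader is added to the worklist (its block has a larger
          // post-order number), while the incoming access from the latch is
          // skipped.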
2583           if (State.PostOrderNumbers[IncomingBlock] >
2584               State.PostOrderNumbers[PhiBlock])
2585             ToCheck.insert(IncomingAccess);
2586         }
2587         continue;
2588       }
2589       auto *NextDef = cast<MemoryDef>(EarlierAccess);
2590       Instruction *NI = NextDef->getMemoryInst();
2591       LLVM_DEBUG(dbgs() << " (" << *NI << ")\n");
2592       ToCheck.insert(NextDef->getDefiningAccess());
2593       NumGetDomMemoryDefPassed++;
2594 
2595       if (!DebugCounter::shouldExecute(MemorySSACounter))
2596         continue;
2597 
2598       MemoryLocation NILoc = *State.getLocForWriteEx(NI);
2599 
2600       if (IsMemTerm) {
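        // SI is a memory terminator (e.g. a free-like call or lifetime_end)
        // of its underlying object; an earlier store to the same object can
        // never be observed, e.g. (sketch):
        //   store i32 1, i32* %p
        //   call void @free(i8* %m)   ; %m is the underlying object of %p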
2601         const Value *NIUnd = getUnderlyingObject(NILoc.Ptr);
2602         if (SILocUnd != NIUnd)
2603           continue;
2604         LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *NI
2605                           << "\n  KILLER: " << *SI << '\n');
2606         State.deleteDeadInstruction(NI);
2607         ++NumFastStores;
2608         MadeChange = true;
2609       } else {
        // Check if SI (the later store) overwrites NI (the earlier one).
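        // E.g. (sketch) a complete overwrite:
        //   store i8 1, i8* %p   ; NI (earlier), candidate for removal
        //   store i8 2, i8* %p   ; SI (later), overwrites every byte of NI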
2611         int64_t InstWriteOffset, DepWriteOffset;
2612         OverwriteResult OR =
2613             isOverwrite(SI, NI, SILoc, NILoc, State.DL, TLI, DepWriteOffset,
2614                         InstWriteOffset, State.BatchAA, &F);
2615         if (OR == OW_MaybePartial) {
2616           auto Iter = State.IOLs.insert(
2617               std::make_pair<BasicBlock *, InstOverlapIntervalsTy>(
2618                   NI->getParent(), InstOverlapIntervalsTy()));
2619           auto &IOL = Iter.first->second;
2620           OR = isPartialOverwrite(SILoc, NILoc, DepWriteOffset, InstWriteOffset,
2621                                   NI, IOL);
2622         }
2623 
2624         if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
2625           auto *Earlier = dyn_cast<StoreInst>(NI);
2626           auto *Later = dyn_cast<StoreInst>(SI);
          // We are reusing tryToMergePartialOverlappingStores, which requires
          // Earlier to dominate Later.
          // TODO: implement tryToMergePartialOverlappingStores using MemorySSA.
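          //
          // E.g. (sketch): an earlier `store i32 0, i32* %p` followed by a
          // later `store i8 1` into one of %p's bytes can be folded into a
          // single earlier store of the merged i32 constant, after which the
          // later store is deleted.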
2630           if (Earlier && Later && DT.dominates(Earlier, Later)) {
2631             if (Constant *Merged = tryToMergePartialOverlappingStores(
2632                     Earlier, Later, InstWriteOffset, DepWriteOffset, State.DL,
2633                     State.BatchAA, &DT)) {
2634 
2635               // Update stored value of earlier store to merged constant.
2636               Earlier->setOperand(0, Merged);
2637               ++NumModifiedStores;
2638               MadeChange = true;
2639 
              Shortened = true;
              // Remove the later store and any outstanding overlap intervals
              // for the updated (earlier) store.
2643               State.deleteDeadInstruction(Later);
2644               auto I = State.IOLs.find(Earlier->getParent());
2645               if (I != State.IOLs.end())
2646                 I->second.erase(Earlier);
2647               break;
2648             }
2649           }
2650         }
2651 
2652         if (OR == OW_Complete) {
2653           LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *NI
2654                             << "\n  KILLER: " << *SI << '\n');
2655           State.deleteDeadInstruction(NI);
2656           ++NumFastStores;
2657           MadeChange = true;
2658         }
2659       }
2660     }
2661 
2662     // Check if the store is a no-op.
    if (!Shortened && isRemovable(SI) &&
2664         State.storeIsNoop(KillingDef, SILoc, SILocUnd)) {
2665       LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n  DEAD: " << *SI << '\n');
2666       State.deleteDeadInstruction(SI);
2667       NumRedundantStores++;
2668       MadeChange = true;
2669       continue;
2670     }
2671   }
2672 
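  // With partial overwrite tracking enabled, use the collected overlap
  // intervals to shorten earlier writes (e.g. trim a memset whose leading or
  // trailing bytes are overwritten by later stores).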
2673   if (EnablePartialOverwriteTracking)
2674     for (auto &KV : State.IOLs)
2675       MadeChange |= removePartiallyOverlappedStores(State.DL, KV.second, TLI);
2676 
2677   MadeChange |= State.eliminateDeadWritesAtEndOfFunction();
2678   return MadeChange;
2679 }
2680 } // end anonymous namespace
2681 
2682 //===----------------------------------------------------------------------===//
2683 // DSE Pass
2684 //===----------------------------------------------------------------------===//
2685 PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
2686   AliasAnalysis &AA = AM.getResult<AAManager>(F);
2687   const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F);
2688   DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
2689 
2690   bool Changed = false;
2691   if (EnableMemorySSA) {
2692     MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2693     PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
2694 
2695     Changed = eliminateDeadStoresMemorySSA(F, AA, MSSA, DT, PDT, TLI);
2696   } else {
2697     MemoryDependenceResults &MD = AM.getResult<MemoryDependenceAnalysis>(F);
2698 
2699     Changed = eliminateDeadStores(F, &AA, &MD, &DT, &TLI);
2700   }
2701 
2702 #ifdef LLVM_ENABLE_STATS
2703   if (AreStatisticsEnabled())
2704     for (auto &I : instructions(F))
2705       NumRemainingStores += isa<StoreInst>(&I);
2706 #endif
2707 
2708   if (!Changed)
2709     return PreservedAnalyses::all();
2710 
2711   PreservedAnalyses PA;
2712   PA.preserveSet<CFGAnalyses>();
2713   PA.preserve<GlobalsAA>();
2714   if (EnableMemorySSA)
2715     PA.preserve<MemorySSAAnalysis>();
2716   else
2717     PA.preserve<MemoryDependenceAnalysis>();
2718   return PA;
2719 }
2720 
2721 namespace {
2722 
2723 /// A legacy pass for the legacy pass manager that wraps \c DSEPass.
2724 class DSELegacyPass : public FunctionPass {
2725 public:
2726   static char ID; // Pass identification, replacement for typeid
2727 
2728   DSELegacyPass() : FunctionPass(ID) {
2729     initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
2730   }
2731 
2732   bool runOnFunction(Function &F) override {
2733     if (skipFunction(F))
2734       return false;
2735 
2736     AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2737     DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2738     const TargetLibraryInfo &TLI =
2739         getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2740 
2741     bool Changed = false;
2742     if (EnableMemorySSA) {
2743       MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2744       PostDominatorTree &PDT =
2745           getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
2746 
2747       Changed = eliminateDeadStoresMemorySSA(F, AA, MSSA, DT, PDT, TLI);
2748     } else {
2749       MemoryDependenceResults &MD =
2750           getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
2751 
2752       Changed = eliminateDeadStores(F, &AA, &MD, &DT, &TLI);
2753     }
2754 
2755 #ifdef LLVM_ENABLE_STATS
2756     if (AreStatisticsEnabled())
2757       for (auto &I : instructions(F))
2758         NumRemainingStores += isa<StoreInst>(&I);
2759 #endif
2760 
2761     return Changed;
2762   }
2763 
2764   void getAnalysisUsage(AnalysisUsage &AU) const override {
2765     AU.setPreservesCFG();
2766     AU.addRequired<AAResultsWrapperPass>();
2767     AU.addRequired<TargetLibraryInfoWrapperPass>();
2768     AU.addPreserved<GlobalsAAWrapperPass>();
2769     AU.addRequired<DominatorTreeWrapperPass>();
2770     AU.addPreserved<DominatorTreeWrapperPass>();
2771 
2772     if (EnableMemorySSA) {
2773       AU.addRequired<PostDominatorTreeWrapperPass>();
2774       AU.addRequired<MemorySSAWrapperPass>();
2775       AU.addPreserved<PostDominatorTreeWrapperPass>();
2776       AU.addPreserved<MemorySSAWrapperPass>();
2777     } else {
2778       AU.addRequired<MemoryDependenceWrapperPass>();
2779       AU.addPreserved<MemoryDependenceWrapperPass>();
2780     }
2781   }
2782 };
2783 
2784 } // end anonymous namespace
2785 
2786 char DSELegacyPass::ID = 0;
2787 
2788 INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
2789                       false)
2790 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2791 INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
2792 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2793 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
2794 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
2795 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
2796 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2797 INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
2798                     false)
2799 
2800 FunctionPass *llvm::createDeadStoreEliminationPass() {
2801   return new DSELegacyPass();
2802 }
2803