//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memsets.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

static cl::opt<bool>
    EnableMemorySSA("enable-memcpyopt-memoryssa", cl::init(true), cl::Hidden,
                    cl::desc("Use MemorySSA-backed MemCpyOpt."));

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");
STATISTIC(NumCallSlot,    "Number of call slot optimizations performed");

namespace {

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
struct MemsetRange {
  // Start/End - A semi-open range that describes the span that this range
  // covers. The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found at least 4 stores to merge, or the range covers at least 16
  // bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;
  // Assume the remaining bytes, if any, are done a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
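  // For example (illustrative numbers): with a largest legal integer type of
  // i32 (MaxIntSize = 4), a 7-byte range needs one 4-byte store plus three
  // byte stores, so merging is only profitable if it replaces more than 4
  // stores.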
  return TheStores.size() > NumPointerStores+NumByteStores;
}

namespace {

class MemsetRanges {
  using range_iterator = SmallVectorImpl<MemsetRange>::iterator;

  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;

  const DataLayout &DL;

public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;

  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize, SI->getPointerOperand(),
             SI->getAlign().value(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anonymous namespace

/// Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

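  // Find the first existing range whose end is at or after Start; since Ranges
  // is kept sorted, only that range and its successors can overlap or abut the
  // new one.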
  range_iterator I = partition_point(
      Ranges, [=](const MemsetRange &O) { return O.End < Start; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = Ptr;
    R.Alignment    = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, the store has already been recorded above, so just return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly cause the new range to join the prior range, because
  // otherwise we would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {

class MemCpyOptLegacyPass : public FunctionPass {
  MemCpyOptPass Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  MemCpyOptLegacyPass() : FunctionPass(ID) {
    initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    if (!EnableMemorySSA)
      AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    if (EnableMemorySSA)
      AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
  }
};

} // end anonymous namespace

char MemCpyOptLegacyPass::ID = 0;

/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)

// Check that V is either not accessible by the caller, or unwinding cannot
// occur between Start and End.
static bool mayBeVisibleThroughUnwinding(Value *V, Instruction *Start,
                                         Instruction *End) {
  assert(Start->getParent() == End->getParent() && "Must be in same block");
  if (!Start->getFunction()->doesNotThrow() &&
      !isa<AllocaInst>(getUnderlyingObject(V))) {
    for (const Instruction &I :
         make_range(Start->getIterator(), End->getIterator())) {
      if (I.mayThrow())
        return true;
    }
  }
  return false;
}

void MemCpyOptPass::eraseInstruction(Instruction *I) {
  if (MSSAU)
    MSSAU->removeMemoryAccess(I);
  if (MD)
    MD->removeInstruction(I);
  I->eraseFromParent();
}

// Check for mod or ref of Loc between Start and End, excluding both boundaries.
// Start and End must be in the same block
static bool accessedBetween(AliasAnalysis &AA, MemoryLocation Loc,
                            const MemoryUseOrDef *Start,
                            const MemoryUseOrDef *End) {
  assert(Start->getBlock() == End->getBlock() && "Only local supported");
  for (const MemoryAccess &MA :
       make_range(++Start->getIterator(), End->getIterator())) {
    if (isModOrRefSet(AA.getModRefInfo(cast<MemoryUseOrDef>(MA).getMemoryInst(),
                                       Loc)))
      return true;
  }
  return false;
}

// Check for mod of Loc between Start and End, excluding both boundaries.
// Start and End can be in different blocks.
static bool writtenBetween(MemorySSA *MSSA, MemoryLocation Loc,
                           const MemoryUseOrDef *Start,
                           const MemoryUseOrDef *End) {
  // TODO: Only walk until we hit Start.
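  // If the clobbering write for Loc, found by walking upwards from End,
  // already dominates Start, then it also precedes Start, so nothing writes
  // Loc strictly between the two accesses.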
  MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
      End->getDefiningAccess(), Loc);
  return !MSSA->dominates(Clobber, Start);
}

/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
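/// For example (illustrative IR), four adjacent i8 stores of zero:
///   store i8 0, i8* %p
///   store i8 0, i8* %p.1
///   store i8 0, i8* %p.2
///   store i8 0, i8* %p.3
/// may be merged into a single 4-byte memset starting at %p.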
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store with a byte-splattable value.  Scan to
  // find all subsequent stores of the same value at offsets from the same
  // pointer.  Join these together into ranges, so we can decide whether
  // contiguous blocks are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);

  // Keeps track of the last memory use or def before the insertion point for
  // the new memset. The new MemoryDef for the inserted memsets will be inserted
  // after MemInsertPoint. It points to either LastMemDef or to the last user
  // before the insertion point of the memset, if there are any such users.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  // Keeps track of the last MemoryDef between StartInst and the insertion point
  // for the new memset. This will become the defining access of the inserted
  // memsets.
  MemoryDef *LastMemDef = nullptr;
  for (++BI; !BI->isTerminator(); ++BI) {
    if (MSSAU) {
      auto *CurrentAcc = cast_or_null<MemoryUseOrDef>(
          MSSAU->getMemorySSA()->getMemoryAccess(&*BI));
      if (CurrentAcc) {
        MemInsertPoint = CurrentAcc;
        if (auto *CurrentDef = dyn_cast<MemoryDef>(CurrentAcc))
          LastMemDef = CurrentDef;
      }
    }

    // Calls that only access inaccessible memory do not block merging
    // accessible stores.
    if (auto *CB = dyn_cast<CallBase>(BI)) {
      if (CB->onlyAccessesInaccessibleMemory())
        continue;
    }

    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      Value *StoredVal = NextStore->getValueOperand();

      // Don't convert stores of non-integral pointer types to memsets (which
      // stores integers).
      if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
        break;

      // Check to see if this stored value splats to the same byte value.
      Value *StoredByte = isBytewiseValue(StoredVal, DL);
      if (isa<UndefValue>(ByteVal) && StoredByte)
        ByteVal = StoredByte;
      if (ByteVal != StoredByte)
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset =
          isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL);
      if (!Offset)
        break;

      Ranges.addStore(*Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), DL);
      if (!Offset)
        break;

      Ranges.addMemSet(*Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put it right before the first instruction that
  // isn't part of the memset block.  This ensures that the memset is dominated
  // by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {
    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start,
                                   MaybeAlign(Range.Alignment));
    LLVM_DEBUG(dbgs() << "Replace stores:\n"; for (Instruction *SI
                                                   : Range.TheStores) dbgs()
                                              << *SI << '\n';
               dbgs() << "With: " << *AMemSet << '\n');
    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    if (MSSAU) {
      assert(LastMemDef && MemInsertPoint &&
             "Both LastMemDef and MemInsertPoint need to be set");
      auto *NewDef =
          cast<MemoryDef>(MemInsertPoint->getMemoryInst() == &*BI
                              ? MSSAU->createMemoryAccessBefore(
                                    AMemSet, LastMemDef, MemInsertPoint)
                              : MSSAU->createMemoryAccessAfter(
                                    AMemSet, LastMemDef, MemInsertPoint));
      MSSAU->insertDef(NewDef, /*RenameUses=*/true);
      LastMemDef = NewDef;
      MemInsertPoint = NewDef;
    }

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores)
      eraseInstruction(SI);

    ++NumMemSetInfer;
  }

  return AMemSet;
}

// This method tries to lift a store instruction before position P.
// It will lift the store and its arguments, plus anything that may alias
// with them. The method returns true if it was successful.
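// For example (illustrative), when promoting a load/store pair to a memcpy,
// the store may need to be lifted above an intervening write to the load's
// source, together with the instructions computing its pointer operand.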
bool MemCpyOptPass::moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (isModOrRefSet(AA->getModRefInfo(P, StoreLoc)))
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction *, 8> ToLift{SI};

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted calls.
  SmallVector<const CallBase *, 8> Calls;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    // Make sure hoisting does not perform a store that was not guaranteed to
    // happen.
    if (!isGuaranteedToTransferExecutionToSuccessor(C))
      return false;

    bool MayAlias = isModOrRefSet(AA->getModRefInfo(C, None));

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = llvm::any_of(MemLocs, [C, this](const MemoryLocation &ML) {
        return isModOrRefSet(AA->getModRefInfo(C, ML));
      });

      if (!NeedLift)
        NeedLift = llvm::any_of(Calls, [C, this](const CallBase *Call) {
          return isModOrRefSet(AA->getModRefInfo(C, Call));
        });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      // Since LI is implicitly moved downwards past the lifted instructions,
      // none of them may modify its source.
      if (isModSet(AA->getModRefInfo(C, LoadLoc)))
        return false;
      else if (const auto *Call = dyn_cast<CallBase>(C)) {
        // If we can't lift this before P, it's game over.
        if (isModOrRefSet(AA->getModRefInfo(P, Call)))
          return false;

        Calls.push_back(Call);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (isModOrRefSet(AA->getModRefInfo(P, ML)))
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) {
        if (A->getParent() == SI->getParent()) {
          // Cannot hoist user of P above P
          if (A == P) return false;
          Args.insert(A);
        }
      }
  }

  // Find MSSA insertion point. Normally P will always have a corresponding
  // memory access before which we can insert. However, with non-standard AA
  // pipelines, there may be a mismatch between AA and MSSA, in which case we
  // will scan for a memory access before P. In either case, we know for sure
  // that at least the load will have a memory access.
  // TODO: Simplify this once P will be determined by MSSA, in which case the
  // discrepancy can no longer occur.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  if (MSSAU) {
    if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(P)) {
      MemInsertPoint = cast<MemoryUseOrDef>(--MA->getIterator());
    } else {
      const Instruction *ConstP = P;
      for (const Instruction &I : make_range(++ConstP->getReverseIterator(),
                                             ++LI->getReverseIterator())) {
        if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
          MemInsertPoint = MA;
          break;
        }
      }
    }
  }

  // We made it, we need to lift.
  for (auto *I : llvm::reverse(ToLift)) {
    LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
    if (MSSAU) {
      assert(MemInsertPoint && "Must have found insert point");
      if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(I)) {
        MSSAU->moveAfter(MA, MemInsertPoint);
        MemInsertPoint = MA;
      }
    }
  }

  return true;
}

bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach how to propagate the !nontemporal metadata to
  // memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  Value *StoredVal = SI->getValueOperand();

  // Not all the transforms below are correct for non-integral pointers, bail
  // until we've audited the individual pieces.
  if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
    return false;

  // Load to store forwarding can be interpreted as memcpy.
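  // For example (illustrative IR):
  //   %v = load %struct.S, %struct.S* %src
  //   store %struct.S %v, %struct.S* %dst
  // can become a memcpy from %src to %dst of the struct's store size.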
  if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      if (T->isAggregateType()) {
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store. If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        // TODO: Can use MSSA for this.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (isModSet(AA->getModRefInfo(&I, LoadLoc))) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this and the store
        // destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(SI, P, LI))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics. If not, memcpy can
          // be used.
          bool UseMemMove = false;
          if (!AA->isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);
          else
            M = Builder.CreateMemCpy(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);

          LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
                            << *M << "\n");

          if (MSSAU) {
            auto *LastDef =
                cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
            auto *NewAccess =
                MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
            MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
          }

          eraseInstruction(SI);
          eraseInstruction(LI);
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      CallInst *C = nullptr;
      if (EnableMemorySSA) {
        if (auto *LoadClobber = dyn_cast<MemoryUseOrDef>(
                MSSA->getWalker()->getClobberingMemoryAccess(LI))) {
          // The load must post-dominate the call. Limit to the same block for
          // now.
          // TODO: Support non-local call-slot optimization?
          if (LoadClobber->getBlock() == SI->getParent())
            C = dyn_cast_or_null<CallInst>(LoadClobber->getMemoryInst());
        }
      } else {
        MemDepResult ldep = MD->getDependency(LI);
        if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
          C = dyn_cast<CallInst>(ldep.getInst());
      }

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        if (EnableMemorySSA) {
          if (accessedBetween(*AA, StoreLoc, MSSA->getMemoryAccess(C),
                              MSSA->getMemoryAccess(SI)))
            C = nullptr;
        } else {
          for (BasicBlock::iterator I = --SI->getIterator(),
                                    E = C->getIterator();
               I != E; --I) {
            if (isModOrRefSet(AA->getModRefInfo(&*I, StoreLoc))) {
              C = nullptr;
              break;
            }
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            commonAlignment(SI->getAlign(), LI->getAlign()), C);
        if (changed) {
          eraseInstruction(SI);
          eraseInstruction(LI);
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'd a
  // byte at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V, DL)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging as it can expose optimization opportunities
    // in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size,
                                     SI->getAlign());

      LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      if (MSSAU) {
        assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI)));
        auto *LastDef =
            cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
        auto *NewAccess = MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
        MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
      }

      eraseInstruction(SI);
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
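  // For example (illustrative), a memset of eight zero bytes followed by
  // adjacent zero stores may be widened into one memset covering both regions.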
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}

/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
                                         Instruction *cpyStore, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         Align cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpyLoad->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();
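  // For example (illustrative IR): "%a = alloca [4 x i32], i32 2" gives
  // srcSize = 2 * 16 = 32 bytes, since [4 x i32] has an alloc size of 16.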

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpyLen),
                                          DL, C, DT))
    return false;

  // Make sure that nothing can observe cpyDest being written early. There are
  // a number of cases to consider:
  //  1. cpyDest cannot be accessed between C and cpyStore as a precondition of
  //     the transform.
  //  2. C itself may not access cpyDest (prior to the transform). This is
  //     checked further below.
  //  3. If cpyDest is accessible to the caller of this function (potentially
  //     captured and not based on an alloca), we need to ensure that we cannot
  //     unwind between C and cpyStore. This is checked here.
  //  4. If cpyDest is potentially captured, there may be accesses to it from
  //     another thread. In this case, we need to check that cpyStore is
  //     guaranteed to be executed if C is. As it is a non-atomic access, it
  //     renders accesses from other threads undefined.
  //     TODO: This is currently not checked.
  if (mayBeVisibleThroughUnwinding(cpyDest, C, cpyStore))
    return false;

  // Check that dest points to memory that is at least as aligned as src.
  Align srcAlign = srcAlloca->getAlign();
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User *, 8> srcUseList(srcAlloca->users());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      append_range(srcUseList, U->users());
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      append_range(srcUseList, U->users());
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->isLifetimeStartOrEnd())
        continue;

    if (U != C && U != cpyLoad)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned ArgI = 0, E = C->arg_size(); ArgI != E; ++ArgI)
    if (C->getArgOperand(ArgI) == cpySrc && !C->doesNotCapture(ArgI))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  if (!DT->dominates(cpyDest, C)) {
    // Support moving a constant index GEP before the call.
    auto *GEP = dyn_cast<GetElementPtrInst>(cpyDest);
    if (GEP && GEP->hasAllConstantIndices() &&
        DT->dominates(GEP->getPointerOperand(), C))
      GEP->moveBefore(C);
    else
      return false;
  }

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  ModRefInfo MR = AA->getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
  // If necessary, perform additional analysis.
  if (isModOrRefSet(MR))
    MR = AA->callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), DT);
  if (isModOrRefSet(MR))
    return false;

  // We can't create address space casts here because we don't know if they're
  // safe for the target.
  if (cpySrc->getType()->getPointerAddressSpace() !=
      cpyDest->getType()->getPointerAddressSpace())
    return false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc &&
        cpySrc->getType()->getPointerAddressSpace() !=
            C->getArgOperand(ArgI)->getType()->getPointerAddressSpace())
      return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ?  cpyDest
        : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                      cpyDest->getName(), C);
      changedArgument = true;
      if (C->getArgOperand(ArgI)->getType() == Dest->getType())
        C->setArgOperand(ArgI, Dest);
      else
        C->setArgOperand(ArgI, CastInst::CreatePointerCast(
                                   Dest, C->getArgOperand(ArgI)->getType(),
                                   Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  if (MD)
    MD->removeInstruction(C);

  // Update AA metadata
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group,
                         LLVMContext::MD_access_group};
  combineMetadata(C, cpyLoad, KnownIDs, true);

  ++NumCallSlot;
  return true;
}

/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                  MemCpyInst *MDep) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a
  // noop transfer and substituting the input won't change this instruction.
  // Just ignore the input and let someone else zap MDep.  This handles cases
  // like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the lengths of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
  if (MDep->getLength() != M->getLength()) {
    ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
    ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
    if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
      return false;
  }

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  if (EnableMemorySSA) {
    // TODO: It would be sufficient to check the MDep source up to the memcpy
    // size of M, rather than MDep.
    if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep),
                       MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(M)))
      return false;
  } else {
    // NOTE: This is conservative, it will stop on any read from the source loc,
    // not just the defining memcpy.
    MemDepResult SourceDep =
        MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                     M->getIterator(), M->getParent());
    if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
      return false;
  }

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA->isNoAlias(MemoryLocation::getForDest(M),
                     MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.
  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
                    << *MDep << '\n' << *M << '\n');

  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  IRBuilder<> Builder(M);
  Instruction *NewM;
  if (UseMemMove)
    NewM = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(),
                                 MDep->getRawSource(), MDep->getSourceAlign(),
                                 M->getLength(), M->isVolatile());
  else if (isa<MemCpyInlineInst>(M)) {
    // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is
    // never allowed since that would allow the latter to be lowered as a call
    // to an external function.
    NewM = Builder.CreateMemCpyInline(
        M->getRawDest(), M->getDestAlign(), MDep->getRawSource(),
        MDep->getSourceAlign(), M->getLength(), M->isVolatile());
  } else
    NewM = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(),
                                MDep->getRawSource(), MDep->getSourceAlign(),
                                M->getLength(), M->isVolatile());

  if (MSSAU) {
    assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)));
    auto *LastDef = cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
    auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
    MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
  }

  // Remove the instruction we're replacing.
  eraseInstruction(M);
  ++NumMemCpyInstr;
  return true;
}

/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet.  Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                                  MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (!AA->isMustAlias(MemSet->getDest(), MemCpy->getDest()))
    return false;

  // Check that src and dst of the memcpy aren't the same. While memcpy
  // operands cannot partially overlap, exact equality is allowed.
  if (!AA->isNoAlias(MemoryLocation(MemCpy->getSource(),
                                    LocationSize::precise(1)),
                     MemoryLocation(MemCpy->getDest(),
                                    LocationSize::precise(1))))
    return false;

  if (EnableMemorySSA) {
    // We know that dst up to src_size is not written. We now need to make sure
    // that dst up to dst_size is not accessed. (If we did not move the memset,
    // checking for reads would be sufficient.)
    if (accessedBetween(*AA, MemoryLocation::getForDest(MemSet),
                        MSSA->getMemoryAccess(MemSet),
                        MSSA->getMemoryAccess(MemCpy))) {
      return false;
    }
  } else {
    // We have already checked that dst up to src_size is not accessed. We
    // need to make sure that there are no accesses up to dst_size either.
    MemDepResult DstDepInfo = MD->getPointerDependencyFrom(
        MemoryLocation::getForDest(MemSet), false, MemCpy->getIterator(),
        MemCpy->getParent());
    if (DstDepInfo.getInst() != MemSet)
      return false;
  }

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  if (mayBeVisibleThroughUnwinding(Dest, MemSet, MemCpy))
    return false;

  // If the sizes are the same, simply drop the memset instead of generating
  // a replacement with zero size.
  if (DestSize == SrcSize) {
    eraseInstruction(MemSet);
    return true;
  }

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned, and SrcSize is constant, use the minimum alignment
  // of the sum.
  const unsigned DestAlign =
      std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);
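  // For example (illustrative): with SrcSize = 8 and DestAlign = 16, the new
  // memset starts at Dest + 8, which is only guaranteed 8-byte alignment, so
  // MinAlign(8, 16) = 8 is used.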

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
  Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
  Value *MemsetLen = Builder.CreateSelect(
      Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
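  // For example (illustrative): with DestSize = 16 and SrcSize = 8, MemsetLen
  // is 8 and the new memset covers bytes [8, 16) of Dest; if DestSize <=
  // SrcSize, MemsetLen is 0 and the memset becomes a no-op.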
  unsigned DestAS = Dest->getType()->getPointerAddressSpace();
  Instruction *NewMemSet = Builder.CreateMemSet(
      Builder.CreateGEP(Builder.getInt8Ty(),
                        Builder.CreatePointerCast(Dest,
                                                  Builder.getInt8PtrTy(DestAS)),
                        SrcSize),
      MemSet->getOperand(1), MemsetLen, MaybeAlign(Align));

  if (MSSAU) {
    assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)) &&
           "MemCpy must be a MemoryDef");
    // The new memset is inserted after the memcpy, but it is known that its
    // defining access is the memset about to be removed which immediately
    // precedes the memcpy.
    auto *LastDef =
        cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
    auto *NewAccess = MSSAU->createMemoryAccessBefore(
        NewMemSet, LastDef->getDefiningAccess(), LastDef);
    MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
  }

  eraseInstruction(MemSet);
  return true;
}

/// Determine whether the instruction has undefined content for the given Size,
/// either because it was freshly alloca'd or started its lifetime.
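/// For example (illustrative IR), after either of:
///   %p = alloca [16 x i8]
///   call void @llvm.lifetime.start.p0i8(i64 16, i8* %p)
/// the first 16 bytes pointed to by %p are undef.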
static bool hasUndefContents(Instruction *I, Value *Size) {
  if (isa<AllocaInst>(I))
    return true;

  if (ConstantInt *CSize = dyn_cast<ConstantInt>(Size)) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
      if (II->getIntrinsicID() == Intrinsic::lifetime_start)
        if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
          if (LTSize->getZExtValue() >= CSize->getZExtValue())
            return true;
  }

  return false;
}

static bool hasUndefContentsMSSA(MemorySSA *MSSA, AliasAnalysis *AA, Value *V,
                                 MemoryDef *Def, Value *Size) {
  if (MSSA->isLiveOnEntryDef(Def))
    return isa<AllocaInst>(getUnderlyingObject(V));

  if (IntrinsicInst *II =
          dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst())) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      ConstantInt *LTSize = cast<ConstantInt>(II->getArgOperand(0));

      if (ConstantInt *CSize = dyn_cast<ConstantInt>(Size)) {
        if (AA->isMustAlias(V, II->getArgOperand(1)) &&
            LTSize->getZExtValue() >= CSize->getZExtValue())
          return true;
      }

      // If the lifetime.start covers a whole alloca (as it almost always
      // does) and we're querying a pointer based on that alloca, then we know
      // the memory is definitely undef, regardless of how exactly we alias.
      // The size also doesn't matter, as an out-of-bounds access would be UB.
      AllocaInst *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(V));
      if (getUnderlyingObject(II->getArgOperand(1)) == Alloca) {
        const DataLayout &DL = Alloca->getModule()->getDataLayout();
        if (Optional<TypeSize> AllocaSize = Alloca->getAllocationSizeInBits(DL))
          if (*AllocaSize == LTSize->getValue() * 8)
            return true;
      }
    }
  }

  return false;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// When dst2_size <= dst1_size.
bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                               MemSetInst *MemSet) {
  // Make sure we have the pattern memcpy(..., memset(...), ...); that is, we
  // are memsetting and then memcpying from the same address. Otherwise it is
  // hard to reason about.
  if (!AA->isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
    return false;

  Value *MemSetSize = MemSet->getLength();
  Value *CopySize = MemCpy->getLength();

  if (MemSetSize != CopySize) {
    // Make sure the memcpy doesn't read any more than what the memset wrote.
    // Don't worry about sizes larger than i64.

    // A known memset size is required.
    ConstantInt *CMemSetSize = dyn_cast<ConstantInt>(MemSetSize);
    if (!CMemSetSize)
      return false;

    // A known memcpy size is also required.
    ConstantInt *CCopySize = dyn_cast<ConstantInt>(CopySize);
    if (!CCopySize)
      return false;
    if (CCopySize->getZExtValue() > CMemSetSize->getZExtValue()) {
      // If the memcpy is larger than the memset, but the memory was undef
      // prior to the memset, we can just ignore the tail. Technically we're
      // only interested in the bytes from MemSetSize..CopySize here, but as
      // we can't easily represent this location, we use the full 0..CopySize
      // range.
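      // For example (a sketch with hypothetical constant sizes):
      //   memset(%p, 0, 8)    ; %p is freshly alloca'd
      //   memcpy(%q, %p, 16)  ; bytes 8..15 of %p are undef
      // may be treated as if the memcpy had only copied 8 bytes.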
      MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
      bool CanReduceSize = false;
      if (EnableMemorySSA) {
        MemoryUseOrDef *MemSetAccess = MSSA->getMemoryAccess(MemSet);
        MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
            MemSetAccess->getDefiningAccess(), MemCpyLoc);
        if (auto *MD = dyn_cast<MemoryDef>(Clobber))
          if (hasUndefContentsMSSA(MSSA, AA, MemCpy->getSource(), MD, CopySize))
            CanReduceSize = true;
      } else {
        MemDepResult DepInfo = MD->getPointerDependencyFrom(
            MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent());
        if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize))
          CanReduceSize = true;
      }

      if (!CanReduceSize)
        return false;
      CopySize = MemSetSize;
    }
  }

  IRBuilder<> Builder(MemCpy);
  Instruction *NewM =
      Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                           CopySize, MaybeAlign(MemCpy->getDestAlignment()));
  if (MSSAU) {
    auto *LastDef =
        cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
    auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
    MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
  }

  return true;
}

/// Perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
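///
/// A sketch of that rewrite (subject to the legality checks performed in
/// processMemCpyMemCpyDependence):
/// \code
///   memcpy(Y, X, N);  // memcpy A
///   memcpy(Z, Y, N);  // memcpy B
/// \endcode
/// becomes
/// \code
///   memcpy(Y, X, N);  // may now be dead
///   memcpy(Z, X, N);
/// \endcode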
bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
  // We can only optimize non-volatile memcpy's.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    ++BBI;
    eraseInstruction(M);
    return true;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
                                           M->getModule()->getDataLayout())) {
        IRBuilder<> Builder(M);
        Instruction *NewM =
            Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                                 MaybeAlign(M->getDestAlignment()), false);
        if (MSSAU) {
          auto *LastDef =
              cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
          auto *NewAccess =
              MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
          MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
        }

        eraseInstruction(M);
        ++NumCpyToSet;
        return true;
      }

  if (EnableMemorySSA) {
    MemoryUseOrDef *MA = MSSA->getMemoryAccess(M);
    MemoryAccess *AnyClobber = MSSA->getWalker()->getClobberingMemoryAccess(MA);
    MemoryLocation DestLoc = MemoryLocation::getForDest(M);
    const MemoryAccess *DestClobber =
        MSSA->getWalker()->getClobberingMemoryAccess(AnyClobber, DestLoc);

    // Try to turn a partially redundant memset + memcpy into
    // memcpy + smaller memset.  We don't need the memcpy size for this.
    // The memcpy must post-dominate the memset, so limit this to the same
    // basic block. A non-local generalization is likely not worthwhile.
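    // Conceptually (a sketch; processMemSetMemCpyDependence performs the
    // actual legality checks):
    //   memset(dst, c, dst_size);
    //   memcpy(dst, src, src_size);  // src_size <= dst_size
    // ->
    //   memcpy(dst, src, src_size);
    //   memset(dst + src_size, c, dst_size - src_size);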
    if (auto *MD = dyn_cast<MemoryDef>(DestClobber))
      if (auto *MDep = dyn_cast_or_null<MemSetInst>(MD->getMemoryInst()))
        if (DestClobber->getBlock() == M->getParent())
          if (processMemSetMemCpyDependence(M, MDep))
            return true;

    MemoryAccess *SrcClobber = MSSA->getWalker()->getClobberingMemoryAccess(
        AnyClobber, MemoryLocation::getForSource(M));

    // There are four possible optimizations we can do for memcpy:
    //   a) memcpy-memcpy xform which exposes redundancy for DSE.
    //   b) call-memcpy xform for return slot optimization.
    //   c) memcpy from freshly alloca'd space or space that has just started
    //      its lifetime copies undefined data, and we can therefore eliminate
    //      the memcpy in favor of the data that was already at the destination.
    //   d) memcpy from a just-memset'd source can be turned into memset.
    if (auto *MD = dyn_cast<MemoryDef>(SrcClobber)) {
      if (Instruction *MI = MD->getMemoryInst()) {
        if (ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength())) {
          if (auto *C = dyn_cast<CallInst>(MI)) {
            // The memcpy must post-dominate the call. Limit to the same block
            // for now. Additionally, we need to ensure that there are no
            // accesses to dest between the call and the memcpy. Accesses to
            // src will be checked by performCallSlotOptzn().
            // TODO: Support non-local call-slot optimization?
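            // Conceptually, the call-slot transform rewrites (sketch):
            //   call @foo(%src)        ; fills %src
            //   memcpy(%dst, %src, N)
            // into
            //   call @foo(%dst)        ; write directly to the destination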
            if (C->getParent() == M->getParent() &&
                !accessedBetween(*AA, DestLoc, MD, MA)) {
              // FIXME: Can we pass in either of dest/src alignment here instead
              // of conservatively taking the minimum?
              Align Alignment = std::min(M->getDestAlign().valueOrOne(),
                                         M->getSourceAlign().valueOrOne());
              if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
                                       CopySize->getZExtValue(), Alignment,
                                       C)) {
                LLVM_DEBUG(dbgs() << "Performed call slot optimization:\n"
                                  << "    call: " << *C << "\n"
                                  << "    memcpy: " << *M << "\n");
                eraseInstruction(M);
                ++NumMemCpyInstr;
                return true;
              }
            }
          }
        }
        if (auto *MDep = dyn_cast<MemCpyInst>(MI))
          return processMemCpyMemCpyDependence(M, MDep);
        if (auto *MDep = dyn_cast<MemSetInst>(MI)) {
          if (performMemCpyToMemSetOptzn(M, MDep)) {
            LLVM_DEBUG(dbgs() << "Converted memcpy to memset\n");
            eraseInstruction(M);
            ++NumCpyToSet;
            return true;
          }
        }
      }

      if (hasUndefContentsMSSA(MSSA, AA, M->getSource(), MD, M->getLength())) {
        LLVM_DEBUG(dbgs() << "Removed memcpy from undef\n");
        eraseInstruction(M);
        ++NumMemCpyInstr;
        return true;
      }
    }
  } else {
    MemDepResult DepInfo = MD->getDependency(M);

    // Try to turn a partially redundant memset + memcpy into
    // memcpy + smaller memset.  We don't need the memcpy size for this.
    if (DepInfo.isClobber())
      if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
        if (processMemSetMemCpyDependence(M, MDep))
          return true;

    // There are four possible optimizations we can do for memcpy:
    //   a) memcpy-memcpy xform which exposes redundancy for DSE.
    //   b) call-memcpy xform for return slot optimization.
    //   c) memcpy from freshly alloca'd space or space that has just started
    //      its lifetime copies undefined data, and we can therefore eliminate
    //      the memcpy in favor of the data that was already at the destination.
    //   d) memcpy from a just-memset'd source can be turned into memset.
    if (ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength())) {
      if (DepInfo.isClobber()) {
        if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
          // FIXME: Can we pass in either of dest/src alignment here instead
          // of conservatively taking the minimum?
          Align Alignment = std::min(M->getDestAlign().valueOrOne(),
                                     M->getSourceAlign().valueOrOne());
          if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
                                   CopySize->getZExtValue(), Alignment, C)) {
            eraseInstruction(M);
            ++NumMemCpyInstr;
            return true;
          }
        }
      }
    }

    MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
    MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
        SrcLoc, true, M->getIterator(), M->getParent());

    if (SrcDepInfo.isClobber()) {
      if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
        return processMemCpyMemCpyDependence(M, MDep);
    } else if (SrcDepInfo.isDef()) {
      if (hasUndefContents(SrcDepInfo.getInst(), M->getLength())) {
        eraseInstruction(M);
        ++NumMemCpyInstr;
        return true;
      }
    }

    if (SrcDepInfo.isClobber())
      if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
        if (performMemCpyToMemSetOptzn(M, MDep)) {
          eraseInstruction(M);
          ++NumCpyToSet;
          return true;
        }
  }

  return false;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
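///
/// Sketch:
/// \code
///   memmove(dst, src, n);  // AA proves dst and src never overlap
/// \endcode
/// becomes
/// \code
///   memcpy(dst, src, n);
/// \endcode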
bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
  if (!TLI->has(LibFunc_memmove))
    return false;

  // See if the pointers alias.
  if (!AA->isNoAlias(MemoryLocation::getForDest(M),
                     MemoryLocation::getForSource(M)))
    return false;

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
                    << "\n");

  // The pointers are provably no-alias, so we know this rewrite is safe.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));

  // For MemorySSA nothing really changes (except that memcpy may imply stricter
  // aliasing guarantees).

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  if (MD)
    MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// This is called on every byval argument of every call site.
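///
/// A sketch of the rewrite this enables (legality is verified below):
/// \code
///   memcpy(tmp, src, size);
///   foo(byval tmp);
/// \endcode
/// becomes, when src is not written between the memcpy and the call:
/// \code
///   foo(byval src);
/// \endcode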
bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
  const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CB.getArgOperand(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemoryLocation Loc(ByValArg, LocationSize::precise(ByValSize));
  MemCpyInst *MDep = nullptr;
  if (EnableMemorySSA) {
    MemoryUseOrDef *CallAccess = MSSA->getMemoryAccess(&CB);
    if (!CallAccess)
      return false;
    MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
        CallAccess->getDefiningAccess(), Loc);
    if (auto *MD = dyn_cast<MemoryDef>(Clobber))
      MDep = dyn_cast_or_null<MemCpyInst>(MD->getMemoryInst());
  } else {
    MemDepResult DepInfo = MD->getPointerDependencyFrom(
        Loc, true, CB.getIterator(), CB.getParent());
    if (!DepInfo.isClobber())
      return false;
    MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  }

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be greater than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If the call doesn't specify the
  // alignment, then it is some target-specific value that we can't know.
  MaybeAlign ByValAlign = CB.getParamAlign(ArgNo);
  if (!ByValAlign) return false;

  // If the byval alignment is greater than the memcpy source's alignment,
  // check whether we can force the source of the memcpy to the alignment we
  // need.  If we fail, we bail out.
  MaybeAlign MemDepAlign = MDep->getSourceAlign();
  if ((!MemDepAlign || *MemDepAlign < *ByValAlign) &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, AC,
                                 DT) < *ByValAlign)
    return false;

  // The address space of the memcpy source must match the byval argument.
  if (MDep->getSource()->getType()->getPointerAddressSpace() !=
      ByValArg->getType()->getPointerAddressSpace())
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy
  // and the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the call into foo(*b), as that would
  // observe the store of 42.
  if (EnableMemorySSA) {
    if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep),
                       MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(&CB)))
      return false;
  } else {
    // NOTE: This is conservative; it will stop on any read from the source
    // location, not just the defining memcpy.
    MemDepResult SourceDep = MD->getPointerDependencyFrom(
        MemoryLocation::getForSource(MDep), false,
        CB.getIterator(), MDep->getParent());
    if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
      return false;
  }

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType()) {
    BitCastInst *TmpBitCast = new BitCastInst(
        MDep->getSource(), ByValArg->getType(), "tmpcast", &CB);
    // Set the tmpcast's DebugLoc to MDep's.
    TmpBitCast->setDebugLoc(MDep->getDebugLoc());
    TmpCast = TmpBitCast;
  }

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
                    << "  " << *MDep << "\n"
                    << "  " << CB << "\n");

  // Otherwise we're good!  Update the byval argument.
  CB.setArgOperand(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// Executes one iteration of MemCpyOptPass.
bool MemCpyOptPass::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (BasicBlock &BB : F) {
    // Skip unreachable blocks. For example processStore assumes that an
    // instruction in a BB can't be dominated by a later instruction in the
    // same BB (which is a scenario that can happen for an unreachable BB that
    // has itself as a predecessor).
    if (!DT->isReachableFromEntry(&BB))
      continue;

    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M, BI);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto *CB = dyn_cast<CallBase>(I)) {
        for (unsigned i = 0, e = CB->arg_size(); i != e; ++i)
          if (CB->isByValArgument(i))
            MadeChange |= processByValArgument(*CB, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB.begin())
          --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto *MD = !EnableMemorySSA ? &AM.getResult<MemoryDependenceAnalysis>(F)
                              : AM.getCachedResult<MemoryDependenceAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto *AA = &AM.getResult<AAManager>(F);
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *MSSA = EnableMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F)
                               : AM.getCachedResult<MemorySSAAnalysis>(F);

  bool MadeChange =
      runImpl(F, MD, &TLI, AA, AC, DT, MSSA ? &MSSA->getMSSA() : nullptr);
  if (!MadeChange)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  if (MD)
    PA.preserve<MemoryDependenceAnalysis>();
  if (MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

bool MemCpyOptPass::runImpl(Function &F, MemoryDependenceResults *MD_,
                            TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
                            AssumptionCache *AC_, DominatorTree *DT_,
                            MemorySSA *MSSA_) {
  bool MadeChange = false;
  MD = MD_;
  TLI = TLI_;
  AA = AA_;
  AC = AC_;
  DT = DT_;
  MSSA = MSSA_;
  MemorySSAUpdater MSSAU_(MSSA_);
  MSSAU = MSSA_ ? &MSSAU_ : nullptr;
  // If we don't have at least memset and memcpy, there is little point in
  // doing anything here.  These are required by a freestanding implementation,
  // so if even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
    return false;

  while (true) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  if (MSSA_ && VerifyMemorySSA)
    MSSA_->verifyMemorySSA();

  MD = nullptr;
  return MadeChange;
}

/// This is the main transformation entry point for a function.
bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *MDWP = !EnableMemorySSA
      ? &getAnalysis<MemoryDependenceWrapperPass>()
      : getAnalysisIfAvailable<MemoryDependenceWrapperPass>();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *MSSAWP = EnableMemorySSA
      ? &getAnalysis<MemorySSAWrapperPass>()
      : getAnalysisIfAvailable<MemorySSAWrapperPass>();

  return Impl.runImpl(F, MDWP ? &MDWP->getMemDep() : nullptr, TLI, AA, AC, DT,
                      MSSAWP ? &MSSAWP->getMSSA() : nullptr);
}