//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls and transforming sets of stores into memsets.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

static cl::opt<bool> EnableMemCpyOptWithoutLibcalls(
    "enable-memcpyopt-without-libcalls", cl::init(false), cl::Hidden,
    cl::desc("Enable memcpyopt even when libcalls are disabled"));

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");
STATISTIC(NumCallSlot,    "Number of call slot optimizations performed");

namespace {

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
struct MemsetRange {
  // Start/End - A semi-open interval [Start, End), closed at the start and
  // open at the end, that describes the span this range covers.
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found at least 4 stores to merge, or the range covers at least 16
  // bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes, if any, are stored a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
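  //
  // For example, on a target whose largest legal integer is i64
  // (MaxIntSize == 8), an i32 store plus two i16 stores covering 8 contiguous
  // bytes give NumPointerStores == 1 and NumByteStores == 0; 3 > 1 holds, so
  // we form the memset. Three i32 stores covering 12 bytes give 1 + 4 = 5,
  // and 3 > 5 fails, so the stores are left alone. (Illustrative walk-through
  // of the heuristic, not tied to a specific target.)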
  return TheStores.size() > NumPointerStores+NumByteStores;
}

namespace {

class MemsetRanges {
  using range_iterator = SmallVectorImpl<MemsetRange>::iterator;

  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;

  const DataLayout &DL;

public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;

  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize, SI->getPointerOperand(),
             SI->getAlign().value(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(),
             MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anonymous namespace

/// Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
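///
/// For example, if ranges [0, 4) and [8, 12) already exist, adding a 4-byte
/// store at offset 4 first extends the first range to [0, 8) and then folds
/// the second range in, leaving a single range [0, 12). (Illustrative
/// walk-through of the merging logic below.)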
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = partition_point(
      Ranges, [=](const MemsetRange &O) { return O.End < Start; });

  // We now know that either I == E, in which case we didn't find anything to
  // merge with, or that Start <= I->End.  If End < I->Start or I == E, then
  // we need to insert a new range.  Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = Ptr;
    R.Alignment    = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it can't
  // possibly cause the store to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {

class MemCpyOptLegacyPass : public FunctionPass {
  MemCpyOptPass Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  MemCpyOptLegacyPass() : FunctionPass(ID) {
    initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator tree info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
  }
};

} // end anonymous namespace

char MemCpyOptLegacyPass::ID = 0;

/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)

// Check whether V may be visible to the caller through unwinding: returns
// false if V is not accessible by the caller, or if unwinding cannot occur
// between Start and End.
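//
// For example (illustrative): if V escapes to the caller and some instruction
// between Start and End may throw, an exception handler in the caller could
// observe V in a partially-updated state, so this returns true.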
static bool mayBeVisibleThroughUnwinding(Value *V, Instruction *Start,
                                         Instruction *End) {
  assert(Start->getParent() == End->getParent() && "Must be in same block");
  if (!Start->getFunction()->doesNotThrow() &&
      !isa<AllocaInst>(getUnderlyingObject(V))) {
    for (const Instruction &I :
         make_range(Start->getIterator(), End->getIterator())) {
      if (I.mayThrow())
        return true;
    }
  }
  return false;
}

void MemCpyOptPass::eraseInstruction(Instruction *I) {
  MSSAU->removeMemoryAccess(I);
  I->eraseFromParent();
}

// Check for mod or ref of Loc between Start and End, excluding both
// boundaries. Start and End must be in the same block.
static bool accessedBetween(AliasAnalysis &AA, MemoryLocation Loc,
                            const MemoryUseOrDef *Start,
                            const MemoryUseOrDef *End) {
  assert(Start->getBlock() == End->getBlock() && "Only local supported");
  for (const MemoryAccess &MA :
       make_range(++Start->getIterator(), End->getIterator())) {
    if (isModOrRefSet(AA.getModRefInfo(cast<MemoryUseOrDef>(MA).getMemoryInst(),
                                       Loc)))
      return true;
  }
  return false;
}

// Check for mod of Loc between Start and End, excluding both boundaries.
// Start and End can be in different blocks.
static bool writtenBetween(MemorySSA *MSSA, MemoryLocation Loc,
                           const MemoryUseOrDef *Start,
                           const MemoryUseOrDef *End) {
  // TODO: Only walk until we hit Start.
  MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
      End->getDefiningAccess(), Loc);
  return !MSSA->dominates(Clobber, Start);
}

/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
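///
/// For example (illustrative IR, not from a specific test):
/// \code
///   store i8 0, i8* %p
///   %p1 = getelementptr i8, i8* %p, i64 1
///   store i8 0, i8* %p1
///   ...                       ; more stores of 0 to adjacent offsets
/// \endcode
/// may become a single
/// \code
///   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 N, i1 false)
/// \endcode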
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store with a splattable value.  Scan to
  // find all subsequent stores of the same value to offsets from the same
  // pointer.  Join these together into ranges, so we can decide whether
  // contiguous blocks are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);

  // Keeps track of the last memory use or def before the insertion point for
  // the new memset. The new MemoryDef for the inserted memsets will be inserted
  // after MemInsertPoint. It points to either LastMemDef or to the last user
  // before the insertion point of the memset, if there are any such users.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  // Keeps track of the last MemoryDef between StartInst and the insertion point
  // for the new memset. This will become the defining access of the inserted
  // memsets.
  MemoryDef *LastMemDef = nullptr;
  for (++BI; !BI->isTerminator(); ++BI) {
    auto *CurrentAcc = cast_or_null<MemoryUseOrDef>(
        MSSAU->getMemorySSA()->getMemoryAccess(&*BI));
    if (CurrentAcc) {
      MemInsertPoint = CurrentAcc;
      if (auto *CurrentDef = dyn_cast<MemoryDef>(CurrentAcc))
        LastMemDef = CurrentDef;
    }

    // Calls that only access inaccessible memory do not block merging
    // accessible stores.
    if (auto *CB = dyn_cast<CallBase>(BI)) {
      if (CB->onlyAccessesInaccessibleMemory())
        continue;
    }

    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      Value *StoredVal = NextStore->getValueOperand();

      // Don't convert stores of non-integral pointer types to memsets (which
      // stores integers).
      if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
        break;

      // Check to see if this stored value is of the same byte-splattable value.
      Value *StoredByte = isBytewiseValue(StoredVal, DL);
      if (isa<UndefValue>(ByteVal) && StoredByte)
        ByteVal = StoredByte;
      if (ByteVal != StoredByte)
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset =
          isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL);
      if (!Offset)
        break;

      Ranges.addStore(*Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), DL);
      if (!Offset)
        break;

      Ranges.addMemSet(*Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block.  This ensures that the memset is
  // dominated by any addressing instructions needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memsets for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {
    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start,
                                   MaybeAlign(Range.Alignment));
    LLVM_DEBUG(dbgs() << "Replace stores:\n";
               for (Instruction *SI : Range.TheStores)
                 dbgs() << *SI << '\n';
               dbgs() << "With: " << *AMemSet << '\n');
    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    assert(LastMemDef && MemInsertPoint &&
           "Both LastMemDef and MemInsertPoint need to be set");
    auto *NewDef =
        cast<MemoryDef>(MemInsertPoint->getMemoryInst() == &*BI
                            ? MSSAU->createMemoryAccessBefore(
                                  AMemSet, LastMemDef, MemInsertPoint)
                            : MSSAU->createMemoryAccessAfter(
                                  AMemSet, LastMemDef, MemInsertPoint));
    MSSAU->insertDef(NewDef, /*RenameUses=*/true);
    LastMemDef = NewDef;
    MemInsertPoint = NewDef;

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores)
      eraseInstruction(SI);

    ++NumMemSetInfer;
  }

  return AMemSet;
}

// This method tries to lift a store instruction before position P.
// It lifts the store, the instructions computing its operands, and anything
// in between that may alias the memory touched by the lifted instructions.
// The method returns true if it was successful.
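//
// For example (illustrative only): given
//   %v = load i32, i32* %src
//   store i32 1, i32* %other     ; P, may clobber %src
//   store i32 %v, i32* %dst      ; SI
// the final store (and, if needed, the instructions computing %dst) can be
// lifted above P, so the load/store pair becomes adjacent and can later be
// rewritten as a memcpy at P's position.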
bool MemCpyOptPass::moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (isModOrRefSet(AA->getModRefInfo(P, StoreLoc)))
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction *, 8> ToLift{SI};

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted calls.
  SmallVector<const CallBase *, 8> Calls;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    // Make sure hoisting does not perform a store that was not guaranteed to
    // happen.
    if (!isGuaranteedToTransferExecutionToSuccessor(C))
      return false;

    bool MayAlias = isModOrRefSet(AA->getModRefInfo(C, None));

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = llvm::any_of(MemLocs, [C, this](const MemoryLocation &ML) {
        return isModOrRefSet(AA->getModRefInfo(C, ML));
      });

      if (!NeedLift)
        NeedLift = llvm::any_of(Calls, [C, this](const CallBase *Call) {
          return isModOrRefSet(AA->getModRefInfo(C, Call));
        });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      // Since LI is implicitly moved downwards past the lifted instructions,
      // none of them may modify its source.
      if (isModSet(AA->getModRefInfo(C, LoadLoc)))
        return false;
      else if (const auto *Call = dyn_cast<CallBase>(C)) {
        // If we can't lift this before P, it's game over.
        if (isModOrRefSet(AA->getModRefInfo(P, Call)))
          return false;

        Calls.push_back(Call);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (isModOrRefSet(AA->getModRefInfo(P, ML)))
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) {
        if (A->getParent() == SI->getParent()) {
          // Cannot hoist user of P above P.
          if (A == P)
            return false;
          Args.insert(A);
        }
      }
  }

  // Find MSSA insertion point. Normally P will always have a corresponding
  // memory access before which we can insert. However, with non-standard AA
  // pipelines, there may be a mismatch between AA and MSSA, in which case we
  // will scan for a memory access before P. In either case, we know for sure
  // that at least the load will have a memory access.
  // TODO: Simplify this once P is determined by MSSA, in which case the
  // discrepancy can no longer occur.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(P)) {
    MemInsertPoint = cast<MemoryUseOrDef>(--MA->getIterator());
  } else {
    const Instruction *ConstP = P;
    for (const Instruction &I : make_range(++ConstP->getReverseIterator(),
                                           ++LI->getReverseIterator())) {
      if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
        MemInsertPoint = MA;
        break;
      }
    }
  }

  // We made it, we need to lift.
  for (auto *I : llvm::reverse(ToLift)) {
    LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
    assert(MemInsertPoint && "Must have found insert point");
    if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(I)) {
      MSSAU->moveAfter(MA, MemInsertPoint);
      MemInsertPoint = MA;
    }
  }

  return true;
}

bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach how to propagate the !nontemporal metadata to
  // memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  Value *StoredVal = SI->getValueOperand();

  // Not all the transforms below are correct for non-integral pointers, bail
  // until we've audited the individual pieces.
  if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
    return false;

  // Load to store forwarding can be interpreted as memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      // Don't introduce calls to memcpy/memmove intrinsics out of thin air if
      // the corresponding libcalls are not available.
      // TODO: We should really distinguish between libcall availability and
      // our ability to introduce intrinsics.
      if (T->isAggregateType() &&
          (EnableMemCpyOptWithoutLibcalls ||
           (TLI->has(LibFunc_memcpy) && TLI->has(LibFunc_memmove)))) {
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store. If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        // TODO: Can use MSSA for this.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (isModSet(AA->getModRefInfo(&I, LoadLoc))) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this and the store
        // destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(SI, P, LI))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics. If not, memcpy can
          // be used. Also, if we load from constant memory, memcpy can be used
          // as the constant memory won't be modified.
          bool UseMemMove = false;
          if (isModSet(AA->getModRefInfo(SI, LoadLoc)))
            UseMemMove = true;

          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);
          else
            M = Builder.CreateMemCpy(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);

          LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
                            << *M << "\n");

          auto *LastDef =
              cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
          auto *NewAccess = MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
          MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);

          eraseInstruction(SI);
          eraseInstruction(LI);
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      CallInst *C = nullptr;
      if (auto *LoadClobber = dyn_cast<MemoryUseOrDef>(
              MSSA->getWalker()->getClobberingMemoryAccess(LI))) {
        // The load must post-dominate the call. Limit to the same block for
        // now.
        // TODO: Support non-local call-slot optimization?
        if (LoadClobber->getBlock() == SI->getParent())
          C = dyn_cast_or_null<CallInst>(LoadClobber->getMemoryInst());
      }

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        if (accessedBetween(*AA, StoreLoc, MSSA->getMemoryAccess(C),
                            MSSA->getMemoryAccess(SI)))
          C = nullptr;
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            commonAlignment(SI->getAlign(), LI->getAlign()), C);
        if (changed) {
          eraseInstruction(SI);
          eraseInstruction(LI);
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // The following code creates memset intrinsics out of thin air. Don't do
  // this if the corresponding libfunc is not available.
  // TODO: We should really distinguish between libcall availability and
  // our ability to introduce intrinsics.
  if (!(TLI->has(LibFunc_memset) || EnableMemCpyOptWithoutLibcalls))
    return false;

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'd a
  // byte at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V, DL)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging as it can expose optimization opportunities
    // in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size,
                                     SI->getAlign());

      LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      // The newly inserted memset is immediately overwritten by the original
      // store, so we do not need to rename uses.
      auto *StoreDef = cast<MemoryDef>(MSSA->getMemoryAccess(SI));
      auto *NewAccess = MSSAU->createMemoryAccessBefore(
          M, StoreDef->getDefiningAccess(), StoreDef);
      MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/false);

      eraseInstruction(SI);
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}

/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
                                         Instruction *cpyStore, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         Align cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpyLoad->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpyLen),
                                          DL, C, DT))
    return false;

  // Make sure that nothing can observe cpyDest being written early. There are
  // a number of cases to consider:
  //  1. cpyDest cannot be accessed between C and cpyStore as a precondition of
  //     the transform.
  //  2. C itself may not access cpyDest (prior to the transform). This is
  //     checked further below.
  //  3. If cpyDest is accessible to the caller of this function (potentially
  //     captured and not based on an alloca), we need to ensure that we cannot
  //     unwind between C and cpyStore. This is checked here.
  //  4. If cpyDest is potentially captured, there may be accesses to it from
  //     another thread. In this case, we need to check that cpyStore is
  //     guaranteed to be executed if C is. As it is a non-atomic access, it
  //     renders accesses from other threads undefined.
  //     TODO: This is currently not checked.
  if (mayBeVisibleThroughUnwinding(cpyDest, C, cpyStore))
    return false;

  // Check that dest points to memory that is at least as aligned as src.
  Align srcAlign = srcAlloca->getAlign();
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User *, 8> srcUseList(srcAlloca->users());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      append_range(srcUseList, U->users());
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      append_range(srcUseList, U->users());
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->isLifetimeStartOrEnd())
        continue;

    if (U != C && U != cpyLoad)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned ArgI = 0, E = C->arg_size(); ArgI != E; ++ArgI)
    if (C->getArgOperand(ArgI) == cpySrc && !C->doesNotCapture(ArgI))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  if (!DT->dominates(cpyDest, C)) {
    // Support moving a constant index GEP before the call.
    auto *GEP = dyn_cast<GetElementPtrInst>(cpyDest);
    if (GEP && GEP->hasAllConstantIndices() &&
        DT->dominates(GEP->getPointerOperand(), C))
      GEP->moveBefore(C);
    else
      return false;
  }

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  ModRefInfo MR = AA->getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
  // If necessary, perform additional analysis.
  if (isModOrRefSet(MR))
    MR = AA->callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), DT);
  if (isModOrRefSet(MR))
    return false;

  // We can't create address space casts here because we don't know if they're
  // safe for the target.
  if (cpySrc->getType()->getPointerAddressSpace() !=
      cpyDest->getType()->getPointerAddressSpace())
    return false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc &&
        cpySrc->getType()->getPointerAddressSpace() !=
            C->getArgOperand(ArgI)->getType()->getPointerAddressSpace())
      return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType()
                        ? cpyDest
                        : CastInst::CreatePointerCast(
                              cpyDest, cpySrc->getType(), cpyDest->getName(), C);
      changedArgument = true;
      if (C->getArgOperand(ArgI)->getType() == Dest->getType())
        C->setArgOperand(ArgI, Dest);
      else
        C->setArgOperand(ArgI, CastInst::CreatePointerCast(
                                   Dest, C->getArgOperand(ArgI)->getType(),
                                   Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Update AA metadata.
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet.
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group,
                         LLVMContext::MD_access_group};
  combineMetadata(C, cpyLoad, KnownIDs, true);

  ++NumCallSlot;
  return true;
}

/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
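///
/// That is, given (illustrative):
/// \code
///   memcpy(b <- a)   ; MDep
///   memcpy(c <- b)   ; M
/// \endcode
/// rewrite M as memcpy(c <- a) (or a memmove when a and c may overlap), so
/// the intermediate copy through b becomes dead.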
bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                  MemCpyInst *MDep) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a
  // noop transfer and substituting the input won't change this instruction.
  // Just ignore the input and let someone else zap MDep.  This handles cases
  // like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the length of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
  if (MDep->getLength() != M->getLength()) {
    ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
    ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
    if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
      return false;
  }

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  // TODO: It would be sufficient to check the MDep source up to the memcpy
  // size of M, rather than MDep.
  if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep),
                     MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(M)))
    return false;

  // If the dest of the second memcpy might alias the source of the first,
  // then the source and dest might overlap, and we still want to eliminate
  // the intermediate value, but we have to generate a memmove instead of a
  // memcpy. If the source of the first points to constant memory, the two
  // won't overlap by definition, and a memcpy remains safe.
  bool UseMemMove = false;
  if (isModSet(AA->getModRefInfo(M, MemoryLocation::getForSource(MDep))))
    UseMemMove = true;

  // If all checks passed, then we can transform M.
  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
                    << *MDep << '\n' << *M << '\n');

  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  IRBuilder<> Builder(M);
  Instruction *NewM;
  if (UseMemMove)
    NewM = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(),
                                 MDep->getRawSource(), MDep->getSourceAlign(),
                                 M->getLength(), M->isVolatile());
  else if (isa<MemCpyInlineInst>(M)) {
    // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is
    // never allowed since that would allow the latter to be lowered as a call
    // to an external function.
    NewM = Builder.CreateMemCpyInline(
        M->getRawDest(), M->getDestAlign(), MDep->getRawSource(),
        MDep->getSourceAlign(), M->getLength(), M->isVolatile());
  } else
    NewM = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(),
                                MDep->getRawSource(), MDep->getSourceAlign(),
                                M->getLength(), M->isVolatile());

  assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)));
  auto *LastDef = cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
  auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
  MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);

  // Remove the instruction we're replacing.
  eraseInstruction(M);
  ++NumMemCpyInstr;
  return true;
}

/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet.  Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                                  MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (!AA->isMustAlias(MemSet->getDest(), MemCpy->getDest()))
    return false;

  // Check that src and dst of the memcpy aren't the same. While memcpy
  // operands cannot partially overlap, exact equality is allowed.
  if (isModSet(AA->getModRefInfo(MemCpy, MemoryLocation::getForSource(MemCpy))))
    return false;

  // We know that dst up to src_size is not written. We now need to make sure
  // that dst up to dst_size is not accessed. (If we did not move the memset,
  // checking for reads would be sufficient.)
  if (accessedBetween(*AA, MemoryLocation::getForDest(MemSet),
                      MSSA->getMemoryAccess(MemSet),
                      MSSA->getMemoryAccess(MemCpy)))
    return false;

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  if (mayBeVisibleThroughUnwinding(Dest, MemSet, MemCpy))
    return false;

  // If the sizes are the same, simply drop the memset instead of generating
  // a replacement with zero size.
  if (DestSize == SrcSize) {
    eraseInstruction(MemSet);
    return true;
  }

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned, and SrcSize is constant, use the minimum alignment
  // of the sum.
  const unsigned DestAlign =
      std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);
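  // For example, if DestAlign is 8 and SrcSize is 4, the new memset starts at
  // Dest + 4, which is only guaranteed to be 4-byte aligned:
  // MinAlign(4, 8) == 4. (Worked example of the computation above.)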

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
  Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
  Value *MemsetLen = Builder.CreateSelect(
      Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
  unsigned DestAS = Dest->getType()->getPointerAddressSpace();
  Instruction *NewMemSet = Builder.CreateMemSet(
      Builder.CreateGEP(Builder.getInt8Ty(),
                        Builder.CreatePointerCast(Dest,
                                                  Builder.getInt8PtrTy(DestAS)),
                        SrcSize),
      MemSet->getOperand(1), MemsetLen, MaybeAlign(Align));

  assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)) &&
         "MemCpy must be a MemoryDef");
  // The new memset is inserted after the memcpy, but it is known that its
  // defining access is the memset about to be removed which immediately
  // precedes the memcpy.
  auto *LastDef =
      cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
  auto *NewAccess = MSSAU->createMemoryAccessBefore(
      NewMemSet, LastDef->getDefiningAccess(), LastDef);
  MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);

  eraseInstruction(MemSet);
  return true;
}

/// Determine whether the instruction has undefined content for the given Size,
/// either because it was freshly alloca'd or started its lifetime.
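///
/// For example, a memcpy whose source is an alloca fully covered by a
/// preceding llvm.lifetime.start reads only undefined bytes, so callers of
/// this helper may elide or shorten the copy.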
static bool hasUndefContents(MemorySSA *MSSA, AliasAnalysis *AA, Value *V,
                             MemoryDef *Def, Value *Size) {
  if (MSSA->isLiveOnEntryDef(Def))
    return isa<AllocaInst>(getUnderlyingObject(V));

  if (IntrinsicInst *II =
          dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst())) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      ConstantInt *LTSize = cast<ConstantInt>(II->getArgOperand(0));

      if (ConstantInt *CSize = dyn_cast<ConstantInt>(Size)) {
        if (AA->isMustAlias(V, II->getArgOperand(1)) &&
            LTSize->getZExtValue() >= CSize->getZExtValue())
          return true;
      }

      // If the lifetime.start covers a whole alloca (as it almost always
      // does) and we're querying a pointer based on that alloca, then we know
      // the memory is definitely undef, regardless of how exactly we alias.
      // The size also doesn't matter, as an out-of-bounds access would be UB.
      AllocaInst *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(V));
      if (getUnderlyingObject(II->getArgOperand(1)) == Alloca) {
        const DataLayout &DL = Alloca->getModule()->getDataLayout();
        if (Optional<TypeSize> AllocaSize = Alloca->getAllocationSizeInBits(DL))
          if (*AllocaSize == LTSize->getValue() * 8)
            return true;
      }
    }
  }

  return false;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// When dst2_size <= dst1_size.
bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                               MemSetInst *MemSet) {
  // Make sure we have a memcpy(..., memset(...), ...); that is, we are
  // memsetting and then memcpying from the same address. Otherwise it is
  // hard to reason about.
  if (!AA->isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
    return false;

  Value *MemSetSize = MemSet->getLength();
  Value *CopySize = MemCpy->getLength();

  if (MemSetSize != CopySize) {
    // Make sure the memcpy doesn't read any more than what the memset wrote.
    // Don't worry about sizes larger than i64.

    // A known memset size is required.
    ConstantInt *CMemSetSize = dyn_cast<ConstantInt>(MemSetSize);
    if (!CMemSetSize)
      return false;

    // A known memcpy size is also required.
    ConstantInt *CCopySize = dyn_cast<ConstantInt>(CopySize);
    if (!CCopySize)
      return false;
    if (CCopySize->getZExtValue() > CMemSetSize->getZExtValue()) {
      // If the memcpy is larger than the memset, but the memory was undef prior
      // to the memset, we can just ignore the tail. Technically we're only
      // interested in the bytes from MemSetSize..CopySize here, but as we can't
      // easily represent this location, we use the full 0..CopySize range.
      MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
      bool CanReduceSize = false;
      MemoryUseOrDef *MemSetAccess = MSSA->getMemoryAccess(MemSet);
      MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
          MemSetAccess->getDefiningAccess(), MemCpyLoc);
      if (auto *MD = dyn_cast<MemoryDef>(Clobber))
        if (hasUndefContents(MSSA, AA, MemCpy->getSource(), MD, CopySize))
          CanReduceSize = true;

      if (!CanReduceSize)
        return false;
      CopySize = MemSetSize;
    }
  }

  IRBuilder<> Builder(MemCpy);
  Instruction *NewM =
      Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                           CopySize, MaybeAlign(MemCpy->getDestAlignment()));
  auto *LastDef =
      cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
  auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
  MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);

  return true;
}

/// Perform simplification of memcpys.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
  // We can only optimize non-volatile memcpys.
  if (M->isVolatile()) return false;
1303 
1304   // If the source and destination of the memcpy are the same, then zap it.
1305   if (M->getSource() == M->getDest()) {
1306     ++BBI;
1307     eraseInstruction(M);
1308     return true;
1309   }
1310 
1311   // If copying from a constant, try to turn the memcpy into a memset.
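  // E.g. (an illustrative sketch): if @g is a constant global whose
  // initializer is bytewise uniform, then
  //   memcpy(%dst, @g, %len)  -->  memset(%dst, <byte>, %len)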
1312   if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
1313     if (GV->isConstant() && GV->hasDefinitiveInitializer())
1314       if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
1315                                            M->getModule()->getDataLayout())) {
1316         IRBuilder<> Builder(M);
1317         Instruction *NewM =
1318             Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
1319                                  MaybeAlign(M->getDestAlignment()), false);
1320         auto *LastDef =
1321             cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
1322         auto *NewAccess =
1323             MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
1324         MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
1325 
1326         eraseInstruction(M);
1327         ++NumCpyToSet;
1328         return true;
1329       }
1330 
1331   MemoryUseOrDef *MA = MSSA->getMemoryAccess(M);
1332   MemoryAccess *AnyClobber = MSSA->getWalker()->getClobberingMemoryAccess(MA);
1333   MemoryLocation DestLoc = MemoryLocation::getForDest(M);
1334   const MemoryAccess *DestClobber =
1335       MSSA->getWalker()->getClobberingMemoryAccess(AnyClobber, DestLoc);
1336 
1337   // Try to turn a partially redundant memset + memcpy into
1338   // memcpy + smaller memset.  We don't need the memcpy size for this.
  // The memcpy must post-dominate the memset, so limit this to the same basic
  // block. A non-local generalization is likely not worthwhile.
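  //
  // Sketch of the rewrite (pseudocode, assuming the copy overlaps a prefix
  // of the memset'd region):
  //   memset(%dst, c, dst_size)
  //   memcpy(%dst, %src, src_size)
  // ->
  //   memcpy(%dst, %src, src_size)
  //   memset(%dst + src_size, c, dst_size - src_size)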
1341   if (auto *MD = dyn_cast<MemoryDef>(DestClobber))
1342     if (auto *MDep = dyn_cast_or_null<MemSetInst>(MD->getMemoryInst()))
1343       if (DestClobber->getBlock() == M->getParent())
1344         if (processMemSetMemCpyDependence(M, MDep))
1345           return true;
1346 
1347   MemoryAccess *SrcClobber = MSSA->getWalker()->getClobberingMemoryAccess(
1348       AnyClobber, MemoryLocation::getForSource(M));
1349 
1350   // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
1352   //   b) call-memcpy xform for return slot optimization.
1353   //   c) memcpy from freshly alloca'd space or space that has just started
1354   //      its lifetime copies undefined data, and we can therefore eliminate
1355   //      the memcpy in favor of the data that was already at the destination.
1356   //   d) memcpy from a just-memset'd source can be turned into memset.
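  //
  // As an illustrative sketch of (b), where a call writes a temporary that is
  // then copied into the real destination:
  //   call void @f(ptr %tmp)      ; f writes to %tmp
  //   memcpy(%dst, %tmp, size)
  // ->
  //   call void @f(ptr %dst)      ; f writes the destination directly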
1357   if (auto *MD = dyn_cast<MemoryDef>(SrcClobber)) {
1358     if (Instruction *MI = MD->getMemoryInst()) {
1359       if (ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength())) {
1360         if (auto *C = dyn_cast<CallInst>(MI)) {
          // The memcpy must post-dominate the call. Limit to the same block
1362           // now. Additionally, we need to ensure that there are no accesses
1363           // to dest between the call and the memcpy. Accesses to src will be
1364           // checked by performCallSlotOptzn().
1365           // TODO: Support non-local call-slot optimization?
1366           if (C->getParent() == M->getParent() &&
1367               !accessedBetween(*AA, DestLoc, MD, MA)) {
1368             // FIXME: Can we pass in either of dest/src alignment here instead
1369             // of conservatively taking the minimum?
1370             Align Alignment = std::min(M->getDestAlign().valueOrOne(),
1371                                        M->getSourceAlign().valueOrOne());
1372             if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
1373                                      CopySize->getZExtValue(), Alignment, C)) {
1374               LLVM_DEBUG(dbgs() << "Performed call slot optimization:\n"
1375                                 << "    call: " << *C << "\n"
1376                                 << "    memcpy: " << *M << "\n");
1377               eraseInstruction(M);
1378               ++NumMemCpyInstr;
1379               return true;
1380             }
1381           }
1382         }
1383       }
1384       if (auto *MDep = dyn_cast<MemCpyInst>(MI))
1385         return processMemCpyMemCpyDependence(M, MDep);
1386       if (auto *MDep = dyn_cast<MemSetInst>(MI)) {
1387         if (performMemCpyToMemSetOptzn(M, MDep)) {
1388           LLVM_DEBUG(dbgs() << "Converted memcpy to memset\n");
1389           eraseInstruction(M);
1390           ++NumCpyToSet;
1391           return true;
1392         }
1393       }
1394     }
1395 
1396     if (hasUndefContents(MSSA, AA, M->getSource(), MD, M->getLength())) {
1397       LLVM_DEBUG(dbgs() << "Removed memcpy from undef\n");
1398       eraseInstruction(M);
1399       ++NumMemCpyInstr;
1400       return true;
1401     }
1402   }
1403 
1404   return false;
1405 }
1406 
1407 /// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
1408 /// not to alias.
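///
/// A minimal sketch (pseudocode; in practice the no-alias fact is established
/// by alias analysis rather than spelled out in the IR):
/// \code
///   memmove(dst, src, len);  // AA proves the memmove cannot modify src
/// \endcode
/// becomes:
/// \code
///   memcpy(dst, src, len);
/// \endcode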
1409 bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
  // See if the source could potentially be modified by this memmove.
1411   if (isModSet(AA->getModRefInfo(M, MemoryLocation::getForSource(M))))
1412     return false;
1413 
1414   LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
1415                     << "\n");
1416 
1417   // If not, then we know we can transform this.
1418   Type *ArgTys[3] = { M->getRawDest()->getType(),
1419                       M->getRawSource()->getType(),
1420                       M->getLength()->getType() };
1421   M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
1422                                                  Intrinsic::memcpy, ArgTys));
1423 
1424   // For MemorySSA nothing really changes (except that memcpy may imply stricter
1425   // aliasing guarantees).
1426 
1427   ++NumMoveToCpy;
1428   return true;
1429 }
1430 
1431 /// This is called on every byval argument in call sites.
1432 bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
1433   const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout();
1434   // Find out what feeds this byval argument.
1435   Value *ByValArg = CB.getArgOperand(ArgNo);
1436   Type *ByValTy = CB.getParamByValType(ArgNo);
1437   uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
1438   MemoryLocation Loc(ByValArg, LocationSize::precise(ByValSize));
1439   MemoryUseOrDef *CallAccess = MSSA->getMemoryAccess(&CB);
1440   if (!CallAccess)
1441     return false;
1442   MemCpyInst *MDep = nullptr;
1443   MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
1444       CallAccess->getDefiningAccess(), Loc);
1445   if (auto *MD = dyn_cast<MemoryDef>(Clobber))
1446     MDep = dyn_cast_or_null<MemCpyInst>(MD->getMemoryInst());
1447 
  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can pass the source of the memcpy as the byval
  // argument instead of the copy.
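  //
  // Sketch of the transformation (illustrative names):
  //   memcpy(%tmp, %src, size)
  //   call void @f(ptr byval(%T) %tmp)
  // ->
  //   call void @f(ptr byval(%T) %src)   ; the copy to %tmp may become dead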
1451   if (!MDep || MDep->isVolatile() ||
1452       ByValArg->stripPointerCasts() != MDep->getDest())
1453     return false;
1454 
  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
1456   ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
1457   if (!C1 || C1->getValue().getZExtValue() < ByValSize)
1458     return false;
1459 
  // Get the alignment of the byval.  If the call doesn't specify the alignment,
  // then it is some target-specific value that we can't know.
1462   MaybeAlign ByValAlign = CB.getParamAlign(ArgNo);
1463   if (!ByValAlign) return false;
1464 
  // If the byval's alignment is greater than the memcpy's source alignment,
  // check whether we can force the source of the memcpy to the alignment we
  // need.  If we fail, we bail out.
1467   MaybeAlign MemDepAlign = MDep->getSourceAlign();
1468   if ((!MemDepAlign || *MemDepAlign < *ByValAlign) &&
1469       getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, AC,
1470                                  DT) < *ByValAlign)
1471     return false;
1472 
  // The address space of the memcpy source must match that of the byval
  // argument.
1474   if (MDep->getSource()->getType()->getPointerAddressSpace() !=
1475       ByValArg->getType()->getPointerAddressSpace())
1476     return false;
1477 
1478   // Verify that the copied-from memory doesn't change in between the memcpy and
1479   // the byval call.
1480   //    memcpy(a <- b)
1481   //    *b = 42;
1482   //    foo(*a)
1483   // It would be invalid to transform the second memcpy into foo(*b).
1484   if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep),
1485                      MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(&CB)))
1486     return false;
1487 
1488   Value *TmpCast = MDep->getSource();
1489   if (MDep->getSource()->getType() != ByValArg->getType()) {
    BitCastInst *TmpBitCast = new BitCastInst(
        MDep->getSource(), ByValArg->getType(), "tmpcast", &CB);
1492     // Set the tmpcast's DebugLoc to MDep's
1493     TmpBitCast->setDebugLoc(MDep->getDebugLoc());
1494     TmpCast = TmpBitCast;
1495   }
1496 
1497   LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
1498                     << "  " << *MDep << "\n"
1499                     << "  " << CB << "\n");
1500 
1501   // Otherwise we're good!  Update the byval argument.
1502   CB.setArgOperand(ArgNo, TmpCast);
1503   ++NumMemCpyInstr;
1504   return true;
1505 }
1506 
1507 /// Executes one iteration of MemCpyOptPass.
1508 bool MemCpyOptPass::iterateOnFunction(Function &F) {
1509   bool MadeChange = false;
1510 
  // Walk all instructions in the function.
1512   for (BasicBlock &BB : F) {
1513     // Skip unreachable blocks. For example processStore assumes that an
1514     // instruction in a BB can't be dominated by a later instruction in the
1515     // same BB (which is a scenario that can happen for an unreachable BB that
1516     // has itself as a predecessor).
1517     if (!DT->isReachableFromEntry(&BB))
1518       continue;
1519 
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;
1523 
1524       bool RepeatInstruction = false;
1525 
1526       if (StoreInst *SI = dyn_cast<StoreInst>(I))
1527         MadeChange |= processStore(SI, BI);
1528       else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
1529         RepeatInstruction = processMemSet(M, BI);
1530       else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
1531         RepeatInstruction = processMemCpy(M, BI);
1532       else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
1533         RepeatInstruction = processMemMove(M);
1534       else if (auto *CB = dyn_cast<CallBase>(I)) {
1535         for (unsigned i = 0, e = CB->arg_size(); i != e; ++i)
1536           if (CB->isByValArgument(i))
1537             MadeChange |= processByValArgument(*CB, i);
1538       }
1539 
      // Reprocess the instruction if desired: step the iterator back so that
      // the next loop iteration revisits this position, which may now hold a
      // replacement instruction.
1541       if (RepeatInstruction) {
1542         if (BI != BB.begin())
1543           --BI;
1544         MadeChange = true;
1545       }
1546     }
1547   }
1548 
1549   return MadeChange;
1550 }
1551 
1552 PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
1553   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1554   auto *AA = &AM.getResult<AAManager>(F);
1555   auto *AC = &AM.getResult<AssumptionAnalysis>(F);
1556   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1557   auto *MSSA = &AM.getResult<MemorySSAAnalysis>(F);
1558 
1559   bool MadeChange = runImpl(F, &TLI, AA, AC, DT, &MSSA->getMSSA());
1560   if (!MadeChange)
1561     return PreservedAnalyses::all();
1562 
1563   PreservedAnalyses PA;
1564   PA.preserveSet<CFGAnalyses>();
1565   PA.preserve<MemorySSAAnalysis>();
1566   return PA;
1567 }
1568 
1569 bool MemCpyOptPass::runImpl(Function &F, TargetLibraryInfo *TLI_,
1570                             AliasAnalysis *AA_, AssumptionCache *AC_,
1571                             DominatorTree *DT_, MemorySSA *MSSA_) {
1572   bool MadeChange = false;
1573   TLI = TLI_;
1574   AA = AA_;
1575   AC = AC_;
1576   DT = DT_;
1577   MSSA = MSSA_;
1578   MemorySSAUpdater MSSAU_(MSSA_);
1579   MSSAU = &MSSAU_;
1580 
1581   while (true) {
1582     if (!iterateOnFunction(F))
1583       break;
1584     MadeChange = true;
1585   }
1586 
1587   if (VerifyMemorySSA)
1588     MSSA_->verifyMemorySSA();
1589 
1590   return MadeChange;
1591 }
1592 
1593 /// This is the main transformation entry point for a function.
1594 bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
1595   if (skipFunction(F))
1596     return false;
1597 
1598   auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1599   auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1600   auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1601   auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1602   auto *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
1603 
1604   return Impl.runImpl(F, TLI, AA, AC, DT, MSSA);
1605 }
1606