//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");
STATISTIC(NumCallSlot,    "Number of call slot optimizations performed");

namespace {

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
struct MemsetRange {
  // Start/End - A half-open range that describes the span this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found at least 4 stores to merge, or at least 16 bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size.  Given that, check to see whether we will end up actually reducing
  // the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes, if any, are done a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
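  //
  // As a worked instance of the heuristic (illustrative numbers only): with
  // MaxIntSize == 4, an i16 store plus two i8 stores covering 4 contiguous
  // bytes give NumPointerStores == 1 and NumByteStores == 0, so 3 stores > 1
  // and the merge is considered profitable.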
  return TheStores.size() > NumPointerStores+NumByteStores;
}

namespace {

class MemsetRanges {
  using range_iterator = SmallVectorImpl<MemsetRange>::iterator;

  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;

  const DataLayout &DL;

public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;

  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize, SI->getPointerOperand(),
             SI->getAlign().value(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(),
             MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anonymous namespace

/// Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
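/// For example (offsets illustrative), adding a 4-byte store at offset 4 to
/// the ranges {[0, 4), [10, 12)} extends the first range to [0, 8); a later
/// store covering [8, 10) would then merge everything into a single
/// [0, 12) range.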
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = partition_point(
      Ranges, [=](const MemsetRange &O) { return O.End < Start; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = Ptr;
    R.Alignment    = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, there is nothing more to do; the store was already added to the
  // interval above.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly cause the range to join the prior range, because
  // otherwise the scan would have stopped on that one.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {

class MemCpyOptLegacyPass : public FunctionPass {
  MemCpyOptPass Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  MemCpyOptLegacyPass() : FunctionPass(ID) {
    initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
  }
};

} // end anonymous namespace

char MemCpyOptLegacyPass::ID = 0;

/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)

void MemCpyOptPass::eraseInstruction(Instruction *I) {
  if (MSSAU)
    MSSAU->removeMemoryAccess(I);
  MD->removeInstruction(I);
  I->eraseFromParent();
}

/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
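///
/// For example (a sketch in pseudo-IR; the pointers stand for addresses at
/// consecutive byte offsets):
/// \code
///   store i8 0, i8* %p      ; offset 0
///   store i8 0, i8* %p1     ; offset 1
///   store i8 0, i8* %p2     ; offset 2
///   store i8 0, i8* %p3     ; offset 3
/// \endcode
/// may be merged into a single memset(%p, 0, 4).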
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store of a splattable value.  Scan to find
  // all subsequent stores of the same value at constant offsets from the same
  // pointer.  Join these together into ranges, so we can decide whether
  // contiguous blocks are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);

  // Keeps track of the last memory use or def before the insertion point for
  // the new memset. The new MemoryDef for the inserted memsets will be inserted
  // after MemInsertPoint. It points to either LastMemDef or to the last user
  // before the insertion point of the memset, if there are any such users.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  // Keeps track of the last MemoryDef between StartInst and the insertion point
  // for the new memset. This will become the defining access of the inserted
  // memsets.
  MemoryDef *LastMemDef = nullptr;
  for (++BI; !BI->isTerminator(); ++BI) {
    if (MSSAU) {
      auto *CurrentAcc = cast_or_null<MemoryUseOrDef>(
          MSSAU->getMemorySSA()->getMemoryAccess(&*BI));
      if (CurrentAcc) {
        MemInsertPoint = CurrentAcc;
        if (auto *CurrentDef = dyn_cast<MemoryDef>(CurrentAcc))
          LastMemDef = CurrentDef;
      }
    }

    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      Value *StoredVal = NextStore->getValueOperand();

      // Don't convert stores of non-integral pointer types to memsets (which
      // stores integers).
      if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
        break;

      // Check to see if this stored value is of the same byte-splattable value.
      Value *StoredByte = isBytewiseValue(StoredVal, DL);
      if (isa<UndefValue>(ByteVal) && StoredByte)
        ByteVal = StoredByte;
      if (ByteVal != StoredByte)
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset =
          isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL);
      if (!Offset)
        break;

      Ranges.addStore(*Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), DL);
      if (!Offset)
        break;

      Ranges.addMemSet(*Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  As a small compile-time optimization, we avoid adding it
  // until we know there is something interesting to merge with.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block.  This ensures that each memset is
  // dominated by any addressing instructions needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {
    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start,
                                   MaybeAlign(Range.Alignment));
    LLVM_DEBUG(dbgs() << "Replace stores:\n"; for (Instruction *SI
                                                   : Range.TheStores) dbgs()
                                              << *SI << '\n';
               dbgs() << "With: " << *AMemSet << '\n');
    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    if (MSSAU) {
      assert(LastMemDef && MemInsertPoint &&
             "Both LastMemDef and MemInsertPoint need to be set");
      auto *NewDef =
          cast<MemoryDef>(MemInsertPoint->getMemoryInst() == &*BI
                              ? MSSAU->createMemoryAccessBefore(
                                    AMemSet, LastMemDef, MemInsertPoint)
                              : MSSAU->createMemoryAccessAfter(
                                    AMemSet, LastMemDef, MemInsertPoint));
      MSSAU->insertDef(NewDef, /*RenameUses=*/true);
      LastMemDef = NewDef;
      MemInsertPoint = NewDef;
    }

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores)
      eraseInstruction(SI);

    ++NumMemSetInfer;
  }

  return AMemSet;
}

// This method tries to lift a store instruction before position P.
// It will lift the store and its operands, plus anything else that may
// alias with these.
// The method returns true if it was successful.
bool MemCpyOptPass::moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (isModOrRefSet(AA->getModRefInfo(P, StoreLoc)))
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction*, 8> ToLift;

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted calls.
  SmallVector<const CallBase *, 8> Calls;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    bool MayAlias = isModOrRefSet(AA->getModRefInfo(C, None));

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = llvm::any_of(MemLocs, [C, this](const MemoryLocation &ML) {
        return isModOrRefSet(AA->getModRefInfo(C, ML));
      });

      if (!NeedLift)
        NeedLift = llvm::any_of(Calls, [C, this](const CallBase *Call) {
          return isModOrRefSet(AA->getModRefInfo(C, Call));
        });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      // Since LI is implicitly moved downwards past the lifted instructions,
      // none of them may modify its source.
      if (isModSet(AA->getModRefInfo(C, LoadLoc)))
        return false;
      else if (const auto *Call = dyn_cast<CallBase>(C)) {
        // If we can't lift this before P, it's game over.
        if (isModOrRefSet(AA->getModRefInfo(P, Call)))
          return false;

        Calls.push_back(Call);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (isModOrRefSet(AA->getModRefInfo(P, ML)))
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) {
        if (A->getParent() == SI->getParent()) {
          // Cannot hoist user of P above P
          if (A == P) return false;
          Args.insert(A);
        }
      }
  }

  // We made it; now lift the instructions we collected.
  for (auto *I : llvm::reverse(ToLift)) {
    LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
  }

  return true;
}

bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach how to propagate the !nontemporal metadata to
  // memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  Value *StoredVal = SI->getValueOperand();

  // Not all the transforms below are correct for non-integral pointers, bail
  // until we've audited the individual pieces.
  if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
    return false;

  // Load to store forwarding can be interpreted as memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      if (T->isAggregateType()) {
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store. If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (isModSet(AA->getModRefInfo(&I, LoadLoc))) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this and the
        // store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(SI, P, LI))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics.  If not, memcpy can
          // be used.
          bool UseMemMove = false;
          if (!AA->isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);
          else
            M = Builder.CreateMemCpy(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);

          LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
                            << *M << "\n");

          if (MSSAU) {
            assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(P)));
            auto *LastDef =
                cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(P));
            auto *NewAccess =
                MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
            MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
          }

          eraseInstruction(SI);
          eraseInstruction(LI);
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
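      //
      // For example (a sketch; the sret temporary and names are illustrative):
      //   call void @foo(%struct.S* sret %tmp)
      //   %v = load %struct.S, %struct.S* %tmp
      //   store %struct.S %v, %struct.S* %dest
      // may let @foo write its result directly into %dest.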
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
             I != E; --I) {
          if (isModOrRefSet(AA->getModRefInfo(&*I, StoreLoc))) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            commonAlignment(SI->getAlign(), LI->getAlign()), C);
        if (changed) {
          eraseInstruction(SI);
          eraseInstruction(LI);
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored can be built by memset'ing one byte at
  // a time: values like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V, DL)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging as it can expose optimization opportunities
    // in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size,
                                     SI->getAlign());

      LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      if (MSSAU) {
        assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI)));
        auto *LastDef =
            cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
        auto *NewAccess = MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
        MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
      }

      eraseInstruction(SI);
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}

/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
                                         Instruction *cpyStore, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         Align cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpyLoad->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpyLen),
                                          DL, C, DT))
    return false;

  // Make sure that nothing can observe cpyDest being written early. There are
  // a number of cases to consider:
  //  1. cpyDest cannot be accessed between C and cpyStore as a precondition of
  //     the transform.
  //  2. C itself may not access cpyDest (prior to the transform). This is
  //     checked further below.
  //  3. If cpyDest is accessible to the caller of this function (potentially
  //     captured and not based on an alloca), we need to ensure that we cannot
  //     unwind between C and cpyStore. This is checked here.
  //  4. If cpyDest is potentially captured, there may be accesses to it from
  //     another thread. In this case, we need to check that cpyStore is
  //     guaranteed to be executed if C is. As it is a non-atomic access, it
  //     renders accesses from other threads undefined.
  //     TODO: This is currently not checked.
  // TODO: Check underlying object, so we can look through GEPs.
  if (!isa<AllocaInst>(cpyDest)) {
    assert(C->getParent() == cpyStore->getParent() &&
           "call and copy must be in the same block");
    for (const Instruction &I : make_range(C->getIterator(),
                                           cpyStore->getIterator())) {
      if (I.mayThrow())
        return false;
    }
  }

  // Check that dest points to memory that is at least as aligned as src.
  Align srcAlign = srcAlloca->getAlign();
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->isLifetimeStartOrEnd())
        continue;

    if (U != C && U != cpyLoad)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned ArgI = 0, E = C->arg_size(); ArgI != E; ++ArgI)
    if (C->getArgOperand(ArgI) == cpySrc && !C->doesNotCapture(ArgI))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  // TODO: Support moving instructions like GEPs upwards.
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT->dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  ModRefInfo MR = AA->getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
  // If necessary, perform additional analysis.
  if (isModOrRefSet(MR))
    MR = AA->callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), DT);
  if (isModOrRefSet(MR))
    return false;

  // We can't create address space casts here because we don't know if they're
  // safe for the target.
  if (cpySrc->getType()->getPointerAddressSpace() !=
      cpyDest->getType()->getPointerAddressSpace())
    return false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc &&
        cpySrc->getType()->getPointerAddressSpace() !=
            C->getArgOperand(ArgI)->getType()->getPointerAddressSpace())
      return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ?  cpyDest
        : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                      cpyDest->getName(), C);
      changedArgument = true;
      if (C->getArgOperand(ArgI)->getType() == Dest->getType())
        C->setArgOperand(ArgI, Dest);
      else
        C->setArgOperand(ArgI, CastInst::CreatePointerCast(
                                   Dest, C->getArgOperand(ArgI)->getType(),
                                   Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata.
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet.
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group,
                         LLVMContext::MD_access_group};
  combineMetadata(C, cpyLoad, KnownIDs, true);

  ++NumCallSlot;
  return true;
}

/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
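///
/// In pseudocode (a sketch):
/// \code
///   memcpy(b <- a)   // MDep
///   memcpy(c <- b)   // M; may become memcpy(c <- a), or memmove if
///                    // c and a might overlap
/// \endcode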
bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                  MemCpyInst *MDep) {
  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a
  // noop transfer and substituting the input won't change this instruction.
  // Just ignore the input and let someone else zap MDep.  This handles cases
  // like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // The lengths of the two memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
      MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                   M->getIterator(), M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA->isNoAlias(MemoryLocation::getForDest(M),
                     MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.
  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
                    << *MDep << '\n' << *M << '\n');

  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  IRBuilder<> Builder(M);
  Instruction *NewM;
  if (UseMemMove)
    NewM = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(),
                                 MDep->getRawSource(), MDep->getSourceAlign(),
                                 M->getLength(), M->isVolatile());
  else
    NewM = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(),
                                MDep->getRawSource(), MDep->getSourceAlign(),
                                M->getLength(), M->isVolatile());

  if (MSSAU) {
    assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)));
    auto *LastDef = cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
    auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
    MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
  }

  // Remove the instruction we're replacing.
  eraseInstruction(M);
  ++NumMemCpyInstr;
  return true;
}

/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet.  Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
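///
/// For instance, with dst_size == 16 and src_size == 8 (sizes illustrative),
/// the result is:
/// \code
///   memcpy(dst, src, 8);
///   memset(dst + 8, c, 8);
/// \endcode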
bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                                  MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (MemSet->getDest() != MemCpy->getDest())
    return false;

  // Check that there are no other dependencies on the memset destination.
  MemDepResult DstDepInfo =
      MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
                                   MemCpy->getIterator(), MemCpy->getParent());
  if (DstDepInfo.getInst() != MemSet)
    return false;

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned, and SrcSize is constant, use the minimum alignment
  // of the sum.
  const unsigned DestAlign =
      std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
  Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
  Value *MemsetLen = Builder.CreateSelect(
      Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
  Instruction *NewMemSet = Builder.CreateMemSet(
      Builder.CreateGEP(Dest->getType()->getPointerElementType(), Dest,
                        SrcSize),
      MemSet->getOperand(1), MemsetLen, MaybeAlign(Align));

  if (MSSAU) {
    assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)) &&
           "MemCpy must be a MemoryDef");
    // The new memset is inserted after the memcpy, but it is known that its
    // defining access is the memset about to be removed which immediately
    // precedes the memcpy.
    auto *LastDef =
        cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
    auto *NewAccess = MSSAU->createMemoryAccessBefore(
        NewMemSet, LastDef->getDefiningAccess(), LastDef);
    MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
  }

  eraseInstruction(MemSet);
  return true;
}

/// Determine whether the instruction has undefined content for the given Size,
/// either because it was freshly alloca'd or started its lifetime.
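///
/// For example (a sketch):
/// \code
///   %buf = alloca [16 x i8]                              ; contents undef
///   call void @llvm.lifetime.start.p0i8(i64 16, i8* %p)  ; undef for 16 bytes
/// \endcode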
static bool hasUndefContents(Instruction *I, ConstantInt *Size) {
  if (isa<AllocaInst>(I))
    return true;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
      if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
        if (LTSize->getZExtValue() >= Size->getZExtValue())
          return true;

  return false;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// When dst2_size <= dst1_size.
///
/// The \p MemCpy must have a Constant length.
bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                               MemSetInst *MemSet) {
  // Make sure we have memcpy(..., memset(...), ...); that is, that we are
  // memsetting and then memcpying from the same address.  Otherwise the
  // situation is hard to reason about.
  if (!AA->isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
    return false;

  // A known memset size is required.
  ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
  if (!MemSetSize)
    return false;

  // Make sure the memcpy doesn't read any more than what the memset wrote.
  // Don't worry about sizes larger than i64.
  ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
  if (CopySize->getZExtValue() > MemSetSize->getZExtValue()) {
    // If the memcpy is larger than the memset, but the memory was undef prior
    // to the memset, we can just ignore the tail. Technically we're only
    // interested in the bytes from MemSetSize..CopySize here, but as we can't
    // easily represent this location, we use the full 0..CopySize range.
    MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
    MemDepResult DepInfo = MD->getPointerDependencyFrom(
        MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent());
    if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize))
      CopySize = MemSetSize;
    else
      return false;
  }

  IRBuilder<> Builder(MemCpy);
  Instruction *NewM =
      Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                           CopySize, MaybeAlign(MemCpy->getDestAlignment()));
  if (MSSAU) {
    auto *LastDef =
        cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
    auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
    MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
  }

  return true;
}

/// Perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
  // We can only optimize non-volatile memcpy's.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    ++BBI;
    eraseInstruction(M);
    return true;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
                                           M->getModule()->getDataLayout())) {
        IRBuilder<> Builder(M);
        Instruction *NewM =
            Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                                 MaybeAlign(M->getDestAlignment()), false);
        if (MSSAU) {
          auto *LastDef =
              cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
          auto *NewAccess =
              MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
          MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
        }

        eraseInstruction(M);
        ++NumCpyToSet;
        return true;
      }

  MemDepResult DepInfo = MD->getDependency(M);

  // Try to turn a partially redundant memset + memcpy into
  // memcpy + smaller memset.  We don't need the memcpy size for this.
  if (DepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
      if (processMemSetMemCpyDependence(M, MDep))
        return true;

  // The optimizations after this point require the memcpy size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (!CopySize) return false;

  // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started its
  //      lifetime copies undefined data, and we can therefore eliminate the
  //      memcpy in favor of the data that was already at the destination.
  //   d) memcpy from a just-memset'd source can be turned into memset.
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      // FIXME: Can we pass in either of dest/src alignment here instead
      // of conservatively taking the minimum?
      Align Alignment = std::min(M->getDestAlign().valueOrOne(),
                                 M->getSourceAlign().valueOrOne());
      if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), Alignment, C)) {
        eraseInstruction(M);
        ++NumMemCpyInstr;
        return true;
      }
    }
  }

  MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
      SrcLoc, true, M->getIterator(), M->getParent());

  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep);
  } else if (SrcDepInfo.isDef()) {
    if (hasUndefContents(SrcDepInfo.getInst(), CopySize)) {
      eraseInstruction(M);
      ++NumMemCpyInstr;
      return true;
    }
  }

  if (SrcDepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
      if (performMemCpyToMemSetOptzn(M, MDep)) {
        eraseInstruction(M);
        ++NumCpyToSet;
        return true;
      }

  return false;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
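///
/// For example (a sketch):
/// \code
///   memmove(dst, src, len)   ; AA proves dst and src are NoAlias
/// \endcode
/// becomes memcpy(dst, src, len).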
bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
  if (!TLI->has(LibFunc_memmove))
    return false;

  // See if the pointers alias.
  if (!AA->isNoAlias(MemoryLocation::getForDest(M),
                     MemoryLocation::getForSource(M)))
    return false;

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
                    << "\n");

  // If not, then we know we can transform this.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));

  // For MemorySSA nothing really changes (except that memcpy may imply stricter
  // aliasing guarantees).

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// This is called on every byval argument in call sites.
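/// If the byval temporary is only initialized by a memcpy, the call can often
/// read from the memcpy's source directly.  For example (a sketch):
/// \code
///   memcpy(tmp <- src, size)
///   call @foo(byval tmp)   ; may become call @foo(byval src)
/// \endcode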
bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
  const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CB.getArgOperand(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemDepResult DepInfo = MD->getPointerDependencyFrom(
      MemoryLocation(ByValArg, LocationSize::precise(ByValSize)), true,
      CB.getIterator(), CB.getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can pass the source of the memcpy as the byval
  // argument instead of the result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger or equal to the size of the byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  MaybeAlign ByValAlign = CB.getParamAlign(ArgNo);
  if (!ByValAlign) return false;

  // If it is greater than the memcpy, then we check to see if we can force the
  // source of the memcpy to the alignment we need.  If we fail, we bail out.
  MaybeAlign MemDepAlign = MDep->getSourceAlign();
  if ((!MemDepAlign || *MemDepAlign < *ByValAlign) &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, AC,
                                 DT) < *ByValAlign)
    return false;

  // The address space of the memcpy source must match the byval argument.
  if (MDep->getSource()->getType()->getPointerAddressSpace() !=
      ByValArg->getType()->getPointerAddressSpace())
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the call into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep = MD->getPointerDependencyFrom(
      MemoryLocation::getForSource(MDep), false,
      CB.getIterator(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType()) {
    BitCastInst *TmpBitCast =
        new BitCastInst(MDep->getSource(), ByValArg->getType(), "tmpcast", &CB);
    // Set the tmpcast's DebugLoc to MDep's.
    TmpBitCast->setDebugLoc(MDep->getDebugLoc());
    TmpCast = TmpBitCast;
  }

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
                    << "  " << *MDep << "\n"
                    << "  " << CB << "\n");

  // Otherwise we're good!  Update the byval argument.
  CB.setArgOperand(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// Executes one iteration of MemCpyOptPass.
bool MemCpyOptPass::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (BasicBlock &BB : F) {
    // Skip unreachable blocks. For example processStore assumes that an
    // instruction in a BB can't be dominated by a later instruction in the
    // same BB (which is a scenario that can happen for an unreachable BB that
    // has itself as a predecessor).
    if (!DT->isReachableFromEntry(&BB))
      continue;

    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M, BI);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto *CB = dyn_cast<CallBase>(I)) {
        for (unsigned i = 0, e = CB->arg_size(); i != e; ++i)
          if (CB->isByValArgument(i))
            MadeChange |= processByValArgument(*CB, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB.begin())
          --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto *AA = &AM.getResult<AAManager>(F);
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *MSSA = AM.getCachedResult<MemorySSAAnalysis>(F);

  bool MadeChange =
      runImpl(F, &MD, &TLI, AA, AC, DT, MSSA ? &MSSA->getMSSA() : nullptr);
  if (!MadeChange)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  if (MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

bool MemCpyOptPass::runImpl(Function &F, MemoryDependenceResults *MD_,
                            TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
                            AssumptionCache *AC_, DominatorTree *DT_,
                            MemorySSA *MSSA_) {
  bool MadeChange = false;
  MD = MD_;
  TLI = TLI_;
  AA = AA_;
  AC = AC_;
  DT = DT_;
  MemorySSAUpdater MSSAU_(MSSA_);
  MSSAU = MSSA_ ? &MSSAU_ : nullptr;
  // If we don't have at least memset and memcpy, there is little point in
  // doing anything here.  These are required by a freestanding implementation,
  // so if even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
    return false;

  while (true) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  if (MSSA_ && VerifyMemorySSA)
    MSSA_->verifyMemorySSA();

  MD = nullptr;
  return MadeChange;
}

/// This is the main transformation entry point for a function.
bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();

  return Impl.runImpl(F, MD, TLI, AA, AC, DT,
                      MSSAWP ? &MSSAWP->getMSSA() : nullptr);
}