//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>

using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");

static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound,
                                  const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
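    // (Illustrative example: an index of 3 into an array whose element
    // allocation size is 4 bytes contributes 3 * 4 = 12 bytes to the offset.)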
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// Return true if Ptr2 is provably equal to Ptr1 plus a constant offset, and
/// return that constant offset. For example, Ptr1 might be &A[42], and Ptr2
/// might be &A[40]; in this case Offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2) {
    Offset = 0;
    return true;
  }

  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an
  // identical base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After that, each may have some constant
  // offset, which determines their offset from each other.  At this point, we
  // handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}

namespace {

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
struct MemsetRange {
  // Start/End - A semi-open range that describes the span that this range
  // covers.  The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found at least 4 stores to merge or 16 bytes, use memset.
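  // (For instance, four adjacent one-byte stores of the same value already
  // satisfy the store-count threshold below.)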
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size.  If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}

namespace {

class MemsetRanges {
  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;
  typedef SmallVectorImpl<MemsetRange>::iterator range_iterator;
  const DataLayout &DL;

public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  typedef SmallVectorImpl<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anonymous namespace

/// Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
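/// (Illustrative example: if the list already holds the range [0, 4) and a
/// 4-byte store at offset 4 is added, the two are merged into [0, 8).)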
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = std::lower_bound(Ranges.begin(), Ranges.end(), Start,
      [](const MemsetRange &LHS, int64_t RHS) { return LHS.End < RHS; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {

class MemCpyOptLegacyPass : public FunctionPass {
  MemCpyOptPass Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  MemCpyOptLegacyPass() : FunctionPass(ID) {
    initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }
};

char MemCpyOptLegacyPass::ID = 0;

} // end anonymous namespace

/// The public interface to this file...
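/// (Typical legacy pass manager usage, for illustration:
///   PM.add(createMemCpyOptPass());)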
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// When scanning forward over instructions, we look for some other patterns to
/// fold away.  In particular, this looks for stores to neighboring locations of
/// memory.  If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store of a splattable value.  Scan to find
  // all subsequent stores of the same value to offset from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
                           DL))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put it right before the first instruction that
  // isn't part of the memset block.  This ensures that the memset is dominated
  // by any addressing instruction needed by the start of the block.
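  // At this point BI is the first instruction that was not folded into the
  // memset block (or the block terminator), so that is where we build.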
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {
    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
          cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = DL.getABITypeAlignment(EltType);
    }

    AMemSet =
        Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (Instruction *SI : Range.TheStores)
            dbgs() << *SI << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores) {
      MD->removeInstruction(SI);
      SI->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}

static unsigned findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
                                    const LoadInst *LI) {
  unsigned StoreAlign = SI->getAlignment();
  if (!StoreAlign)
    StoreAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
  unsigned LoadAlign = LI->getAlignment();
  if (!LoadAlign)
    LoadAlign = DL.getABITypeAlignment(LI->getType());

  return std::min(StoreAlign, LoadAlign);
}

// This method tries to lift a store instruction before position P.
// It will lift the store and its argument, plus anything else that may alias
// with them.
// The method returns true if it was successful.
static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
                   const LoadInst *LI) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (AA.getModRefInfo(P, StoreLoc) != MRI_NoModRef)
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction*, 8> ToLift;

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted callsites.
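  // Call sites are tracked separately from plain memory locations because
  // their effects are queried below via getModRefInfo(C, CS).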
  SmallVector<ImmutableCallSite, 8> CallSites;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    bool MayAlias = AA.getModRefInfo(C) != MRI_NoModRef;

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = llvm::any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
        return AA.getModRefInfo(C, ML);
      });

      if (!NeedLift)
        NeedLift =
            llvm::any_of(CallSites, [C, &AA](const ImmutableCallSite &CS) {
              return AA.getModRefInfo(C, CS);
            });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      // Since LI is implicitly moved downwards past the lifted instructions,
      // none of them may modify its source.
      if (AA.getModRefInfo(C, LoadLoc) & MRI_Mod)
        return false;
      else if (auto CS = ImmutableCallSite(C)) {
        // If we can't lift this before P, it's game over.
        if (AA.getModRefInfo(P, CS) != MRI_NoModRef)
          return false;

        CallSites.push_back(CS);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (AA.getModRefInfo(P, ML) != MRI_NoModRef)
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k)))
        if (A->getParent() == SI->getParent())
          Args.insert(A);
  }

  // We made it; we need to lift.
  for (auto *I : llvm::reverse(ToLift)) {
    DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
  }

  return true;
}

bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach this pass to propagate the !nontemporal metadata
  // to memset calls.  However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Load to store forwarding can be interpreted as memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {
      auto *T = LI->getType();
      if (T->isAggregateType()) {
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store.  If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (AA.getModRefInfo(&I, LoadLoc) & MRI_Mod) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this and the
        // store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(AA, SI, P, LI))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics.  If not, memcpy can
          // be used.
          bool UseMemMove = false;
          if (!AA.isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          unsigned Align = findCommonAlignment(DL, SI, LI);
          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(SI->getPointerOperand(),
                                      LI->getPointerOperand(), Size,
                                      Align, SI->isVolatile());
          else
            M = Builder.CreateMemCpy(SI->getPointerOperand(),
                                     LI->getPointerOperand(), Size,
                                     Align, SI->isVolatile());

          DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI
                       << " => " << *M << "\n");

          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        Value *CpyDest = SI->getPointerOperand()->stripPointerCasts();
        bool CpyDestIsLocal = isa<AllocaInst>(CpyDest);
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
             I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != MRI_NoModRef) {
            C = nullptr;
            break;
          }
          // The store to dest may never happen if an exception can be thrown
          // between the load and the store.
          if (I->mayThrow() && !CpyDestIsLocal) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            findCommonAlignment(DL, SI, LI), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, such as "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging as it can expose optimization opportunities
    // in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      unsigned Align = SI->getAlignment();
      if (!Align)
        Align = DL.getABITypeAlignment(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal,
                                     Size, Align, SI->isVolatile());

      DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      MD->removeInstruction(SI);
      SI->eraseFromParent();
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}

/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpy, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         unsigned cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check it is larger than srcSize.
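    // A non-constant alloca array size means we cannot bound the destination,
    // so we conservatively give up below.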
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // The store to dest may never happen if the call can throw.
    if (C->mayThrow())
      return false;

    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction".
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->getIntrinsicID() == Intrinsic::lifetime_start ||
          IT->getIntrinsicID() == Intrinsic::lifetime_end)
        continue;

    if (U != C && U != cpy)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
    if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = LookupDomTree();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = LookupAliasAnalysis();
  ModRefInfo MR = AA.getModRefInfo(C, cpyDest, srcSize);
  // If necessary, perform additional analysis.
  if (MR != MRI_NoModRef)
    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
  if (MR != MRI_NoModRef)
    return false;

  // We can't create address space casts here because we don't know if they're
  // safe for the target.
  if (cpySrc->getType()->getPointerAddressSpace() !=
      cpyDest->getType()->getPointerAddressSpace())
    return false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc &&
        cpySrc->getType()->getPointerAddressSpace() !=
        CS.getArgument(i)->getType()->getPointerAddressSpace())
      return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
          : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                        cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(
                              Dest, CS.getArgument(i)->getType(),
                              Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata.
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet.
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group};
  combineMetadata(C, cpy, KnownIDs);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'.  Try to simplify M to copy from MDep's input if we can.
bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                  MemCpyInst *MDep) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction.  Just
  // ignore the input and let someone else zap MDep.  This handles cases like:
  //   memcpy(a <- a)
  //   memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the length of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
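  // (Illustrative example: memcpy(a <- b, 64) followed by memcpy(c <- a, 16)
  // qualifies, but memcpy(a <- b, 16) followed by memcpy(c <- a, 64) does not,
  // because the second copy would read bytes MDep never wrote.)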
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = LookupAliasAnalysis();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //   memcpy(a <- b)
  //   *b = 42;
  //   memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
      MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                   M->getIterator(), M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy?  For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}

/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet.  Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                                  MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (MemSet->getDest() != MemCpy->getDest())
    return false;

  // Check that there are no other dependencies on the memset destination.
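  // That is, the nearest dependency of the memcpy's destination (scanning
  // upward) must be the memset itself; anything else in between makes the
  // rewrite unsafe, so we bail out.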
  MemDepResult DstDepInfo =
      MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
                                   MemCpy->getIterator(), MemCpy->getParent());
  if (DstDepInfo.getInst() != MemSet)
    return false;

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned, and SrcSize is constant, use the minimum alignment
  // of Dest + SrcSize.
  const unsigned DestAlign =
      std::max(MemSet->getAlignment(), MemCpy->getAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
  Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
  Value *MemsetLen = Builder.CreateSelect(
      Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
  Builder.CreateMemSet(Builder.CreateGEP(Dest, SrcSize), MemSet->getOperand(1),
                       MemsetLen, Align);

  MD->removeInstruction(MemSet);
  MemSet->eraseFromParent();
  return true;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// When dst2_size <= dst1_size.
///
/// The \p MemCpy must have a Constant length.
bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                               MemSetInst *MemSet) {
  AliasAnalysis &AA = LookupAliasAnalysis();

  // Make sure we are memsetting and memcpying from the same address; that is,
  // the pattern really is memcpy(..., memset(...), ...).  Otherwise it is hard
  // to reason about.
  if (!AA.isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
    return false;

  ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
  ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
  // Make sure the memcpy doesn't read any more than what the memset wrote.
  // Don't worry about sizes larger than i64.
  if (!MemSetSize || CopySize->getZExtValue() > MemSetSize->getZExtValue())
    return false;

  IRBuilder<> Builder(MemCpy);
  Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                       CopySize, MemCpy->getAlignment());
  return true;
}

/// Perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOptPass::processMemCpy(MemCpyInst *M) {
  // We can only optimize non-volatile memcpy's.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  MemDepResult DepInfo = MD->getDependency(M);

  // Try to turn a partially redundant memset + memcpy into
  // memcpy + smaller memset.  We don't need the memcpy size for this.
  if (DepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
      if (processMemSetMemCpyDependence(M, MDep))
        return true;

  // The optimizations after this point require the memcpy size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (!CopySize) return false;

  // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started its
  //      lifetime copies undefined data, and we can therefore eliminate the
  //      memcpy in favor of the data that was already at the destination.
  //   d) memcpy from a just-memset'd source can be turned into memset.
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), M->getAlignment(),
                               C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
      SrcLoc, true, M->getIterator(), M->getParent());

  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep);
  } else if (SrcDepInfo.isDef()) {
    Instruction *I = SrcDepInfo.getInst();
    bool hasUndefContents = false;

    if (isa<AllocaInst>(I)) {
      hasUndefContents = true;
    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start)
        if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
          if (LTSize->getZExtValue() >= CopySize->getZExtValue())
            hasUndefContents = true;
    }

    if (hasUndefContents) {
      MD->removeInstruction(M);
      M->eraseFromParent();
      ++NumMemCpyInstr;
      return true;
    }
  }

  if (SrcDepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
      if (performMemCpyToMemSetOptzn(M, MDep)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  return false;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
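/// (For example, a memmove between two distinct local allocas can safely
/// become a memcpy, since the ranges cannot overlap.)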
bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = LookupAliasAnalysis();

  if (!TLI->has(LibFunc_memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
               << "\n");

  // If not, then we know we can transform this.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// This is called on every byval argument in call sites.
bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) {
  const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemDepResult DepInfo = MD->getPointerDependencyFrom(
      MemoryLocation(ByValArg, ByValSize), true,
      CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo);
  if (ByValAlign == 0) return false;

  // If it is greater than the memcpy's alignment, then we check to see if we
  // can force the source of the memcpy to the alignment we need.  If we fail,
  // we bail out.
  AssumptionCache &AC = LookupAssumptionCache();
  DominatorTree &DT = LookupDomTree();
  if (MDep->getAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
                                 CS.getInstruction(), &AC, &DT) < ByValAlign)
    return false;

  // The address space of the memcpy source must match the byval argument.
  if (MDep->getSource()->getType()->getPointerAddressSpace() !=
      ByValArg->getType()->getPointerAddressSpace())
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //   memcpy(a <- b)
  //   *b = 42;
  //   foo(*a)
  // It would be invalid to transform the second memcpy into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep = MD->getPointerDependencyFrom(
      MemoryLocation::getForSource(MDep), false,
      CS.getInstruction()->getIterator(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good!  Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// Executes one iteration of MemCpyOptPass.
bool MemCpyOptPass::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (BasicBlock &BB : F) {
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto CS = CallSite(I)) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.isByValArgument(i))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB.begin())
          --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);

  auto LookupAliasAnalysis = [&]() -> AliasAnalysis & {
    return AM.getResult<AAManager>(F);
  };
  auto LookupAssumptionCache = [&]() -> AssumptionCache & {
    return AM.getResult<AssumptionAnalysis>(F);
  };
  auto LookupDomTree = [&]() -> DominatorTree & {
    return AM.getResult<DominatorTreeAnalysis>(F);
  };

  bool MadeChange = runImpl(F, &MD, &TLI, LookupAliasAnalysis,
                            LookupAssumptionCache, LookupDomTree);
  if (!MadeChange)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}

bool MemCpyOptPass::runImpl(
    Function &F, MemoryDependenceResults *MD_, TargetLibraryInfo *TLI_,
    std::function<AliasAnalysis &()> LookupAliasAnalysis_,
    std::function<AssumptionCache &()> LookupAssumptionCache_,
    std::function<DominatorTree &()> LookupDomTree_) {
  bool MadeChange = false;
  MD = MD_;
  TLI = TLI_;
  LookupAliasAnalysis = std::move(LookupAliasAnalysis_);
  LookupAssumptionCache = std::move(LookupAssumptionCache_);
  LookupDomTree = std::move(LookupDomTree_);

  // If we don't have at least memset and memcpy, there is little point in
  // doing anything here.  These are required by a freestanding implementation,
  // so if even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
    return false;

  while (true) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = nullptr;
  return MadeChange;
}

/// This is the main transformation entry point for a function.
bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  auto LookupAliasAnalysis = [this]() -> AliasAnalysis & {
    return getAnalysis<AAResultsWrapperPass>().getAAResults();
  };
  auto LookupAssumptionCache = [this, &F]() -> AssumptionCache & {
    return getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  };
  auto LookupDomTree = [this]() -> DominatorTree & {
    return getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  };

  return Impl.runImpl(F, MD, TLI, LookupAliasAnalysis, LookupAssumptionCache,
                      LookupDomTree);
}