//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");

/// Compute the byte offset implied by GEP indices [Idx, NumOperands), setting
/// VariableIdxFound if any non-constant index is encountered (in which case
/// the returned offset is meaningless).
static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound,
                                  const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// Return true if Ptr2 is provably equal to Ptr1 plus a constant offset, and
/// return that constant offset.  For example, Ptr1 might be &A[42], and Ptr2
/// might be &A[40].  In this case offset would be -8 (assuming 4-byte array
/// elements).
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2) {
    Offset = 0;
    return true;
  }

  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an
  // identical base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After that they may have some constant
  // offset, which determines their offset from each other.  At this point, we
  // handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}


/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the
/// range to [0, 2).  The third makes a new range [2, 3).  The fourth store
/// joins the two ranges into [0, 3) which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A semi range that describes the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found at least 4 stores to merge, or at least 16 bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost
  // always.  However, merging 2 32-bit stores isn't useful on a 32-bit
  // architecture (the memset will be split into 2 32-bit stores anyway) and
  // doing so can pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size.  If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSize();
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes, if any, are done a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
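  // For example, on a target whose largest legal integer is 8 bytes, an i32
  // store plus two i16 stores covering 8 contiguous bytes give Bytes == 8,
  // NumPointerStores == 1 and NumByteStores == 0: three stores collapse to
  // what can be a single 8-byte store, so the memset is considered profitable.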
  return TheStores.size() > NumPointerStores+NumByteStores;
}


namespace {
class MemsetRanges {
  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;
  typedef SmallVectorImpl<MemsetRange>::iterator range_iterator;
  const DataLayout &DL;
public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  typedef SmallVectorImpl<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anon namespace


/// Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = std::lower_bound(
      Ranges.begin(), Ranges.end(), Start,
      [](const MemsetRange &LHS, int64_t RHS) { return LHS.End < RHS; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start     = Start;
    R.End       = End;
    R.StartPtr  = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, we are done.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out
  // to End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
class MemCpyOpt : public FunctionPass {
  MemoryDependenceResults *MD;
  TargetLibraryInfo *TLI;
public:
  static char ID; // Pass identification, replacement for typeid
  MemCpyOpt() : FunctionPass(ID) {
    initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
    MD = nullptr;
    TLI = nullptr;
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }

  // Helper functions
  bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
  bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
  bool processMemCpy(MemCpyInst *M);
  bool processMemMove(MemMoveInst *M);
  bool performCallSlotOptzn(Instruction *cpy, Value *cpyDest, Value *cpySrc,
                            uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
  bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep);
  bool processMemSetMemCpyDependence(MemCpyInst *M, MemSetInst *MDep);
  bool performMemCpyToMemSetOptzn(MemCpyInst *M, MemSetInst *MDep);
  bool processByValArgument(CallSite CS, unsigned ArgNo);
  Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                    Value *ByteVal);

  bool iterateOnFunction(Function &F);
};

char MemCpyOpt::ID = 0;
} // end anon namespace

/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// When scanning forward over instructions, we look for some other patterns to
/// fold away.  In particular, this looks for stores to neighboring locations
/// of memory.  If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
                                             Value *StartPtr, Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store of a splattable value.  Scan to find
  // all subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous
  // blocks are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable
      // value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
                           DL))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block.  This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
          cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = DL.getABITypeAlignment(EltType);
    }

    AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start,
                                   Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (Instruction *SI : Range.TheStores)
            dbgs() << *SI << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores) {
      MD->removeInstruction(SI);
      SI->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}

static unsigned findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
                                    const LoadInst *LI) {
  unsigned StoreAlign = SI->getAlignment();
  if (!StoreAlign)
    StoreAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
  unsigned LoadAlign = LI->getAlignment();
  if (!LoadAlign)
    LoadAlign = DL.getABITypeAlignment(LI->getType());

  return std::min(StoreAlign, LoadAlign);
}

// This method tries to lift a store instruction before position P.
// It will lift the store and its operands, as well as anything that may alias
// with them.
// The method returns true if it was successful.
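//
// For example, to lift "store i32 1, i32* %a" above a position P, any
// instruction between P and the store that computes %a (or that may alias the
// lifted memory accesses) has to be lifted as well; if anything in that range
// cannot be lifted safely, the method gives up and returns false.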
static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (AA.getModRefInfo(P, StoreLoc) != MRI_NoModRef)
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction*, 8> ToLift;

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs;
  MemLocs.push_back(StoreLoc);

  // Lifted callsites.
  SmallVector<ImmutableCallSite, 8> CallSites;

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    bool MayAlias = AA.getModRefInfo(C) != MRI_NoModRef;

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = std::any_of(MemLocs.begin(), MemLocs.end(),
                             [C, &AA](const MemoryLocation &ML) {
                               return AA.getModRefInfo(C, ML);
                             });

      if (!NeedLift)
        NeedLift = std::any_of(CallSites.begin(), CallSites.end(),
                               [C, &AA](const ImmutableCallSite &CS) {
                                 return AA.getModRefInfo(C, CS);
                               });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      if (auto CS = ImmutableCallSite(C)) {
        // If we can't lift this before P, it's game over.
        if (AA.getModRefInfo(P, CS) != MRI_NoModRef)
          return false;

        CallSites.push_back(CS);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (AA.getModRefInfo(P, ML) != MRI_NoModRef)
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k)))
        if (A->getParent() == SI->getParent())
          Args.insert(A);
  }

  // We made it!  Lift everything we collected, preserving the original order.
  for (auto *I : reverse(ToLift)) {
    DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
  }

  return true;
}

bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach the backend how to propagate the !nontemporal
  // metadata to memset calls.  However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Load to store forwarding can be interpreted as memcpy.
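  // For example (illustrative IR), the pair
  //   %v = load %T, %T* %src
  //   store %T %v, %T* %dst
  // where %v has no other use behaves like a memcpy of sizeof(%T) bytes from
  // %src to %dst; the promotion below is only attempted for aggregate types.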
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      if (T->isAggregateType()) {
        AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store.  If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (AA.getModRefInfo(&I, LoadLoc) & MRI_Mod) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this and the
        // store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(AA, SI, P))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics.  If not, memcpy can
          // be used.
          bool UseMemMove = false;
          if (!AA.isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          unsigned Align = findCommonAlignment(DL, SI, LI);
          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(SI->getPointerOperand(),
                                      LI->getPointerOperand(), Size,
                                      Align, SI->isVolatile());
          else
            M = Builder.CreateMemCpy(SI->getPointerOperand(),
                                     LI->getPointerOperand(), Size,
                                     Align, SI->isVolatile());

          DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI
                       << " => " << *M << "\n");

          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --SI->getIterator(),
                                  E = C->getIterator();
             I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != MRI_NoModRef) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            findCommonAlignment(DL, SI, LI), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'ed a
  // byte at a time, like "0" or "-1" of any width, or splattable patterns
  // like 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging as it can expose optimization opportunities
    // in subsequent passes.
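    // For example (illustrative IR), storing a zero-initialized struct:
    //   store %struct.S zeroinitializer, %struct.S* %p
    // is bytewise zero, so it can become a memset of 0 over
    // sizeof(%struct.S) bytes at %p.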
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      unsigned Align = SI->getAlignment();
      if (!Align)
        Align = DL.getABITypeAlignment(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal,
                                     Size, Align, SI->isVolatile());

      DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      MD->removeInstruction(SI);
      SI->eraseFromParent();
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}


/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, unsigned cpyAlign,
                                     CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning
  // that the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning
  // considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check it is at least as large as srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction".
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the
  // final memcpy can be dropped), that it is not read or written between the
  // call and the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->getIntrinsicID() == Intrinsic::lifetime_start ||
          IT->getIntrinsicID() == Intrinsic::lifetime_end)
        continue;

    if (U != C && U != cpy)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
    if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  ModRefInfo MR = AA.getModRefInfo(C, cpyDest, srcSize);
  // If necessary, perform additional analysis.
  if (MR != MRI_NoModRef)
    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
  if (MR != MRI_NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
          : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                        cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(
                              Dest, CS.getArgument(i)->getType(),
                              Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its
  // alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata.
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet.
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group};
  combineMetadata(C, cpy, KnownIDs);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'.  Try to simplify M to copy from MDep's input if we can.
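///
/// In other words, transform:
/// \code
///   memcpy(b <- a)
///   memcpy(c <- b)
/// \endcode
/// into:
/// \code
///   memcpy(b <- a)
///   memcpy(c <- a)
/// \endcode
/// (or a memmove, if the source and destination of the second copy may
/// overlap), which exposes the first memcpy as dead to later passes.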
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep) {
  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a
  // noop transfer and substituting the input won't change this instruction.
  // Just ignore the input and let someone else zap MDep.  This handles cases
  // like:
  //   memcpy(a <- a)
  //   memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Also, the lengths of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //   memcpy(a <- b)
  //   *b = 42;
  //   memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination
  // "c", then we could still perform the xform by moving M up to the first
  // memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
      MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                   M->getIterator(), M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the
  // intermediate value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy?  For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(),
                          M->getLength(), Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(),
                         M->getLength(), Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}

/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet.  Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c,
///          dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
bool MemCpyOpt::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                              MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (MemSet->getDest() != MemCpy->getDest())
    return false;

  // Check that there are no other dependencies on the memset destination.
  MemDepResult DstDepInfo =
      MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
                                   MemCpy->getIterator(), MemCpy->getParent());
  if (DstDepInfo.getInst() != MemSet)
    return false;

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned, and SrcSize is constant, use the minimum alignment
  // of Dest + SrcSize (the start of the trailing memset).
  const unsigned DestAlign =
      std::max(MemSet->getAlignment(), MemCpy->getAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *MemsetLen =
      Builder.CreateSelect(Builder.CreateICmpULE(DestSize, SrcSize),
                           ConstantInt::getNullValue(DestSize->getType()),
                           Builder.CreateSub(DestSize, SrcSize));
  Builder.CreateMemSet(Builder.CreateGEP(Dest, SrcSize), MemSet->getOperand(1),
                       MemsetLen, Align);

  MD->removeInstruction(MemSet);
  MemSet->eraseFromParent();
  return true;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// when dst2_size <= dst1_size.
///
/// The \p MemCpy must have a Constant length.
bool MemCpyOpt::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                           MemSetInst *MemSet) {
  // This only makes sense on memcpy(..., memset(...), ...).
  if (MemSet->getRawDest() != MemCpy->getRawSource())
    return false;

  ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
  ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
  // Make sure the memcpy doesn't read any more than what the memset wrote.
  // Don't worry about sizes larger than i64.
  if (!MemSetSize || CopySize->getZExtValue() > MemSetSize->getZExtValue())
    return false;

  IRBuilder<> Builder(MemCpy);
  Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                       CopySize, MemCpy->getAlignment());
  return true;
}

/// Perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize non-volatile memcpy's.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  MemDepResult DepInfo = MD->getDependency(M);

  // Try to turn a partially redundant memset + memcpy into
  // memcpy + smaller memset.  We don't need the memcpy size for this.
  if (DepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
      if (processMemSetMemCpyDependence(M, MDep))
        return true;

  // The optimizations after this point require the memcpy size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (!CopySize) return false;

  // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started its
  //      lifetime copies undefined data, and we can therefore eliminate the
  //      memcpy in favor of the data that was already at the destination.
  //   d) memcpy from a just-memset'd source can be turned into memset.
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), M->getAlignment(),
                               C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
      SrcLoc, true, M->getIterator(), M->getParent());

  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep);
  } else if (SrcDepInfo.isDef()) {
    Instruction *I = SrcDepInfo.getInst();
    bool hasUndefContents = false;

    if (isa<AllocaInst>(I)) {
      hasUndefContents = true;
    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start)
        if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
          if (LTSize->getZExtValue() >= CopySize->getZExtValue())
            hasUndefContents = true;
    }

    if (hasUndefContents) {
      MD->removeInstruction(M);
      M->eraseFromParent();
      ++NumMemCpyInstr;
      return true;
    }
  }

  if (SrcDepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
      if (performMemCpyToMemSetOptzn(M, MDep)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  return false;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();

  if (!TLI->has(LibFunc::memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // The pointers are provably non-aliasing, so we can rewrite this memmove as
  // a memcpy.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// This is called on every byval argument in call sites.
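///
/// For example (illustrative), if a byval argument is fed by a memcpy:
/// \code
///   memcpy(tmp <- src)
///   call @foo(byval tmp)
/// \endcode
/// we try to forward the memcpy source instead:
/// \code
///   call @foo(byval src)
/// \endcode
/// which lets later passes remove the memcpy and the temporary.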
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemDepResult DepInfo = MD->getPointerDependencyFrom(
      MemoryLocation(ByValArg, ByValSize), true,
      CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of
  // the result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If the call doesn't specify the
  // alignment, then it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0) return false;

  // If it is greater than the memcpy, then we check to see if we can force the
  // source of the memcpy to the alignment we need.  If we fail, we bail out.
  AssumptionCache &AC =
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
          *CS->getParent()->getParent());
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  if (MDep->getAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
                                 CS.getInstruction(), &AC, &DT) < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy
  // and the byval call.
  //   memcpy(a <- b)
  //   *b = 42;
  //   foo(*a)
  // It would be invalid to transform the call into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep = MD->getPointerDependencyFrom(
      MemoryLocation::getForSource(MDep), false,
      CS.getInstruction()->getIterator(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good!  Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto CS = CallSite(I)) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.isByValArgument(i))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB->begin()) --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

/// This is the main transformation entry point for a function.
bool MemCpyOpt::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  // If we don't have at least memset and memcpy, there is little point in
  // doing anything here.  These are required by a freestanding implementation,
  // so if even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy))
    return false;

  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = nullptr;
  return MadeChange;
}