//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");

static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound,
                                  const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue; // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector. Multiply
    // the index by the ElementSize.
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// Return true if Ptr1 is provably equal to Ptr2 plus a constant offset, and
/// return that constant offset. For example, Ptr1 might be &A[42], and Ptr2
/// might be &A[40]. In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2) {
    Offset = 0;
    return true;
  }

  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  // Right now we handle the case where Ptr1/Ptr2 are both GEPs with an
  // identical base. After that base, they may have some number of common (and
  // potentially variable) indices. After that they have some constant
  // offset, which determines their offset from each other. At this point, we
  // handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}


/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [2, 3). The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A semi range that describes the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found at least 4 stores to merge, or the range covers at least 16
  // bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}


namespace {
class MemsetRanges {
  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;
  typedef SmallVectorImpl<MemsetRange>::iterator range_iterator;
  const DataLayout &DL;
public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  typedef SmallVectorImpl<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);

};

} // end anon namespace


/// Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = std::lower_bound(Ranges.begin(), Ranges.end(), Start,
      [](const MemsetRange &LHS, int64_t RHS) { return LHS.End < RHS; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End. If End < I->Start or I == E, then we need
  // to insert a new range. Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range. In this case, it couldn't
  // possibly cause it to join the prior range, because otherwise we would have
  // stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start. Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {
class MemCpyOptLegacyPass : public FunctionPass {
  MemCpyOptPass Impl;
public:
  static char ID; // Pass identification, replacement for typeid
  MemCpyOptLegacyPass() : FunctionPass(ID) {
    initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator/postdominator info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }

  // Helper functions
  bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
  bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
  bool processMemCpy(MemCpyInst *M);
  bool processMemMove(MemMoveInst *M);
  bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                            uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
  bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep);
  bool processMemSetMemCpyDependence(MemCpyInst *M, MemSetInst *MDep);
  bool performMemCpyToMemSetOptzn(MemCpyInst *M, MemSetInst *MDep);
  bool processByValArgument(CallSite CS, unsigned ArgNo);
  Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                    Value *ByteVal);

  bool iterateOnFunction(Function &F);
};

char MemCpyOptLegacyPass::ID = 0;
}

/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store that can be splatable. Scan to find
  // all subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
                           DL))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well. We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block. This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this! Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
          cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = DL.getABITypeAlignment(EltType);
    }

    AMemSet =
        Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (Instruction *SI : Range.TheStores)
            dbgs() << *SI << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores) {
      MD->removeInstruction(SI);
      SI->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}

static unsigned findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
                                    const LoadInst *LI) {
  unsigned StoreAlign = SI->getAlignment();
  if (!StoreAlign)
    StoreAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
  unsigned LoadAlign = LI->getAlignment();
  if (!LoadAlign)
    LoadAlign = DL.getABITypeAlignment(LI->getType());

  return std::min(StoreAlign, LoadAlign);
}

// This method tries to lift a store instruction before position P.
// It will lift the store, its operands, and anything else that may alias
// with them.
// The method returns true if it was successful.
static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P) {
  // If the store aliases this position, early bail out.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (AA.getModRefInfo(P, StoreLoc) != MRI_NoModRef)
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction*, 8> ToLift;

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs;
  MemLocs.push_back(StoreLoc);

  // Lifted callsites.
  SmallVector<ImmutableCallSite, 8> CallSites;

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    bool MayAlias = AA.getModRefInfo(C) != MRI_NoModRef;

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
        return AA.getModRefInfo(C, ML);
      });

      if (!NeedLift)
        NeedLift = any_of(CallSites, [C, &AA](const ImmutableCallSite &CS) {
          return AA.getModRefInfo(C, CS);
        });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      if (auto CS = ImmutableCallSite(C)) {
        // If we can't lift this before P, it's game over.
        if (AA.getModRefInfo(P, CS) != MRI_NoModRef)
          return false;

        CallSites.push_back(CS);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (AA.getModRefInfo(P, ML) != MRI_NoModRef)
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k)))
        if (A->getParent() == SI->getParent())
          Args.insert(A);
  }

  // We made it, we need to lift.
  for (auto *I : reverse(ToLift)) {
    DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
  }

  return true;
}

bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach how to propagate the !nontemporal metadata to
  // memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Load to store forwarding can be interpreted as memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      if (T->isAggregateType()) {
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store. If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (AA.getModRefInfo(&I, LoadLoc) & MRI_Mod) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this and the
        // store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(AA, SI, P))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics. If not, memcpy can
          // be used.
          bool UseMemMove = false;
          if (!AA.isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          unsigned Align = findCommonAlignment(DL, SI, LI);
          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(SI->getPointerOperand(),
                                      LI->getPointerOperand(), Size,
                                      Align, SI->isVolatile());
          else
            M = Builder.CreateMemCpy(SI->getPointerOperand(),
                                     LI->getPointerOperand(), Size,
                                     Align, SI->isVolatile());

          DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI
                       << " => " << *M << "\n");

          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        Value *CpyDest = SI->getPointerOperand()->stripPointerCasts();
        bool CpyDestIsLocal = isa<AllocaInst>(CpyDest);
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
             I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != MRI_NoModRef) {
            C = nullptr;
            break;
          }
          // The store to dest may never happen if an exception can be thrown
          // between the load and the store.
          if (I->mayThrow() && !CpyDestIsLocal) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            findCommonAlignment(DL, SI, LI), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a
  // byte at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging as it can expose optimization opportunities
    // in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      unsigned Align = SI->getAlignment();
      if (!Align)
        Align = DL.getABITypeAlignment(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal,
                                     Size, Align, SI->isVolatile());

      DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      MD->removeInstruction(SI);
      SI->eraseFromParent();
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}


/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpy, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         unsigned cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca. Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // The store to dest may never happen if the call can throw.
    if (C->mayThrow())
      return false;

    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction".
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->getIntrinsicID() == Intrinsic::lifetime_start ||
          IT->getIntrinsicID() == Intrinsic::lifetime_end)
        continue;

    if (U != C && U != cpy)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
    if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = LookupDomTree();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest. We rely on AA to figure this out for us.
  AliasAnalysis &AA = LookupAliasAnalysis();
  ModRefInfo MR = AA.getModRefInfo(C, cpyDest, srcSize);
  // If necessary, perform additional analysis.
  if (MR != MRI_NoModRef)
    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
  if (MR != MRI_NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
          : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                        cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(Dest,
                           CS.getArgument(i)->getType(), Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group};
  combineMetadata(C, cpy, KnownIDs);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                  MemCpyInst *MDep) {
  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction. Just
  // ignore the input and let someone else zap MDep. This handles cases like:
  //   memcpy(a <- a)
  //   memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the length of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = LookupAliasAnalysis();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers. For example, in:
  //   memcpy(a <- b)
  //   *b = 42;
  //   memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
      MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                   M->getIterator(), M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap. We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}

/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                                  MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (MemSet->getDest() != MemCpy->getDest())
    return false;

  // Check that there are no other dependencies on the memset destination.
  MemDepResult DstDepInfo =
      MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
                                   MemCpy->getIterator(), MemCpy->getParent());
  if (DstDepInfo.getInst() != MemSet)
    return false;

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned, and SrcSize is constant, use the minimum alignment
  // of the sum.
  const unsigned DestAlign =
      std::max(MemSet->getAlignment(), MemCpy->getAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *MemsetLen =
      Builder.CreateSelect(Builder.CreateICmpULE(DestSize, SrcSize),
                           ConstantInt::getNullValue(DestSize->getType()),
                           Builder.CreateSub(DestSize, SrcSize));
  Builder.CreateMemSet(Builder.CreateGEP(Dest, SrcSize), MemSet->getOperand(1),
                       MemsetLen, Align);

  MD->removeInstruction(MemSet);
  MemSet->eraseFromParent();
  return true;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// when dst2_size <= dst1_size.
///
/// The \p MemCpy must have a Constant length.
bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                               MemSetInst *MemSet) {
  AliasAnalysis &AA = LookupAliasAnalysis();

  // Make sure that memcpy(..., memset(...), ...), that is, we are memsetting
  // and memcpying from the same address. Otherwise it is hard to reason about.
  if (!AA.isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
    return false;

  ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
  ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
  // Make sure the memcpy doesn't read any more than what the memset wrote.
  // Don't worry about sizes larger than i64.
  if (!MemSetSize || CopySize->getZExtValue() > MemSetSize->getZExtValue())
    return false;

  IRBuilder<> Builder(MemCpy);
  Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                       CopySize, MemCpy->getAlignment());
  return true;
}

/// Perform simplification of memcpy's. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOptPass::processMemCpy(MemCpyInst *M) {
  // We can only optimize non-volatile memcpy's.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  MemDepResult DepInfo = MD->getDependency(M);

  // Try to turn a partially redundant memset + memcpy into
  // memcpy + smaller memset. We don't need the memcpy size for this.
  if (DepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
      if (processMemSetMemCpyDependence(M, MDep))
        return true;

  // The optimizations after this point require the memcpy size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (!CopySize) return false;

  // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started its
  //      lifetime copies undefined data, and we can therefore eliminate the
  //      memcpy in favor of the data that was already at the destination.
  //   d) memcpy from a just-memset'd source can be turned into memset.
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), M->getAlignment(),
                               C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
      SrcLoc, true, M->getIterator(), M->getParent());

  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep);
  } else if (SrcDepInfo.isDef()) {
    Instruction *I = SrcDepInfo.getInst();
    bool hasUndefContents = false;

    if (isa<AllocaInst>(I)) {
      hasUndefContents = true;
    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start)
        if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
          if (LTSize->getZExtValue() >= CopySize->getZExtValue())
            hasUndefContents = true;
    }

    if (hasUndefContents) {
      MD->removeInstruction(M);
      M->eraseFromParent();
      ++NumMemCpyInstr;
      return true;
    }
  }

  if (SrcDepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
      if (performMemCpyToMemSetOptzn(M, MDep)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  return false;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = LookupAliasAnalysis();

  if (!TLI->has(LibFunc::memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
               << "\n");

  // If not, then we know we can transform this.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// This is called on every byval argument in call sites.
bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) {
  const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemDepResult DepInfo = MD->getPointerDependencyFrom(
      MemoryLocation(ByValArg, ByValSize), true,
      CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval. If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0) return false;

  // If it is greater than the memcpy, then we check to see if we can force the
  // source of the memcpy to the alignment we need. If we fail, we bail out.
  AssumptionCache &AC = LookupAssumptionCache();
  DominatorTree &DT = LookupDomTree();
  if (MDep->getAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
                                 CS.getInstruction(), &AC, &DT) < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //   memcpy(a <- b)
  //   *b = 42;
  //   foo(*a)
  // It would be invalid to transform the second memcpy into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep = MD->getPointerDependencyFrom(
      MemoryLocation::getForSource(MDep), false,
      CS.getInstruction()->getIterator(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good! Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// Executes one iteration of MemCpyOptPass.
bool MemCpyOptPass::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (BasicBlock &BB : F) {
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto CS = CallSite(I)) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.isByValArgument(i))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB.begin())
          --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {

  auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);

  auto LookupAliasAnalysis = [&]() -> AliasAnalysis & {
    return AM.getResult<AAManager>(F);
  };
  auto LookupAssumptionCache = [&]() -> AssumptionCache & {
    return AM.getResult<AssumptionAnalysis>(F);
  };
  auto LookupDomTree = [&]() -> DominatorTree & {
    return AM.getResult<DominatorTreeAnalysis>(F);
  };

  bool MadeChange = runImpl(F, &MD, &TLI, LookupAliasAnalysis,
                            LookupAssumptionCache, LookupDomTree);
  if (!MadeChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}

bool MemCpyOptPass::runImpl(
    Function &F, MemoryDependenceResults *MD_, TargetLibraryInfo *TLI_,
    std::function<AliasAnalysis &()> LookupAliasAnalysis_,
    std::function<AssumptionCache &()> LookupAssumptionCache_,
    std::function<DominatorTree &()> LookupDomTree_) {
  bool MadeChange = false;
  MD = MD_;
  TLI = TLI_;
  LookupAliasAnalysis = std::move(LookupAliasAnalysis_);
  LookupAssumptionCache = std::move(LookupAssumptionCache_);
  LookupDomTree = std::move(LookupDomTree_);

  // If we don't have at least memset and memcpy, there is little point in doing
  // anything here. These are required by a freestanding implementation, so if
  // even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy))
    return false;

  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = nullptr;
  return MadeChange;
}

/// This is the main transformation entry point for a function.
bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  auto LookupAliasAnalysis = [this]() -> AliasAnalysis & {
    return getAnalysis<AAResultsWrapperPass>().getAAResults();
  };
  auto LookupAssumptionCache = [this, &F]() -> AssumptionCache & {
    return getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  };
  auto LookupDomTree = [this]() -> DominatorTree & {
    return getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  };

  return Impl.runImpl(F, MD, TLI, LookupAliasAnalysis, LookupAssumptionCache,
                      LookupDomTree);
}