//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");

static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound,
                                  const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.
    // Multiply the index by the ElementSize.
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// Return true if Ptr1 is provably equal to Ptr2 plus a constant offset, and
/// return that constant offset. For example, Ptr1 might be &A[42], and Ptr2
/// might be &A[40]. In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2) {
    Offset = 0;
    return true;
  }

  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  // Right now we handle the case where Ptr1/Ptr2 are both GEPs with an
  // identical base. After that base, they may have some number of common (and
  // potentially variable) indices. After those, each may have some constant
  // offset, which determines their offset from each other. At this point, we
  // handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}

namespace {

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [2, 3). The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
struct MemsetRange {
  // Start/End - A half-open range that describes the span this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found at least 4 stores to merge or at least 16 bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores is a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes, if any, are done a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}

namespace {

class MemsetRanges {
  using range_iterator = SmallVectorImpl<MemsetRange>::iterator;

  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;

  const DataLayout &DL;

public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;

  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anonymous namespace

/// Add a new store to the MemsetRanges data structure.
/// This adds a new range for the specified store at the specified offset,
/// merging into existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = std::lower_bound(Ranges.begin(), Ranges.end(), Start,
      [](const MemsetRange &LHS, int64_t RHS) { return LHS.End < RHS; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End. If End < I->Start or I == E, then we need
  // to insert a new range. Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range. In this case, it couldn't
  // possibly cause it to join the prior range, because otherwise we would have
  // stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start. Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {

class MemCpyOptLegacyPass : public FunctionPass {
  MemCpyOptPass Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  MemCpyOptLegacyPass() : FunctionPass(ID) {
    initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }
};

} // end anonymous namespace

char MemCpyOptLegacyPass::ID = 0;

/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store of a splatable value. Scan to find all
  // subsequent stores of the same value at constant offsets from the same
  // pointer. Join these together into ranges, so we can decide whether
  // contiguous blocks are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
                           DL))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well. We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block. This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {
    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this! Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
          cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = DL.getABITypeAlignment(EltType);
    }

    AMemSet =
        Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (Instruction *SI : Range.TheStores)
            dbgs() << *SI << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores) {
      MD->removeInstruction(SI);
      SI->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}

static unsigned findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
                                    const LoadInst *LI) {
  unsigned StoreAlign = SI->getAlignment();
  if (!StoreAlign)
    StoreAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
  unsigned LoadAlign = LI->getAlignment();
  if (!LoadAlign)
    LoadAlign = DL.getABITypeAlignment(LI->getType());

  return std::min(StoreAlign, LoadAlign);
}

// This method tries to lift a store instruction before position P.
// It lifts the store, its arguments, and anything that may alias with them.
// The method returns true if it was successful.
static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
                   const LoadInst *LI) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (isModOrRefSet(AA.getModRefInfo(P, StoreLoc)))
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction*, 8> ToLift;

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted callsites.
  SmallVector<ImmutableCallSite, 8> CallSites;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    bool MayAlias = isModOrRefSet(AA.getModRefInfo(C, None));

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = llvm::any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
        return isModOrRefSet(AA.getModRefInfo(C, ML));
      });

      if (!NeedLift)
        NeedLift =
            llvm::any_of(CallSites, [C, &AA](const ImmutableCallSite &CS) {
              return isModOrRefSet(AA.getModRefInfo(C, CS));
            });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      // Since LI is implicitly moved downwards past the lifted instructions,
      // none of them may modify its source.
      if (isModSet(AA.getModRefInfo(C, LoadLoc)))
        return false;
      else if (auto CS = ImmutableCallSite(C)) {
        // If we can't lift this before P, it's game over.
        if (isModOrRefSet(AA.getModRefInfo(P, CS)))
          return false;

        CallSites.push_back(CS);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (isModOrRefSet(AA.getModRefInfo(P, ML)))
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k)))
        if (A->getParent() == SI->getParent())
          Args.insert(A);
  }

  // We made it; now do the lifting.
  for (auto *I : llvm::reverse(ToLift)) {
    DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
  }

  return true;
}

bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach the compiler how to propagate the !nontemporal
  // metadata to memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Load to store forwarding can be interpreted as memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      if (T->isAggregateType()) {
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store. If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (isModSet(AA.getModRefInfo(&I, LoadLoc))) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this and the
        // store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(AA, SI, P, LI))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics. If not, memcpy can
          // be used.
          bool UseMemMove = false;
          if (!AA.isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          unsigned Align = findCommonAlignment(DL, SI, LI);
          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(SI->getPointerOperand(),
                                      LI->getPointerOperand(), Size,
                                      Align, SI->isVolatile());
          else
            M = Builder.CreateMemCpy(SI->getPointerOperand(),
                                     LI->getPointerOperand(), Size,
                                     Align, SI->isVolatile());

          DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI
                       << " => " << *M << "\n");

          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        Value *CpyDest = SI->getPointerOperand()->stripPointerCasts();
        bool CpyDestIsLocal = isa<AllocaInst>(CpyDest);
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
             I != E; --I) {
          if (isModOrRefSet(AA.getModRefInfo(&*I, StoreLoc))) {
            C = nullptr;
            break;
          }
          // The store to dest may never happen if an exception can be thrown
          // between the load and the store.
          if (I->mayThrow() && !CpyDestIsLocal) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            findCommonAlignment(DL, SI, LI), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'd a
  // byte at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging as it can expose optimization opportunities
    // in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      unsigned Align = SI->getAlignment();
      if (!Align)
        Align = DL.getABITypeAlignment(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal,
                                     Size, Align, SI->isVolatile());

      DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      MD->removeInstruction(SI);
      SI->eraseFromParent();
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}

/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpy, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         unsigned cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca. Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // The store to dest may never happen if the call can throw.
    if (C->mayThrow())
      return false;

    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction".
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->getIntrinsicID() == Intrinsic::lifetime_start ||
          IT->getIntrinsicID() == Intrinsic::lifetime_end)
        continue;

    if (U != C && U != cpy)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
    if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = LookupDomTree();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.
  // We rely on AA to figure this out for us.
  AliasAnalysis &AA = LookupAliasAnalysis();
  ModRefInfo MR = AA.getModRefInfo(C, cpyDest, srcSize);
  // If necessary, perform additional analysis.
  if (isModOrRefSet(MR))
    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
  if (isModOrRefSet(MR))
    return false;

  // We can't create address space casts here because we don't know if they're
  // safe for the target.
  if (cpySrc->getType()->getPointerAddressSpace() !=
      cpyDest->getType()->getPointerAddressSpace())
    return false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc &&
        cpySrc->getType()->getPointerAddressSpace() !=
        CS.getArgument(i)->getType()->getPointerAddressSpace())
      return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
          : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                        cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(Dest,
                           CS.getArgument(i)->getType(), Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata.
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet.
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group};
  combineMetadata(C, cpy, KnownIDs);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                  MemCpyInst *MDep) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a
  // noop transfer and substituting the input won't change this instruction.
  // Just ignore the input and let someone else zap MDep. This handles cases
  // like:
  //   memcpy(a <- a)
  //   memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the length of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
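  // For example, given
  //   memcpy(a <- b, 64)   ; MDep
  //   memcpy(c <- a, 32)   ; M
  // it is safe to rewrite M as memcpy(c <- b, 32), because the first 32 bytes
  // of 'a' were copied from 'b' (provided 'b' is unchanged in between, which
  // is verified below).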
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = LookupAliasAnalysis();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers. For example, in:
  //   memcpy(a <- b)
  //   *b = 42;
  //   memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
      MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                   M->getIterator(), M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap. We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}

/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                                  MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (MemSet->getDest() != MemCpy->getDest())
    return false;

  // Check that there are no other dependencies on the memset destination.
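  // (The transform effectively sinks a shrunken memset down to the memcpy, so
  // if anything else reads or writes the destination in between, it would
  // observe different values afterwards; bail out in that case.)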
  MemDepResult DstDepInfo =
      MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
                                   MemCpy->getIterator(), MemCpy->getParent());
  if (DstDepInfo.getInst() != MemSet)
    return false;

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned, and SrcSize is constant, the new memset starts at
  // Dest + SrcSize, so use the minimum alignment of that sum.
  const unsigned DestAlign =
      std::max(MemSet->getAlignment(), MemCpy->getAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
  Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
  Value *MemsetLen = Builder.CreateSelect(
      Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
  Builder.CreateMemSet(Builder.CreateGEP(Dest, SrcSize), MemSet->getOperand(1),
                       MemsetLen, Align);

  MD->removeInstruction(MemSet);
  MemSet->eraseFromParent();
  return true;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// when dst2_size <= dst1_size.
///
/// The \p MemCpy must have a Constant length.
bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                               MemSetInst *MemSet) {
  AliasAnalysis &AA = LookupAliasAnalysis();

  // Make sure that we have memcpy(..., memset(...), ...); that is, that we are
  // memcpying from the address the memset wrote to. Otherwise it is hard to
  // reason about.
  if (!AA.isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
    return false;

  ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
  ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
  // Make sure the memcpy doesn't read any more than what the memset wrote.
  // Don't worry about sizes larger than i64.
  if (!MemSetSize || CopySize->getZExtValue() > MemSetSize->getZExtValue())
    return false;

  IRBuilder<> Builder(MemCpy);
  Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                       CopySize, MemCpy->getAlignment());
  return true;
}

/// Perform simplification of memcpy's. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOptPass::processMemCpy(MemCpyInst *M) {
  // We can only optimize non-volatile memcpy's.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  MemDepResult DepInfo = MD->getDependency(M);

  // Try to turn a partially redundant memset + memcpy into
  // memcpy + smaller memset. We don't need the memcpy size for this.
  if (DepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
      if (processMemSetMemCpyDependence(M, MDep))
        return true;

  // The optimizations after this point require the memcpy size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (!CopySize) return false;

  // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started its
  //      lifetime copies undefined data, and we can therefore eliminate the
  //      memcpy in favor of the data that was already at the destination.
  //   d) memcpy from a just-memset'd source can be turned into memset.
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), M->getAlignment(),
                               C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
      SrcLoc, true, M->getIterator(), M->getParent());

  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep);
  } else if (SrcDepInfo.isDef()) {
    Instruction *I = SrcDepInfo.getInst();
    bool hasUndefContents = false;

    if (isa<AllocaInst>(I)) {
      hasUndefContents = true;
    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start)
        if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
          if (LTSize->getZExtValue() >= CopySize->getZExtValue())
            hasUndefContents = true;
    }

    if (hasUndefContents) {
      MD->removeInstruction(M);
      M->eraseFromParent();
      ++NumMemCpyInstr;
      return true;
    }
  }

  if (SrcDepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
      if (performMemCpyToMemSetOptzn(M, MDep)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  return false;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = LookupAliasAnalysis();

  if (!TLI->has(LibFunc_memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
               << "\n");

  // If not, then we know we can transform this.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// This is called on every byval argument in call sites.
bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) {
  const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemDepResult DepInfo = MD->getPointerDependencyFrom(
      MemoryLocation(ByValArg, ByValSize), true,
      CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval. If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo);
  if (ByValAlign == 0) return false;

  // If it is greater than the memcpy, then we check to see if we can force the
  // source of the memcpy to the alignment we need. If we fail, we bail out.
  AssumptionCache &AC = LookupAssumptionCache();
  DominatorTree &DT = LookupDomTree();
  if (MDep->getAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
                                 CS.getInstruction(), &AC, &DT) < ByValAlign)
    return false;

  // The address space of the memcpy source must match the byval argument.
  if (MDep->getSource()->getType()->getPointerAddressSpace() !=
      ByValArg->getType()->getPointerAddressSpace())
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //   memcpy(a <- b)
  //   *b = 42;
  //   foo(*a)
  // It would be invalid to transform the call into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep = MD->getPointerDependencyFrom(
      MemoryLocation::getForSource(MDep), false,
      CS.getInstruction()->getIterator(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good! Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// Executes one iteration of MemCpyOptPass.
bool MemCpyOptPass::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (BasicBlock &BB : F) {
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto CS = CallSite(I)) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.isByValArgument(i))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB.begin())
          --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);

  auto LookupAliasAnalysis = [&]() -> AliasAnalysis & {
    return AM.getResult<AAManager>(F);
  };
  auto LookupAssumptionCache = [&]() -> AssumptionCache & {
    return AM.getResult<AssumptionAnalysis>(F);
  };
  auto LookupDomTree = [&]() -> DominatorTree & {
    return AM.getResult<DominatorTreeAnalysis>(F);
  };

  bool MadeChange = runImpl(F, &MD, &TLI, LookupAliasAnalysis,
                            LookupAssumptionCache, LookupDomTree);
  if (!MadeChange)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}

bool MemCpyOptPass::runImpl(
    Function &F, MemoryDependenceResults *MD_, TargetLibraryInfo *TLI_,
    std::function<AliasAnalysis &()> LookupAliasAnalysis_,
    std::function<AssumptionCache &()> LookupAssumptionCache_,
    std::function<DominatorTree &()> LookupDomTree_) {
  bool MadeChange = false;
  MD = MD_;
  TLI = TLI_;
  LookupAliasAnalysis = std::move(LookupAliasAnalysis_);
  LookupAssumptionCache = std::move(LookupAssumptionCache_);
  LookupDomTree = std::move(LookupDomTree_);

  // If we don't have at least memset and memcpy, there is little point in
  // doing anything here.
  // These are required by a freestanding implementation, so if even they are
  // disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
    return false;

  while (true) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = nullptr;
  return MadeChange;
}

/// This is the main transformation entry point for a function.
bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  auto LookupAliasAnalysis = [this]() -> AliasAnalysis & {
    return getAnalysis<AAResultsWrapperPass>().getAAResults();
  };
  auto LookupAssumptionCache = [this, &F]() -> AssumptionCache & {
    return getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  };
  auto LookupDomTree = [this]() -> DominatorTree & {
    return getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  };

  return Impl.runImpl(F, MD, TLI, LookupAliasAnalysis, LookupAssumptionCache,
                      LookupDomTree);
}