//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memsets.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");

namespace {

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [2, 3). The fourth store joins the
/// two ranges into [0, 3), which is memset'able.
struct MemsetRange {
  // Start/End - A semi-open range that describes the span this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found more than 4 stores to merge or 16 bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost
  // always. However, merging 2 32-bit stores isn't useful on a 32-bit
  // architecture (the memset will be split into 2 32-bit stores anyway) and
  // doing so can pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
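  // For example (a worked instance of the heuristic), three adjacent stores
  // (i32 + i16 + i16) covering 8 bytes on a target whose largest legal
  // integer is i64 give NumPointerStores == 1 and NumByteStores == 0; since
  // 3 > 1, the range is lowered to a memset.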
  return TheStores.size() > NumPointerStores+NumByteStores;
}

namespace {

class MemsetRanges {
  using range_iterator = SmallVectorImpl<MemsetRange>::iterator;

  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;

  const DataLayout &DL;

public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;

  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize, SI->getPointerOperand(),
             SI->getAlign().value(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(),
             MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anonymous namespace

/// Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = partition_point(
      Ranges, [=](const MemsetRange &O) { return O.End < Start; });

  // We now know that I == Ranges.end(), in which case we didn't find anything
  // to merge with, or that Start <= I->End. If End < I->Start or
  // I == Ranges.end(), then we need to insert a new range. Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range. In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start. Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {

class MemCpyOptLegacyPass : public FunctionPass {
  MemCpyOptPass Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  MemCpyOptLegacyPass() : FunctionPass(ID) {
    initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator and postdominator info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
  }
};

} // end anonymous namespace

char MemCpyOptLegacyPass::ID = 0;

/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
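///
/// For example (illustrative IR), a run of adjacent byte stores such as:
///   store i8 0, i8* %p
///   store i8 0, i8* %p1   ; %p + 1
///   store i8 0, i8* %p2   ; %p + 2
///   store i8 0, i8* %p3   ; %p + 3
/// may be collapsed into a single
///   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 4, i1 false)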
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store of a byte-splattable value. Scan to
  // find all subsequent stores of the same value at offsets from the same
  // pointer. Join these together into ranges, so we can decide whether
  // contiguous blocks are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);

  // Keeps track of the last memory use or def before the insertion point for
  // the new memset. The new MemoryDef for the inserted memsets will be
  // inserted after MemInsertPoint. It points to either LastMemDef or to the
  // last user before the insertion point of the memset, if there are any such
  // users.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  // Keeps track of the last MemoryDef between StartInst and the insertion
  // point for the new memset. This will become the defining access of the
  // inserted memsets.
  MemoryDef *LastMemDef = nullptr;
  for (++BI; !BI->isTerminator(); ++BI) {
    if (MSSAU) {
      auto *CurrentAcc = cast_or_null<MemoryUseOrDef>(
          MSSAU->getMemorySSA()->getMemoryAccess(&*BI));
      if (CurrentAcc) {
        MemInsertPoint = CurrentAcc;
        if (auto *CurrentDef = dyn_cast<MemoryDef>(CurrentAcc))
          LastMemDef = CurrentDef;
      }
    }

    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable
      // value.
      Value *StoredByte = isBytewiseValue(NextStore->getOperand(0), DL);
      if (isa<UndefValue>(ByteVal) && StoredByte)
        ByteVal = StoredByte;
      if (ByteVal != StoredByte)
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset =
          isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL);
      if (!Offset)
        break;

      Ranges.addStore(*Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), DL);
      if (!Offset)
        break;

      Ranges.addMemSet(*Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well. We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block. This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memsets for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {
    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this! Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start,
                                   MaybeAlign(Range.Alignment));
    LLVM_DEBUG(dbgs() << "Replace stores:\n";
               for (Instruction *SI : Range.TheStores)
                 dbgs() << *SI << '\n';
               dbgs() << "With: " << *AMemSet << '\n');
    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    if (MSSAU) {
      assert(LastMemDef && MemInsertPoint &&
             "Both LastMemDef and MemInsertPoint need to be set");
      auto *NewDef =
          cast<MemoryDef>(MemInsertPoint->getMemoryInst() == &*BI
                              ? MSSAU->createMemoryAccessBefore(
                                    AMemSet, LastMemDef, MemInsertPoint)
                              : MSSAU->createMemoryAccessAfter(
                                    AMemSet, LastMemDef, MemInsertPoint));
      MSSAU->insertDef(NewDef, /*RenameUses=*/true);
      LastMemDef = NewDef;
      MemInsertPoint = NewDef;
    }

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores) {
      if (MSSAU)
        MSSAU->removeMemoryAccess(SI);
      MD->removeInstruction(SI);
      SI->eraseFromParent();
    }

    ++NumMemSetInfer;
  }

  return AMemSet;
}

// This method tries to lift a store instruction before position P.
// It will lift the store and its arguments, plus anything that may alias with
// them. The method returns true if it was successful.
static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
                   const LoadInst *LI) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (isModOrRefSet(AA.getModRefInfo(P, StoreLoc)))
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction*, 8> ToLift;

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted calls.
  SmallVector<const CallBase *, 8> Calls;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    bool MayAlias = isModOrRefSet(AA.getModRefInfo(C, None));

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = llvm::any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
        return isModOrRefSet(AA.getModRefInfo(C, ML));
      });

      if (!NeedLift)
        NeedLift = llvm::any_of(Calls, [C, &AA](const CallBase *Call) {
          return isModOrRefSet(AA.getModRefInfo(C, Call));
        });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      // Since LI is implicitly moved downwards past the lifted instructions,
      // none of them may modify its source.
      if (isModSet(AA.getModRefInfo(C, LoadLoc)))
        return false;
      else if (const auto *Call = dyn_cast<CallBase>(C)) {
        // If we can't lift this before P, it's game over.
        if (isModOrRefSet(AA.getModRefInfo(P, Call)))
          return false;

        Calls.push_back(Call);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (isModOrRefSet(AA.getModRefInfo(P, ML)))
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) {
        if (A->getParent() == SI->getParent()) {
          // Cannot hoist user of P above P.
          if (A == P) return false;
          Args.insert(A);
        }
      }
  }

  // We made it; we need to lift.
  for (auto *I : llvm::reverse(ToLift)) {
    LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
  }

  return true;
}

bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could propagate the !nontemporal metadata to the
  // memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Load to store forwarding can be interpreted as memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      if (T->isAggregateType()) {
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store. If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (isModSet(AA->getModRefInfo(&I, LoadLoc))) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this and the
        // store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(*AA, SI, P, LI))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics. If not, memcpy can
          // be used.
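          // E.g. (illustrative) for an aggregate copy *p = *q where p and q
          // may point to overlapping memory, only memmove preserves the
          // load-then-store behavior of the original pair.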
          bool UseMemMove = false;
          if (!AA->isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);
          else
            M = Builder.CreateMemCpy(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);

          LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
                            << *M << "\n");

          if (MSSAU) {
            assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(P)));
            auto *LastDef =
                cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(P));
            auto *NewAccess =
                MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
            MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
            MSSAU->removeMemoryAccess(SI);
            MSSAU->removeMemoryAccess(LI);
          }

          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        Value *CpyDest = SI->getPointerOperand()->stripPointerCasts();
        bool CpyDestIsLocal = isa<AllocaInst>(CpyDest);
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --SI->getIterator(),
                                  E = C->getIterator();
             I != E; --I) {
          if (isModOrRefSet(AA->getModRefInfo(&*I, StoreLoc))) {
            C = nullptr;
            break;
          }
          // The store to dest may never happen if an exception can be thrown
          // between the load and the store.
          if (I->mayThrow() && !CpyDestIsLocal) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            commonAlignment(SI->getAlign(), LI->getAlign()), C);
        if (changed) {
          if (MSSAU) {
            MSSAU->removeMemoryAccess(SI);
            MSSAU->removeMemoryAccess(LI);
          }

          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.

  // Ensure that the value being stored is something that is memset'able a
  // byte at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V, DL)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging, as it can expose optimization opportunities
    // in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size,
                                     SI->getAlign());

      LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      if (MSSAU) {
        assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI)));
        auto *LastDef =
            cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
        auto *NewAccess = MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
        MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
        MSSAU->removeMemoryAccess(SI);
      }

      MD->removeInstruction(SI);
      SI->eraseFromParent();
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}

/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpy, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         Align cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning
  // that the memcpy can be discarded rather than moved.

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca. Check that it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // The store to dest may never happen if the call can throw.
    if (C->mayThrow())
      return false;

    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction".
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  Align srcAlign = srcAlloca->getAlign();
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the
  // final memcpy can be dropped), that it is not read or written between the
  // call and the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->isLifetimeStartOrEnd())
        continue;

    if (U != C && U != cpy)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned ArgI = 0, E = C->arg_size(); ArgI != E; ++ArgI)
    if (C->getArgOperand(ArgI) == cpySrc && !C->doesNotCapture(ArgI))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT->dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest. We rely on AA to figure this out for us.
  ModRefInfo MR = AA->getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
  // If necessary, perform additional analysis.
  if (isModOrRefSet(MR))
    MR = AA->callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize),
                                DT);
  if (isModOrRefSet(MR))
    return false;

  // We can't create address space casts here because we don't know if they're
  // safe for the target.
  if (cpySrc->getType()->getPointerAddressSpace() !=
      cpyDest->getType()->getPointerAddressSpace())
    return false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc &&
        cpySrc->getType()->getPointerAddressSpace() !=
            C->getArgOperand(ArgI)->getType()->getPointerAddressSpace())
      return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType()
                        ? cpyDest
                        : CastInst::CreatePointerCast(
                              cpyDest, cpySrc->getType(),
                              cpyDest->getName(), C);
      changedArgument = true;
      if (C->getArgOperand(ArgI)->getType() == Dest->getType())
        C->setArgOperand(ArgI, Dest);
      else
        C->setArgOperand(ArgI, CastInst::CreatePointerCast(
                                   Dest, C->getArgOperand(ArgI)->getType(),
                                   Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its
  // alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata.
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet.
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group,
                         LLVMContext::MD_access_group};
  combineMetadata(C, cpy, KnownIDs, true);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
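///
/// In other words (illustrative), rewrite:
///   memcpy(b <- a)
///   memcpy(c <- b)
/// into:
///   memcpy(b <- a)
///   memcpy(c <- a)
/// so that later passes may remove the intermediate copy through 'b'.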
bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                  MemCpyInst *MDep) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a
  // noop transfer and substituting the input won't change this instruction.
  // Just ignore the input and let someone else zap MDep. This handles cases
  // like:
  //   memcpy(a <- a)
  //   memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the lengths of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  // Verify that the copied-from memory doesn't change in between the two
  // transfers. For example, in:
  //   memcpy(a <- b)
  //   *b = 42;
  //   memcpy(c <- a)
  // it would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination
  // "c", then we could still perform the xform by moving M up to the first
  // memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
      MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                   M->getIterator(), M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap. We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA->isNoAlias(MemoryLocation::getForDest(M),
                     MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.
  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
                    << *MDep << '\n' << *M << '\n');

  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  IRBuilder<> Builder(M);
  Instruction *NewM;
  if (UseMemMove)
    NewM = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(),
                                 MDep->getRawSource(), MDep->getSourceAlign(),
                                 M->getLength(), M->isVolatile());
  else
    NewM = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(),
                                MDep->getRawSource(), MDep->getSourceAlign(),
                                M->getLength(), M->isVolatile());

  if (MSSAU) {
    assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)));
    auto *LastDef = cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
    auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
    MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
    MSSAU->removeMemoryAccess(M);
  }

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}

/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
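///
/// E.g. (worked instance) with dst_size == 16 and src_size == 10 the
/// rewritten memset covers only the trailing bytes [10, 16); when
/// dst_size <= src_size the memset length folds to zero.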
bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                                  MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (MemSet->getDest() != MemCpy->getDest())
    return false;

  // Check that there are no other dependencies on the memset destination.
  MemDepResult DstDepInfo =
      MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
                                   MemCpy->getIterator(), MemCpy->getParent());
  if (DstDepInfo.getInst() != MemSet)
    return false;

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned, and SrcSize is constant, use the minimum alignment
  // of the sum.
  const unsigned DestAlign =
      std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
  Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
  Value *MemsetLen = Builder.CreateSelect(
      Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
  Instruction *NewMemSet = Builder.CreateMemSet(
      Builder.CreateGEP(Dest->getType()->getPointerElementType(), Dest,
                        SrcSize),
      MemSet->getOperand(1), MemsetLen, MaybeAlign(Align));

  if (MSSAU) {
    assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)) &&
           "MemCpy must be a MemoryDef");
    // The new memset is inserted after the memcpy, but it is known that its
    // defining access is the memset about to be removed, which immediately
    // precedes the memcpy.
    auto *LastDef =
        cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
    auto *NewAccess = MSSAU->createMemoryAccessBefore(
        NewMemSet, LastDef->getDefiningAccess(), LastDef);
    MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
    MSSAU->removeMemoryAccess(MemSet);
  }

  MD->removeInstruction(MemSet);
  MemSet->eraseFromParent();
  return true;
}

/// Determine whether the instruction has undefined content for the given
/// Size, either because it was freshly alloca'd or because it started its
/// lifetime.
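///
/// E.g. (illustrative IR) a dependence on either of:
///   %a = alloca [16 x i8]
///   call void @llvm.lifetime.start.p0i8(i64 16, i8* %a)
/// yields undefined contents, where the lifetime case additionally requires
/// that the lifetime covers at least Size bytes.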
static bool hasUndefContents(Instruction *I, ConstantInt *Size) {
  if (isa<AllocaInst>(I))
    return true;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
      if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
        if (LTSize->getZExtValue() >= Size->getZExtValue())
          return true;

  return false;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// when dst2_size <= dst1_size.
///
/// The \p MemCpy must have a Constant length.
bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                               MemSetInst *MemSet) {
  // Make sure we have the pattern memcpy(..., memset(...), ...); that is,
  // that we are copying from the address that was just memset. Otherwise it
  // is hard to reason about.
  if (!AA->isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
    return false;

  // A known memset size is required.
  ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
  if (!MemSetSize)
    return false;

  // Make sure the memcpy doesn't read any more than what the memset wrote.
  // Don't worry about sizes larger than i64.
  ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
  if (CopySize->getZExtValue() > MemSetSize->getZExtValue()) {
    // If the memcpy is larger than the memset, but the memory was undef prior
    // to the memset, we can just ignore the tail. Technically we're only
    // interested in the bytes from MemSetSize..CopySize here, but as we can't
    // easily represent this location, we use the full 0..CopySize range.
    MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
    MemDepResult DepInfo = MD->getPointerDependencyFrom(
        MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent());
    if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize))
      CopySize = MemSetSize;
    else
      return false;
  }

  IRBuilder<> Builder(MemCpy);
  Instruction *NewM =
      Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                           CopySize, MaybeAlign(MemCpy->getDestAlignment()));
  if (MSSAU) {
    auto *LastDef =
        cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
    auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
    MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
  }

  return true;
}

/// Perform simplification of memcpys. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
  // We can only optimize non-volatile memcpys.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    ++BBI;
    if (MSSAU)
      MSSAU->removeMemoryAccess(M);

    MD->removeInstruction(M);
    M->eraseFromParent();
    return true;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
                                           M->getModule()->getDataLayout())) {
        IRBuilder<> Builder(M);
        Instruction *NewM =
            Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                                 MaybeAlign(M->getDestAlignment()), false);
        if (MSSAU) {
          auto *LastDef =
              cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
          auto *NewAccess =
              MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
          MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
          MSSAU->removeMemoryAccess(M);
        }

        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  MemDepResult DepInfo = MD->getDependency(M);

  // Try to turn a partially redundant memset + memcpy into
  // memcpy + smaller memset. We don't need the memcpy size for this.
  if (DepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
      if (processMemSetMemCpyDependence(M, MDep))
        return true;

  // The optimizations after this point require the memcpy size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (!CopySize) return false;

  // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started
  //      its lifetime copies undefined data, and we can therefore eliminate
  //      the memcpy in favor of the data that was already at the destination.
  //   d) memcpy from a just-memset'd source can be turned into memset.
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      // FIXME: Can we pass in either of dest/src alignment here instead
      // of conservatively taking the minimum?
      Align Alignment = std::min(M->getDestAlign().valueOrOne(),
                                 M->getSourceAlign().valueOrOne());
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), Alignment, C)) {
        if (MSSAU)
          MSSAU->removeMemoryAccess(M);

        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
      SrcLoc, true, M->getIterator(), M->getParent());

  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep);
  } else if (SrcDepInfo.isDef()) {
    if (hasUndefContents(SrcDepInfo.getInst(), CopySize)) {
      if (MSSAU)
        MSSAU->removeMemoryAccess(M);

      MD->removeInstruction(M);
      M->eraseFromParent();
      ++NumMemCpyInstr;
      return true;
    }
  }

  if (SrcDepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
      if (performMemCpyToMemSetOptzn(M, MDep)) {
        if (MSSAU)
          MSSAU->removeMemoryAccess(M);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  return false;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
  if (!TLI->has(LibFunc_memmove))
    return false;

  // See if the pointers alias.
  if (!AA->isNoAlias(MemoryLocation::getForDest(M),
                     MemoryLocation::getForSource(M)))
    return false;

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
                    << "\n");

  // If not, then we know we can transform this.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));

  // For MemorySSA nothing really changes (except that memcpy may imply
  // stricter aliasing guarantees).

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// This is called on every byval argument in call sites.
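///
/// If the byval argument is fed by a memcpy from another buffer, the call may
/// be able to read from the memcpy's source directly, e.g. (illustrative):
/// \code
///   memcpy(tmp <- src, size)
///   call @f(byval tmp)
/// \endcode
/// may become:
/// \code
///   memcpy(tmp <- src, size)
///   call @f(byval src)
/// \endcode
/// leaving the now possibly dead memcpy for later passes to remove.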
bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
  const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CB.getArgOperand(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemDepResult DepInfo = MD->getPointerDependencyFrom(
      MemoryLocation(ByValArg, LocationSize::precise(ByValSize)), true,
      CB.getIterator(), CB.getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of
  // the result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval. If the call doesn't specify the
  // alignment, then it is some target specific value that we can't know.
  MaybeAlign ByValAlign = CB.getParamAlign(ArgNo);
  if (!ByValAlign) return false;

  // If it is greater than the memcpy, then we check to see if we can force the
  // source of the memcpy to the alignment we need. If we fail, we bail out.
  MaybeAlign MemDepAlign = MDep->getSourceAlign();
  if ((!MemDepAlign || *MemDepAlign < *ByValAlign) &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, AC,
                                 DT) < *ByValAlign)
    return false;

  // The address space of the memcpy source must match the byval argument.
  if (MDep->getSource()->getType()->getPointerAddressSpace() !=
      ByValArg->getType()->getPointerAddressSpace())
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy
  // and the byval call. For example, in:
  //   memcpy(a <- b)
  //   *b = 42;
  //   foo(*a)
  // it would be invalid to transform the call into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep = MD->getPointerDependencyFrom(
      MemoryLocation::getForSource(MDep), false,
      CB.getIterator(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType()) {
    BitCastInst *TmpBitCast = new BitCastInst(MDep->getSource(),
                                              ByValArg->getType(),
                                              "tmpcast", &CB);
    // Set the tmpcast's DebugLoc to MDep's.
    TmpBitCast->setDebugLoc(MDep->getDebugLoc());
    TmpCast = TmpBitCast;
  }

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
                    << "  " << *MDep << "\n"
                    << "  " << CB << "\n");

  // Otherwise we're good! Update the byval argument.
  CB.setArgOperand(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// Executes one iteration of MemCpyOptPass.
bool MemCpyOptPass::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (BasicBlock &BB : F) {
    // Skip unreachable blocks. For example, processStore assumes that an
    // instruction in a BB can't be dominated by a later instruction in the
    // same BB (which is a scenario that can happen for an unreachable BB that
    // has itself as a predecessor).
    if (!DT->isReachableFromEntry(&BB))
      continue;

    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M, BI);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto *CB = dyn_cast<CallBase>(I)) {
        for (unsigned i = 0, e = CB->arg_size(); i != e; ++i)
          if (CB->isByValArgument(i))
            MadeChange |= processByValArgument(*CB, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB.begin())
          --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto *AA = &AM.getResult<AAManager>(F);
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *MSSA = AM.getCachedResult<MemorySSAAnalysis>(F);

  bool MadeChange =
      runImpl(F, &MD, &TLI, AA, AC, DT, MSSA ? &MSSA->getMSSA() : nullptr);
  if (!MadeChange)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  if (MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

bool MemCpyOptPass::runImpl(Function &F, MemoryDependenceResults *MD_,
                            TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
                            AssumptionCache *AC_, DominatorTree *DT_,
                            MemorySSA *MSSA_) {
  bool MadeChange = false;
  MD = MD_;
  TLI = TLI_;
  AA = AA_;
  AC = AC_;
  DT = DT_;
  MemorySSAUpdater MSSAU_(MSSA_);
  MSSAU = MSSA_ ? &MSSAU_ : nullptr;
  // If we don't have at least memset and memcpy, there is little point in
  // doing anything here. These are required by a freestanding implementation,
  // so if even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
    return false;

  while (true) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  if (MSSA_ && VerifyMemorySSA)
    MSSA_->verifyMemorySSA();

  MD = nullptr;
  return MadeChange;
}

/// This is the main transformation entry point for a function.
bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();

  return Impl.runImpl(F, MD, TLI, AA, AC, DT,
                      MSSAWP ? &MSSAWP->getMSSA() : nullptr);
}