//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memsets.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

// TODO: Actually implement MemorySSA-based MemCpyOpt.
static cl::opt<bool>
    EnableMemorySSA("enable-memcpyopt-memoryssa", cl::init(false), cl::Hidden,
                    cl::desc("Use MemorySSA-backed MemCpyOpt."));

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");
STATISTIC(NumCallSlot, "Number of call slot optimizations performed");

namespace {

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2).
/// The third makes a new range [2, 3). The fourth store joins the two ranges
/// into [0, 3), which is memset'able.
struct MemsetRange {
  // Start/End - A half-open range describing the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found at least 4 stores to merge, or at least 16 bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost
  // always. However, merging 2 32-bit stores isn't useful on a 32-bit
  // architecture (the memset will be split into 2 32-bit stores anyway) and
  // doing so can pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes, if any, are stored a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}
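
// Worked example of the heuristic above (illustrative only): with a data
// layout whose largest legal integer type is 32 bits (MaxIntSize == 4), an
// i16 store plus two i8 stores covering [0, 4) give Bytes == 4,
// NumPointerStores == 1 and NumByteStores == 0; since 3 > 1, merging into a
// memset is profitable (the backend can lower it to a single i32 store).
// Conversely, three i32 stores covering [0, 12) give NumPointerStores == 3,
// and 3 > 3 fails, so they are left alone.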
namespace {

class MemsetRanges {
  using range_iterator = SmallVectorImpl<MemsetRange>::iterator;

  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;

  const DataLayout &DL;

public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;

  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize, SI->getPointerOperand(),
             SI->getAlign().value(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(),
             MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anonymous namespace

/// Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = partition_point(
      Ranges, [=](const MemsetRange &O) { return O.End < Start; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End. If End < I->Start or I == E, then we need
  // to insert a new range. Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range. In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I) and that End >= I->Start. Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}
//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {

class MemCpyOptLegacyPass : public FunctionPass {
  MemCpyOptPass Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  MemCpyOptLegacyPass() : FunctionPass(ID) {
    initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    if (EnableMemorySSA)
      AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
  }
};

} // end anonymous namespace

char MemCpyOptLegacyPass::ID = 0;

/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)

// Check that V is either not accessible by the caller, or unwinding cannot
// occur between Start and End.
static bool mayBeVisibleThroughUnwinding(Value *V, Instruction *Start,
                                         Instruction *End) {
  assert(Start->getParent() == End->getParent() && "Must be in same block");
  if (!Start->getFunction()->doesNotThrow() &&
      !isa<AllocaInst>(getUnderlyingObject(V))) {
    for (const Instruction &I :
         make_range(Start->getIterator(), End->getIterator())) {
      if (I.mayThrow())
        return true;
    }
  }
  return false;
}
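
// Illustrative sketch (hypothetical IR): writes to a local alloca cannot be
// observed by the caller if the function unwinds, so for
//
//   %buf = alloca [16 x i8]
//   call void @may_throw()     ; may unwind
//   ...writes to %buf...
//
// mayBeVisibleThroughUnwinding(%buf, ...) returns false. If V were instead a
// pointer argument (potentially visible to the caller), the may-throw call
// between Start and End would make it return true.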
void MemCpyOptPass::eraseInstruction(Instruction *I) {
  if (MSSAU)
    MSSAU->removeMemoryAccess(I);
  MD->removeInstruction(I);
  I->eraseFromParent();
}

/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store of a splattable value. Scan to find
  // all subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous
  // blocks are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);

  // Keeps track of the last memory use or def before the insertion point for
  // the new memset. The new MemoryDef for the inserted memsets will be
  // inserted after MemInsertPoint. It points to either LastMemDef or to the
  // last user before the insertion point of the memset, if there are any such
  // users.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  // Keeps track of the last MemoryDef between StartInst and the insertion
  // point for the new memset. This will become the defining access of the
  // inserted memsets.
  MemoryDef *LastMemDef = nullptr;
  for (++BI; !BI->isTerminator(); ++BI) {
    if (MSSAU) {
      auto *CurrentAcc = cast_or_null<MemoryUseOrDef>(
          MSSAU->getMemorySSA()->getMemoryAccess(&*BI));
      if (CurrentAcc) {
        MemInsertPoint = CurrentAcc;
        if (auto *CurrentDef = dyn_cast<MemoryDef>(CurrentAcc))
          LastMemDef = CurrentDef;
      }
    }

    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      Value *StoredVal = NextStore->getValueOperand();

      // Don't convert stores of non-integral pointer types to memsets (which
      // store integers).
      if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
        break;

      // Check to see if this stored value is of the same byte-splattable
      // value.
      Value *StoredByte = isBytewiseValue(StoredVal, DL);
      if (isa<UndefValue>(ByteVal) && StoredByte)
        ByteVal = StoredByte;
      if (ByteVal != StoredByte)
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset =
          isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL);
      if (!Offset)
        break;

      Ranges.addStore(*Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), DL);
      if (!Offset)
        break;

      Ranges.addMemSet(*Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well. We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);
  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block. This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memsets for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {
    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this! Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start,
                                   MaybeAlign(Range.Alignment));
    LLVM_DEBUG(dbgs() << "Replace stores:\n";
               for (Instruction *SI : Range.TheStores)
                 dbgs() << *SI << '\n';
               dbgs() << "With: " << *AMemSet << '\n');
    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    if (MSSAU) {
      assert(LastMemDef && MemInsertPoint &&
             "Both LastMemDef and MemInsertPoint need to be set");
      auto *NewDef =
          cast<MemoryDef>(MemInsertPoint->getMemoryInst() == &*BI
                              ? MSSAU->createMemoryAccessBefore(
                                    AMemSet, LastMemDef, MemInsertPoint)
                              : MSSAU->createMemoryAccessAfter(
                                    AMemSet, LastMemDef, MemInsertPoint));
      MSSAU->insertDef(NewDef, /*RenameUses=*/true);
      LastMemDef = NewDef;
      MemInsertPoint = NewDef;
    }

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores)
      eraseInstruction(SI);

    ++NumMemSetInfer;
  }

  return AMemSet;
}
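
// Illustrative before/after for tryMergingIntoMemset (hypothetical IR, casts
// elided; %p2 stands for %p + 2):
//
//   store i8 0, i8* %p            ; StartInst
//   %p1 = getelementptr i8, i8* %p, i64 1
//   store i8 0, i8* %p1
//   call void @llvm.memset.p0i8.i64(i8* %p2, i8 0, i64 14, i1 false)
// =>
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 16, i1 false)
//
// The two stores and the adjacent memset cover the contiguous range [0, 16)
// and are folded into a single memset.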
// This method tries to lift a store instruction before position P.
// It will lift the store and its operands, plus anything else that may alias
// them. Returns true if it was successful.
bool MemCpyOptPass::moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (isModOrRefSet(AA->getModRefInfo(P, StoreLoc)))
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction *, 8> ToLift{SI};

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted calls.
  SmallVector<const CallBase *, 8> Calls;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    // Make sure hoisting does not perform a store that was not guaranteed to
    // happen.
    if (!isGuaranteedToTransferExecutionToSuccessor(C))
      return false;

    bool MayAlias = isModOrRefSet(AA->getModRefInfo(C, None));

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = llvm::any_of(MemLocs, [C, this](const MemoryLocation &ML) {
        return isModOrRefSet(AA->getModRefInfo(C, ML));
      });

      if (!NeedLift)
        NeedLift = llvm::any_of(Calls, [C, this](const CallBase *Call) {
          return isModOrRefSet(AA->getModRefInfo(C, Call));
        });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      // Since LI is implicitly moved downwards past the lifted instructions,
      // none of them may modify its source.
      if (isModSet(AA->getModRefInfo(C, LoadLoc)))
        return false;
      else if (const auto *Call = dyn_cast<CallBase>(C)) {
        // If we can't lift this before P, it's game over.
        if (isModOrRefSet(AA->getModRefInfo(P, Call)))
          return false;

        Calls.push_back(Call);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (isModOrRefSet(AA->getModRefInfo(P, ML)))
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) {
        if (A->getParent() == SI->getParent()) {
          // Cannot hoist user of P above P.
          if (A == P) return false;
          Args.insert(A);
        }
      }
  }

  // Find MSSA insertion point. Normally P will always have a corresponding
  // memory access before which we can insert. However, with non-standard AA
  // pipelines, there may be a mismatch between AA and MSSA, in which case we
  // will scan for a memory access before P. In either case, we know for sure
  // that at least the load will have a memory access.
  // TODO: Simplify this once P will be determined by MSSA, in which case the
  // discrepancy can no longer occur.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  if (MSSAU) {
    if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(P)) {
      MemInsertPoint = cast<MemoryUseOrDef>(--MA->getIterator());
    } else {
      const Instruction *ConstP = P;
      for (const Instruction &I : make_range(++ConstP->getReverseIterator(),
                                             ++LI->getReverseIterator())) {
        if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
          MemInsertPoint = MA;
          break;
        }
      }
    }
  }

  // We made it; we need to lift.
  for (auto *I : llvm::reverse(ToLift)) {
    LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
    if (MSSAU) {
      assert(MemInsertPoint && "Must have found insert point");
      if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(I)) {
        MSSAU->moveAfter(MA, MemInsertPoint);
        MemInsertPoint = MA;
      }
    }
  }

  return true;
}
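
// Illustrative sketch of the lifting performed by moveUp (hypothetical IR):
// given the block
//
//   %v = load %struct.S, %struct.S* %src
//   store i32 1, i32* %q                   ; P: %q may alias %src
//   %gep = getelementptr %struct.S, %struct.S* %dst, i64 0
//   store %struct.S %v, %struct.S* %gep    ; SI
//
// the store SI and its address computation %gep are moved above P (with the
// load implicitly treated as moving down to the store), so that the caller
// can emit the promoted memcpy at P's position, before %src is clobbered.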
bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach this pass to propagate the !nontemporal metadata
  // to memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  Value *StoredVal = SI->getValueOperand();

  // Not all the transforms below are correct for non-integral pointers; bail
  // out until we've audited the individual pieces.
  if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
    return false;

  // Load-to-store forwarding can be interpreted as a memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      if (T->isAggregateType()) {
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store. If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (isModSet(AA->getModRefInfo(&I, LoadLoc))) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store's memory after this point and
        // the store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(SI, P, LI))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics. If not, memcpy can
          // be used.
          bool UseMemMove = false;
          if (!AA->isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);
          else
            M = Builder.CreateMemCpy(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);

          LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
                            << *M << "\n");

          if (MSSAU) {
            auto *LastDef =
                cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
            auto *NewAccess =
                MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
            MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
          }

          eraseInstruction(SI);
          eraseInstruction(LI);
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());
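
      // Illustrative pattern (hypothetical IR) for the case handled below:
      //
      //   call void @init(%struct.S* sret %tmp)
      //   %v = load %struct.S, %struct.S* %tmp   ; LI, clobbered by the call
      //   store %struct.S %v, %struct.S* %dst    ; SI
      //
      // If the checks succeed, performCallSlotOptzn rewrites the call to
      // write into %dst directly, and the load/store pair is deleted.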
      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --SI->getIterator(),
                                  E = C->getIterator();
             I != E; --I) {
          if (isModOrRefSet(AA->getModRefInfo(&*I, StoreLoc))) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            commonAlignment(SI->getAlign(), LI->getAlign()), C);
        if (changed) {
          eraseInstruction(SI);
          eraseInstruction(LI);
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'd a
  // byte at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V, DL)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging, as it can expose optimization opportunities
    // in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size,
                                     SI->getAlign());

      LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      if (MSSAU) {
        assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI)));
        auto *LastDef =
            cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
        auto *NewAccess = MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
        MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
      }

      eraseInstruction(SI);
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}
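
// Illustrative example for the aggregate-store promotion above (hypothetical
// IR): storing a zero-initialized struct
//
//   store %struct.Pair zeroinitializer, %struct.Pair* %p
//
// becomes, assuming %struct.Pair occupies 8 bytes,
//
//   call void @llvm.memset.p0i8.i64(i8* %p.i8, i8 0, i64 8, i1 false)
//
// where %p.i8 is %p cast to i8*.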
bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}

/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
                                         Instruction *cpyStore, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         Align cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning
  // that the memcpy can be discarded rather than moved.

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpyLoad->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpyLen),
                                          DL, C, DT))
    return false;

  // Make sure that nothing can observe cpyDest being written early. There are
  // a number of cases to consider:
  //  1. cpyDest cannot be accessed between C and cpyStore as a precondition of
  //     the transform.
  //  2. C itself may not access cpyDest (prior to the transform). This is
  //     checked further below.
  //  3. If cpyDest is accessible to the caller of this function (potentially
  //     captured and not based on an alloca), we need to ensure that we cannot
  //     unwind between C and cpyStore. This is checked here.
  //  4. If cpyDest is potentially captured, there may be accesses to it from
  //     another thread. In this case, we need to check that cpyStore is
  //     guaranteed to be executed if C is. As it is a non-atomic access, it
  //     renders accesses from other threads undefined.
  //     TODO: This is currently not checked.
  if (mayBeVisibleThroughUnwinding(cpyDest, C, cpyStore))
    return false;

  // Check that dest points to memory that is at least as aligned as src.
  Align srcAlign = srcAlloca->getAlign();
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the
  // final memcpy can be dropped), that it is not read or written between the
  // call and the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->isLifetimeStartOrEnd())
        continue;

    if (U != C && U != cpyLoad)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned ArgI = 0, E = C->arg_size(); ArgI != E; ++ArgI)
    if (C->getArgOperand(ArgI) == cpySrc && !C->doesNotCapture(ArgI))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  if (!DT->dominates(cpyDest, C)) {
    // Support moving a constant index GEP before the call.
    auto *GEP = dyn_cast<GetElementPtrInst>(cpyDest);
    if (GEP && GEP->hasAllConstantIndices() &&
        DT->dominates(GEP->getPointerOperand(), C))
      GEP->moveBefore(C);
    else
      return false;
  }

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest. We rely on AA to figure this out for us.
  ModRefInfo MR = AA->getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
  // If necessary, perform additional analysis.
  if (isModOrRefSet(MR))
    MR = AA->callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), DT);
  if (isModOrRefSet(MR))
    return false;

  // We can't create address space casts here because we don't know if they're
  // safe for the target.
  if (cpySrc->getType()->getPointerAddressSpace() !=
      cpyDest->getType()->getPointerAddressSpace())
    return false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc &&
        cpySrc->getType()->getPointerAddressSpace() !=
            C->getArgOperand(ArgI)->getType()->getPointerAddressSpace())
      return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType()
                        ? cpyDest
                        : CastInst::CreatePointerCast(
                              cpyDest, cpySrc->getType(),
                              cpyDest->getName(), C);
      changedArgument = true;
      if (C->getArgOperand(ArgI)->getType() == Dest->getType())
        C->setArgOperand(ArgI, Dest);
      else
        C->setArgOperand(ArgI, CastInst::CreatePointerCast(
                                   Dest, C->getArgOperand(ArgI)->getType(),
                                   Dest->getName(), C));
    }

  if (!changedArgument)
    return false;
  // If the destination wasn't sufficiently aligned then increase its
  // alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata.
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet.
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group,
                         LLVMContext::MD_access_group};
  combineMetadata(C, cpyLoad, KnownIDs, true);

  ++NumCallSlot;
  return true;
}
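
// Concrete instance of the call slot optimization (illustrative, with
// hypothetical names): for
//
//   %tmp = alloca %struct.S
//   call void @produce(%struct.S* sret %tmp)
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst.i8, i8* %tmp.i8, i64 24,
//                                        i1 false)
//
// the checks above ensure %tmp is only used by the call and the memcpy, and
// the rewrite produces
//
//   call void @produce(%struct.S* sret %dst)
//
// after which the now-redundant memcpy is erased by this function's caller.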
/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                  MemCpyInst *MDep) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction. Just
  // ignore the input and let someone else zap MDep. This handles cases like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // The lengths of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  // Verify that the copied-from memory doesn't change in between the two
  // transfers. For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination
  // "c", then we could still perform the xform by moving M up to the first
  // memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
      MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                   M->getIterator(), M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap. We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA->isNoAlias(MemoryLocation::getForDest(M),
                     MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.
  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
                    << *MDep << '\n' << *M << '\n');

  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  IRBuilder<> Builder(M);
  Instruction *NewM;
  if (UseMemMove)
    NewM = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(),
                                 MDep->getRawSource(), MDep->getSourceAlign(),
                                 M->getLength(), M->isVolatile());
  else
    NewM = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(),
                                MDep->getRawSource(), MDep->getSourceAlign(),
                                M->getLength(), M->isVolatile());

  if (MSSAU) {
    assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)));
    auto *LastDef = cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
    auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
    MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
  }

  // Remove the instruction we're replacing.
  eraseInstruction(M);
  ++NumMemCpyInstr;
  return true;
}
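
// Illustrative example of the forwarding above (hypothetical, sizes in
// bytes):
//
//   memcpy(a <- b, 64)   ; MDep
//   memcpy(c <- a, 32)   ; M, with b unchanged in between
// =>
//   memcpy(a <- b, 64)
//   memcpy(c <- b, 32)
//
// The first memcpy may then become dead and be removed by later passes
// (e.g. DSE).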
/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                                  MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (MemSet->getDest() != MemCpy->getDest())
    return false;

  // Check that src and dst of the memcpy aren't the same. While memcpy
  // operands cannot partially overlap, exact equality is allowed.
  if (!AA->isNoAlias(MemoryLocation(MemCpy->getSource(),
                                    LocationSize::precise(1)),
                     MemoryLocation(MemCpy->getDest(),
                                    LocationSize::precise(1))))
    return false;

  // Check that there are no other dependencies on the memset destination.
  MemDepResult DstDepInfo =
      MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
                                   MemCpy->getIterator(), MemCpy->getParent());
  if (DstDepInfo.getInst() != MemSet)
    return false;

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  if (mayBeVisibleThroughUnwinding(Dest, MemSet, MemCpy))
    return false;

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned, and SrcSize is constant, use the minimum alignment
  // of the sum.
  const unsigned DestAlign =
      std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
  Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
  Value *MemsetLen = Builder.CreateSelect(
      Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
  Instruction *NewMemSet = Builder.CreateMemSet(
      Builder.CreateGEP(Dest->getType()->getPointerElementType(), Dest,
                        SrcSize),
      MemSet->getOperand(1), MemsetLen, MaybeAlign(Align));

  if (MSSAU) {
    assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)) &&
           "MemCpy must be a MemoryDef");
    // The new memset is inserted before the memcpy, and it is known that its
    // defining access is the memset about to be removed, which immediately
    // precedes the memcpy.
    auto *LastDef =
        cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
    auto *NewAccess = MSSAU->createMemoryAccessBefore(
        NewMemSet, LastDef->getDefiningAccess(), LastDef);
    MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
  }

  eraseInstruction(MemSet);
  return true;
}
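
// Worked example (illustrative): with dst_size == 16 and src_size == 8,
//
//   memset(dst, c, 16);
//   memcpy(dst, src, 8);
//
// becomes
//
//   memcpy(dst, src, 8);
//   memset(dst + 8, c, 8);
//
// and when dst_size <= src_size the select above folds the new memset's
// length to 0, leaving only the memcpy to write the destination.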
/// Determine whether the instruction has undefined content for the given Size,
/// either because it was freshly alloca'd or started its lifetime.
static bool hasUndefContents(Instruction *I, ConstantInt *Size) {
  if (isa<AllocaInst>(I))
    return true;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
      if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
        if (LTSize->getZExtValue() >= Size->getZExtValue())
          return true;

  return false;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// When dst2_size <= dst1_size.
///
/// The \p MemCpy must have a Constant length.
bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                               MemSetInst *MemSet) {
  // Make sure we have memcpy(..., memset(...), ...); that is, that we are
  // memsetting and then memcpying from the same address. Otherwise the
  // transform is hard to reason about.
  if (!AA->isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
    return false;

  // A known memset size is required.
  ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
  if (!MemSetSize)
    return false;

  // Make sure the memcpy doesn't read any more than what the memset wrote.
  // Don't worry about sizes larger than i64.
  ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
  if (CopySize->getZExtValue() > MemSetSize->getZExtValue()) {
    // If the memcpy is larger than the memset, but the memory was undef prior
    // to the memset, we can just ignore the tail. Technically we're only
    // interested in the bytes from MemSetSize..CopySize here, but as we can't
    // easily represent this location, we use the full 0..CopySize range.
    MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
    MemDepResult DepInfo = MD->getPointerDependencyFrom(
        MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent());
    if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize))
      CopySize = MemSetSize;
    else
      return false;
  }

  IRBuilder<> Builder(MemCpy);
  Instruction *NewM =
      Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                           CopySize, MaybeAlign(MemCpy->getDestAlignment()));
  if (MSSAU) {
    auto *LastDef =
        cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
    auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
    MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
  }

  return true;
}
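
// Illustrative example of the undef-tail case above (hypothetical IR): for
//
//   %a = alloca [16 x i8]     ; contents start out undef
//   memset(%a, 0, 8)
//   memcpy(%b, %a, 16)
//
// the copy reads 8 defined bytes and 8 undef bytes, so it is valid to clamp
// CopySize to the memset size and emit memset(%b, 0, 8) in its place.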
/// Perform simplification of memcpy's. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
  // We can only optimize non-volatile memcpy's.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    ++BBI;
    eraseInstruction(M);
    return true;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
                                           M->getModule()->getDataLayout())) {
        IRBuilder<> Builder(M);
        Instruction *NewM =
            Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                                 MaybeAlign(M->getDestAlignment()), false);
        if (MSSAU) {
          auto *LastDef =
              cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
          auto *NewAccess =
              MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
          MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
        }

        eraseInstruction(M);
        ++NumCpyToSet;
        return true;
      }

  MemDepResult DepInfo = MD->getDependency(M);

  // Try to turn a partially redundant memset + memcpy into
  // memcpy + smaller memset. We don't need the memcpy size for this.
  if (DepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
      if (processMemSetMemCpyDependence(M, MDep))
        return true;

  // The optimizations after this point require the memcpy size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (!CopySize) return false;

  // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundance for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started
  //      its lifetime copies undefined data, and we can therefore eliminate
  //      the memcpy in favor of the data that was already at the destination.
  //   d) memcpy from a just-memset'd source can be turned into memset.
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      // FIXME: Can we pass in either of dest/src alignment here instead
      // of conservatively taking the minimum?
      Align Alignment = std::min(M->getDestAlign().valueOrOne(),
                                 M->getSourceAlign().valueOrOne());
      if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), Alignment, C)) {
        eraseInstruction(M);
        ++NumMemCpyInstr;
        return true;
      }
    }
  }

  MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
      SrcLoc, true, M->getIterator(), M->getParent());

  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep);
  } else if (SrcDepInfo.isDef()) {
    if (hasUndefContents(SrcDepInfo.getInst(), CopySize)) {
      eraseInstruction(M);
      ++NumMemCpyInstr;
      return true;
    }
  }

  if (SrcDepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
      if (performMemCpyToMemSetOptzn(M, MDep)) {
        eraseInstruction(M);
        ++NumCpyToSet;
        return true;
      }

  return false;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
  if (!TLI->has(LibFunc_memmove))
    return false;

  // See if the pointers alias.
  if (!AA->isNoAlias(MemoryLocation::getForDest(M),
                     MemoryLocation::getForSource(M)))
    return false;

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
                    << "\n");

  // If not, then we know we can transform this.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));

  // For MemorySSA nothing really changes (except that memcpy may imply
  // stricter aliasing guarantees).

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}
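
// Illustrative example for processMemMove: when AA can prove the operands do
// not alias (e.g. two distinct allocas),
//
//   call void @llvm.memmove.p0i8.p0i8.i64(i8* %a, i8* %b, i64 32, i1 false)
// =>
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 32, i1 false)
//
// which later passes and the backend can lower more aggressively.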
/// This is called on every byval argument in call sites.
bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
  const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CB.getArgOperand(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemDepResult DepInfo = MD->getPointerDependencyFrom(
      MemoryLocation(ByValArg, LocationSize::precise(ByValSize)), true,
      CB.getIterator(), CB.getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval. If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  MaybeAlign ByValAlign = CB.getParamAlign(ArgNo);
  if (!ByValAlign) return false;

  // If it is greater than the memcpy, then we check to see if we can force the
  // source of the memcpy to the alignment we need. If we fail, we bail out.
  MaybeAlign MemDepAlign = MDep->getSourceAlign();
  if ((!MemDepAlign || *MemDepAlign < *ByValAlign) &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, AC,
                                 DT) < *ByValAlign)
    return false;

  // The address space of the memcpy source must match the byval argument.
  if (MDep->getSource()->getType()->getPointerAddressSpace() !=
      ByValArg->getType()->getPointerAddressSpace())
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy
  // and the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the call into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep = MD->getPointerDependencyFrom(
      MemoryLocation::getForSource(MDep), false,
      CB.getIterator(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType()) {
    BitCastInst *TmpBitCast = new BitCastInst(MDep->getSource(),
                                              ByValArg->getType(),
                                              "tmpcast", &CB);
    // Set the tmpcast's DebugLoc to MDep's.
    TmpBitCast->setDebugLoc(MDep->getDebugLoc());
    TmpCast = TmpBitCast;
  }

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
                    << "  " << *MDep << "\n"
                    << "  " << CB << "\n");

  // Otherwise we're good! Update the byval argument.
  CB.setArgOperand(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}
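
// Illustrative example of byval forwarding (hypothetical IR):
//
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp, i8* %src, i64 16, i1 false)
//   call void @f(%struct.S* byval align 4 %tmp)
// =>
//   call void @f(%struct.S* byval align 4 %src)
//
// since byval already makes the callee operate on a private copy; the
// temporary and its memcpy can then be cleaned up by later passes.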
/// Executes one iteration of MemCpyOptPass.
bool MemCpyOptPass::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (BasicBlock &BB : F) {
    // Skip unreachable blocks. For example processStore assumes that an
    // instruction in a BB can't be dominated by a later instruction in the
    // same BB (which is a scenario that can happen for an unreachable BB that
    // has itself as a predecessor).
    if (!DT->isReachableFromEntry(&BB))
      continue;

    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M, BI);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto *CB = dyn_cast<CallBase>(I)) {
        for (unsigned i = 0, e = CB->arg_size(); i != e; ++i)
          if (CB->isByValArgument(i))
            MadeChange |= processByValArgument(*CB, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB.begin())
          --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto *AA = &AM.getResult<AAManager>(F);
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *MSSA = EnableMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F)
                               : AM.getCachedResult<MemorySSAAnalysis>(F);

  bool MadeChange =
      runImpl(F, &MD, &TLI, AA, AC, DT, MSSA ? &MSSA->getMSSA() : nullptr);
  if (!MadeChange)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  if (MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

bool MemCpyOptPass::runImpl(Function &F, MemoryDependenceResults *MD_,
                            TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
                            AssumptionCache *AC_, DominatorTree *DT_,
                            MemorySSA *MSSA_) {
  bool MadeChange = false;
  MD = MD_;
  TLI = TLI_;
  AA = AA_;
  AC = AC_;
  DT = DT_;
  MemorySSAUpdater MSSAU_(MSSA_);
  MSSAU = MSSA_ ? &MSSAU_ : nullptr;
  // If we don't have at least memset and memcpy, there is little point in
  // doing anything here. These are required by a freestanding implementation,
  // so if even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
    return false;

  while (true) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  if (MSSA_ && VerifyMemorySSA)
    MSSA_->verifyMemorySSA();

  MD = nullptr;
  return MadeChange;
}

/// This is the main transformation entry point for a function.
bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *MSSAWP = EnableMemorySSA
                     ? &getAnalysis<MemorySSAWrapperPass>()
                     : getAnalysisIfAvailable<MemorySSAWrapperPass>();

  return Impl.runImpl(F, MD, TLI, AA, AC, DT,
                      MSSAWP ? &MSSAWP->getMSSA() : nullptr);
}
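
// Usage note (not part of the pass itself): this pass can be exercised in
// isolation with, e.g.,
//
//   opt -passes=memcpyopt -S input.ll    (new pass manager)
//   opt -memcpyopt -S input.ll           (legacy pass manager)
//
// and the MemorySSA-backed variant can be enabled with
// -enable-memcpyopt-memoryssa (off by default, per the cl::opt above).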