//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memsets.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

static cl::opt<bool> EnableMemCpyOptWithoutLibcalls(
    "enable-memcpyopt-without-libcalls", cl::init(false), cl::Hidden,
    cl::ZeroOrMore,
    cl::desc("Enable memcpyopt even when libcalls are disabled"));
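// Illustrative invocation (not from the original source): being a hidden
// cl::opt, the flag can still be passed to opt explicitly, e.g.
//   opt -passes=memcpyopt -enable-memcpyopt-without-libcalls foo.ll -S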
STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");
STATISTIC(NumCallSlot, "Number of call slot optimizations performed");

namespace {

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [3, 4). The fourth store joins the
/// two ranges into [0, 4), which is memset'able.
struct MemsetRange {
  // Start/End - A half-open range that describes the span this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found at least 4 stores to merge, or the range covers 16 or more
  // bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes, if any, are stored a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}
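// Worked example of the heuristic above (illustrative, not from the original
// source): with a largest legal integer type of 64 bits (MaxIntSize == 8),
// eight adjacent i8 stores give Bytes == 8, NumPointerStores == 1 and
// NumByteStores == 0; since 8 > 1, the range is lowered to a memset. Two
// adjacent i32 stores over the same 8 bytes never reach this computation:
// they are rejected by the TheStores.size() == 2 early-out, as the code
// generator can merge a pair of stores on its own.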
namespace {

class MemsetRanges {
  using range_iterator = SmallVectorImpl<MemsetRange>::iterator;

  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;

  const DataLayout &DL;

public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;

  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (auto *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    TypeSize StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());
    assert(!StoreSize.isScalable() && "Can't track scalable-typed stores");
    addRange(OffsetFromFirst, StoreSize.getFixedSize(), SI->getPointerOperand(),
             SI->getAlign().value(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(),
             MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anonymous namespace

/// Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = partition_point(
      Ranges, [=](const MemsetRange &O) { return O.End < Start; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End. If End < I->Start or I == E, then we need
  // to insert a new range. Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range. In this case, it couldn't
  // possibly cause it to join the prior range, because otherwise we would have
  // stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start. Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}
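// Worked trace of addRange (illustrative): feeding the four byte stores from
// the MemsetRange comment above through addRange in program order:
//   store 0 -> P+1  creates [1, 2)           (insert path)
//   store 0 -> P+0  extends it to [0, 2)     (start-extension path)
//   store 0 -> P+3  creates [3, 4)           (insert path)
//   store 0 -> P+2  grows [0, 2) to [0, 3); the merge loop above then folds
//                   in [3, 4), leaving the single memset'able range [0, 4)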
//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {

class MemCpyOptLegacyPass : public FunctionPass {
  MemCpyOptPass Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  MemCpyOptLegacyPass() : FunctionPass(ID) {
    initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
  }
};

} // end anonymous namespace

char MemCpyOptLegacyPass::ID = 0;

/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)

// Check that V is either not accessible by the caller, or unwinding cannot
// occur between Start and End.
static bool mayBeVisibleThroughUnwinding(Value *V, Instruction *Start,
                                         Instruction *End) {
  assert(Start->getParent() == End->getParent() && "Must be in same block");
  // If the function can't unwind, then V can't be visible through unwinding.
  if (Start->getFunction()->doesNotThrow())
    return false;

  // Object is not visible on unwind.
  // TODO: Support RequiresNoCaptureBeforeUnwind case.
  bool RequiresNoCaptureBeforeUnwind;
  if (isNotVisibleOnUnwind(getUnderlyingObject(V),
                           RequiresNoCaptureBeforeUnwind) &&
      !RequiresNoCaptureBeforeUnwind)
    return false;

  // Check whether there are any unwinding instructions in the range.
  return any_of(make_range(Start->getIterator(), End->getIterator()),
                [](const Instruction &I) { return I.mayThrow(); });
}
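// Example of the situation this check guards against (illustrative, not from
// the original source):
//   call void @init(i8* %dst)   ; Start: call slot opt would redirect this
//                               ; call to write %dst directly
//   call void @mayThrow()       ; may unwind
//   memcpy(%dst, %tmp, n)       ; End
// If %dst is visible to the caller (e.g. not a local alloca), unwinding out
// of @mayThrow would let the caller observe %dst in its early-written state,
// so the transformation must be blocked.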
void MemCpyOptPass::eraseInstruction(Instruction *I) {
  MSSAU->removeMemoryAccess(I);
  I->eraseFromParent();
}

// Check for mod or ref of Loc between Start and End, excluding both
// boundaries. Start and End must be in the same block.
static bool accessedBetween(AliasAnalysis &AA, MemoryLocation Loc,
                            const MemoryUseOrDef *Start,
                            const MemoryUseOrDef *End) {
  assert(Start->getBlock() == End->getBlock() && "Only local supported");
  for (const MemoryAccess &MA :
       make_range(++Start->getIterator(), End->getIterator())) {
    if (isModOrRefSet(AA.getModRefInfo(cast<MemoryUseOrDef>(MA).getMemoryInst(),
                                       Loc)))
      return true;
  }
  return false;
}

// Check for mod of Loc between Start and End, excluding both boundaries.
// Start and End can be in different blocks.
static bool writtenBetween(MemorySSA *MSSA, AliasAnalysis &AA,
                           MemoryLocation Loc, const MemoryUseOrDef *Start,
                           const MemoryUseOrDef *End) {
  if (isa<MemoryUse>(End)) {
    // For MemoryUses, getClobberingMemoryAccess may skip non-clobbering writes.
    // Manually check read accesses between Start and End, if they are in the
    // same block, for clobbers. Otherwise assume Loc is clobbered.
    return Start->getBlock() != End->getBlock() ||
           any_of(
               make_range(std::next(Start->getIterator()), End->getIterator()),
               [&AA, Loc](const MemoryAccess &Acc) {
                 if (isa<MemoryUse>(&Acc))
                   return false;
                 Instruction *AccInst =
                     cast<MemoryUseOrDef>(&Acc)->getMemoryInst();
                 return isModSet(AA.getModRefInfo(AccInst, Loc));
               });
  }

  // TODO: Only walk until we hit Start.
  MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
      End->getDefiningAccess(), Loc);
  return !MSSA->dominates(Clobber, Start);
}
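// Example (illustrative, not from the original source): for the sequence
//   memcpy(%b, %a, 16)    ; Start (the dependency)
//   store i8 1, i8* %a    ; writes the copied-from memory
//   memcpy(%c, %b, 16)    ; End
// writtenBetween on MemoryLocation::getForSource of the first memcpy returns
// true, which blocks forwarding the second memcpy to read from %a directly.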
/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // We can't track scalable types.
  if (auto *SI = dyn_cast<StoreInst>(StartInst))
    if (DL.getTypeStoreSize(SI->getOperand(0)->getType()).isScalable())
      return nullptr;

  // Okay, so we now have a single store of a splattable value. Scan to find
  // all subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous
  // blocks are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);

  // Keeps track of the last memory use or def before the insertion point for
  // the new memset. The new MemoryDef for the inserted memsets will be inserted
  // after MemInsertPoint. It points to either LastMemDef or to the last user
  // before the insertion point of the memset, if there are any such users.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  // Keeps track of the last MemoryDef between StartInst and the insertion
  // point for the new memset. This will become the defining access of the
  // inserted memsets.
  MemoryDef *LastMemDef = nullptr;
  for (++BI; !BI->isTerminator(); ++BI) {
    auto *CurrentAcc = cast_or_null<MemoryUseOrDef>(
        MSSAU->getMemorySSA()->getMemoryAccess(&*BI));
    if (CurrentAcc) {
      MemInsertPoint = CurrentAcc;
      if (auto *CurrentDef = dyn_cast<MemoryDef>(CurrentAcc))
        LastMemDef = CurrentDef;
    }

    // Calls that only access inaccessible memory do not block merging
    // accessible stores.
    if (auto *CB = dyn_cast<CallBase>(BI)) {
      if (CB->onlyAccessesInaccessibleMemory())
        continue;
    }

    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (auto *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      Value *StoredVal = NextStore->getValueOperand();

      // Don't convert stores of non-integral pointer types to memsets (which
      // stores integers).
      if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
        break;

      // We can't track ranges involving scalable types.
      if (DL.getTypeStoreSize(StoredVal->getType()).isScalable())
        break;

      // Check to see if this stored value is the same byte-splattable value.
      Value *StoredByte = isBytewiseValue(StoredVal, DL);
      if (isa<UndefValue>(ByteVal) && StoredByte)
        ByteVal = StoredByte;
      if (ByteVal != StoredByte)
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset =
          isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL);
      if (!Offset)
        break;

      Ranges.addStore(*Offset, NextStore);
    } else {
      auto *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), DL);
      if (!Offset)
        break;

      Ranges.addMemSet(*Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well. We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block. This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memsets for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {
    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this! Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start,
                                   MaybeAlign(Range.Alignment));
    LLVM_DEBUG(dbgs() << "Replace stores:\n";
               for (Instruction *SI : Range.TheStores)
                 dbgs() << *SI << '\n';
               dbgs() << "With: " << *AMemSet << '\n');
    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    assert(LastMemDef && MemInsertPoint &&
           "Both LastMemDef and MemInsertPoint need to be set");
    auto *NewDef =
        cast<MemoryDef>(MemInsertPoint->getMemoryInst() == &*BI
                            ? MSSAU->createMemoryAccessBefore(
                                  AMemSet, LastMemDef, MemInsertPoint)
                            : MSSAU->createMemoryAccessAfter(
                                  AMemSet, LastMemDef, MemInsertPoint));
    MSSAU->insertDef(NewDef, /*RenameUses=*/true);
    LastMemDef = NewDef;
    MemInsertPoint = NewDef;

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores)
      eraseInstruction(SI);

    ++NumMemSetInfer;
  }

  return AMemSet;
}
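// Example of what tryMergingIntoMemset produces (illustrative, not from the
// original source): four adjacent i32 zero stores
//   store i32 0, i32* %p
//   store i32 0, i32* %p1      ; %p1 = gep %p, 1, and so on
//   store i32 0, i32* %p2
//   store i32 0, i32* %p3
// are replaced by a single
//   call void @llvm.memset.p0i8.i64(i8* %p.cast, i8 0, i64 16, i1 false)
// and the original stores are erased.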
// This method tries to lift a store instruction before position P.
// It will lift the store and its operands, plus anything else that may alias
// with them.
// The method returns true if it was successful.
bool MemCpyOptPass::moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (isModOrRefSet(AA->getModRefInfo(P, StoreLoc)))
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction *, 8> ToLift{SI};

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted calls.
  SmallVector<const CallBase *, 8> Calls;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    // Make sure hoisting does not perform a store that was not guaranteed to
    // happen.
    if (!isGuaranteedToTransferExecutionToSuccessor(C))
      return false;

    bool MayAlias = isModOrRefSet(AA->getModRefInfo(C, None));

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = llvm::any_of(MemLocs, [C, this](const MemoryLocation &ML) {
        return isModOrRefSet(AA->getModRefInfo(C, ML));
      });

      if (!NeedLift)
        NeedLift = llvm::any_of(Calls, [C, this](const CallBase *Call) {
          return isModOrRefSet(AA->getModRefInfo(C, Call));
        });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      // Since LI is implicitly moved downwards past the lifted instructions,
      // none of them may modify its source.
      if (isModSet(AA->getModRefInfo(C, LoadLoc)))
        return false;
      else if (const auto *Call = dyn_cast<CallBase>(C)) {
        // If we can't lift this before P, it's game over.
        if (isModOrRefSet(AA->getModRefInfo(P, Call)))
          return false;

        Calls.push_back(Call);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (isModOrRefSet(AA->getModRefInfo(P, ML)))
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) {
        if (A->getParent() == SI->getParent()) {
          // Cannot hoist a user of P above P.
          if (A == P) return false;
          Args.insert(A);
        }
      }
  }

  // Find MSSA insertion point. Normally P will always have a corresponding
  // memory access before which we can insert. However, with non-standard AA
  // pipelines, there may be a mismatch between AA and MSSA, in which case we
  // will scan for a memory access before P. In either case, we know for sure
  // that at least the load will have a memory access.
  // TODO: Simplify this once P will be determined by MSSA, in which case the
  // discrepancy can no longer occur.
  MemoryUseOrDef *MemInsertPoint = nullptr;
  if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(P)) {
    MemInsertPoint = cast<MemoryUseOrDef>(--MA->getIterator());
  } else {
    const Instruction *ConstP = P;
    for (const Instruction &I : make_range(++ConstP->getReverseIterator(),
                                           ++LI->getReverseIterator())) {
      if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
        MemInsertPoint = MA;
        break;
      }
    }
  }

  // We made it, we need to lift.
  for (auto *I : llvm::reverse(ToLift)) {
    LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
    assert(MemInsertPoint && "Must have found insert point");
    if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(I)) {
      MSSAU->moveAfter(MA, MemInsertPoint);
      MemInsertPoint = MA;
    }
  }

  return true;
}
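// Example of what moveUp enables (illustrative, not from the original
// source): to promote the load/store pair below to a memcpy at P, the store
// must first be lifted above the clobbering write:
//   %v = load %T, %T* %src
//   store %T zeroinitializer, %T* %q   ; P: may write to %src
//   store %T %v, %T* %dst              ; SI
// moveUp(SI, P, LI) moves SI (and any of its operands computed in between)
// before P, so processStore below can emit the memcpy at P's position.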
bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach how to propagate the !nontemporal metadata to
  // memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  Value *StoredVal = SI->getValueOperand();

  // Not all the transforms below are correct for non-integral pointers, bail
  // until we've audited the individual pieces.
  if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
    return false;

  // Load to store forwarding can be interpreted as memcpy.
  if (auto *LI = dyn_cast<LoadInst>(StoredVal)) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      // Don't introduce calls to memcpy/memmove intrinsics out of thin air if
      // the corresponding libcalls are not available.
      // TODO: We should really distinguish between libcall availability and
      // our ability to introduce intrinsics.
      if (T->isAggregateType() &&
          (EnableMemCpyOptWithoutLibcalls ||
           (TLI->has(LibFunc_memcpy) && TLI->has(LibFunc_memmove)))) {
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store. If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        // TODO: Can use MSSA for this.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (isModSet(AA->getModRefInfo(&I, LoadLoc))) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this and the
        // store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(SI, P, LI))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics. If not, memcpy can
          // be used. Also, if we load from constant memory, memcpy can be
          // used as the constant memory won't be modified.
          bool UseMemMove = false;
          if (isModSet(AA->getModRefInfo(SI, LoadLoc)))
            UseMemMove = true;

          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);
          else
            M = Builder.CreateMemCpy(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);

          LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
                            << *M << "\n");

          auto *LastDef =
              cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
          auto *NewAccess = MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
          MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);

          eraseInstruction(SI);
          eraseInstruction(LI);
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      CallInst *C = nullptr;
      if (auto *LoadClobber = dyn_cast<MemoryUseOrDef>(
              MSSA->getWalker()->getClobberingMemoryAccess(LI))) {
        // The load must post-dominate the call. Limit to the same block for
        // now. TODO: Support non-local call-slot optimization?
        if (LoadClobber->getBlock() == SI->getParent())
          C = dyn_cast_or_null<CallInst>(LoadClobber->getMemoryInst());
      }
      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        if (accessedBetween(*AA, StoreLoc, MSSA->getMemoryAccess(C),
                            MSSA->getMemoryAccess(SI)))
          C = nullptr;
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            commonAlignment(SI->getAlign(), LI->getAlign()), C);
        if (changed) {
          eraseInstruction(SI);
          eraseInstruction(LI);
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // The following code creates memset intrinsics out of thin air. Don't do
  // this if the corresponding libfunc is not available.
  // TODO: We should really distinguish between libcall availability and
  // our ability to introduce intrinsics.
  if (!(TLI->has(LibFunc_memset) || EnableMemCpyOptWithoutLibcalls))
    return false;

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, like "0" or "-1", or a splattable value of any width, such as
  // 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V, DL)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging as it can expose optimization opportunities
    // in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size,
                                     SI->getAlign());

      LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      // The newly inserted memset is immediately overwritten by the original
      // store, so we do not need to rename uses.
      auto *StoreDef = cast<MemoryDef>(MSSA->getMemoryAccess(SI));
      auto *NewAccess = MSSAU->createMemoryAccessBefore(
          M, StoreDef->getDefiningAccess(), StoreDef);
      MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/false);

      eraseInstruction(SI);
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}
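// Typical IR pattern for call slot optimization (illustrative, not from the
// original source):
//   %tmp = alloca %T
//   call void @make(%T* sret(%T) %tmp)
//   %v = load %T, %T* %tmp
//   store %T %v, %T* %dst
// becomes
//   call void @make(%T* sret(%T) %dst)
// eliminating both the temporary and the copy.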
/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
                                         Instruction *cpyStore, Value *cpyDest,
                                         Value *cpySrc, TypeSize cpySize,
                                         Align cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // We can't optimize scalable types.
  if (cpySize.isScalable())
    return false;

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Require that src be an alloca. This simplifies the reasoning considerably.
  auto *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpyLoad->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpySize < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpySize),
                                          DL, C, DT)) {
    LLVM_DEBUG(dbgs() << "Call Slot: Dest pointer not dereferenceable\n");
    return false;
  }

  // Make sure that nothing can observe cpyDest being written early. There are
  // a number of cases to consider:
  //  1. cpyDest cannot be accessed between C and cpyStore as a precondition of
  //     the transform.
  //  2. C itself may not access cpyDest (prior to the transform). This is
  //     checked further below.
  //  3. If cpyDest is accessible to the caller of this function (potentially
  //     captured and not based on an alloca), we need to ensure that we cannot
  //     unwind between C and cpyStore. This is checked here.
  //  4. If cpyDest is potentially captured, there may be accesses to it from
  //     another thread. In this case, we need to check that cpyStore is
  //     guaranteed to be executed if C is. As it is a non-atomic access, it
  //     renders accesses from other threads undefined.
  //     TODO: This is currently not checked.
  if (mayBeVisibleThroughUnwinding(cpyDest, C, cpyStore)) {
    LLVM_DEBUG(dbgs() << "Call Slot: Dest may be visible through unwinding\n");
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  Align srcAlign = srcAlloca->getAlign();
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the
  // final memcpy can be dropped), that it is not read or written between the
  // call and the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User *, 8> srcUseList(srcAlloca->users());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      append_range(srcUseList, U->users());
      continue;
    }
    if (const auto *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      append_range(srcUseList, U->users());
      continue;
    }
    if (const auto *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->isLifetimeStartOrEnd())
        continue;

    if (U != C && U != cpyLoad)
      return false;
  }

  // Check whether src is captured by the called function, in which case there
  // may be further indirect uses of src.
  bool SrcIsCaptured = any_of(C->args(), [&](Use &U) {
    return U->stripPointerCasts() == cpySrc &&
           !C->doesNotCapture(C->getArgOperandNo(&U));
  });

  // If src is captured, then check whether there are any potential uses of
  // src through the captured pointer before the lifetime of src ends, either
  // due to a lifetime.end or a return from the function.
  if (SrcIsCaptured) {
    // Check that dest is not captured before/at the call. We have already
    // checked that src is not captured before it. If either had been captured,
    // then the call might be comparing the argument against the captured dest
    // or src pointer.
    Value *DestObj = getUnderlyingObject(cpyDest);
    if (!isIdentifiedFunctionLocal(DestObj) ||
        PointerMayBeCapturedBefore(DestObj, /* ReturnCaptures */ true,
                                   /* StoreCaptures */ true, C, DT,
                                   /* IncludeI */ true))
      return false;

    MemoryLocation SrcLoc =
        MemoryLocation(srcAlloca, LocationSize::precise(srcSize));
    for (Instruction &I :
         make_range(++C->getIterator(), C->getParent()->end())) {
      // Lifetime of srcAlloca ends at lifetime.end.
      if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_end &&
            II->getArgOperand(1)->stripPointerCasts() == srcAlloca &&
            cast<ConstantInt>(II->getArgOperand(0))->uge(srcSize))
          break;
      }

      // Lifetime of srcAlloca ends at return.
      if (isa<ReturnInst>(&I))
        break;

      // Ignore the direct read of src in the load.
      if (&I == cpyLoad)
        continue;

      // Check whether this instruction may mod/ref src through the captured
      // pointer (we have already checked for direct mod/refs in the loop
      // above). Also bail if we hit a terminator, as we don't want to scan
      // into other blocks.
      if (isModOrRefSet(AA->getModRefInfo(&I, SrcLoc)) || I.isTerminator())
        return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  if (!DT->dominates(cpyDest, C)) {
    // Support moving a constant index GEP before the call.
    auto *GEP = dyn_cast<GetElementPtrInst>(cpyDest);
    if (GEP && GEP->hasAllConstantIndices() &&
        DT->dominates(GEP->getPointerOperand(), C))
      GEP->moveBefore(C);
    else
      return false;
  }

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest. We rely on AA to figure this out for us.
  ModRefInfo MR = AA->getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
  // If necessary, perform additional analysis.
  if (isModOrRefSet(MR))
    MR = AA->callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), DT);
  if (isModOrRefSet(MR))
    return false;

  // We can't create address space casts here because we don't know if they're
  // safe for the target.
  if (cpySrc->getType()->getPointerAddressSpace() !=
      cpyDest->getType()->getPointerAddressSpace())
    return false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc &&
        cpySrc->getType()->getPointerAddressSpace() !=
            C->getArgOperand(ArgI)->getType()->getPointerAddressSpace())
      return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType()
                        ? cpyDest
                        : CastInst::CreatePointerCast(
                              cpyDest, cpySrc->getType(), cpyDest->getName(), C);
      changedArgument = true;
      if (C->getArgOperand(ArgI)->getType() == Dest->getType())
        C->setArgOperand(ArgI, Dest);
      else
        C->setArgOperand(ArgI, CastInst::CreatePointerCast(
                                   Dest, C->getArgOperand(ArgI)->getType(),
                                   Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Update AA metadata.
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet.
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group,
                         LLVMContext::MD_access_group};
  combineMetadata(C, cpyLoad, KnownIDs, true);
  if (cpyLoad != cpyStore)
    combineMetadata(C, cpyStore, KnownIDs, true);

  ++NumCallSlot;
  return true;
}

/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                  MemCpyInst *MDep) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a
  // noop transfer and substituting the input won't change this instruction.
  // Just ignore the input and let someone else zap MDep. This handles cases
  // like:
  //   memcpy(a <- a)
  //   memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the lengths of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
  if (MDep->getLength() != M->getLength()) {
    auto *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
    auto *MLen = dyn_cast<ConstantInt>(M->getLength());
    if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
      return false;
  }

  // Verify that the copied-from memory doesn't change in between the two
  // transfers. For example, in:
  //   memcpy(a <- b)
  //   *b = 42;
  //   memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  // TODO: It would be sufficient to check the MDep source up to the memcpy
  // size of M, rather than MDep.
  if (writtenBetween(MSSA, *AA, MemoryLocation::getForSource(MDep),
                     MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(M)))
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap. In addition, if the source of the first
  // points to constant memory, they won't overlap by definition. Otherwise, we
  // still want to eliminate the intermediate value, but we have to generate a
  // memmove instead of memcpy.
  bool UseMemMove = false;
  if (isModSet(AA->getModRefInfo(M, MemoryLocation::getForSource(MDep))))
    UseMemMove = true;

  // If all checks passed, then we can transform M.
  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
                    << *MDep << '\n' << *M << '\n');

  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  IRBuilder<> Builder(M);
  Instruction *NewM;
  if (UseMemMove)
    NewM = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(),
                                 MDep->getRawSource(), MDep->getSourceAlign(),
                                 M->getLength(), M->isVolatile());
  else if (isa<MemCpyInlineInst>(M)) {
    // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is
    // never allowed since that would allow the latter to be lowered as a call
    // to an external function.
    NewM = Builder.CreateMemCpyInline(
        M->getRawDest(), M->getDestAlign(), MDep->getRawSource(),
        MDep->getSourceAlign(), M->getLength(), M->isVolatile());
  } else
    NewM = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(),
                                MDep->getRawSource(), MDep->getSourceAlign(),
                                M->getLength(), M->isVolatile());

  assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)));
  auto *LastDef = cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
  auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
  MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);

  // Remove the instruction we're replacing.
  eraseInstruction(M);
  ++NumMemCpyInstr;
  return true;
}
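// Example (illustrative): the transformation above rewrites
//   memcpy(%b, %a, 16)   ; MDep
//   memcpy(%c, %b, 16)   ; M
// into
//   memcpy(%b, %a, 16)
//   memcpy(%c, %a, 16)
// so that the first copy becomes a dead store elimination candidate. If %c
// may alias %a, a memmove is emitted instead.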
/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c,
///          dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                                  MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (!AA->isMustAlias(MemSet->getDest(), MemCpy->getDest()))
    return false;

  // Check that src and dst of the memcpy aren't the same. While memcpy
  // operands cannot partially overlap, exact equality is allowed.
  if (isModSet(
          AA->getModRefInfo(MemCpy, MemoryLocation::getForSource(MemCpy))))
    return false;

  // We know that dst up to src_size is not written. We now need to make sure
  // that dst up to dst_size is not accessed. (If we did not move the memset,
  // checking for reads would be sufficient.)
  if (accessedBetween(*AA, MemoryLocation::getForDest(MemSet),
                      MSSA->getMemoryAccess(MemSet),
                      MSSA->getMemoryAccess(MemCpy)))
    return false;

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  if (mayBeVisibleThroughUnwinding(Dest, MemSet, MemCpy))
    return false;

  // If the sizes are the same, simply drop the memset instead of generating
  // a replacement with zero size.
  if (DestSize == SrcSize) {
    eraseInstruction(MemSet);
    return true;
  }

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned, and SrcSize is constant, use the minimum alignment
  // of the sum.
  const unsigned DestAlign =
      std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
  if (DestAlign > 1)
    if (auto *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
  Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
  Value *MemsetLen = Builder.CreateSelect(
      Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
  unsigned DestAS = Dest->getType()->getPointerAddressSpace();
  Instruction *NewMemSet = Builder.CreateMemSet(
      Builder.CreateGEP(
          Builder.getInt8Ty(),
          Builder.CreatePointerCast(Dest, Builder.getInt8PtrTy(DestAS)),
          SrcSize),
      MemSet->getOperand(1), MemsetLen, MaybeAlign(Align));

  assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)) &&
         "MemCpy must be a MemoryDef");
  // The new memset is inserted after the memcpy, but it is known that its
  // defining access is the memset about to be removed, which immediately
  // precedes the memcpy.
  auto *LastDef =
      cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
  auto *NewAccess = MSSAU->createMemoryAccessBefore(
      NewMemSet, LastDef->getDefiningAccess(), LastDef);
  MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);

  eraseInstruction(MemSet);
  return true;
}
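// Worked example (illustrative): for
//   memset(%dst, 0, 100)
//   memcpy(%dst, %src, 60)
// the code above emits
//   memcpy(%dst, %src, 60)
//   memset(%dst + 60, 0, 40)
// where the new length is select(ule(100, 60), 0, 100 - 60) == 40.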
/// Determine whether the instruction has undefined content for the given Size,
/// either because it was freshly alloca'd or started its lifetime.
static bool hasUndefContents(MemorySSA *MSSA, AliasAnalysis *AA, Value *V,
                             MemoryDef *Def, Value *Size) {
  if (MSSA->isLiveOnEntryDef(Def))
    return isa<AllocaInst>(getUnderlyingObject(V));

  if (auto *II = dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst())) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      auto *LTSize = cast<ConstantInt>(II->getArgOperand(0));

      if (auto *CSize = dyn_cast<ConstantInt>(Size)) {
        if (AA->isMustAlias(V, II->getArgOperand(1)) &&
            LTSize->getZExtValue() >= CSize->getZExtValue())
          return true;
      }

      // If the lifetime.start covers a whole alloca (as it almost always
      // does) and we're querying a pointer based on that alloca, then we know
      // the memory is definitely undef, regardless of how exactly we alias.
      // The size also doesn't matter, as an out-of-bounds access would be UB.
      if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(V))) {
        if (getUnderlyingObject(II->getArgOperand(1)) == Alloca) {
          const DataLayout &DL = Alloca->getModule()->getDataLayout();
          if (Optional<TypeSize> AllocaSize =
                  Alloca->getAllocationSizeInBits(DL))
            if (*AllocaSize == LTSize->getValue() * 8)
              return true;
        }
      }
    }
  }

  return false;
}
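// Example (illustrative): hasUndefContents returns true for %src in
//   %src = alloca [16 x i8]
//   call void @llvm.lifetime.start.p0i8(i64 16, i8* %src.cast)   ; Def
//   ... memcpy(%dst, %src, 16) ...
// because the lifetime.start covers the whole alloca, so the copied bytes
// are undef and the memcpy can be removed (case (c) in processMemCpy below).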
/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// when dst2_size <= dst1_size.
bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                               MemSetInst *MemSet) {
  // Make sure we have memcpy(..., memset(...), ...); that is, the memcpy
  // reads from the address the memset wrote to. Otherwise it is hard to
  // reason about.
  if (!AA->isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
    return false;

  Value *MemSetSize = MemSet->getLength();
  Value *CopySize = MemCpy->getLength();

  if (MemSetSize != CopySize) {
    // Make sure the memcpy doesn't read any more than what the memset wrote.
    // Don't worry about sizes larger than i64.

    // A known memset size is required.
    auto *CMemSetSize = dyn_cast<ConstantInt>(MemSetSize);
    if (!CMemSetSize)
      return false;

    // A known memcpy size is also required.
    auto *CCopySize = dyn_cast<ConstantInt>(CopySize);
    if (!CCopySize)
      return false;
    if (CCopySize->getZExtValue() > CMemSetSize->getZExtValue()) {
      // If the memcpy is larger than the memset, but the memory was undef
      // prior to the memset, we can just ignore the tail. Technically we're
      // only interested in the bytes from MemSetSize..CopySize here, but as
      // we can't easily represent this location, we use the full 0..CopySize
      // range.
      MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
      bool CanReduceSize = false;
      MemoryUseOrDef *MemSetAccess = MSSA->getMemoryAccess(MemSet);
      MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
          MemSetAccess->getDefiningAccess(), MemCpyLoc);
      if (auto *MD = dyn_cast<MemoryDef>(Clobber))
        if (hasUndefContents(MSSA, AA, MemCpy->getSource(), MD, CopySize))
          CanReduceSize = true;

      if (!CanReduceSize)
        return false;
      CopySize = MemSetSize;
    }
  }

  IRBuilder<> Builder(MemCpy);
  Instruction *NewM =
      Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                           CopySize, MaybeAlign(MemCpy->getDestAlignment()));
  auto *LastDef =
      cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
  auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
  MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);

  return true;
}

/// Perform simplification of memcpys. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
  // We can only optimize non-volatile memcpys.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    ++BBI;
    eraseInstruction(M);
    return true;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (auto *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
                                           M->getModule()->getDataLayout())) {
        IRBuilder<> Builder(M);
        Instruction *NewM =
            Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                                 MaybeAlign(M->getDestAlignment()), false);
        auto *LastDef =
            cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
        auto *NewAccess =
            MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
        MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);

        eraseInstruction(M);
        ++NumCpyToSet;
        return true;
      }

  MemoryUseOrDef *MA = MSSA->getMemoryAccess(M);
  MemoryAccess *AnyClobber = MSSA->getWalker()->getClobberingMemoryAccess(MA);
  MemoryLocation DestLoc = MemoryLocation::getForDest(M);
  const MemoryAccess *DestClobber =
      MSSA->getWalker()->getClobberingMemoryAccess(AnyClobber, DestLoc);

  // Try to turn a partially redundant memset + memcpy into
  // memcpy + smaller memset. We don't need the memcpy size for this.
  // The memcpy must post-dominate the memset, so limit this to the same basic
  // block. A non-local generalization is likely not worthwhile.
  if (auto *MD = dyn_cast<MemoryDef>(DestClobber))
    if (auto *MDep = dyn_cast_or_null<MemSetInst>(MD->getMemoryInst()))
      if (DestClobber->getBlock() == M->getParent())
        if (processMemSetMemCpyDependence(M, MDep))
          return true;

  MemoryAccess *SrcClobber = MSSA->getWalker()->getClobberingMemoryAccess(
      AnyClobber, MemoryLocation::getForSource(M));

  // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started
  //      its lifetime copies undefined data, and we can therefore eliminate
  //      the memcpy in favor of the data that was already at the destination.
  //   d) memcpy from a just-memset'd source can be turned into memset.
  if (auto *MD = dyn_cast<MemoryDef>(SrcClobber)) {
    if (Instruction *MI = MD->getMemoryInst()) {
      if (auto *CopySize = dyn_cast<ConstantInt>(M->getLength())) {
        if (auto *C = dyn_cast<CallInst>(MI)) {
          // The memcpy must post-dominate the call. Limit to the same block
          // for now. Additionally, we need to ensure that there are no
          // accesses to dest between the call and the memcpy. Accesses to
          // src will be checked by performCallSlotOptzn().
          // TODO: Support non-local call-slot optimization?
          if (C->getParent() == M->getParent() &&
              !accessedBetween(*AA, DestLoc, MD, MA)) {
            // FIXME: Can we pass in either of dest/src alignment here instead
            // of conservatively taking the minimum?
            Align Alignment = std::min(M->getDestAlign().valueOrOne(),
                                       M->getSourceAlign().valueOrOne());
            if (performCallSlotOptzn(
                    M, M, M->getDest(), M->getSource(),
                    TypeSize::getFixed(CopySize->getZExtValue()), Alignment,
                    C)) {
              LLVM_DEBUG(dbgs() << "Performed call slot optimization:\n"
                                << "    call: " << *C << "\n"
                                << "    memcpy: " << *M << "\n");
              eraseInstruction(M);
              ++NumMemCpyInstr;
              return true;
            }
          }
        }
      }
      if (auto *MDep = dyn_cast<MemCpyInst>(MI))
        return processMemCpyMemCpyDependence(M, MDep);
      if (auto *MDep = dyn_cast<MemSetInst>(MI)) {
        if (performMemCpyToMemSetOptzn(M, MDep)) {
          LLVM_DEBUG(dbgs() << "Converted memcpy to memset\n");
          eraseInstruction(M);
          ++NumCpyToSet;
          return true;
        }
      }
    }

    if (hasUndefContents(MSSA, AA, M->getSource(), MD, M->getLength())) {
      LLVM_DEBUG(dbgs() << "Removed memcpy from undef\n");
      eraseInstruction(M);
      ++NumMemCpyInstr;
      return true;
    }
  }

  return false;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
  // See if the source could potentially be modified by this memmove.
  if (isModSet(AA->getModRefInfo(M, MemoryLocation::getForSource(M))))
    return false;

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
                    << "\n");

  // If not, then we know we can transform this.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));

  // For MemorySSA nothing really changes (except that memcpy may imply
  // stricter aliasing guarantees).
  ++NumMoveToCpy;
  return true;
}

/// This is called on every byval argument in call sites.
bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
  const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CB.getArgOperand(ArgNo);
  Type *ByValTy = CB.getParamByValType(ArgNo);
  TypeSize ByValSize = DL.getTypeAllocSize(ByValTy);
  MemoryLocation Loc(ByValArg, LocationSize::precise(ByValSize));
  MemoryUseOrDef *CallAccess = MSSA->getMemoryAccess(&CB);
  if (!CallAccess)
    return false;
  MemCpyInst *MDep = nullptr;
  MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
      CallAccess->getDefiningAccess(), Loc);
  if (auto *MD = dyn_cast<MemoryDef>(Clobber))
    MDep = dyn_cast_or_null<MemCpyInst>(MD->getMemoryInst());

  // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of
  // the result.
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  auto *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || !TypeSize::isKnownGE(
                 TypeSize::getFixed(C1->getValue().getZExtValue()), ByValSize))
    return false;

  // Get the alignment of the byval. If the call doesn't specify the
  // alignment, then it is some target-specific value that we can't know.
  MaybeAlign ByValAlign = CB.getParamAlign(ArgNo);
  if (!ByValAlign) return false;

  // If the required alignment is greater than the memcpy source's alignment,
  // check to see if we can force the source of the memcpy to the alignment
  // we need. If we fail, we bail out.
  MaybeAlign MemDepAlign = MDep->getSourceAlign();
  if ((!MemDepAlign || *MemDepAlign < *ByValAlign) &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, AC,
                                 DT) < *ByValAlign)
    return false;

  // The address space of the memcpy source must match that of the byval
  // argument.
  if (MDep->getSource()->getType()->getPointerAddressSpace() !=
      ByValArg->getType()->getPointerAddressSpace())
    return false;

  // Verify that the copied-from memory doesn't change between the memcpy and
  // the byval call.
  //   memcpy(a <- b)
  //   *b = 42;
  //   foo(*a)
  // It would be invalid to rewrite the call to read from b here, because *b
  // was clobbered after the copy.
  if (writtenBetween(MSSA, *AA, MemoryLocation::getForSource(MDep),
                     MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(&CB)))
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType()) {
    BitCastInst *TmpBitCast = new BitCastInst(MDep->getSource(),
                                              ByValArg->getType(),
                                              "tmpcast", &CB);
    // Set the tmpcast's DebugLoc to MDep's.
    TmpBitCast->setDebugLoc(MDep->getDebugLoc());
    TmpCast = TmpBitCast;
  }

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
                    << "  " << *MDep << "\n"
                    << "  " << CB << "\n");

  // Otherwise we're good! Update the byval argument.
  CB.setArgOperand(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// Executes one iteration of MemCpyOptPass.
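/// Returns true if anything changed. runImpl() calls this in a loop until no
/// further changes are made, so each iteration may build on transformations
/// performed by the previous one.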
bool MemCpyOptPass::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (BasicBlock &BB : F) {
    // Skip unreachable blocks. For example, processStore assumes that an
    // instruction in a BB can't be dominated by a later instruction in the
    // same BB (a scenario that can happen for an unreachable BB that has
    // itself as a predecessor).
    if (!DT->isReachableFromEntry(&BB))
      continue;

    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      if (auto *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (auto *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (auto *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M, BI);
      else if (auto *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto *CB = dyn_cast<CallBase>(I)) {
        for (unsigned i = 0, e = CB->arg_size(); i != e; ++i)
          if (CB->isByValArgument(i))
            MadeChange |= processByValArgument(*CB, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB.begin())
          --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto *AA = &AM.getResult<AAManager>(F);
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *MSSA = &AM.getResult<MemorySSAAnalysis>(F);

  bool MadeChange = runImpl(F, &TLI, AA, AC, DT, &MSSA->getMSSA());
  if (!MadeChange)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<MemorySSAAnalysis>();
  return PA;
}

bool MemCpyOptPass::runImpl(Function &F, TargetLibraryInfo *TLI_,
                            AliasAnalysis *AA_, AssumptionCache *AC_,
                            DominatorTree *DT_, MemorySSA *MSSA_) {
  bool MadeChange = false;
  TLI = TLI_;
  AA = AA_;
  AC = AC_;
  DT = DT_;
  MSSA = MSSA_;
  MemorySSAUpdater MSSAU_(MSSA_);
  MSSAU = &MSSAU_;

  // Iterate to a fixed point: each successful sweep may expose further
  // opportunities for the next one.
  while (true) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  if (VerifyMemorySSA)
    MSSA_->verifyMemorySSA();

  return MadeChange;
}

/// This is the main transformation entry point for a function.
bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();

  return Impl.runImpl(F, TLI, AA, AC, DT, MSSA);
}