//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");

namespace {

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [2, 3). The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
struct MemsetRange {
  // Start/End - A semi range that describes the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found 4 or more stores to merge, or 16 or more bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes, if any, are done a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}

namespace {

class MemsetRanges {
  using range_iterator = SmallVectorImpl<MemsetRange>::iterator;

  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;

  const DataLayout &DL;

public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;

  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize, SI->getPointerOperand(),
             SI->getAlign().value(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(),
             MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anonymous namespace

/// Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = partition_point(
      Ranges, [=](const MemsetRange &O) { return O.End < Start; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End. If End < I->Start or I == E, then we need
  // to insert a new range. Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range. In this case, it couldn't
  // possibly cause it to join the prior range, because otherwise we would have
  // stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start. Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
// MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {

class MemCpyOptLegacyPass : public FunctionPass {
  MemCpyOptPass Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  MemCpyOptLegacyPass() : FunctionPass(ID) {
    initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator postdominator info
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }
};

} // end anonymous namespace

char MemCpyOptLegacyPass::ID = 0;

/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store that can be splatable. Scan to find
  // all subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);
  for (++BI; !BI->isTerminator(); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      //   A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable value.
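      // For example, an i32 store of 0 and an i8 store of 0 both splat to the
      // byte 0x00, so they can be merged into the same memset range.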
      Value *StoredByte = isBytewiseValue(NextStore->getOperand(0), DL);
      if (isa<UndefValue>(ByteVal) && StoredByte)
        ByteVal = StoredByte;
      if (ByteVal != StoredByte)
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset =
          isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL);
      if (!Offset)
        break;

      Ranges.addStore(*Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), DL);
      if (!Offset)
        break;

      Ranges.addMemSet(*Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well. We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put it right before the first instruction that
  // isn't part of the memset block. This ensures that the memset is dominated
  // by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {
    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this! Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start,
                                   MaybeAlign(Range.Alignment));
    LLVM_DEBUG(dbgs() << "Replace stores:\n";
               for (Instruction *SI : Range.TheStores)
                 dbgs() << *SI << '\n';
               dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores) {
      MD->removeInstruction(SI);
      SI->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}

// This method tries to lift a store instruction before position P.
// It will lift the store and its operands, plus anything else that
// may alias with them.
// The method returns true if it was successful.
static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
                   const LoadInst *LI) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (isModOrRefSet(AA.getModRefInfo(P, StoreLoc)))
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction*, 8> ToLift;

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted calls.
  SmallVector<const CallBase *, 8> Calls;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    bool MayAlias = isModOrRefSet(AA.getModRefInfo(C, None));

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = llvm::any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
        return isModOrRefSet(AA.getModRefInfo(C, ML));
      });

      if (!NeedLift)
        NeedLift = llvm::any_of(Calls, [C, &AA](const CallBase *Call) {
          return isModOrRefSet(AA.getModRefInfo(C, Call));
        });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      // Since LI is implicitly moved downwards past the lifted instructions,
      // none of them may modify its source.
      if (isModSet(AA.getModRefInfo(C, LoadLoc)))
        return false;
      else if (const auto *Call = dyn_cast<CallBase>(C)) {
        // If we can't lift this before P, it's game over.
        if (isModOrRefSet(AA.getModRefInfo(P, Call)))
          return false;

        Calls.push_back(Call);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (isModOrRefSet(AA.getModRefInfo(P, ML)))
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) {
        if (A->getParent() == SI->getParent()) {
          // Cannot hoist user of P above P.
          if (A == P) return false;
          Args.insert(A);
        }
      }
  }

  // We made it; now we need to lift.
  for (auto *I : llvm::reverse(ToLift)) {
    LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
  }

  return true;
}

/// If changes are made, return true and set BBI to the next instruction to
/// visit.
bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach the optimizer how to propagate the !nontemporal
  // metadata to memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Load to store forwarding can be interpreted as memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      if (T->isAggregateType()) {
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store.
        // If such an instruction is found, we try to promote there instead
        // of at the store position.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (isModSet(AA.getModRefInfo(&I, LoadLoc))) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this point and
        // the store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(AA, SI, P, LI))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics. If not, memcpy can
          // be used.
          bool UseMemMove = false;
          if (!AA.isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);
          else
            M = Builder.CreateMemCpy(
                SI->getPointerOperand(), SI->getAlign(),
                LI->getPointerOperand(), LI->getAlign(), Size);

          LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
                            << *M << "\n");

          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;

          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        Value *CpyDest = SI->getPointerOperand()->stripPointerCasts();
        bool CpyDestIsLocal = isa<AllocaInst>(CpyDest);
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --SI->getIterator(),
                                  E = C->getIterator();
             I != E; --I) {
          if (isModOrRefSet(AA.getModRefInfo(&*I, StoreLoc))) {
            C = nullptr;
            break;
          }
          // The store to dest may never happen if an exception can be thrown
          // between the load and the store.
          if (I->mayThrow() && !CpyDestIsLocal) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            commonAlignment(SI->getAlign(), LI->getAlign()), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
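  // For example, storing a 16-byte all-zero struct to a pointer dst can be
  // rewritten as memset(dst, 0, 16) even when there are no neighboring stores
  // to merge with (see the aggregate case below).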
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V, DL)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator();
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging as it can expose optimization opportunities
    // in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      IRBuilder<> Builder(SI);
      auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size,
                                     SI->getAlign());

      LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      MD->removeInstruction(SI);
      SI->eraseFromParent();
      NumMemSetInfer++;

      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

/// If changes are made, return true and set BBI to the next instruction to
/// visit.
bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator();
      return true;
    }
  return false;
}

/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpy, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         Align cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca. Check that it is no smaller than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // The store to dest may never happen if the call can throw.
    if (C->mayThrow())
      return false;

    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction".
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  Align srcAlign = srcAlloca->getAlign();
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->isLifetimeStartOrEnd())
        continue;

    if (U != C && U != cpy)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned ArgI = 0, E = C->arg_size(); ArgI != E; ++ArgI)
    if (C->getArgOperand(ArgI) == cpySrc && !C->doesNotCapture(ArgI))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = LookupDomTree();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest. We rely on AA to figure this out for us.
  AliasAnalysis &AA = LookupAliasAnalysis();
  ModRefInfo MR = AA.getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
  // If necessary, perform additional analysis.
  if (isModOrRefSet(MR))
    MR = AA.callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), &DT);
  if (isModOrRefSet(MR))
    return false;

  // We can't create address space casts here because we don't know if they're
  // safe for the target.
  if (cpySrc->getType()->getPointerAddressSpace() !=
      cpyDest->getType()->getPointerAddressSpace())
    return false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc &&
        cpySrc->getType()->getPointerAddressSpace() !=
            C->getArgOperand(ArgI)->getType()->getPointerAddressSpace())
      return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType()
                        ? cpyDest
                        : CastInst::CreatePointerCast(cpyDest,
                                                      cpySrc->getType(),
                                                      cpyDest->getName(), C);
      changedArgument = true;
      if (C->getArgOperand(ArgI)->getType() == Dest->getType())
        C->setArgOperand(ArgI, Dest);
      else
        C->setArgOperand(ArgI, CastInst::CreatePointerCast(
                                   Dest, C->getArgOperand(ArgI)->getType(),
                                   Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata.
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet.
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group,
                         LLVMContext::MD_access_group};
  combineMetadata(C, cpy, KnownIDs, true);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
Instruction *MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                          MemCpyInst *MDep) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return nullptr;

  // If the dep instruction is reading from our current input, then it is a
  // no-op transfer and substituting the input won't change this instruction.
  // Just ignore the input and let someone else zap MDep. This handles cases
  // like:
  //   memcpy(a <- a)
  //   memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return nullptr;

  // Second, the length of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
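  // For example, in:
  //   memcpy(a <- b, 64)
  //   memcpy(c <- a, 16)
  // the second copy reads only bytes the first one wrote, so it can become
  // memcpy(c <- b, 16). If the first copy were the smaller one, the rewrite
  // would read bytes of b that were never copied into a.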
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return nullptr;

  AliasAnalysis &AA = LookupAliasAnalysis();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers. For example, in:
  //   memcpy(a <- b)
  //   *b = 42;
  //   memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
      MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                   M->getIterator(), M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return nullptr;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap. We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.
  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
                    << *MDep << '\n' << *M << '\n');

  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  IRBuilder<> Builder(M);
  Instruction *MC;
  if (UseMemMove)
    MC = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(),
                               MDep->getRawSource(), MDep->getSourceAlign(),
                               M->getLength(), M->isVolatile());
  else
    MC = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(),
                              MDep->getRawSource(), MDep->getSourceAlign(),
                              M->getLength(), M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return MC;
}

/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
Instruction *MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                                          MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (MemSet->getDest() != MemCpy->getDest())
    return nullptr;

  // Check that there are no other dependencies on the memset destination.
  MemDepResult DstDepInfo =
      MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
                                   MemCpy->getIterator(), MemCpy->getParent());
  if (DstDepInfo.getInst() != MemSet)
    return nullptr;

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned and SrcSize is constant, use the known alignment of
  // Dest + SrcSize, i.e. where the trailing memset starts.
  const unsigned DestAlign =
      std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
  Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
  Value *MemsetLen = Builder.CreateSelect(
      Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
  auto *MS = Builder.CreateMemSet(
      Builder.CreateGEP(Dest->getType()->getPointerElementType(), Dest,
                        SrcSize),
      MemSet->getOperand(1), MemsetLen, MaybeAlign(Align));

  MD->removeInstruction(MemSet);
  MemSet->eraseFromParent();
  return MS;
}

/// Determine whether the instruction has undefined content for the given Size,
/// either because it was freshly alloca'd or because its lifetime has just
/// started.
static bool hasUndefContents(Instruction *I, ConstantInt *Size) {
  if (isa<AllocaInst>(I))
    return true;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
      if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
        if (LTSize->getZExtValue() >= Size->getZExtValue())
          return true;

  return false;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// when dst2_size <= dst1_size.
///
/// The \p MemCpy must have a Constant length.
Instruction *MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                                       MemSetInst *MemSet) {
  AliasAnalysis &AA = LookupAliasAnalysis();

  // Make sure we are dealing with memcpy(..., memset(...), ...); that is, we
  // are memsetting and memcpying from the same address. Otherwise it is hard
  // to reason about.
  if (!AA.isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
    return nullptr;

  // A known memset size is required.
  ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
  if (!MemSetSize)
    return nullptr;

  // Make sure the memcpy doesn't read any more than what the memset wrote.
  // Don't worry about sizes larger than i64.
  ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
  if (CopySize->getZExtValue() > MemSetSize->getZExtValue()) {
    // If the memcpy is larger than the memset, but the memory was undef prior
    // to the memset, we can just ignore the tail.
    // Technically we're only interested in the bytes from MemSetSize..CopySize
    // here, but as we can't easily represent this location, we use the full
    // 0..CopySize range.
    MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
    MemDepResult DepInfo = MD->getPointerDependencyFrom(
        MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent());
    if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize))
      CopySize = MemSetSize;
    else
      return nullptr;
  }

  IRBuilder<> Builder(MemCpy);
  return Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                              CopySize, MaybeAlign(MemCpy->getDestAlignment()));
}

/// Perform simplification of memcpy's. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
/// If changes are made, return true and set BBI to the next instruction to
/// visit.
bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
  // We can only optimize non-volatile memcpy's.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return true;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource())) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
                                           M->getModule()->getDataLayout())) {
        IRBuilder<> Builder(M);
        auto *MS =
            Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                                 MaybeAlign(M->getDestAlignment()), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        BBI = MS->getIterator();
        return true;
      }
    }
  }

  MemDepResult DepInfo = MD->getDependency(M);

  // Try to turn a partially redundant memset + memcpy into
  // memcpy + smaller memset. We don't need the memcpy size for this.
  if (DepInfo.isClobber()) {
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst())) {
      if (auto *MS = processMemSetMemCpyDependence(M, MDep)) {
        BBI = MS->getIterator();
        return true;
      }
    }
  }

  // The optimizations after this point require the memcpy size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (!CopySize) return false;

  // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started its
  //      lifetime copies undefined data, and we can therefore eliminate the
  //      memcpy in favor of the data that was already at the destination.
  //   d) memcpy from a just-memset'd source can be turned into memset.
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      // FIXME: Can we pass in either of dest/src alignment here instead
      // of conservatively taking the minimum?
      Align Alignment = std::min(M->getDestAlign().valueOrOne(),
                                 M->getSourceAlign().valueOrOne());
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), Alignment, C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
      SrcLoc, true, M->getIterator(), M->getParent());

  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst())) {
      if (auto *MC = processMemCpyMemCpyDependence(M, MDep)) {
        BBI = MC->getIterator();
        return true;
      }
      return false;
    }
  } else if (SrcDepInfo.isDef()) {
    if (hasUndefContents(SrcDepInfo.getInst(), CopySize)) {
      MD->removeInstruction(M);
      M->eraseFromParent();
      ++NumMemCpyInstr;
      return true;
    }
  }

  if (SrcDepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
      if (auto *MS = performMemCpyToMemSetOptzn(M, MDep)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        BBI = MS->getIterator();
        return true;
      }

  return false;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
/// If changes are made, return true and set BBI to the next instruction to
/// visit.
bool MemCpyOptPass::processMemMove(MemMoveInst *M, BasicBlock::iterator &BBI) {
  AliasAnalysis &AA = LookupAliasAnalysis();

  if (!TLI->has(LibFunc_memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(M)))
    return false;

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
                    << "\n");

  // If not, then we know we can transform this.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  BBI = M->getIterator();
  return true;
}

/// This is called on every byval argument in call sites.
bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
  const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CB.getArgOperand(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemDepResult DepInfo = MD->getPointerDependencyFrom(
      MemoryLocation(ByValArg, LocationSize::precise(ByValSize)), true,
      CB.getIterator(), CB.getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
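  // For example (pseudo IR, names made up):
  //   memcpy(tmp <- src, size)
  //   call @foo(byval tmp)
  // can often pass src to the byval argument directly, leaving the temporary
  // (and eventually the memcpy) dead.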
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval. If the call doesn't specify the alignment,
  // then it is some target-specific value that we can't know.
  MaybeAlign ByValAlign = CB.getParamAlign(ArgNo);
  if (!ByValAlign) return false;

  // If it is greater than the memcpy, then we check to see if we can force the
  // source of the memcpy to the alignment we need. If we fail, we bail out.
  AssumptionCache &AC = LookupAssumptionCache();
  DominatorTree &DT = LookupDomTree();
  MaybeAlign MemDepAlign = MDep->getSourceAlign();
  if ((!MemDepAlign || *MemDepAlign < *ByValAlign) &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, &AC,
                                 &DT) < *ByValAlign)
    return false;

  // The address space of the memcpy source must match the byval argument.
  if (MDep->getSource()->getType()->getPointerAddressSpace() !=
      ByValArg->getType()->getPointerAddressSpace())
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //   memcpy(a <- b)
  //   *b = 42;
  //   foo(*a)
  // It would be invalid to transform the second memcpy into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep = MD->getPointerDependencyFrom(
      MemoryLocation::getForSource(MDep), false,
      CB.getIterator(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType()) {
    BitCastInst *TmpBitCast = new BitCastInst(MDep->getSource(),
                                              ByValArg->getType(),
                                              "tmpcast", &CB);
    // Set the tmpcast's DebugLoc to MDep's.
    TmpBitCast->setDebugLoc(MDep->getDebugLoc());
    TmpCast = TmpBitCast;
  }

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
                    << "  " << *MDep << "\n"
                    << "  " << CB << "\n");

  // Otherwise we're good! Update the byval argument.
  CB.setArgOperand(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// Executes one iteration of MemCpyOptPass.
bool MemCpyOptPass::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  DominatorTree &DT = LookupDomTree();

  // Walk all instructions in the function.
  for (BasicBlock &BB : F) {
    // Skip unreachable blocks. For example, processStore assumes that an
    // instruction in a BB can't be dominated by a later instruction in the
    // same BB (which is a scenario that can happen for an unreachable BB that
    // has itself as a predecessor).
    if (!DT.isReachableFromEntry(&BB))
      continue;

    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        MadeChange |= processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        MadeChange |= processMemCpy(M, BI);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        MadeChange |= processMemMove(M, BI);
      else if (auto *CB = dyn_cast<CallBase>(I)) {
        for (unsigned i = 0, e = CB->arg_size(); i != e; ++i)
          if (CB->isByValArgument(i))
            MadeChange |= processByValArgument(*CB, i);
      }
    }
  }

  return MadeChange;
}

PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);

  auto LookupAliasAnalysis = [&]() -> AliasAnalysis & {
    return AM.getResult<AAManager>(F);
  };
  auto LookupAssumptionCache = [&]() -> AssumptionCache & {
    return AM.getResult<AssumptionAnalysis>(F);
  };
  auto LookupDomTree = [&]() -> DominatorTree & {
    return AM.getResult<DominatorTreeAnalysis>(F);
  };

  bool MadeChange = runImpl(F, &MD, &TLI, LookupAliasAnalysis,
                            LookupAssumptionCache, LookupDomTree);
  if (!MadeChange)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}

bool MemCpyOptPass::runImpl(
    Function &F, MemoryDependenceResults *MD_, TargetLibraryInfo *TLI_,
    std::function<AliasAnalysis &()> LookupAliasAnalysis_,
    std::function<AssumptionCache &()> LookupAssumptionCache_,
    std::function<DominatorTree &()> LookupDomTree_) {
  bool MadeChange = false;
  MD = MD_;
  TLI = TLI_;
  LookupAliasAnalysis = std::move(LookupAliasAnalysis_);
  LookupAssumptionCache = std::move(LookupAssumptionCache_);
  LookupDomTree = std::move(LookupDomTree_);

  // If we don't have at least memset and memcpy, there is little point in doing
  // anything here. These are required by a freestanding implementation, so if
  // even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
    return false;

  while (true) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = nullptr;
  return MadeChange;
}

/// This is the main transformation entry point for a function.
bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);

  auto LookupAliasAnalysis = [this]() -> AliasAnalysis & {
    return getAnalysis<AAResultsWrapperPass>().getAAResults();
  };
  auto LookupAssumptionCache = [this, &F]() -> AssumptionCache & {
    return getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  };
  auto LookupDomTree = [this]() -> DominatorTree & {
    return getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  };

  return Impl.runImpl(F, MD, TLI, LookupAliasAnalysis, LookupAssumptionCache,
                      LookupDomTree);
}